Set upper bound on metrics queue size.

Author: Dave Shawley
Date:   2021-03-30 07:22:22 -04:00
Parent: 5c947d6798
Commit: 0f0bae1199
GPG key ID: F41A8A99298F8EED
3 changed files with 40 additions and 18 deletions


@@ -35,12 +35,14 @@ class Connector:
        sends the metric payloads.
     """
+    logger: logging.Logger
+    processor: 'Processor'

     def __init__(self,
                  host: str,
                  port: int = 8125,
                  **kwargs: typing.Any) -> None:
         self.logger = logging.getLogger(__package__).getChild('Connector')
         self.processor = Processor(host=host, port=port, **kwargs)
         self._processor_task: typing.Optional[asyncio.Task[None]] = None
@@ -124,7 +126,10 @@ class Connector:
         """
         payload = f'{path}:{value}|{type_code}'
-        self.processor.enqueue(payload.encode('utf-8'))
+        try:
+            self.processor.enqueue(payload.encode('utf-8'))
+        except asyncio.QueueFull:
+            self.logger.warning('statsd queue is full, discarding metric')


 class StatsdProtocol(asyncio.BaseProtocol):
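
Note: the except asyncio.QueueFull clause above implies that Processor.enqueue ultimately hands the payload to asyncio.Queue.put_nowait(), which raises asyncio.QueueFull once maxsize items are buffered. A minimal standalone sketch of that standard-library behaviour (names and values are illustrative, not taken from this repository):

    import asyncio

    async def main() -> None:
        queue: asyncio.Queue = asyncio.Queue(maxsize=2)
        for payload in (b'a:1|c', b'b:1|c', b'c:1|c'):
            try:
                queue.put_nowait(payload)  # non-blocking enqueue
            except asyncio.QueueFull:
                # the third payload is discarded instead of blocking
                print('queue full, dropping', payload)
        print('buffered payloads:', queue.qsize())  # -> 2

    asyncio.run(main())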
@@ -245,6 +250,8 @@ class Processor:
     :param host: statsd server to send metrics to
     :param port: TCP port that the server is listening on
+    :param max_queue_size: only allow this many elements to be
+        stored in the queue before discarding metrics
     :param reconnect_sleep: number of seconds to sleep after socket
         error occurs when connecting
     :param wait_timeout: number of seconds to wait for a message to
@@ -307,8 +314,9 @@ class Processor:
                  *,
                  host: str,
                  port: int = 8125,
-                 reconnect_sleep: float = 1.0,
                  ip_protocol: int = socket.IPPROTO_TCP,
+                 max_queue_size: int = 1000,
+                 reconnect_sleep: float = 1.0,
                  wait_timeout: float = 0.1) -> None:
         super().__init__()
         if not host:
@@ -344,7 +352,7 @@ class Processor:
         self.logger = logging.getLogger(__package__).getChild('Processor')
         self.should_terminate = False
         self.protocol = None
-        self.queue = asyncio.Queue()
+        self.queue = asyncio.Queue(maxsize=max_queue_size)

     @property
     def connected(self) -> bool:
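
Because Connector.__init__ forwards **kwargs to Processor (see the first hunk above), the new max_queue_size option is reachable from the connector as well, as the test in the third file exercises. A hedged usage sketch; the module path and host are assumptions, and stop() is assumed to be a coroutine like start():

    import asyncio

    from sprockets_statsd import statsd  # assumed module path

    async def main() -> None:
        connector = statsd.Connector(host='statsd.example.com',  # assumed host
                                     port=8125,
                                     max_queue_size=1000)  # forwarded to Processor
        await connector.start()
        try:
            # once 1000 payloads are buffered, further metrics are logged
            # as a warning and discarded rather than queued
            connector.incr('requests')
        finally:
            await connector.stop()  # assumed coroutine, mirroring start()

    asyncio.run(main())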


@@ -111,25 +111,17 @@ class Application(web.Application):
         """
         if self.statsd_connector is None:
-            statsd_settings = self.settings['statsd']
-            kwargs = {
-                'host': statsd_settings['host'],
-                'port': statsd_settings['port'],
-            }
-            if 'reconnect_sleep' in statsd_settings:
-                kwargs['reconnect_sleep'] = statsd_settings['reconnect_sleep']
-            if 'wait_timeout' in statsd_settings:
-                kwargs['wait_timeout'] = statsd_settings['wait_timeout']
-            if statsd_settings['protocol'] == 'tcp':
+            kwargs = self.settings['statsd'].copy()
+            protocol = kwargs.pop('protocol', None)
+            if protocol == 'tcp':
                 kwargs['ip_protocol'] = socket.IPPROTO_TCP
-            elif statsd_settings['protocol'] == 'udp':
+            elif protocol == 'udp':
                 kwargs['ip_protocol'] = socket.IPPROTO_UDP
             else:
-                raise RuntimeError(
-                    f'statsd configuration error:'
-                    f' {statsd_settings["protocol"]} is not a valid'
-                    f' protocol')
+                raise RuntimeError(f'statsd configuration error: {protocol} '
+                                   f'is not a valid protocol')
+            kwargs.pop('prefix', None)  # TODO move prefixing into Connector
             self.statsd_connector = statsd.Connector(**kwargs)
             await self.statsd_connector.start()
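
With the copy()/pop() rewrite above, whatever remains in the statsd settings mapping is passed straight through to statsd.Connector, so new options such as max_queue_size need no special handling in this method. A hedged sketch of what the settings entry could look like; only host, port, protocol, prefix, reconnect_sleep, and wait_timeout appear in the diff, the rest is assumption:

    # hypothetical tornado.web.Application settings fragment
    settings = {
        'statsd': {
            'host': 'statsd.example.com',  # assumed value
            'port': 8125,
            'protocol': 'tcp',             # popped and mapped to socket.IPPROTO_TCP
            'prefix': 'my-service',        # popped for now (see the TODO in the diff)
            'reconnect_sleep': 1.0,        # leftover keys become Connector kwargs
            'wait_timeout': 0.1,
            'max_queue_size': 1000,        # assumed to pass through to Processor
        },
    }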


@@ -376,3 +376,25 @@ class ConnectorOptionTests(ProcessorTestCase):
             statsd.Connector(host=self.statsd_server.host, port=port)
         self.assertIn('port', str(context.exception))
         self.assertIn(repr(port), str(context.exception))
+
+    async def test_that_metrics_are_dropped_when_queue_overflows(self):
+        connector = statsd.Connector(host=self.statsd_server.host,
+                                     port=1,
+                                     max_queue_size=10)
+        await connector.start()
+        self.addCleanup(connector.stop)
+
+        # fill up the queue with incr's
+        for expected_size in range(1, connector.processor.queue.maxsize + 1):
+            connector.incr('counter')
+            self.assertEqual(connector.processor.queue.qsize(), expected_size)
+
+        # the following decr's should be ignored
+        for _ in range(10):
+            connector.decr('counter')
+        self.assertEqual(connector.processor.queue.qsize(), 10)
+
+        # make sure that only the incr's are in the queue
+        for _ in range(connector.processor.queue.qsize()):
+            metric = await connector.processor.queue.get()
+            self.assertEqual(metric, b'counter:1|c')
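
The final assertion leans on the payload format built in the first file (payload = f'{path}:{value}|{type_code}'): an incr of a counter serialises as b'counter:1|c', so finding only that byte string shows every decr was dropped. A tiny sketch of the formatting; the decrement payload shown is an assumption about how decr encodes its value:

    def format_metric(path: str, value: int, type_code: str) -> bytes:
        # mirrors the f-string used by Connector in the diff above
        return f'{path}:{value}|{type_code}'.encode('utf-8')

    assert format_metric('counter', 1, 'c') == b'counter:1|c'    # incr payload
    assert format_metric('counter', -1, 'c') == b'counter:-1|c'  # assumed decr payload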