Commit b45771f

use list comprehension
1 parent 954e288 commit b45771f

File tree

2 files changed, +5 -12 lines changed

src/amplitude/processor.py

Lines changed: 3 additions & 2 deletions
@@ -22,8 +22,9 @@ def process_response(self, res, events):
                 self.callback(events, res.code, res.error)
                 self.log(events, res.code, res.error)
             else:
-                # Only reduce if batch was at or below current limit
-                # This prevents multiple reductions from same flush operation
+                # Reduce only if batch didn't exceed current limit (was expected to work).
+                # Batches larger than limit are from old limits already deemed too large,
+                # so failing again doesn't provide new information - skip to avoid over-reduction.
                 if len(events) <= self.configuration.flush_queue_size:
                     self.configuration._increase_flush_divider()
                 self.push_to_storage(events, 0, res)
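
For context on the new comment: each call to _increase_flush_divider() shrinks the effective flush_queue_size, so the guard reduces the limit only when a batch that was within the current limit still failed, instead of compounding several reductions from one oversized flush. Below is a minimal sketch of that mechanism, assuming a divider-backed property; the _SketchConfig class, the _flush_size_divider field, and the 200-event default are illustrative assumptions, not code from the SDK.

class _SketchConfig:
    def __init__(self, flush_queue_size=200):
        self._flush_queue_size = flush_queue_size  # configured upper bound (assumed default)
        self._flush_size_divider = 1               # assumed counter; grows on payload-too-large

    @property
    def flush_queue_size(self):
        # Effective limit shrinks as the divider grows, but never drops below 1 event.
        return max(1, self._flush_queue_size // self._flush_size_divider)

    def _increase_flush_divider(self):
        self._flush_size_divider += 1


config = _SketchConfig()
events = ["event"] * 150                  # batch built under the current limit of 200
if len(events) <= config.flush_queue_size:
    config._increase_flush_divider()      # effective limit drops to 100 for the retry
print(config.flush_queue_size)            # 100

Under this sketch, a 300-event batch (built under an older, larger limit) would skip the reduction, which is the behavior the added comment describes.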

src/amplitude/worker.py

Lines changed: 2 additions & 10 deletions
@@ -46,13 +46,8 @@ def flush(self):
             return None
 
         batch_size = self.configuration.flush_queue_size
-        batches = []
-        for i in range(0, len(events), batch_size):
-            batches.append(events[i:i + batch_size])
-
-        batch_futures = []
-        for batch in batches:
-            batch_futures.append(self.threads_pool.submit(self.send, batch))
+        batches = [events[i:i + batch_size] for i in range(0, len(events), batch_size)]
+        batch_futures = [self.threads_pool.submit(self.send, batch) for batch in batches]
 
         if len(batch_futures) == 1:
             return batch_futures[0]
@@ -76,9 +71,6 @@ def wait_for_all():
             if errors:
                 raise errors[0]
 
-            self.configuration.logger.info(
-                f"Flush completed: {len(batch_futures)} batches sent"
-            )
             return None
 
         return self.threads_pool.submit(wait_for_all)
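
The refactor collapses the two loops into comprehensions without changing behavior: slice the pulled events into batch_size chunks, then submit each chunk to the thread pool. Here is a self-contained sketch of the same pattern using concurrent.futures; the executor, the stand-in send(), and the sample sizes are assumptions for illustration, not the worker's actual pool or send method.

from concurrent.futures import ThreadPoolExecutor

def send(batch):
    # Stand-in for the worker's send(); just reports how many events it received.
    return len(batch)

events = list(range(25))
batch_size = 10

with ThreadPoolExecutor(max_workers=4) as pool:
    # Same slicing and submission as the refactored flush(), one expression each.
    batches = [events[i:i + batch_size] for i in range(0, len(events), batch_size)]
    batch_futures = [pool.submit(send, batch) for batch in batches]
    print([f.result() for f in batch_futures])  # [10, 10, 5]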
