|
16 | 16 |
|
from kafka.common import (
    ProduceRequest, ProduceResponse, TopicAndPartition, RetryOptions,
    kafka_errors, UnsupportedCodecError, FailedPayloadsError,
    RequestTimedOutError, AsyncProducerQueueFull, UnknownError
)
from kafka.common import (
    RETRY_ERROR_TYPES, RETRY_BACKOFF_ERROR_TYPES, RETRY_REFRESH_ERROR_TYPES)
@@ -89,41 +89,46 @@ def _send_upstream(queue, client, codec, batch_time, batch_size, |
89 | 89 | if not reqs: |
90 | 90 | continue |
91 | 91 |
|
92 | | - reqs_to_retry, error_type = [], None |
| 92 | + reqs_to_retry, error_cls = [], None |
| 93 | + do_backoff, do_refresh = False, False |
| 94 | + |
| 95 | + def _handle_error(error_cls, reqs, all_retries): |
| 96 | + if ((error_cls == RequestTimedOutError and |
| 97 | + retry_options.retry_on_timeouts) or |
| 98 | + error_cls in RETRY_ERROR_TYPES): |
| 99 | + all_retries += reqs |
| 100 | + if error_cls in RETRY_BACKOFF_ERROR_TYPES: |
| 101 | + do_backoff += reqs |
| 102 | + if error_cls in RETRY_REFRESH_ERROR_TYPES: |
| 103 | + do_refresh = True |
93 | 104 |
|
94 | 105 | try: |
95 | 106 | reply = client.send_produce_request(reqs.keys(), |
96 | 107 | acks=req_acks, |
97 | 108 | timeout=ack_timeout, |
98 | 109 | fail_on_error=False) |
99 | | - reqs_to_retry = [req for broker_responses in reply |
100 | | - for response in broker_responses |
101 | | - for req in response.failed_payloads |
102 | | - if isinstance(response, FailedPayloadsError)] |
103 | | - if reqs_to_retry: |
104 | | - error_type = FailedPayloadsError |
105 | | - |
106 | | - except RequestTimedOutError: |
107 | | - error_type = RequestTimedOutError |
108 | | - if retry_options.retry_on_timeouts: |
109 | | - reqs_to_retry = reqs.keys() |
| 110 | + for i, response in enumerate(reply): |
| 111 | + if isinstance(response, FailedPayloadsError): |
| 112 | + _handle_error(FailedPayloadsError, response.failed_payloads, reqs_to_retry) |
| 113 | + elif isinstance(response, ProduceResponse) and response.error: |
| 114 | + error_cls = kafka_errors.get(response.error, UnknownError) |
| 115 | + _handle_error(error_cls, [reqs.keys()[i]], reqs_to_retry) |
110 | 116 |
|
111 | 117 | except Exception as ex: |
112 | | - error_type = type(ex) |
113 | | - if type(ex) in RETRY_ERROR_TYPES: |
114 | | - reqs_to_retry = reqs.keys() |
| 118 | + error_cls = kafka_errors.get(type(ex), UnknownError) |
| 119 | + _handle_error(error_cls, reqs.keys(), reqs_to_retry) |
115 | 120 |
|
116 | 121 | if not reqs_to_retry: |
117 | 122 | reqs = {} |
118 | 123 | continue |
119 | 124 |
|
120 | 125 | # doing backoff before next retry |
121 | | - if error_type in RETRY_BACKOFF_ERROR_TYPES and retry_options.backoff_ms: |
| 126 | + if do_backoff and retry_options.backoff_ms: |
122 | 127 | log.info("Doing backoff for %s(ms)." % retry_options.backoff_ms) |
123 | 128 | time.sleep(float(retry_options.backoff_ms) / 1000) |
124 | 129 |
|
125 | 130 | # refresh topic metadata before next retry |
126 | | - if error_type in RETRY_REFRESH_ERROR_TYPES: |
| 131 | + if do_refresh: |
127 | 132 | client.load_metadata_for_topics() |
128 | 133 |
|
129 | 134 | reqs = dict((key, count + 1) for (key, count) in reqs.items() |
|
0 commit comments