Skip to content

Commit 173b6ca

Browse files
committed
Merge PR #476 into 15.0
Signed-off-by: lmignon
2 parents 215acb1 + 2bf26cf commit 173b6ca

26 files changed

Lines changed: 3147 additions & 169 deletions

queue_job/README.rst

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -23,7 +23,7 @@ Job Queue
2323
:target: https://runbot.odoo-community.org/runbot/230/15.0
2424
:alt: Try me on Runbot
2525

26-
|badge1| |badge2| |badge3| |badge4| |badge5|
26+
|badge1| |badge2| |badge3| |badge4| |badge5|
2727

2828
This addon adds an integrated Job Queue to Odoo.
2929

@@ -385,7 +385,7 @@ promote its widespread use.
385385

386386
Current `maintainer <https://odoo-community.org/page/maintainer-role>`__:
387387

388-
|maintainer-guewen|
388+
|maintainer-guewen|
389389

390390
This module is part of the `OCA/queue <https://github.com/OCA/queue/tree/15.0/queue_job>`_ project on GitHub.
391391

queue_job/__manifest__.py

Lines changed: 8 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -1,6 +1,5 @@
11
# License LGPL-3.0 or later (http://www.gnu.org/licenses/lgpl.html)
22

3-
43
{
54
"name": "Job Queue",
65
"version": "15.0.1.1.1",
@@ -23,6 +22,14 @@
2322
"data/queue_data.xml",
2423
"data/queue_job_function_data.xml",
2524
],
25+
"assets": {
26+
"web.assets_backend": [
27+
"/queue_job/static/lib/vis/vis-network.min.css",
28+
"/queue_job/static/src/scss/queue_job_fields.scss",
29+
"/queue_job/static/lib/vis/vis-network.min.js",
30+
"/queue_job/static/src/js/queue_job_fields.js",
31+
],
32+
},
2633
"installable": True,
2734
"development_status": "Mature",
2835
"maintainers": ["guewen"],

queue_job/controllers/main.py

Lines changed: 143 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -3,22 +3,27 @@
33
# License LGPL-3.0 or later (http://www.gnu.org/licenses/lgpl.html)
44

55
import logging
6+
import random
7+
import time
68
import traceback
79
from io import StringIO
810

9-
from psycopg2 import OperationalError
10-
from werkzeug.exceptions import Forbidden
11+
from psycopg2 import OperationalError, errorcodes
12+
from werkzeug.exceptions import BadRequest, Forbidden
1113

1214
from odoo import SUPERUSER_ID, _, api, http, registry, tools
1315
from odoo.service.model import PG_CONCURRENCY_ERRORS_TO_RETRY
1416

17+
from ..delay import chain, group
1518
from ..exception import FailedJobError, NothingToDoJob, RetryableJobError
1619
from ..job import ENQUEUED, Job
1720

1821
_logger = logging.getLogger(__name__)
1922

2023
PG_RETRY = 5 # seconds
2124

25+
DEPENDS_MAX_TRIES_ON_CONCURRENCY_FAILURE = 5
26+
2227

2328
class RunJobController(http.Controller):
2429
def _try_perform_job(self, env, job):
@@ -35,6 +40,35 @@ def _try_perform_job(self, env, job):
3540
env.cr.commit()
3641
_logger.debug("%s done", job)
3742

43+
def _enqueue_dependent_jobs(self, env, job):
44+
tries = 0
45+
while True:
46+
try:
47+
job.enqueue_waiting()
48+
except OperationalError as err:
49+
# Automatically retry the typical transaction serialization
50+
# errors
51+
if err.pgcode not in PG_CONCURRENCY_ERRORS_TO_RETRY:
52+
raise
53+
if tries >= DEPENDS_MAX_TRIES_ON_CONCURRENCY_FAILURE:
54+
_logger.info(
55+
"%s, maximum number of tries reached to update dependencies",
56+
errorcodes.lookup(err.pgcode),
57+
)
58+
raise
59+
wait_time = random.uniform(0.0, 2**tries)
60+
tries += 1
61+
_logger.info(
62+
"%s, retry %d/%d in %.04f sec...",
63+
errorcodes.lookup(err.pgcode),
64+
tries,
65+
DEPENDS_MAX_TRIES_ON_CONCURRENCY_FAILURE,
66+
wait_time,
67+
)
68+
time.sleep(wait_time)
69+
else:
70+
break
71+
3872
@http.route("/queue_job/runjob", type="http", auth="none", save_session=False)
3973
def runjob(self, db, job_uuid, **kw):
4074
http.request.session.db = db
@@ -111,6 +145,10 @@ def retry_postpone(job, message, seconds=None):
111145
buff.close()
112146
raise
113147

148+
_logger.debug("%s enqueue depends started", job)
149+
self._enqueue_dependent_jobs(env, job)
150+
_logger.debug("%s enqueue depends done", job)
151+
114152
return ""
115153

116154
def _get_failure_values(self, job, traceback_txt, orig_exception):
@@ -125,13 +163,35 @@ def _get_failure_values(self, job, traceback_txt, orig_exception):
125163
"exc_message": exc_message,
126164
}
127165

166+
# flake8: noqa: C901
128167
@http.route("/queue_job/create_test_job", type="http", auth="user")
129168
def create_test_job(
130-
self, priority=None, max_retries=None, channel=None, description="Test job"
169+
self,
170+
priority=None,
171+
max_retries=None,
172+
channel=None,
173+
description="Test job",
174+
size=1,
175+
failure_rate=0,
131176
):
132177
if not http.request.env.user.has_group("base.group_erp_manager"):
133178
raise Forbidden(_("Access Denied"))
134179

180+
if failure_rate is not None:
181+
try:
182+
failure_rate = float(failure_rate)
183+
except (ValueError, TypeError):
184+
failure_rate = 0
185+
186+
if not (0 <= failure_rate <= 1):
187+
raise BadRequest("failure_rate must be between 0 and 1")
188+
189+
if size is not None:
190+
try:
191+
size = int(size)
192+
except (ValueError, TypeError):
193+
size = 1
194+
135195
if priority is not None:
136196
try:
137197
priority = int(priority)
@@ -144,6 +204,35 @@ def create_test_job(
144204
except ValueError:
145205
max_retries = None
146206

207+
if size == 1:
208+
return self._create_single_test_job(
209+
priority=priority,
210+
max_retries=max_retries,
211+
channel=channel,
212+
description=description,
213+
failure_rate=failure_rate,
214+
)
215+
216+
if size > 1:
217+
return self._create_graph_test_jobs(
218+
size,
219+
priority=priority,
220+
max_retries=max_retries,
221+
channel=channel,
222+
description=description,
223+
failure_rate=failure_rate,
224+
)
225+
return ""
226+
227+
def _create_single_test_job(
228+
self,
229+
priority=None,
230+
max_retries=None,
231+
channel=None,
232+
description="Test job",
233+
size=1,
234+
failure_rate=0,
235+
):
147236
delayed = (
148237
http.request.env["queue.job"]
149238
.with_delay(
@@ -152,7 +241,56 @@ def create_test_job(
152241
channel=channel,
153242
description=description,
154243
)
155-
._test_job()
244+
._test_job(failure_rate=failure_rate)
156245
)
246+
return "job uuid: %s" % (delayed.db_record().uuid,)
247+
248+
# Upper bound on the number of jobs wrapped in a single chain/group.
TEST_GRAPH_MAX_PER_GROUP = 5

def _create_graph_test_jobs(
    self,
    size,
    priority=None,
    max_retries=None,
    channel=None,
    description="Test job",
    failure_rate=0,
):
    """Create ``size`` test jobs arranged in a random dependency graph.

    Jobs are produced in batches of 1..TEST_GRAPH_MAX_PER_GROUP, each
    batch wrapped in a randomly chosen ``chain`` or ``group``.  Every
    batch after the first is attached via ``on_done`` to a randomly
    picked earlier batch, producing a random DAG rooted at the first
    batch.  Returns a short text containing the graph uuid.
    """
    model = http.request.env["queue.job"]
    grouping_choices = (chain, group)

    root_delayable = None
    attach_points = []  # earlier chains/groups new batches may hook onto
    built = 0
    while built < size:
        # Never build more jobs than requested in this batch.
        batch_size = min(
            size - built, random.randint(1, self.TEST_GRAPH_MAX_PER_GROUP)
        )

        batch_jobs = []
        for _unused in range(batch_size):
            built += 1
            delayable_job = model.delayable(
                priority=priority,
                max_retries=max_retries,
                channel=channel,
                description="%s #%d" % (description, built),
            )._test_job(failure_rate=failure_rate)
            batch_jobs.append(delayable_job)

        wrapped = random.choice(grouping_choices)(*batch_jobs)
        if root_delayable:
            # Hook this batch onto a random earlier batch.
            random.choice(attach_points).on_done(wrapped)
        else:
            root_delayable = wrapped
        attach_points.append(wrapped)

    root_delayable.delay()

    return "graph uuid: %s" % (
        list(root_delayable._head())[0]._generated_job.graph_uuid,
    )

0 commit comments

Comments
 (0)