Skip to content

Commit 3d5b74b

Browse files
committed
Merge PR #593 into 17.0
Signed-off-by: guewen
2 parents c065283 + 686691b commit 3d5b74b

93 files changed

Lines changed: 15873 additions & 0 deletions

File tree

Some content is hidden

Large Commits have some content hidden by default. Use the searchbox below for content that may be hidden.

queue_job/README.rst

Lines changed: 677 additions & 0 deletions
Large diffs are not rendered by default.

queue_job/__init__.py

Lines changed: 10 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,10 @@
1+
from . import controllers
2+
from . import fields
3+
from . import models
4+
from . import wizards
5+
from . import jobrunner
6+
from .post_init_hook import post_init_hook
7+
from .post_load import post_load
8+
9+
# shortcuts
10+
from .job import identity_exact

queue_job/__manifest__.py

Lines changed: 35 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,35 @@
1+
# License LGPL-3.0 or later (http://www.gnu.org/licenses/lgpl.html)

# Odoo addon manifest for the Job Queue module.
{
    "name": "Job Queue",
    "version": "17.0.1.0.0",
    "author": "Camptocamp,ACSONE SA/NV,Odoo Community Association (OCA)",
    "website": "https://github.com/OCA/queue",
    "license": "LGPL-3",
    "category": "Generic Modules",
    # base_sparse_field is used for serialized fields; web for the backend assets.
    "depends": ["mail", "base_sparse_field", "web"],
    "external_dependencies": {"python": ["requests"]},
    # Data files are loaded in order: security first, then views, wizards,
    # menus, and finally noupdate data records.
    "data": [
        "security/security.xml",
        "security/ir.model.access.csv",
        "views/queue_job_views.xml",
        "views/queue_job_channel_views.xml",
        "views/queue_job_function_views.xml",
        "wizards/queue_jobs_to_done_views.xml",
        "wizards/queue_jobs_to_cancelled_views.xml",
        "wizards/queue_requeue_job_views.xml",
        "views/queue_job_menus.xml",
        "data/queue_data.xml",
        "data/queue_job_function_data.xml",
    ],
    "assets": {
        "web.assets_backend": [
            "/queue_job/static/src/views/**/*",
        ],
    },
    "installable": True,
    "development_status": "Mature",
    "maintainers": ["guewen"],
    # Hooks resolved by name from the package root (see queue_job/__init__.py,
    # which re-exports post_init_hook and post_load).
    "post_init_hook": "post_init_hook",
    "post_load": "post_load",
}

queue_job/controllers/__init__.py

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1 @@
1+
from . import main

queue_job/controllers/main.py

Lines changed: 297 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,297 @@
1+
# Copyright (c) 2015-2016 ACSONE SA/NV (<http://acsone.eu>)
2+
# Copyright 2013-2016 Camptocamp SA
3+
# License LGPL-3.0 or later (http://www.gnu.org/licenses/lgpl.html)
4+
5+
import logging
6+
import random
7+
import time
8+
import traceback
9+
from io import StringIO
10+
11+
from psycopg2 import OperationalError, errorcodes
12+
from werkzeug.exceptions import BadRequest, Forbidden
13+
14+
from odoo import SUPERUSER_ID, _, api, http, registry, tools
15+
from odoo.service.model import PG_CONCURRENCY_ERRORS_TO_RETRY
16+
17+
from ..delay import chain, group
18+
from ..exception import FailedJobError, NothingToDoJob, RetryableJobError
19+
from ..job import ENQUEUED, Job
20+
21+
_logger = logging.getLogger(__name__)

# Delay (in seconds) before a job hit by a PostgreSQL concurrency error is
# retried (see runjob below, which raises RetryableJobError with this value).
PG_RETRY = 5  # seconds

# Maximum number of attempts to enqueue dependent jobs when hitting
# transaction serialization errors (see _enqueue_dependent_jobs).
DEPENDS_MAX_TRIES_ON_CONCURRENCY_FAILURE = 5
26+
27+
28+
class RunJobController(http.Controller):
    """HTTP endpoints called by the queue job runner.

    ``/queue_job/runjob`` executes one enqueued job; ``/queue_job/create_test_job``
    lets an administrator create single or graph-shaped test jobs.
    """

    def _try_perform_job(self, env, job):
        """Try to perform the job."""
        # Persist the "started" state before performing, so other workers and
        # the UI see the job as in-progress even if this request later fails.
        job.set_started()
        job.store()
        env.cr.commit()
        _logger.debug("%s started", job)

        job.perform()
        job.set_done()
        job.store()
        # Flush pending ORM writes before committing the final state.
        env.flush_all()
        env.cr.commit()
        _logger.debug("%s done", job)

    def _enqueue_dependent_jobs(self, env, job):
        """Enqueue jobs that were waiting on ``job``.

        Retries on transaction serialization failures with randomized
        exponential backoff, up to
        DEPENDS_MAX_TRIES_ON_CONCURRENCY_FAILURE attempts.
        """
        tries = 0
        while True:
            try:
                job.enqueue_waiting()
            except OperationalError as err:
                # Automatically retry the typical transaction serialization
                # errors
                if err.pgcode not in PG_CONCURRENCY_ERRORS_TO_RETRY:
                    raise
                if tries >= DEPENDS_MAX_TRIES_ON_CONCURRENCY_FAILURE:
                    _logger.info(
                        "%s, maximum number of tries reached to update dependencies",
                        errorcodes.lookup(err.pgcode),
                    )
                    raise
                # Randomized exponential backoff: 0..2**tries seconds.
                wait_time = random.uniform(0.0, 2**tries)
                tries += 1
                _logger.info(
                    "%s, retry %d/%d in %.04f sec...",
                    errorcodes.lookup(err.pgcode),
                    tries,
                    DEPENDS_MAX_TRIES_ON_CONCURRENCY_FAILURE,
                    wait_time,
                )
                time.sleep(wait_time)
            else:
                break

    @http.route("/queue_job/runjob", type="http", auth="none", save_session=False)
    def runjob(self, db, job_uuid, **kw):
        """Run the job ``job_uuid`` on database ``db``.

        Returns an empty response body in every outcome; failures are
        recorded on the job record (and re-raised for server logs).
        """
        http.request.session.db = db
        env = http.request.env(user=SUPERUSER_ID)

        def retry_postpone(job, message, seconds=None):
            # Use a fresh cursor: the main transaction may be aborted, and the
            # postponed state must be committed independently.
            job.env.clear()
            with registry(job.env.cr.dbname).cursor() as new_cr:
                job.env = api.Environment(new_cr, SUPERUSER_ID, {})
                job.postpone(result=message, seconds=seconds)
                job.set_pending(reset_retry=False)
                job.store()

        # ensure the job to run is in the correct state and lock the record
        env.cr.execute(
            "SELECT state FROM queue_job WHERE uuid=%s AND state=%s FOR UPDATE",
            (job_uuid, ENQUEUED),
        )
        if not env.cr.fetchone():
            _logger.warning(
                "was requested to run job %s, but it does not exist, "
                "or is not in state %s",
                job_uuid,
                ENQUEUED,
            )
            return ""

        job = Job.load(env, job_uuid)
        assert job and job.state == ENQUEUED

        try:
            try:
                self._try_perform_job(env, job)
            except OperationalError as err:
                # Automatically retry the typical transaction serialization
                # errors
                if err.pgcode not in PG_CONCURRENCY_ERRORS_TO_RETRY:
                    raise

                _logger.debug("%s OperationalError, postponed", job)
                # Converted to a retryable error so the outer handler requeues
                # the job after PG_RETRY seconds instead of failing it.
                raise RetryableJobError(
                    tools.ustr(err.pgerror, errors="replace"), seconds=PG_RETRY
                ) from err

        except NothingToDoJob as err:
            # The job decided it has nothing to do: mark done with a message.
            if str(err):
                msg = str(err)
            else:
                msg = _("Job interrupted and set to Done: nothing to do.")
            job.set_done(msg)
            job.store()
            env.cr.commit()

        except RetryableJobError as err:
            # delay the job later, requeue
            retry_postpone(job, str(err), seconds=err.seconds)
            _logger.debug("%s postponed", job)
            # Do not trigger the error up because we don't want an exception
            # traceback in the logs we should have the traceback when all
            # retries are exhausted
            env.cr.rollback()
            return ""

        # NOTE(review): FailedJobError is redundant in this tuple since it is
        # already caught by Exception — kept as-is for documentation value.
        except (FailedJobError, Exception) as orig_exception:
            buff = StringIO()
            traceback.print_exc(file=buff)
            traceback_txt = buff.getvalue()
            _logger.error(traceback_txt)
            # Record the failure on a fresh cursor: the job's transaction is
            # likely aborted at this point.
            job.env.clear()
            with registry(job.env.cr.dbname).cursor() as new_cr:
                job.env = job.env(cr=new_cr)
                vals = self._get_failure_values(job, traceback_txt, orig_exception)
                job.set_failed(**vals)
                job.store()
            buff.close()
            raise

        # Only reached on success: wake up jobs depending on this one.
        _logger.debug("%s enqueue depends started", job)
        self._enqueue_dependent_jobs(env, job)
        _logger.debug("%s enqueue depends done", job)

        return ""

    def _get_failure_values(self, job, traceback_txt, orig_exception):
        """Collect relevant data from exception."""
        exception_name = orig_exception.__class__.__name__
        if hasattr(orig_exception, "__module__"):
            exception_name = orig_exception.__module__ + "." + exception_name
        # Odoo user-facing exceptions may expose a "name" attribute; fall back
        # to str() for everything else.
        exc_message = getattr(orig_exception, "name", str(orig_exception))
        return {
            "exc_info": traceback_txt,
            "exc_name": exception_name,
            "exc_message": exc_message,
        }

    # flake8: noqa: C901
    @http.route("/queue_job/create_test_job", type="http", auth="user")
    def create_test_job(
        self,
        priority=None,
        max_retries=None,
        channel=None,
        description="Test job",
        size=1,
        failure_rate=0,
    ):
        """Create one test job (size == 1) or a random job graph (size > 1).

        All parameters arrive as strings from HTTP query args and are coerced
        below; invalid values fall back to defaults. Restricted to members of
        base.group_erp_manager.
        """
        if not http.request.env.user.has_group("base.group_erp_manager"):
            raise Forbidden(_("Access Denied"))

        if failure_rate is not None:
            try:
                failure_rate = float(failure_rate)
            except (ValueError, TypeError):
                failure_rate = 0

        # NOTE(review): if failure_rate is explicitly None, the comparison
        # below raises TypeError rather than BadRequest — confirm callers
        # never pass None.
        if not (0 <= failure_rate <= 1):
            raise BadRequest("failure_rate must be between 0 and 1")

        if size is not None:
            try:
                size = int(size)
            except (ValueError, TypeError):
                size = 1

        if priority is not None:
            try:
                priority = int(priority)
            except ValueError:
                priority = None

        if max_retries is not None:
            try:
                max_retries = int(max_retries)
            except ValueError:
                max_retries = None

        if size == 1:
            return self._create_single_test_job(
                priority=priority,
                max_retries=max_retries,
                channel=channel,
                description=description,
                failure_rate=failure_rate,
            )

        if size > 1:
            return self._create_graph_test_jobs(
                size,
                priority=priority,
                max_retries=max_retries,
                channel=channel,
                description=description,
                failure_rate=failure_rate,
            )
        # size <= 0: nothing to create.
        return ""

    def _create_single_test_job(
        self,
        priority=None,
        max_retries=None,
        channel=None,
        description="Test job",
        size=1,  # NOTE(review): unused here; kept for signature compatibility
        failure_rate=0,
    ):
        """Delay a single ``_test_job`` and return its uuid as plain text."""
        delayed = (
            http.request.env["queue.job"]
            .with_delay(
                priority=priority,
                max_retries=max_retries,
                channel=channel,
                description=description,
            )
            ._test_job(failure_rate=failure_rate)
        )
        return f"job uuid: {delayed.db_record().uuid}"

    # Upper bound on the number of jobs per chain/group when building a
    # random test graph.
    TEST_GRAPH_MAX_PER_GROUP = 5

    def _create_graph_test_jobs(
        self,
        size,
        priority=None,
        max_retries=None,
        channel=None,
        description="Test job",
        failure_rate=0,
    ):
        """Delay ``size`` test jobs arranged in a random dependency graph.

        Jobs are batched into random chains/groups; each new batch is attached
        after a randomly chosen existing tail, then the whole graph is delayed
        at once. Returns the graph uuid as plain text.
        """
        model = http.request.env["queue.job"]
        current_count = 0

        possible_grouping_methods = (chain, group)

        tails = []  # we can connect new graph chains/groups to tails
        root_delayable = None
        while current_count < size:
            # Never exceed the requested total number of jobs.
            jobs_count = min(
                size - current_count, random.randint(1, self.TEST_GRAPH_MAX_PER_GROUP)
            )

            jobs = []
            for __ in range(jobs_count):
                current_count += 1
                jobs.append(
                    model.delayable(
                        priority=priority,
                        max_retries=max_retries,
                        channel=channel,
                        description="%s #%d" % (description, current_count),
                    )._test_job(failure_rate=failure_rate)
                )

            grouping = random.choice(possible_grouping_methods)
            delayable = grouping(*jobs)
            if not root_delayable:
                root_delayable = delayable
            else:
                tail_delayable = random.choice(tails)
                tail_delayable.on_done(delayable)
            tails.append(delayable)

        root_delayable.delay()

        return "graph uuid: {}".format(
            list(root_delayable._head())[0]._generated_job.graph_uuid
        )

queue_job/data/queue_data.xml

Lines changed: 37 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,37 @@
1+
<?xml version="1.0" encoding="utf-8" ?>
<odoo>
    <data noupdate="1">
        <!-- Runs queue.job.requeue_stuck_jobs() every 5 minutes. -->
        <record id="ir_cron_queue_job_garbage_collector" model="ir.cron">
            <field name="name">Jobs Garbage Collector</field>
            <field name="interval_number">5</field>
            <field name="interval_type">minutes</field>
            <!-- -1: repeat indefinitely. NOTE(review): numbercall/doall were
                 dropped from ir.cron in newer Odoo versions; confirm they
                 still exist on this branch. -->
            <field name="numbercall">-1</field>
            <field ref="model_queue_job" name="model_id" />
            <field name="state">code</field>
            <field name="code">model.requeue_stuck_jobs()</field>
        </record>
        <!-- Queue-job-related subtypes for messaging / Chatter -->
        <record id="mt_job_failed" model="mail.message.subtype">
            <field name="name">Job failed</field>
            <field name="res_model">queue.job</field>
            <field name="default" eval="True" />
        </record>
        <!-- Runs queue.job.autovacuum() daily as the root user. -->
        <record id="ir_cron_autovacuum_queue_jobs" model="ir.cron">
            <field name="name">AutoVacuum Job Queue</field>
            <field ref="model_queue_job" name="model_id" />
            <field eval="True" name="active" />
            <field name="user_id" ref="base.user_root" />
            <field name="interval_number">1</field>
            <field name="interval_type">days</field>
            <field name="numbercall">-1</field>
            <field eval="False" name="doall" />
            <field name="state">code</field>
            <field name="code">model.autovacuum()</field>
        </record>
    </data>
    <!-- Updatable data: the root job channel, parent of all channels. -->
    <data noupdate="0">
        <record model="queue.job.channel" id="channel_root">
            <field name="name">root</field>
        </record>
    </data>
</odoo>
queue_job/data/queue_job_function_data.xml

Lines changed: 6 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,6 @@
1+
<odoo noupdate="1">
    <!-- Job function configuration for queue.job._test_job, the demo/test
         method delayed by the /queue_job/create_test_job controller. -->
    <record id="job_function_queue_job__test_job" model="queue.job.function">
        <field name="model_id" ref="queue_job.model_queue_job" />
        <field name="method">_test_job</field>
    </record>
</odoo>

0 commit comments

Comments (0)