From ea2e9527f6477eafdddff49498c857f717cef1d6 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?St=C3=A9phane=20Bidoul?= Date: Tue, 18 Oct 2022 13:04:31 +0200 Subject: [PATCH 01/19] Update test with proper way to pass company to job --- test_queue_job/models/test_models.py | 2 +- test_queue_job/tests/test_job.py | 41 ++++++++++++++++++++++++++++ 2 files changed, 42 insertions(+), 1 deletion(-) diff --git a/test_queue_job/models/test_models.py b/test_queue_job/models/test_models.py index ff9622106a..4c0dd6b2d3 100644 --- a/test_queue_job/models/test_models.py +++ b/test_queue_job/models/test_models.py @@ -38,7 +38,7 @@ class ModelTestQueueJob(models.Model): # to test the context is serialized/deserialized properly @api.model def _job_prepare_context_before_enqueue_keys(self): - return ("tz", "lang") + return ("tz", "lang", "allowed_company_ids") def testing_method(self, *args, **kwargs): """Method used for tests diff --git a/test_queue_job/tests/test_job.py b/test_queue_job/tests/test_job.py index 35884cd2b3..c4ec5081b3 100644 --- a/test_queue_job/tests/test_job.py +++ b/test_queue_job/tests/test_job.py @@ -185,6 +185,47 @@ def test_postpone(self): self.assertEqual(job_a.result, "test") self.assertFalse(job_a.exc_info) + def test_company_simple(self): + company = self.env.ref("base.main_company") + eta = datetime.now() + timedelta(hours=5) + test_job = Job( + self.env["test.queue.job"].with_company(company).testing_method, + args=("o", "k"), + kwargs={"return_context": 1}, + priority=15, + eta=eta, + description="My description", + ) + test_job.worker_pid = 99999 # normally set on "set_start" + test_job.store() + job_read = Job.load(self.env, test_job.uuid) + self.assertEqual(test_job.func.__func__, job_read.func.__func__) + result_ctx = job_read.func(*tuple(test_job.args), **test_job.kwargs) + self.assertEqual(result_ctx.get("allowed_company_ids"), company.ids) + + def test_company_complex(self): + company1 = self.env.ref("base.main_company") + company2 = 
company1.create({"name": "Queue job company"}) + companies = company1 | company2 + self.env.user.write({"company_ids": [(6, False, companies.ids)]}) + # Ensure the main company is still the first + self.assertEqual(self.env.user.company_id, company1) + eta = datetime.now() + timedelta(hours=5) + test_job = Job( + self.env["test.queue.job"].with_company(company2).testing_method, + args=("o", "k"), + kwargs={"return_context": 1}, + priority=15, + eta=eta, + description="My description", + ) + test_job.worker_pid = 99999 # normally set on "set_start" + test_job.store() + job_read = Job.load(self.env, test_job.uuid) + self.assertEqual(test_job.func.__func__, job_read.func.__func__) + result_ctx = job_read.func(*tuple(test_job.args), **test_job.kwargs) + self.assertEqual(result_ctx.get("allowed_company_ids"), company2.ids) + def test_store(self): test_job = Job(self.method) test_job.store() From a428dfff2f834e79be874ea7ea349e37c5c50e2d Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Miquel=20Ra=C3=AFch?= Date: Thu, 16 Nov 2023 17:48:11 +0100 Subject: [PATCH 02/19] [IMP] queue_job: track error in chatter --- queue_job/models/queue_job.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/queue_job/models/queue_job.py b/queue_job/models/queue_job.py index 33dbf2346d..06f0a3a913 100644 --- a/queue_job/models/queue_job.py +++ b/queue_job/models/queue_job.py @@ -93,7 +93,7 @@ class QueueJob(models.Model): state = fields.Selection(STATES, readonly=True, required=True, index=True) priority = fields.Integer() exc_name = fields.Char(string="Exception", readonly=True) - exc_message = fields.Char(string="Exception Message", readonly=True) + exc_message = fields.Char(string="Exception Message", readonly=True, tracking=True) exc_info = fields.Text(string="Exception Info", readonly=True) result = fields.Text(readonly=True) From 02c52b6d4a6e2d43efe984be9d407c727e688443 Mon Sep 17 00:00:00 2001 From: "Laurent Mignon (ACSONE)" Date: Thu, 21 Dec 2023 13:41:47 +0100 Subject: [PATCH
03/19] [IMP] queue_job_cron: Avoid parallel run By default, odoo never runs the same cron job in parallel. This commit uses the identity key mechanism to enforce this mechanism when a cron job is run as a queue job. This behaviour can be controlled by a new setting on the cron definition but is activated by default to keep the original behaviour --- queue_job_cron/README.rst | 4 +- queue_job_cron/models/ir_cron.py | 37 ++++++++++++++----- .../readme/newsfragments/.gitignore | 0 .../readme/newsfragments/612.feature | 9 +++++ queue_job_cron/tests/test_queue_job_cron.py | 19 ++++++++++ queue_job_cron/views/ir_cron_view.xml | 4 ++ 6 files changed, 61 insertions(+), 12 deletions(-) create mode 100644 queue_job_cron/readme/newsfragments/.gitignore create mode 100644 queue_job_cron/readme/newsfragments/612.feature diff --git a/queue_job_cron/README.rst b/queue_job_cron/README.rst index 7943963cd1..de39b65e14 100644 --- a/queue_job_cron/README.rst +++ b/queue_job_cron/README.rst @@ -79,8 +79,8 @@ Authors Contributors ------------ -- Cédric Pigeon -- Nguyen Minh Chien +- Cédric Pigeon +- Nguyen Minh Chien Maintainers ----------- diff --git a/queue_job_cron/models/ir_cron.py b/queue_job_cron/models/ir_cron.py index 440740f164..7e4f5b848d 100644 --- a/queue_job_cron/models/ir_cron.py +++ b/queue_job_cron/models/ir_cron.py @@ -4,12 +4,23 @@ from odoo import api, fields, models +from odoo.addons.queue_job.job import identity_exact + _logger = logging.getLogger(__name__) class IrCron(models.Model): _inherit = "ir.cron" + no_parallel_queue_job_run = fields.Boolean( + help="Avoid parallel run. " + "If the cron job is already running, the new one will be skipped. " + "By default, odoo never runs the same cron job in parallel. 
This " + "option is therefore set to True by default when job is run as a " + "queue job.", + default=True, + ) + run_as_queue_job = fields.Boolean( help="Specify if this cron should be ran as a queue job" ) @@ -39,23 +50,29 @@ def method_direct_trigger(self): _cron = cron.with_user(cron.user_id).with_context( lastcall=cron.lastcall ) - _cron.with_delay( - priority=_cron.priority, - description=_cron.name, - channel=_cron.channel_id.complete_name, - )._run_job_as_queue_job(server_action=_cron.ir_actions_server_id) + _cron._delay_run_job_as_queue_job( + server_action=_cron.ir_actions_server_id + ) return True def _callback(self, cron_name, server_action_id, job_id): cron = self.env["ir.cron"].sudo().browse(job_id) if cron.run_as_queue_job: server_action = self.env["ir.actions.server"].browse(server_action_id) - return self.with_delay( - priority=cron.priority, - description=cron.name, - channel=cron.channel_id.complete_name, - )._run_job_as_queue_job(server_action=server_action) + return cron._delay_run_job_as_queue_job(server_action=server_action) else: return super()._callback( cron_name=cron_name, server_action_id=server_action_id, job_id=job_id ) + + def _delay_run_job_as_queue_job(self, server_action): + self.ensure_one() + identity_key = None + if self.no_parallel_queue_job_run: + identity_key = identity_exact + return self.with_delay( + priority=self.priority, + description=self.name, + channel=self.channel_id.complete_name, + identity_key=identity_key, + )._run_job_as_queue_job(server_action=server_action) diff --git a/queue_job_cron/readme/newsfragments/.gitignore b/queue_job_cron/readme/newsfragments/.gitignore new file mode 100644 index 0000000000..e69de29bb2 diff --git a/queue_job_cron/readme/newsfragments/612.feature b/queue_job_cron/readme/newsfragments/612.feature new file mode 100644 index 0000000000..9c521620a2 --- /dev/null +++ b/queue_job_cron/readme/newsfragments/612.feature @@ -0,0 +1,9 @@ +By default prevent parallel run of the same cron job 
when run as queue job. + +When a cron job is run by odoo, the odoo runner will prevent parallel run +of the same cron job. Before this change, this was not the case when the +cron job was run as a queue job. A new option is added to the cron job when +run as a queue job to prevent parallel run. This option is set to True by +default. In this way, the behavior is now the same as when the cron job is run +by odoo but you keep the possibility to disable this restriction when run as +a queue job. diff --git a/queue_job_cron/tests/test_queue_job_cron.py b/queue_job_cron/tests/test_queue_job_cron.py index 3eec55f7e9..d3cc18d636 100644 --- a/queue_job_cron/tests/test_queue_job_cron.py +++ b/queue_job_cron/tests/test_queue_job_cron.py @@ -39,3 +39,22 @@ def test_queue_job_cron_run(self): cron = self.env.ref("queue_job.ir_cron_autovacuum_queue_jobs") IrCron = self.env["ir.cron"] IrCron._run_job_as_queue_job(server_action=cron.ir_actions_server_id) + + def test_queue_job_no_parallelism(self): + cron = self.env.ref("queue_job.ir_cron_autovacuum_queue_jobs") + default_channel = self.env.ref("queue_job_cron.channel_root_ir_cron") + cron.write( + { + "no_parallel_queue_job_run": True, + "run_as_queue_job": True, + "channel_id": default_channel.id, + } + ) + cron.method_direct_trigger() + cron.method_direct_trigger() + nb_jobs = self.env["queue.job"].search_count([("name", "=", cron.name)]) + self.assertEqual(nb_jobs, 1) + cron.no_parallel_queue_job_run = False + cron.method_direct_trigger() + nb_jobs = self.env["queue.job"].search_count([("name", "=", cron.name)]) + self.assertEqual(nb_jobs, 2) diff --git a/queue_job_cron/views/ir_cron_view.xml b/queue_job_cron/views/ir_cron_view.xml index bd46c85289..33c0c85d33 100644 --- a/queue_job_cron/views/ir_cron_view.xml +++ b/queue_job_cron/views/ir_cron_view.xml @@ -7,6 +7,10 @@ + Date: Thu, 1 Feb 2024 10:07:01 +0100 Subject: [PATCH 04/19] queue_job: fix retry format with tuple values Configuration of randomized retry intervals is not 
possible due to the formatting checks not being updated. This should fix it. --- queue_job/models/queue_job_function.py | 25 +++++++++++----- test_queue_job/tests/__init__.py | 1 + test_queue_job/tests/test_job_function.py | 35 +++++++++++++++++++++++ 3 files changed, 54 insertions(+), 7 deletions(-) create mode 100644 test_queue_job/tests/test_job_function.py diff --git a/queue_job/models/queue_job_function.py b/queue_job/models/queue_job_function.py index 10b19345b7..7cf73ea370 100644 --- a/queue_job/models/queue_job_function.py +++ b/queue_job/models/queue_job_function.py @@ -155,10 +155,12 @@ def _parse_retry_pattern(self): try: # as json can't have integers as keys and the field is stored # as json, convert back to int - retry_pattern = { - int(try_count): postpone_seconds - for try_count, postpone_seconds in self.retry_pattern.items() - } + retry_pattern = {} + for try_count, postpone_value in self.retry_pattern.items(): + if isinstance(postpone_value, int): + retry_pattern[int(try_count)] = postpone_value + else: + retry_pattern[int(try_count)] = tuple(postpone_value) except ValueError: _logger.error( "Invalid retry pattern for job function %s," @@ -187,8 +189,9 @@ def job_config(self, name): def _retry_pattern_format_error_message(self): return _( "Unexpected format of Retry Pattern for {}.\n" - "Example of valid format:\n" - "{{1: 300, 5: 600, 10: 1200, 15: 3000}}" + "Example of valid formats:\n" + "{{1: 300, 5: 600, 10: 1200, 15: 3000}}\n" + "{{1: (1, 10), 5: (11, 20), 10: (21, 30), 15: (100, 300)}}" ).format(self.name) @api.constrains("retry_pattern") @@ -201,12 +204,20 @@ def _check_retry_pattern(self): all_values = list(retry_pattern) + list(retry_pattern.values()) for value in all_values: try: - int(value) + self._retry_value_type_check(value) except ValueError as ex: raise exceptions.UserError( record._retry_pattern_format_error_message() ) from ex + def _retry_value_type_check(self, value): + if isinstance(value, (tuple | list)): + if len(value) != 
2: + raise ValueError + [self._retry_value_type_check(element) for element in value] + return + int(value) + def _related_action_format_error_message(self): return _( "Unexpected format of Related Action for {}.\n" diff --git a/test_queue_job/tests/__init__.py b/test_queue_job/tests/__init__.py index dc59429e71..0405022ce0 100644 --- a/test_queue_job/tests/__init__.py +++ b/test_queue_job/tests/__init__.py @@ -4,5 +4,6 @@ from . import test_job from . import test_job_auto_delay from . import test_job_channels +from . import test_job_function from . import test_related_actions from . import test_delay_mocks diff --git a/test_queue_job/tests/test_job_function.py b/test_queue_job/tests/test_job_function.py new file mode 100644 index 0000000000..17781ac475 --- /dev/null +++ b/test_queue_job/tests/test_job_function.py @@ -0,0 +1,35 @@ +import odoo.tests.common as common +from odoo import exceptions + + +class TestJobFunction(common.TransactionCase): + def setUp(self): + super(TestJobFunction, self).setUp() + self.test_function_model = self.env.ref( + "queue_job.job_function_queue_job__test_job" + ) + + def test_check_retry_pattern_randomized_case(self): + randomized_pattern = "{1: (10, 20), 2: (20, 40)}" + self.test_function_model.edit_retry_pattern = randomized_pattern + self.assertEqual( + self.test_function_model.edit_retry_pattern, randomized_pattern + ) + + def test_check_retry_pattern_fixed_case(self): + fixed_pattern = "{1: 10, 2: 20}" + self.test_function_model.edit_retry_pattern = fixed_pattern + self.assertEqual(self.test_function_model.edit_retry_pattern, fixed_pattern) + + def test_check_retry_pattern_invalid_cases(self): + invalid_time_value_pattern = "{1: a, 2: 20}" + with self.assertRaises(exceptions.UserError): + self.test_function_model.edit_retry_pattern = invalid_time_value_pattern + + invalid_retry_count_pattern = "{a: 10, 2: 20}" + with self.assertRaises(exceptions.UserError): + self.test_function_model.edit_retry_pattern = 
invalid_retry_count_pattern + + invalid_randomized_pattern = "{1: (1, 2, 3), 2: 20}" + with self.assertRaises(exceptions.UserError): + self.test_function_model.edit_retry_pattern = invalid_randomized_pattern From 6b95faaf873034dcb9e302d6de61e4ecd3022940 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?S=C3=A9bastien=20Alix?= Date: Wed, 6 Mar 2024 12:03:03 +0100 Subject: [PATCH 05/19] queue_job: fix partial index to add 'wait_dependencies' state --- queue_job/__manifest__.py | 2 +- queue_job/migrations/17.0.1.3.2/pre-migration.py | 10 ++++++++++ queue_job/models/queue_job.py | 2 +- 3 files changed, 12 insertions(+), 2 deletions(-) create mode 100644 queue_job/migrations/17.0.1.3.2/pre-migration.py diff --git a/queue_job/__manifest__.py b/queue_job/__manifest__.py index 7433bdfdbe..3e4052d8ee 100644 --- a/queue_job/__manifest__.py +++ b/queue_job/__manifest__.py @@ -2,7 +2,7 @@ { "name": "Job Queue", - "version": "17.0.1.3.1", + "version": "17.0.1.3.2", "author": "Camptocamp,ACSONE SA/NV,Odoo Community Association (OCA)", "website": "https://github.com/OCA/queue", "license": "LGPL-3", diff --git a/queue_job/migrations/17.0.1.3.2/pre-migration.py b/queue_job/migrations/17.0.1.3.2/pre-migration.py new file mode 100644 index 0000000000..53d9690caa --- /dev/null +++ b/queue_job/migrations/17.0.1.3.2/pre-migration.py @@ -0,0 +1,10 @@ +# License LGPL-3.0 or later (http://www.gnu.org/licenses/lgpl.html) + +from odoo.tools.sql import table_exists + + +def migrate(cr, version): + if table_exists(cr, "queue_job"): + # Drop index 'queue_job_identity_key_state_partial_index', + # it will be recreated during the update + cr.execute("DROP INDEX IF EXISTS queue_job_identity_key_state_partial_index;") diff --git a/queue_job/models/queue_job.py b/queue_job/models/queue_job.py index 06f0a3a913..5598a7895e 100644 --- a/queue_job/models/queue_job.py +++ b/queue_job/models/queue_job.py @@ -138,7 +138,7 @@ def init(self): self._cr.execute( "CREATE INDEX queue_job_identity_key_state_partial_index 
" "ON queue_job (identity_key) WHERE state in ('pending', " - "'enqueued') AND identity_key IS NOT NULL;" + "'enqueued', 'wait_dependencies') AND identity_key IS NOT NULL;" ) @api.depends("records") From 3ee1f0c70d8f58f44e647cfb357c6f232d763218 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?S=C3=A9bastien=20Alix?= Date: Wed, 27 Mar 2024 12:35:26 +0100 Subject: [PATCH 06/19] queue_job: triggers stored computed fields before calling 'set_done()' So the time required to compute such fields by the ORM is taken into account when the 'date_done' and 'exec_time' values are set on the job. --- queue_job/controllers/main.py | 3 +++ 1 file changed, 3 insertions(+) diff --git a/queue_job/controllers/main.py b/queue_job/controllers/main.py index ca3e02acaa..8d4bf52a09 100644 --- a/queue_job/controllers/main.py +++ b/queue_job/controllers/main.py @@ -36,6 +36,9 @@ def _try_perform_job(self, env, job): _logger.debug("%s started", job) job.perform() + # Triggers any stored computed fields before calling 'set_done' + # so that will be part of the 'exec_time' + env["base"].flush() job.set_done() job.store() env.flush_all() From 100d58e55f1edf0740afc6f0ed216b02bdecc8a5 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?S=C3=A9bastien=20Alix?= Date: Mon, 8 Apr 2024 10:54:38 +0200 Subject: [PATCH 07/19] queue_job: fix warning when triggering stored computed fields Starting from 16.0, we should call `env.flush_all()` instead of `env["base"].flush()`, like it is done few lines below. 
--- queue_job/controllers/main.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/queue_job/controllers/main.py b/queue_job/controllers/main.py index 8d4bf52a09..fce3049fa0 100644 --- a/queue_job/controllers/main.py +++ b/queue_job/controllers/main.py @@ -38,7 +38,7 @@ def _try_perform_job(self, env, job): job.perform() # Triggers any stored computed fields before calling 'set_done' # so that will be part of the 'exec_time' - env["base"].flush() + env.flush_all() job.set_done() job.store() env.flush_all() From f8fcceba13d9e78fea82bcca08a648f4b23c6664 Mon Sep 17 00:00:00 2001 From: Pierre Verkest Date: Wed, 10 Apr 2024 18:29:50 +0200 Subject: [PATCH 08/19] [FIX] queue_job_cron_jobrunner: use priority to select job * use FIFO, the first created job will be treated first * if priorities differ, the job with the higher priority (lower value) takes precedence Yet we are not taking channel priority into account --- queue_job_cron_jobrunner/models/queue_job.py | 4 +-- .../tests/test_queue_job.py | 30 ++++++++++++++++++- 2 files changed, 31 insertions(+), 3 deletions(-) diff --git a/queue_job_cron_jobrunner/models/queue_job.py b/queue_job_cron_jobrunner/models/queue_job.py index 2e19556b95..55a4b8a310 100644 --- a/queue_job_cron_jobrunner/models/queue_job.py +++ b/queue_job_cron_jobrunner/models/queue_job.py @@ -40,7 +40,7 @@ def _acquire_one_job(self): FROM queue_job WHERE state = 'pending' AND (eta IS NULL OR eta <= (now() AT TIME ZONE 'UTC')) - ORDER BY date_created DESC + ORDER BY priority, date_created LIMIT 1 FOR NO KEY UPDATE SKIP LOCKED """ ) @@ -59,7 +59,7 @@ def _process(self, commit=False): # while the job is processing. However, doing this will release the # lock on the db, so we need to find another way.
# if commit: - # self.flush() + # self.env.flush_all() # self.env.cr.commit() # Actual processing diff --git a/queue_job_cron_jobrunner/tests/test_queue_job.py b/queue_job_cron_jobrunner/tests/test_queue_job.py index 3f2e0ef637..54800b792c 100644 --- a/queue_job_cron_jobrunner/tests/test_queue_job.py +++ b/queue_job_cron_jobrunner/tests/test_queue_job.py @@ -67,5 +67,33 @@ def test_queue_job_cron_trigger_enqueue_dependencies(self): self.assertEqual(job_record.state, "done", "Processed OK") # if the state is "waiting_dependencies", it means the "enqueue_waiting()" - # step has not been doen when the parent job has been done + # step has not been done when the parent job has been done self.assertEqual(job_record_depends.state, "done", "Processed OK") + + def test_acquire_one_job_use_priority(self): + with freeze_time("2024-01-01 10:01:01"): + self.env["res.partner"].with_delay(priority=3).create({"name": "test"}) + + with freeze_time("2024-01-01 10:02:01"): + job = ( + self.env["res.partner"].with_delay(priority=1).create({"name": "test"}) + ) + + with freeze_time("2024-01-01 10:03:01"): + self.env["res.partner"].with_delay(priority=2).create({"name": "test"}) + + self.assertEqual(self.env["queue.job"]._acquire_one_job(), job.db_record()) + + def test_acquire_one_job_consume_the_oldest_first(self): + with freeze_time("2024-01-01 10:01:01"): + job = ( + self.env["res.partner"].with_delay(priority=30).create({"name": "test"}) + ) + + with freeze_time("2024-01-01 10:02:01"): + self.env["res.partner"].with_delay(priority=30).create({"name": "test"}) + + with freeze_time("2024-01-01 10:03:01"): + self.env["res.partner"].with_delay(priority=30).create({"name": "test"}) + + self.assertEqual(self.env["queue.job"]._acquire_one_job(), job.db_record()) From 95ce35a990244603244af59530ac913953c461dd Mon Sep 17 00:00:00 2001 From: Florian Mounier Date: Tue, 19 Nov 2024 09:21:11 +0100 Subject: [PATCH 09/19] [IMP] queue_job: Add split method --- queue_job/README.rst | 33 +++++++++ 
queue_job/delay.py | 62 +++++++++++++--- queue_job/readme/USAGE.md | 32 +++++++++ queue_job/tests/__init__.py | 1 + queue_job/tests/test_delayable_split.py | 94 +++++++++++++++++++++++++ 5 files changed, 213 insertions(+), 9 deletions(-) create mode 100644 queue_job/tests/test_delayable_split.py diff --git a/queue_job/README.rst b/queue_job/README.rst index e9e2ea99fa..53aff19f4d 100644 --- a/queue_job/README.rst +++ b/queue_job/README.rst @@ -281,6 +281,39 @@ is at the top of the graph. In the example above, if it was called on ``group_a``, then ``group_b`` would never be delayed (but a warning would be shown). +It is also possible to split a job into several jobs, each one +processing a part of the work. This can be useful to avoid very long +jobs, parallelize some task and get more specific errors. Usage is as +follows: + +.. code:: python + + def button_split_delayable(self): + ( + self # Can be a big recordset, let's say 1000 records + .delayable() + .generate_thumbnail((50, 50)) + .set(priority=30) + .set(description=_("generate xxx")) + .split(50) # Split the job in 20 jobs of 50 records each + .delay() + ) + +The ``split()`` method takes a ``chain`` boolean keyword argument. If +set to True, the jobs will be chained, meaning that the next job will +only start when the previous one is done: + +.. 
code:: python + + def button_increment_var(self): + ( + self + .delayable() + .increment_counter() + .split(1, chain=True) # Will exceute the jobs one after the other + .delay() + ) + Enqueing Job Options ~~~~~~~~~~~~~~~~~~~~ diff --git a/queue_job/delay.py b/queue_job/delay.py index 9b596b1665..0ba54e48a9 100644 --- a/queue_job/delay.py +++ b/queue_job/delay.py @@ -232,7 +232,7 @@ def _ensure_same_graph_uuid(jobs): elif jobs_count == 1: if jobs[0].graph_uuid: raise ValueError( - f"Job {jobs[0]} is a single job, it should not" " have a graph uuid" + f"Job {jobs[0]} is a single job, it should not have a graph uuid" ) else: graph_uuids = {job.graph_uuid for job in jobs if job.graph_uuid} @@ -483,11 +483,10 @@ def _tail(self): return [self] def __repr__(self): - return "Delayable({}.{}({}, {}))".format( - self.recordset, - self._job_method.__name__ if self._job_method else "", - self._job_args, - self._job_kwargs, + return ( + f"Delayable({self.recordset}." + f"{self._job_method.__name__ if self._job_method else ''}" + f"({self._job_args}, {self._job_kwargs}))" ) def __del__(self): @@ -525,6 +524,51 @@ def delay(self): """Delay the whole graph""" self._graph.delay() + def split(self, size, chain=False): + """Split the Delayables. 
+ + Use `DelayableGroup` or `DelayableChain` + if `chain` is True containing batches of size `size` + """ + if not self._job_method: + raise ValueError("No method set on the Delayable") + + total_records = len(self.recordset) + + delayables = [] + for index in range(0, total_records, size): + recordset = self.recordset[index : index + size] + delayable = Delayable( + recordset, + priority=self.priority, + eta=self.eta, + max_retries=self.max_retries, + description=self.description, + channel=self.channel, + identity_key=self.identity_key, + ) + # Update the __self__ + delayable._job_method = getattr(recordset, self._job_method.__name__) + delayable._job_args = self._job_args + delayable._job_kwargs = self._job_kwargs + + delayables.append(delayable) + + description = self.description or ( + self._job_method.__doc__.splitlines()[0].strip() + if self._job_method.__doc__ + else f"{self.recordset._name}.{self._job_method.__name__}" + ) + for index, delayable in enumerate(delayables): + delayable.set( + description=f"{description} (split {index + 1}/{len(delayables)})" + ) + + # Prevent warning on deletion + self._generated_job = True + + return (DelayableChain if chain else DelayableGroup)(*delayables) + def _build_job(self): if self._generated_job: return self._generated_job @@ -611,9 +655,9 @@ def _delay_delayable(*args, **kwargs): return _delay_delayable def __str__(self): - return "DelayableRecordset({}{})".format( - self.delayable.recordset._name, - getattr(self.delayable.recordset, "_ids", ""), + return ( + f"DelayableRecordset({self.delayable.recordset._name}" + f"{getattr(self.delayable.recordset, '_ids', '')})" ) __repr__ = __str__ diff --git a/queue_job/readme/USAGE.md b/queue_job/readme/USAGE.md index fb160bfa48..c08374b9fc 100644 --- a/queue_job/readme/USAGE.md +++ b/queue_job/readme/USAGE.md @@ -108,6 +108,38 @@ is at the top of the graph. 
In the example above, if it was called on `group_a`, then `group_b` would never be delayed (but a warning would be shown). +It is also possible to split a job into several jobs, each one processing +a part of the work. This can be useful to avoid very long jobs, parallelize +some task and get more specific errors. Usage is as follows: + +``` python +def button_split_delayable(self): + ( + self # Can be a big recordset, let's say 1000 records + .delayable() + .generate_thumbnail((50, 50)) + .set(priority=30) + .set(description=_("generate xxx")) + .split(50) # Split the job in 20 jobs of 50 records each + .delay() + ) +``` + +The `split()` method takes a `chain` boolean keyword argument. If set to +True, the jobs will be chained, meaning that the next job will only start +when the previous one is done: + +``` python +def button_increment_var(self): + ( + self + .delayable() + .increment_counter() + .split(1, chain=True) # Will exceute the jobs one after the other + .delay() + ) +``` + ### Enqueing Job Options - priority: default is 10, the closest it is to 0, the faster it will be diff --git a/queue_job/tests/__init__.py b/queue_job/tests/__init__.py index 047942bde4..2fdff496bc 100644 --- a/queue_job/tests/__init__.py +++ b/queue_job/tests/__init__.py @@ -1,6 +1,7 @@ from . import test_runner_channels from . import test_runner_runner from . import test_delayable +from . import test_delayable_split from . import test_json_field from . import test_model_job_channel from . import test_model_job_function diff --git a/queue_job/tests/test_delayable_split.py b/queue_job/tests/test_delayable_split.py new file mode 100644 index 0000000000..b761878b2e --- /dev/null +++ b/queue_job/tests/test_delayable_split.py @@ -0,0 +1,94 @@ +# Copyright 2024 Akretion (http://www.akretion.com). +# @author Florian Mounier +# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl). 
+ +from odoo.tests import common + +# pylint: disable=odoo-addons-relative-import +from odoo.addons.queue_job.delay import Delayable + + +class TestDelayableSplit(common.BaseCase): + def setUp(self): + super().setUp() + + class FakeRecordSet(list): + def __init__(self, *args, **kwargs): + super().__init__(*args, **kwargs) + self._name = "recordset" + + def __getitem__(self, key): + if isinstance(key, slice): + return FakeRecordSet(super().__getitem__(key)) + return super().__getitem__(key) + + def method(self, arg, kwarg=None): + """Method to be called""" + return arg, kwarg + + self.FakeRecordSet = FakeRecordSet + + def test_delayable_split_no_method_call_beforehand(self): + dl = Delayable(self.FakeRecordSet(range(20))) + with self.assertRaises(ValueError): + dl.split(3) + + def test_delayable_split_10_3(self): + dl = Delayable(self.FakeRecordSet(range(10))) + dl.method("arg", kwarg="kwarg") + group = dl.split(3) + self.assertEqual(len(group._delayables), 4) + delayables = sorted(list(group._delayables), key=lambda x: x.description) + self.assertEqual(delayables[0].recordset, self.FakeRecordSet([0, 1, 2])) + self.assertEqual(delayables[1].recordset, self.FakeRecordSet([3, 4, 5])) + self.assertEqual(delayables[2].recordset, self.FakeRecordSet([6, 7, 8])) + self.assertEqual(delayables[3].recordset, self.FakeRecordSet([9])) + self.assertEqual(delayables[0].description, "Method to be called (split 1/4)") + self.assertEqual(delayables[1].description, "Method to be called (split 2/4)") + self.assertEqual(delayables[2].description, "Method to be called (split 3/4)") + self.assertEqual(delayables[3].description, "Method to be called (split 4/4)") + self.assertNotEqual(delayables[0]._job_method, dl._job_method) + self.assertNotEqual(delayables[1]._job_method, dl._job_method) + self.assertNotEqual(delayables[2]._job_method, dl._job_method) + self.assertNotEqual(delayables[3]._job_method, dl._job_method) + self.assertEqual(delayables[0]._job_method.__name__, 
dl._job_method.__name__) + self.assertEqual(delayables[1]._job_method.__name__, dl._job_method.__name__) + self.assertEqual(delayables[2]._job_method.__name__, dl._job_method.__name__) + self.assertEqual(delayables[3]._job_method.__name__, dl._job_method.__name__) + self.assertEqual(delayables[0]._job_args, ("arg",)) + self.assertEqual(delayables[1]._job_args, ("arg",)) + self.assertEqual(delayables[2]._job_args, ("arg",)) + self.assertEqual(delayables[3]._job_args, ("arg",)) + self.assertEqual(delayables[0]._job_kwargs, {"kwarg": "kwarg"}) + self.assertEqual(delayables[1]._job_kwargs, {"kwarg": "kwarg"}) + self.assertEqual(delayables[2]._job_kwargs, {"kwarg": "kwarg"}) + self.assertEqual(delayables[3]._job_kwargs, {"kwarg": "kwarg"}) + + def test_delayable_split_10_5(self): + dl = Delayable(self.FakeRecordSet(range(10))) + dl.method("arg", kwarg="kwarg") + group = dl.split(5) + self.assertEqual(len(group._delayables), 2) + delayables = sorted(list(group._delayables), key=lambda x: x.description) + self.assertEqual(delayables[0].recordset, self.FakeRecordSet([0, 1, 2, 3, 4])) + self.assertEqual(delayables[1].recordset, self.FakeRecordSet([5, 6, 7, 8, 9])) + self.assertEqual(delayables[0].description, "Method to be called (split 1/2)") + self.assertEqual(delayables[1].description, "Method to be called (split 2/2)") + + def test_delayable_split_10_10(self): + dl = Delayable(self.FakeRecordSet(range(10))) + dl.method("arg", kwarg="kwarg") + group = dl.split(10) + self.assertEqual(len(group._delayables), 1) + delayables = sorted(list(group._delayables), key=lambda x: x.description) + self.assertEqual(delayables[0].recordset, self.FakeRecordSet(range(10))) + self.assertEqual(delayables[0].description, "Method to be called (split 1/1)") + + def test_delayable_split_10_20(self): + dl = Delayable(self.FakeRecordSet(range(10))) + dl.method("arg", kwarg="kwarg") + group = dl.split(20) + self.assertEqual(len(group._delayables), 1) + delayables = 
sorted(list(group._delayables), key=lambda x: x.description) + self.assertEqual(delayables[0].recordset, self.FakeRecordSet(range(10))) + self.assertEqual(delayables[0].description, "Method to be called (split 1/1)") From 74ed19fba67f7fa6f8bb89a6b60d23cedd6f202a Mon Sep 17 00:00:00 2001 From: Quoc Duong Date: Thu, 1 Aug 2024 11:45:38 +0700 Subject: [PATCH 10/19] [IMP] queue_job: Cancel child jobs when the parent is cancelled --- queue_job/job.py | 12 +++++++++-- queue_job/models/queue_job.py | 2 ++ test_queue_job/tests/test_job.py | 37 ++++++++++++++++++++++++++++++++ 3 files changed, 49 insertions(+), 2 deletions(-) diff --git a/queue_job/job.py b/queue_job/job.py index e03dd2b517..a473be5cd0 100644 --- a/queue_job/job.py +++ b/queue_job/job.py @@ -594,8 +594,8 @@ def perform(self): return self.result - def enqueue_waiting(self): - sql = """ + def _get_common_dependent_jobs_query(self): + return """ UPDATE queue_job SET state = %s FROM ( @@ -623,9 +623,17 @@ def enqueue_waiting(self): AND %s = ALL(jobs.parent_states) AND state = %s; """ + + def enqueue_waiting(self): + sql = self._get_common_dependent_jobs_query() self.env.cr.execute(sql, (PENDING, self.uuid, DONE, WAIT_DEPENDENCIES)) self.env["queue.job"].invalidate_model(["state"]) + def cancel_dependent_jobs(self): + sql = self._get_common_dependent_jobs_query() + self.env.cr.execute(sql, (CANCELLED, self.uuid, CANCELLED, WAIT_DEPENDENCIES)) + self.env["queue.job"].invalidate_model(["state"]) + def store(self): """Store the Job""" job_model = self.env["queue.job"] diff --git a/queue_job/models/queue_job.py b/queue_job/models/queue_job.py index 5598a7895e..55ee7e526c 100644 --- a/queue_job/models/queue_job.py +++ b/queue_job/models/queue_job.py @@ -325,6 +325,8 @@ def _change_job_state(self, state, result=None): elif state == CANCELLED: job_.set_cancelled(result=result) job_.store() + record.env["queue.job"].flush_model() + job_.cancel_dependent_jobs() else: raise ValueError("State not supported: %s" % state) 
diff --git a/test_queue_job/tests/test_job.py b/test_queue_job/tests/test_job.py index c4ec5081b3..d7414ef7aa 100644 --- a/test_queue_job/tests/test_job.py +++ b/test_queue_job/tests/test_job.py @@ -15,6 +15,7 @@ RetryableJobError, ) from odoo.addons.queue_job.job import ( + CANCELLED, DONE, ENQUEUED, FAILED, @@ -530,6 +531,42 @@ def test_button_done(self): stored.result, "Manually set to done by %s" % self.env.user.name ) + def test_button_done_enqueue_waiting_dependencies(self): + job_root = Job(self.env["test.queue.job"].testing_method) + job_child = Job(self.env["test.queue.job"].testing_method) + job_child.add_depends({job_root}) + + DelayableGraph._ensure_same_graph_uuid([job_root, job_child]) + job_root.store() + job_child.store() + + self.assertEqual(job_child.state, WAIT_DEPENDENCIES) + record_root = job_root.db_record() + record_child = job_child.db_record() + # Trigger button done + record_root.button_done() + # Check the state + self.assertEqual(record_root.state, DONE) + self.assertEqual(record_child.state, PENDING) + + def test_button_cancel_dependencies(self): + job_root = Job(self.env["test.queue.job"].testing_method) + job_child = Job(self.env["test.queue.job"].testing_method) + job_child.add_depends({job_root}) + + DelayableGraph._ensure_same_graph_uuid([job_root, job_child]) + job_root.store() + job_child.store() + + self.assertEqual(job_child.state, WAIT_DEPENDENCIES) + record_root = job_root.db_record() + record_child = job_child.db_record() + # Trigger button cancelled + record_root.button_cancelled() + # Check the state + self.assertEqual(record_root.state, CANCELLED) + self.assertEqual(record_child.state, CANCELLED) + def test_requeue(self): stored = self._create_job() stored.write({"state": "failed"}) From eab6121763a4fca8eee5858e559dd059fbcfd3b4 Mon Sep 17 00:00:00 2001 From: Florent Xicluna Date: Mon, 16 Sep 2024 13:43:47 +0200 Subject: [PATCH 11/19] [FIX] queue_job: typo --- queue_job/README.rst | 2 +- queue_job/readme/USAGE.md | 2 +- 2 
files changed, 2 insertions(+), 2 deletions(-) diff --git a/queue_job/README.rst b/queue_job/README.rst index 53aff19f4d..ac5f567d3d 100644 --- a/queue_job/README.rst +++ b/queue_job/README.rst @@ -468,7 +468,7 @@ running Odoo** When you are developing (ie: connector modules) you might want to bypass the queue job and run your code immediately. -To do so you can set QUEUE_JOB\__NO_DELAY=1 in your enviroment. +To do so you can set QUEUE_JOB\__NO_DELAY=1 in your environment. **Bypass jobs in tests** diff --git a/queue_job/readme/USAGE.md b/queue_job/readme/USAGE.md index c08374b9fc..deb6fe2aca 100644 --- a/queue_job/readme/USAGE.md +++ b/queue_job/readme/USAGE.md @@ -290,7 +290,7 @@ running Odoo** When you are developing (ie: connector modules) you might want to bypass the queue job and run your code immediately. -To do so you can set QUEUE_JOB\_\_NO_DELAY=1 in your enviroment. +To do so you can set QUEUE_JOB\_\_NO_DELAY=1 in your environment. **Bypass jobs in tests** From 4bf003ae9dafc9029d3d20e453a1d0c5941d59f9 Mon Sep 17 00:00:00 2001 From: Florent Xicluna Date: Mon, 16 Sep 2024 13:44:12 +0200 Subject: [PATCH 12/19] [IMP] queue_job: add filter on Date Created --- queue_job/views/queue_job_views.xml | 26 ++++++++++++++++++++++++++ 1 file changed, 26 insertions(+) diff --git a/queue_job/views/queue_job_views.xml b/queue_job/views/queue_job_views.xml index 3d7a368971..be12b4294b 100644 --- a/queue_job/views/queue_job_views.xml +++ b/queue_job/views/queue_job_views.xml @@ -246,6 +246,27 @@ string="Failed" domain="[('state', '=', 'failed')]" /> + + + + + + From 9dd9d197840e7f246474363389cb34303a27714b Mon Sep 17 00:00:00 2001 From: Florent Xicluna Date: Thu, 26 Dec 2024 17:28:17 +0100 Subject: [PATCH 13/19] [REF] remove explicit super() arguments --- test_queue_job/tests/test_job_function.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/test_queue_job/tests/test_job_function.py b/test_queue_job/tests/test_job_function.py index 17781ac475..320b4973c5 
100644 --- a/test_queue_job/tests/test_job_function.py +++ b/test_queue_job/tests/test_job_function.py @@ -4,7 +4,7 @@ class TestJobFunction(common.TransactionCase): def setUp(self): - super(TestJobFunction, self).setUp() + super().setUp() self.test_function_model = self.env.ref( "queue_job.job_function_queue_job__test_job" ) From 7995de8f769dc334418a3ef0af74a95a4f5aa380 Mon Sep 17 00:00:00 2001 From: Lois Rilo Date: Fri, 11 Jun 2021 16:11:14 +0200 Subject: [PATCH 14/19] [FIX] queue_job_cron: channel_id must be storable. Otherwise, you cannot use any channel other than default ( root.ir_cron) --- queue_job_cron/models/ir_cron.py | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/queue_job_cron/models/ir_cron.py b/queue_job_cron/models/ir_cron.py index 7e4f5b848d..bb09ed075e 100644 --- a/queue_job_cron/models/ir_cron.py +++ b/queue_job_cron/models/ir_cron.py @@ -28,13 +28,16 @@ class IrCron(models.Model): comodel_name="queue.job.channel", compute="_compute_run_as_queue_job", readonly=False, + store=True, string="Channel", ) @api.depends("run_as_queue_job") def _compute_run_as_queue_job(self): for cron in self: - if cron.run_as_queue_job and not cron.channel_id: + if cron.channel_id: + continue + if cron.run_as_queue_job: cron.channel_id = self.env.ref("queue_job_cron.channel_root_ir_cron").id else: cron.channel_id = False From eb1cecfe1939e30af40ca3715d54fb26217681dd Mon Sep 17 00:00:00 2001 From: oca-ci Date: Fri, 6 Jun 2025 19:40:45 +0000 Subject: [PATCH 15/19] [UPD] Update queue_job.pot --- queue_job/i18n/queue_job.pot | 26 ++++++++++++++++++++++++-- 1 file changed, 24 insertions(+), 2 deletions(-) diff --git a/queue_job/i18n/queue_job.pot b/queue_job/i18n/queue_job.pot index 8aaa602147..fc8e2bbbdb 100644 --- a/queue_job/i18n/queue_job.pot +++ b/queue_job/i18n/queue_job.pot @@ -97,6 +97,7 @@ msgstr "" #. 
module: queue_job #: model:ir.model.fields.selection,name:queue_job.selection__queue_job__state__cancelled +#: model_terms:ir.ui.view,arch_db:queue_job.view_queue_job_search msgid "Cancelled" msgstr "" @@ -172,6 +173,11 @@ msgstr "" msgid "Created by" msgstr "" +#. module: queue_job +#: model_terms:ir.ui.view,arch_db:queue_job.view_queue_job_search +msgid "Created date" +msgstr "" + #. module: queue_job #: model:ir.model.fields,field_description:queue_job.field_queue_job_channel__create_date #: model:ir.model.fields,field_description:queue_job.field_queue_job_lock__create_date @@ -477,6 +483,21 @@ msgstr "" msgid "Kwargs" msgstr "" +#. module: queue_job +#: model_terms:ir.ui.view,arch_db:queue_job.view_queue_job_search +msgid "Last 24 hours" +msgstr "" + +#. module: queue_job +#: model_terms:ir.ui.view,arch_db:queue_job.view_queue_job_search +msgid "Last 30 days" +msgstr "" + +#. module: queue_job +#: model_terms:ir.ui.view,arch_db:queue_job.view_queue_job_search +msgid "Last 7 days" +msgstr "" + #. module: queue_job #: model:ir.model.fields,field_description:queue_job.field_queue_job_channel__write_uid #: model:ir.model.fields,field_description:queue_job.field_queue_job_lock__write_uid @@ -882,8 +903,9 @@ msgstr "" #, python-format msgid "" "Unexpected format of Retry Pattern for {}.\n" -"Example of valid format:\n" -"{{1: 300, 5: 600, 10: 1200, 15: 3000}}" +"Example of valid formats:\n" +"{{1: 300, 5: 600, 10: 1200, 15: 3000}}\n" +"{{1: (1, 10), 5: (11, 20), 10: (21, 30), 15: (100, 300)}}" msgstr "" #. 
module: queue_job From 75b376b1ebda7b4010908cb3b44e6c9c0bd8c9a8 Mon Sep 17 00:00:00 2001 From: oca-ci Date: Fri, 6 Jun 2025 19:40:46 +0000 Subject: [PATCH 16/19] [UPD] Update queue_job_cron.pot --- queue_job_cron/i18n/queue_job_cron.pot | 13 +++++++++++++ 1 file changed, 13 insertions(+) diff --git a/queue_job_cron/i18n/queue_job_cron.pot b/queue_job_cron/i18n/queue_job_cron.pot index 03beb90b6b..37d4ce2708 100644 --- a/queue_job_cron/i18n/queue_job_cron.pot +++ b/queue_job_cron/i18n/queue_job_cron.pot @@ -13,11 +13,24 @@ msgstr "" "Content-Transfer-Encoding: \n" "Plural-Forms: \n" +#. module: queue_job_cron +#: model:ir.model.fields,help:queue_job_cron.field_ir_cron__no_parallel_queue_job_run +msgid "" +"Avoid parallel run. If the cron job is already running, the new one will be " +"skipped. By default, odoo never runs the same cron job in parallel. This " +"option is therefore set to True by default when job is run as a queue job." +msgstr "" + #. module: queue_job_cron #: model:ir.model.fields,field_description:queue_job_cron.field_ir_cron__channel_id msgid "Channel" msgstr "" +#. module: queue_job_cron +#: model:ir.model.fields,field_description:queue_job_cron.field_ir_cron__no_parallel_queue_job_run +msgid "No Parallel Queue Job Run" +msgstr "" + #. 
module: queue_job_cron #: model:ir.model.fields,field_description:queue_job_cron.field_ir_cron__run_as_queue_job msgid "Run As Queue Job" From d512a58a4830ad8ca12b7e06d5493d7fb19e1e91 Mon Sep 17 00:00:00 2001 From: OCA-git-bot Date: Fri, 6 Jun 2025 19:42:39 +0000 Subject: [PATCH 17/19] [BOT] post-merge updates --- README.md | 8 +- queue_job/README.rst | 8 +- queue_job/__manifest__.py | 2 +- queue_job/static/description/index.html | 85 +++++++++++++------ queue_job_cron/README.rst | 29 ++++++- queue_job_cron/__manifest__.py | 2 +- queue_job_cron/readme/HISTORY.md | 13 +++ .../readme/newsfragments/612.feature | 9 -- queue_job_cron/static/description/index.html | 80 ++++++++++++----- queue_job_cron_jobrunner/README.rst | 34 ++++---- queue_job_cron_jobrunner/__manifest__.py | 2 +- .../static/description/index.html | 41 +++++---- test_queue_job/__manifest__.py | 2 +- 13 files changed, 217 insertions(+), 98 deletions(-) create mode 100644 queue_job_cron/readme/HISTORY.md delete mode 100644 queue_job_cron/readme/newsfragments/612.feature diff --git a/README.md b/README.md index 02ceb9d200..eca36e6e10 100644 --- a/README.md +++ b/README.md @@ -22,11 +22,11 @@ Available addons addon | version | maintainers | summary --- | --- | --- | --- [base_import_async](base_import_async/) | 17.0.1.0.0 | | Import CSV files in the background -[queue_job](queue_job/) | 17.0.1.3.1 | guewen | Job Queue -[queue_job_cron](queue_job_cron/) | 17.0.1.0.0 | | Scheduled Actions as Queue Jobs -[queue_job_cron_jobrunner](queue_job_cron_jobrunner/) | 17.0.1.0.0 | ivantodorovich | Run jobs without a dedicated JobRunner +[queue_job](queue_job/) | 17.0.1.4.0 | guewen | Job Queue +[queue_job_cron](queue_job_cron/) | 17.0.1.1.0 | | Scheduled Actions as Queue Jobs +[queue_job_cron_jobrunner](queue_job_cron_jobrunner/) | 17.0.1.1.0 | ivantodorovich | Run jobs without a dedicated JobRunner [queue_job_subscribe](queue_job_subscribe/) | 17.0.1.0.0 | | Control which users are subscribed to queue job 
notifications -[test_queue_job](test_queue_job/) | 17.0.1.0.1 | | Queue Job Tests +[test_queue_job](test_queue_job/) | 17.0.1.1.0 | | Queue Job Tests [//]: # (end addons) diff --git a/queue_job/README.rst b/queue_job/README.rst index ac5f567d3d..a0aa31e054 100644 --- a/queue_job/README.rst +++ b/queue_job/README.rst @@ -1,3 +1,7 @@ +.. image:: https://odoo-community.org/readme-banner-image + :target: https://odoo-community.org/get-involved?utm_source=readme + :alt: Odoo Community Association + ========= Job Queue ========= @@ -7,13 +11,13 @@ Job Queue !! This file is generated by oca-gen-addon-readme !! !! changes will be overwritten. !! !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! - !! source digest: sha256:8a75c10ad3e4ec6ac8b6178e6b04dfc40ac1bbe4130128f9fe3946eed920228f + !! source digest: sha256:0e908a7e024c5995acea7783a1f9ad76851955d69d8a88236d8508f6301c38b4 !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! .. |badge1| image:: https://img.shields.io/badge/maturity-Mature-brightgreen.png :target: https://odoo-community.org/page/development-status :alt: Mature -.. |badge2| image:: https://img.shields.io/badge/licence-LGPL--3-blue.png +.. |badge2| image:: https://img.shields.io/badge/license-LGPL--3-blue.png :target: http://www.gnu.org/licenses/lgpl-3.0-standalone.html :alt: License: LGPL-3 .. 
|badge3| image:: https://img.shields.io/badge/github-OCA%2Fqueue-lightgray.png?logo=github diff --git a/queue_job/__manifest__.py b/queue_job/__manifest__.py index 3e4052d8ee..76156b77d8 100644 --- a/queue_job/__manifest__.py +++ b/queue_job/__manifest__.py @@ -2,7 +2,7 @@ { "name": "Job Queue", - "version": "17.0.1.3.2", + "version": "17.0.1.4.0", "author": "Camptocamp,ACSONE SA/NV,Odoo Community Association (OCA)", "website": "https://github.com/OCA/queue", "license": "LGPL-3", diff --git a/queue_job/static/description/index.html b/queue_job/static/description/index.html index 522f4c3abd..0ea18c2065 100644 --- a/queue_job/static/description/index.html +++ b/queue_job/static/description/index.html @@ -3,7 +3,7 @@ -Job Queue +README.rst -
-

Job Queue

+
+ + +Odoo Community Association + +
+

Job Queue

-

Mature License: LGPL-3 OCA/queue Translate me on Weblate Try me on Runboat

+

Mature License: LGPL-3 OCA/queue Translate me on Weblate Try me on Runboat

This addon adds an integrated Job Queue to Odoo.

It allows to postpone method calls executed asynchronously.

Jobs are executed in the background by a Jobrunner, in their own @@ -445,7 +450,7 @@

Job Queue

-

Use Cases / Context

+

Use Cases / Context

Odoo treats task synchronously, like when you import a list of products it will treat each line in one big task. “Queue job” gives you the ability to detail big tasks in many smaller ones.

@@ -473,11 +478,11 @@

Use Cases / Context

-

Installation

+

Installation

Be sure to have the requests library.

-

Configuration

+

Configuration

  • Using environment variables and command line:
    • Adjust environment variables (optional):
-

Usage

+

Usage

To use this module, you need to:

  1. Go to Job Queue menu
-

Developers

+

Developers

-

Delaying jobs

+

Delaying jobs

The fast way to enqueue a job for a method is to use with_delay() on a record or model:

@@ -627,9 +632,38 @@ 

Delaying jobs

is at the top of the graph. In the example above, if it was called on group_a, then group_b would never be delayed (but a warning would be shown).

+

It is also possible to split a job into several jobs, each one +processing a part of the work. This can be useful to avoid very long +jobs, parallelize some task and get more specific errors. Usage is as +follows:

+
+def button_split_delayable(self):
+    (
+        self  # Can be a big recordset, let's say 1000 records
+        .delayable()
+        .generate_thumbnail((50, 50))
+        .set(priority=30)
+        .set(description=_("generate xxx"))
+        .split(50)  # Split the job in 20 jobs of 50 records each
+        .delay()
+    )
+
+

The split() method takes a chain boolean keyword argument. If +set to True, the jobs will be chained, meaning that the next job will +only start when the previous one is done:

+
+def button_increment_var(self):
+    (
+        self
+        .delayable()
+        .increment_counter()
+        .split(1, chain=True) # Will exceute the jobs one after the other
+        .delay()
+    )
+
-

Enqueing Job Options

+

Enqueing Job Options

  • priority: default is 10, the closest it is to 0, the faster it will be executed
  • @@ -648,7 +682,7 @@

    Enqueing Job Options

-

Testing

+

Testing

Asserting enqueued jobs

The recommended way to test jobs, rather than running them directly and synchronously is to split the tests in two parts:

@@ -891,7 +925,7 @@

Testing

synchronously

-

Patterns

+

Patterns

Through the time, two main patterns emerged:

  1. For data exposed to users, a model should store the data and the @@ -918,7 +952,7 @@

    Patterns

-

Known issues / Roadmap

+

Known issues / Roadmap

  • After creating a new database or installing queue_job on an existing database, Odoo must be restarted for the runner to detect it.
  • @@ -939,9 +973,9 @@

    Known issues / Roadmap

-

Changelog

+

Changelog

-

Bug Tracker

+

Bug Tracker

Bugs are tracked on GitHub Issues. In case of trouble, please check there if your issue has already been reported. If you spotted it first, help us to smash it by providing a detailed and welcomed @@ -960,16 +994,16 @@

Bug Tracker

Do not contact contributors directly about support or help with technical issues.

-

Credits

+

Credits

-

Authors

+

Authors

  • Camptocamp
  • ACSONE SA/NV
-

Contributors

+

Contributors

-

Maintainers

+

Maintainers

This module is maintained by the OCA.

Odoo Community Association @@ -1002,5 +1036,6 @@

Maintainers

+
diff --git a/queue_job_cron/README.rst b/queue_job_cron/README.rst index de39b65e14..f23cad17d1 100644 --- a/queue_job_cron/README.rst +++ b/queue_job_cron/README.rst @@ -1,3 +1,7 @@ +.. image:: https://odoo-community.org/readme-banner-image + :target: https://odoo-community.org/get-involved?utm_source=readme + :alt: Odoo Community Association + =============================== Scheduled Actions as Queue Jobs =============================== @@ -7,13 +11,13 @@ Scheduled Actions as Queue Jobs !! This file is generated by oca-gen-addon-readme !! !! changes will be overwritten. !! !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! - !! source digest: sha256:c790b0e3494e59c709d57d7dbb0864ac37ce3af23801499f352df7528d5fe072 + !! source digest: sha256:654b8def2f0b0f8ac6979dd91c30e03760d9830bd9d39030f8d96e77af1b2326 !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! .. |badge1| image:: https://img.shields.io/badge/maturity-Beta-yellow.png :target: https://odoo-community.org/page/development-status :alt: Beta -.. |badge2| image:: https://img.shields.io/badge/licence-AGPL--3-blue.png +.. |badge2| image:: https://img.shields.io/badge/license-AGPL--3-blue.png :target: http://www.gnu.org/licenses/agpl-3.0-standalone.html :alt: License: AGPL-3 .. |badge3| image:: https://img.shields.io/badge/github-OCA%2Fqueue-lightgray.png?logo=github @@ -58,6 +62,27 @@ Channels can be used to manage sequential jobs and prevent concurrency accesses. To do that you just have to define a channel per cron limited to 1 at time. +Changelog +========= + +17.0.1.1.0 (2025-06-06) +----------------------- + +Features +~~~~~~~~ + +- By default prevent parallel run of the same cron job when run as queue + job. + + When a cron job is run by odoo, the odoo runner will prevent parallel + run of the same cron job. Before this change, this was not the case + when the cron job was run as a queue job. A new option is added to the + cron job when run as a queue job to prevent parallel run. 
This option + is set to True by default. In this way, the behavior is now the same + as when the cron job is run by odoo but you keep the possibility to + disable this restriction when run as a queue job. + (`#612 `__) + Bug Tracker =========== diff --git a/queue_job_cron/__manifest__.py b/queue_job_cron/__manifest__.py index fee66d568a..370d2af101 100644 --- a/queue_job_cron/__manifest__.py +++ b/queue_job_cron/__manifest__.py @@ -3,7 +3,7 @@ { "name": "Scheduled Actions as Queue Jobs", - "version": "17.0.1.0.0", + "version": "17.0.1.1.0", "author": "ACSONE SA/NV,Odoo Community Association (OCA)", "website": "https://github.com/OCA/queue", "license": "AGPL-3", diff --git a/queue_job_cron/readme/HISTORY.md b/queue_job_cron/readme/HISTORY.md new file mode 100644 index 0000000000..da4d77904b --- /dev/null +++ b/queue_job_cron/readme/HISTORY.md @@ -0,0 +1,13 @@ +## 17.0.1.1.0 (2025-06-06) + +### Features + +- By default prevent parallel run of the same cron job when run as queue job. + + When a cron job is run by odoo, the odoo runner will prevent parallel run + of the same cron job. Before this change, this was not the case when the + cron job was run as a queue job. A new option is added to the cron job when + run as a queue job to prevent parallel run. This option is set to True by + default. In this way, the behavior is now the same as when the cron job is run + by odoo but you keep the possibility to disable this restriction when run as + a queue job. ([#612](https://github.com/OCA/queue/issues/612)) diff --git a/queue_job_cron/readme/newsfragments/612.feature b/queue_job_cron/readme/newsfragments/612.feature deleted file mode 100644 index 9c521620a2..0000000000 --- a/queue_job_cron/readme/newsfragments/612.feature +++ /dev/null @@ -1,9 +0,0 @@ -By default prevent parallel run of the same cron job when run as queue job. - -When a cron job is run by odoo, the odoo runner will prevent parallel run -of the same cron job. 
Before this change, this was not the case when the -cron job was run as a queue job. A new option is added to the cron job when -run as a queue job to prevent parallel run. This option is set to True by -default. In this way, the behavior is now the same as when the cron job is run -by odoo but you keep the possibility to disable this restriction when run as -a queue job. diff --git a/queue_job_cron/static/description/index.html b/queue_job_cron/static/description/index.html index 9815d66e41..1446e8240d 100644 --- a/queue_job_cron/static/description/index.html +++ b/queue_job_cron/static/description/index.html @@ -3,15 +3,16 @@ -Scheduled Actions as Queue Jobs +README.rst -
-

Scheduled Actions as Queue Jobs

+
+ + +Odoo Community Association + +
+

Scheduled Actions as Queue Jobs

-

Beta License: AGPL-3 OCA/queue Translate me on Weblate Try me on Runboat

+

Beta License: AGPL-3 OCA/queue Translate me on Weblate Try me on Runboat

This module extends the functionality of queue_job and allows to run an Odoo cron as a queue job.

Table of contents

@@ -376,24 +382,31 @@

Scheduled Actions as Queue Jobs

-

Installation

+

Installation

To install this module, you need to:

  1. Just install it.
-

Usage

+

Usage

To use this module, you need to:

#. Go to a scheduled action, a flag “Run as queue job” will allow you to run the action as a queue job. You will also allowed to select a channel @@ -404,8 +417,30 @@

Usage

accesses. To do that you just have to define a channel per cron limited to 1 at time.

+
+

Changelog

+
+

17.0.1.1.0 (2025-06-06)

+
+

Features

+
    +
  • By default prevent parallel run of the same cron job when run as queue +job.

    +

    When a cron job is run by odoo, the odoo runner will prevent parallel +run of the same cron job. Before this change, this was not the case +when the cron job was run as a queue job. A new option is added to the +cron job when run as a queue job to prevent parallel run. This option +is set to True by default. In this way, the behavior is now the same +as when the cron job is run by odoo but you keep the possibility to +disable this restriction when run as a queue job. +(#612)

    +
  • +
+
+
+
-

Bug Tracker

+

Bug Tracker

Bugs are tracked on GitHub Issues. In case of trouble, please check there if your issue has already been reported. If you spotted it first, help us to smash it by providing a detailed and welcomed @@ -413,24 +448,26 @@

Bug Tracker

Do not contact contributors directly about support or help with technical issues.

-

Credits

+

Credits

-

Authors

+

Authors

  • ACSONE SA/NV
-

Contributors

+

Contributors

-

Maintainers

+

Maintainers

This module is maintained by the OCA.

-Odoo Community Association + +Odoo Community Association +

OCA, or the Odoo Community Association, is a nonprofit organization whose mission is to support the collaborative development of Odoo features and promote its widespread use.

@@ -439,5 +476,6 @@

Maintainers

+
diff --git a/queue_job_cron_jobrunner/README.rst b/queue_job_cron_jobrunner/README.rst index 4a4dd3d7dd..a1fad7da5c 100644 --- a/queue_job_cron_jobrunner/README.rst +++ b/queue_job_cron_jobrunner/README.rst @@ -1,3 +1,7 @@ +.. image:: https://odoo-community.org/readme-banner-image + :target: https://odoo-community.org/get-involved?utm_source=readme + :alt: Odoo Community Association + ======================== Queue Job Cron Jobrunner ======================== @@ -7,13 +11,13 @@ Queue Job Cron Jobrunner !! This file is generated by oca-gen-addon-readme !! !! changes will be overwritten. !! !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! - !! source digest: sha256:a9ff3bc27cff35398d24b6b1dc300a5b8acc9c06d417b1969b1c2e2974e39ec9 + !! source digest: sha256:7564ebfbee734b5b3a2f9f52ef96311086789ca36c6fb021b9f0ef674c35759e !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! .. |badge1| image:: https://img.shields.io/badge/maturity-Alpha-red.png :target: https://odoo-community.org/page/development-status :alt: Alpha -.. |badge2| image:: https://img.shields.io/badge/licence-AGPL--3-blue.png +.. |badge2| image:: https://img.shields.io/badge/license-AGPL--3-blue.png :target: http://www.gnu.org/licenses/agpl-3.0-standalone.html :alt: License: AGPL-3 .. |badge3| image:: https://img.shields.io/badge/github-OCA%2Fqueue-lightgray.png?logo=github @@ -38,9 +42,9 @@ Unlike the regular job runner, where jobs are dispatched to the HttpWorkers, jobs are processed on the CronWorker threads by the job runner crons. This is a design decision because: -- Odoo.sh puts HttpWorkers to sleep when there's no network activity -- HttpWorkers are meant for traffic. Users shouldn't pay the price of - background tasks. +- Odoo.sh puts HttpWorkers to sleep when there's no network activity +- HttpWorkers are meant for traffic. Users shouldn't pay the price of + background tasks. 
For now, it only implements the most basic features of the ``queue_job`` runner, notably no channel capacity nor priorities. Please check the @@ -79,18 +83,18 @@ In Odoo.sh, this is done by default. Parallel execution of jobs can be achieved by leveraging multiple ``ir.cron`` records: -- Make sure you have enough CronWorkers available (Odoo CLI - ``--max-cron-threads``) -- Duplicate the ``queue_job_cron`` cron record as many times as needed, - until you have as much records as cron workers. +- Make sure you have enough CronWorkers available (Odoo CLI + ``--max-cron-threads``) +- Duplicate the ``queue_job_cron`` cron record as many times as needed, + until you have as much records as cron workers. Known issues / Roadmap ====================== -- Support channel capacity and priority. (See ``_acquire_one_job``) -- Gracefully handle CronWorker CPU timeouts. (See ``_job_runner``) -- Commit transaction after job state updated to started. (See - ``_process``) +- Support channel capacity and priority. (See ``_acquire_one_job``) +- Gracefully handle CronWorker CPU timeouts. (See ``_job_runner``) +- Commit transaction after job state updated to started. 
(See + ``_process``) Bug Tracker =========== @@ -113,9 +117,9 @@ Authors Contributors ------------ -- `Camptocamp `__ +- `Camptocamp `__ - - Iván Todorovich + - Iván Todorovich Maintainers ----------- diff --git a/queue_job_cron_jobrunner/__manifest__.py b/queue_job_cron_jobrunner/__manifest__.py index 964bbd0511..ce61da2147 100644 --- a/queue_job_cron_jobrunner/__manifest__.py +++ b/queue_job_cron_jobrunner/__manifest__.py @@ -1,7 +1,7 @@ { "name": "Queue Job Cron Jobrunner", "summary": "Run jobs without a dedicated JobRunner", - "version": "17.0.1.0.0", + "version": "17.0.1.1.0", "development_status": "Alpha", "author": "Camptocamp SA, Odoo Community Association (OCA)", "maintainers": ["ivantodorovich"], diff --git a/queue_job_cron_jobrunner/static/description/index.html b/queue_job_cron_jobrunner/static/description/index.html index a5ef5391ca..1b922802a0 100644 --- a/queue_job_cron_jobrunner/static/description/index.html +++ b/queue_job_cron_jobrunner/static/description/index.html @@ -3,15 +3,16 @@ -Queue Job Cron Jobrunner +README.rst -
-

Queue Job Cron Jobrunner

+
+ + +Odoo Community Association + +
+

Queue Job Cron Jobrunner

-

Alpha License: AGPL-3 OCA/queue Translate me on Weblate Try me on Runboat

+

Alpha License: AGPL-3 OCA/queue Translate me on Weblate Try me on Runboat

This module implements a simple queue.job runner using ir.cron triggers.

It’s meant to be used on environments where the regular job runner can’t @@ -405,7 +411,7 @@

Queue Job Cron Jobrunner

-

Configuration

+

Configuration

Warning

Don’t use this module if you’re already running the regular queue_job runner.

@@ -427,7 +433,7 @@

Configuration

-

Known issues / Roadmap

+

Known issues / Roadmap

  • Support channel capacity and priority. (See _acquire_one_job)
  • Gracefully handle CronWorker CPU timeouts. (See _job_runner)
  • @@ -436,7 +442,7 @@

    Known issues / Roadmap

-

Bug Tracker

+

Bug Tracker

Bugs are tracked on GitHub Issues. In case of trouble, please check there if your issue has already been reported. If you spotted it first, help us to smash it by providing a detailed and welcomed @@ -444,15 +450,15 @@

Bug Tracker

Do not contact contributors directly about support or help with technical issues.

-

Credits

+

Credits

-

Authors

+

Authors

  • Camptocamp SA
-

Maintainers

+

Maintainers

This module is maintained by the OCA.

-Odoo Community Association + +Odoo Community Association +

OCA, or the Odoo Community Association, is a nonprofit organization whose mission is to support the collaborative development of Odoo features and promote its widespread use.

@@ -477,5 +485,6 @@

Maintainers

+
diff --git a/test_queue_job/__manifest__.py b/test_queue_job/__manifest__.py index baa4e9cf06..8202c0cf40 100644 --- a/test_queue_job/__manifest__.py +++ b/test_queue_job/__manifest__.py @@ -3,7 +3,7 @@ { "name": "Queue Job Tests", - "version": "17.0.1.0.1", + "version": "17.0.1.1.0", "author": "Camptocamp,Odoo Community Association (OCA)", "license": "LGPL-3", "category": "Generic Modules", From be56299d71c152877c0d889081c78d1143158496 Mon Sep 17 00:00:00 2001 From: Weblate Date: Fri, 6 Jun 2025 19:43:00 +0000 Subject: [PATCH 18/19] Update translation files Updated by "Update PO files to match POT (msgmerge)" hook in Weblate. Translation: queue-17.0/queue-17.0-queue_job Translate-URL: https://translation.odoo-community.org/projects/queue-17-0/queue-17-0-queue_job/ --- queue_job/i18n/de.po | 26 ++++++++++++++++++++++++-- queue_job/i18n/es.po | 39 ++++++++++++++++++++++++++++++++++----- queue_job/i18n/it.po | 39 ++++++++++++++++++++++++++++++++++----- queue_job/i18n/zh_CN.po | 37 +++++++++++++++++++++++++++++++++---- 4 files changed, 125 insertions(+), 16 deletions(-) diff --git a/queue_job/i18n/de.po b/queue_job/i18n/de.po index db0b269e36..f575a55297 100644 --- a/queue_job/i18n/de.po +++ b/queue_job/i18n/de.po @@ -101,6 +101,7 @@ msgstr "" #. module: queue_job #: model:ir.model.fields.selection,name:queue_job.selection__queue_job__state__cancelled +#: model_terms:ir.ui.view,arch_db:queue_job.view_queue_job_search msgid "Cancelled" msgstr "" @@ -176,6 +177,11 @@ msgstr "Erstellt am" msgid "Created by" msgstr "Erstellt von" +#. module: queue_job +#: model_terms:ir.ui.view,arch_db:queue_job.view_queue_job_search +msgid "Created date" +msgstr "" + #. module: queue_job #: model:ir.model.fields,field_description:queue_job.field_queue_job_channel__create_date #: model:ir.model.fields,field_description:queue_job.field_queue_job_lock__create_date @@ -484,6 +490,21 @@ msgstr "" msgid "Kwargs" msgstr "Kwargs" +#. 
module: queue_job +#: model_terms:ir.ui.view,arch_db:queue_job.view_queue_job_search +msgid "Last 24 hours" +msgstr "" + +#. module: queue_job +#: model_terms:ir.ui.view,arch_db:queue_job.view_queue_job_search +msgid "Last 30 days" +msgstr "" + +#. module: queue_job +#: model_terms:ir.ui.view,arch_db:queue_job.view_queue_job_search +msgid "Last 7 days" +msgstr "" + #. module: queue_job #: model:ir.model.fields,field_description:queue_job.field_queue_job_channel__write_uid #: model:ir.model.fields,field_description:queue_job.field_queue_job_lock__write_uid @@ -907,8 +928,9 @@ msgstr "" #, python-format msgid "" "Unexpected format of Retry Pattern for {}.\n" -"Example of valid format:\n" -"{{1: 300, 5: 600, 10: 1200, 15: 3000}}" +"Example of valid formats:\n" +"{{1: 300, 5: 600, 10: 1200, 15: 3000}}\n" +"{{1: (1, 10), 5: (11, 20), 10: (21, 30), 15: (100, 300)}}" msgstr "" #. module: queue_job diff --git a/queue_job/i18n/es.po b/queue_job/i18n/es.po index aaa626a4d8..5d599f54c9 100644 --- a/queue_job/i18n/es.po +++ b/queue_job/i18n/es.po @@ -104,6 +104,7 @@ msgstr "Cancelar trabajos" #. module: queue_job #: model:ir.model.fields.selection,name:queue_job.selection__queue_job__state__cancelled +#: model_terms:ir.ui.view,arch_db:queue_job.view_queue_job_search msgid "Cancelled" msgstr "Cancelada" @@ -179,6 +180,11 @@ msgstr "Fecha de creación" msgid "Created by" msgstr "Creado por" +#. module: queue_job +#: model_terms:ir.ui.view,arch_db:queue_job.view_queue_job_search +msgid "Created date" +msgstr "" + #. module: queue_job #: model:ir.model.fields,field_description:queue_job.field_queue_job_channel__create_date #: model:ir.model.fields,field_description:queue_job.field_queue_job_lock__create_date @@ -485,6 +491,21 @@ msgstr "Trabajos para gráfico %s" msgid "Kwargs" msgstr "Kwargs" +#. module: queue_job +#: model_terms:ir.ui.view,arch_db:queue_job.view_queue_job_search +msgid "Last 24 hours" +msgstr "" + +#. 
module: queue_job +#: model_terms:ir.ui.view,arch_db:queue_job.view_queue_job_search +msgid "Last 30 days" +msgstr "" + +#. module: queue_job +#: model_terms:ir.ui.view,arch_db:queue_job.view_queue_job_search +msgid "Last 7 days" +msgstr "" + #. module: queue_job #: model:ir.model.fields,field_description:queue_job.field_queue_job_channel__write_uid #: model:ir.model.fields,field_description:queue_job.field_queue_job_lock__write_uid @@ -923,12 +944,10 @@ msgstr "" #, python-format msgid "" "Unexpected format of Retry Pattern for {}.\n" -"Example of valid format:\n" -"{{1: 300, 5: 600, 10: 1200, 15: 3000}}" +"Example of valid formats:\n" +"{{1: 300, 5: 600, 10: 1200, 15: 3000}}\n" +"{{1: (1, 10), 5: (11, 20), 10: (21, 30), 15: (100, 300)}}" msgstr "" -"Formato inesperado en el patrón de reintentos de {}.\n" -"Ejemplo de un formato válido:\n" -"{{1: 300, 5: 600, 10: 1200, 15: 3000}}" #. module: queue_job #: model:ir.model.fields,field_description:queue_job.field_queue_job__user_id @@ -951,6 +970,16 @@ msgstr "Asistente para volver a poner en cola una selección de trabajos" msgid "Worker Pid" msgstr "Pid del trabajador" +#, python-format +#~ msgid "" +#~ "Unexpected format of Retry Pattern for {}.\n" +#~ "Example of valid format:\n" +#~ "{{1: 300, 5: 600, 10: 1200, 15: 3000}}" +#~ msgstr "" +#~ "Formato inesperado en el patrón de reintentos de {}.\n" +#~ "Ejemplo de un formato válido:\n" +#~ "{{1: 300, 5: 600, 10: 1200, 15: 3000}}" + #, python-format #~ msgid "If both parameters are 0, ALL jobs will be requeued!" #~ msgstr "" diff --git a/queue_job/i18n/it.po b/queue_job/i18n/it.po index 2994eb9681..0886b8e240 100644 --- a/queue_job/i18n/it.po +++ b/queue_job/i18n/it.po @@ -104,6 +104,7 @@ msgstr "Annulla lavori" #. 
module: queue_job #: model:ir.model.fields.selection,name:queue_job.selection__queue_job__state__cancelled +#: model_terms:ir.ui.view,arch_db:queue_job.view_queue_job_search msgid "Cancelled" msgstr "Annullata" @@ -179,6 +180,11 @@ msgstr "Data creazione" msgid "Created by" msgstr "Creato da" +#. module: queue_job +#: model_terms:ir.ui.view,arch_db:queue_job.view_queue_job_search +msgid "Created date" +msgstr "" + #. module: queue_job #: model:ir.model.fields,field_description:queue_job.field_queue_job_channel__create_date #: model:ir.model.fields,field_description:queue_job.field_queue_job_lock__create_date @@ -484,6 +490,21 @@ msgstr "Lavori per grafico %s" msgid "Kwargs" msgstr "Kwargs" +#. module: queue_job +#: model_terms:ir.ui.view,arch_db:queue_job.view_queue_job_search +msgid "Last 24 hours" +msgstr "" + +#. module: queue_job +#: model_terms:ir.ui.view,arch_db:queue_job.view_queue_job_search +msgid "Last 30 days" +msgstr "" + +#. module: queue_job +#: model_terms:ir.ui.view,arch_db:queue_job.view_queue_job_search +msgid "Last 7 days" +msgstr "" + #. module: queue_job #: model:ir.model.fields,field_description:queue_job.field_queue_job_channel__write_uid #: model:ir.model.fields,field_description:queue_job.field_queue_job_lock__write_uid @@ -920,12 +941,10 @@ msgstr "" #, python-format msgid "" "Unexpected format of Retry Pattern for {}.\n" -"Example of valid format:\n" -"{{1: 300, 5: 600, 10: 1200, 15: 3000}}" +"Example of valid formats:\n" +"{{1: 300, 5: 600, 10: 1200, 15: 3000}}\n" +"{{1: (1, 10), 5: (11, 20), 10: (21, 30), 15: (100, 300)}}" msgstr "" -"Formato inaspettato di schema tentativo per {}.\n" -"Esempio di formato valido:\n" -"{{1: 300, 5: 600, 10: 1200, 15: 3000}}" #. 
module: queue_job #: model:ir.model.fields,field_description:queue_job.field_queue_job__user_id @@ -948,6 +967,16 @@ msgstr "Procedura guidata per riaccodare una selezione di lavori" msgid "Worker Pid" msgstr "PID worker" +#, python-format +#~ msgid "" +#~ "Unexpected format of Retry Pattern for {}.\n" +#~ "Example of valid format:\n" +#~ "{{1: 300, 5: 600, 10: 1200, 15: 3000}}" +#~ msgstr "" +#~ "Formato inaspettato di schema tentativo per {}.\n" +#~ "Esempio di formato valido:\n" +#~ "{{1: 300, 5: 600, 10: 1200, 15: 3000}}" + #, python-format #~ msgid "If both parameters are 0, ALL jobs will be requeued!" #~ msgstr "Se entrambi i parametri sono 0, tutti i lavori verranno riaccodati!" diff --git a/queue_job/i18n/zh_CN.po b/queue_job/i18n/zh_CN.po index 897aa1e4ea..804ca86780 100644 --- a/queue_job/i18n/zh_CN.po +++ b/queue_job/i18n/zh_CN.po @@ -104,6 +104,7 @@ msgstr "取消作业" #. module: queue_job #: model:ir.model.fields.selection,name:queue_job.selection__queue_job__state__cancelled +#: model_terms:ir.ui.view,arch_db:queue_job.view_queue_job_search msgid "Cancelled" msgstr "已取消" @@ -179,6 +180,11 @@ msgstr "创建日期" msgid "Created by" msgstr "创建者" +#. module: queue_job +#: model_terms:ir.ui.view,arch_db:queue_job.view_queue_job_search +msgid "Created date" +msgstr "" + #. module: queue_job #: model:ir.model.fields,field_description:queue_job.field_queue_job_channel__create_date #: model:ir.model.fields,field_description:queue_job.field_queue_job_lock__create_date @@ -484,6 +490,21 @@ msgstr "图表 %s 的作业" msgid "Kwargs" msgstr "关键字参数" +#. module: queue_job +#: model_terms:ir.ui.view,arch_db:queue_job.view_queue_job_search +msgid "Last 24 hours" +msgstr "" + +#. module: queue_job +#: model_terms:ir.ui.view,arch_db:queue_job.view_queue_job_search +msgid "Last 30 days" +msgstr "" + +#. module: queue_job +#: model_terms:ir.ui.view,arch_db:queue_job.view_queue_job_search +msgid "Last 7 days" +msgstr "" + #. 
module: queue_job #: model:ir.model.fields,field_description:queue_job.field_queue_job_channel__write_uid #: model:ir.model.fields,field_description:queue_job.field_queue_job_lock__write_uid @@ -914,11 +935,10 @@ msgstr "" #, python-format msgid "" "Unexpected format of Retry Pattern for {}.\n" -"Example of valid format:\n" -"{{1: 300, 5: 600, 10: 1200, 15: 3000}}" +"Example of valid formats:\n" +"{{1: 300, 5: 600, 10: 1200, 15: 3000}}\n" +"{{1: (1, 10), 5: (11, 20), 10: (21, 30), 15: (100, 300)}}" msgstr "" -"对于 {},重试模式的格式不符合预期。 有效格式的示例:\n" -"{{1: 300, 5: 600, 10: 1200, 15: 3000}}" #. module: queue_job #: model:ir.model.fields,field_description:queue_job.field_queue_job__user_id @@ -941,6 +961,15 @@ msgstr "重新排队向导所选的作业" msgid "Worker Pid" msgstr "工作进程PID" +#, python-format +#~ msgid "" +#~ "Unexpected format of Retry Pattern for {}.\n" +#~ "Example of valid format:\n" +#~ "{{1: 300, 5: 600, 10: 1200, 15: 3000}}" +#~ msgstr "" +#~ "对于 {},重试模式的格式不符合预期。 有效格式的示例:\n" +#~ "{{1: 300, 5: 600, 10: 1200, 15: 3000}}" + #, python-format #~ msgid "If both parameters are 0, ALL jobs will be requeued!" #~ msgstr "如果两个参数都为0,所有任务都将被重新排队!" From 0af91875b0ee3eb2ff6617befefb8d902d42a641 Mon Sep 17 00:00:00 2001 From: Weblate Date: Fri, 6 Jun 2025 19:43:01 +0000 Subject: [PATCH 19/19] Update translation files Updated by "Update PO files to match POT (msgmerge)" hook in Weblate. 
Translation: queue-17.0/queue-17.0-queue_job_cron Translate-URL: https://translation.odoo-community.org/projects/queue-17-0/queue-17-0-queue_job_cron/ --- queue_job_cron/i18n/de.po | 13 +++++++++++++ queue_job_cron/i18n/es.po | 13 +++++++++++++ queue_job_cron/i18n/it.po | 13 +++++++++++++ queue_job_cron/i18n/zh_CN.po | 13 +++++++++++++ 4 files changed, 52 insertions(+) diff --git a/queue_job_cron/i18n/de.po b/queue_job_cron/i18n/de.po index 4240c6e702..45df6d9aa8 100644 --- a/queue_job_cron/i18n/de.po +++ b/queue_job_cron/i18n/de.po @@ -16,11 +16,24 @@ msgstr "" "Plural-Forms: nplurals=2; plural=n != 1;\n" "X-Generator: Weblate 3.7.1\n" +#. module: queue_job_cron +#: model:ir.model.fields,help:queue_job_cron.field_ir_cron__no_parallel_queue_job_run +msgid "" +"Avoid parallel run. If the cron job is already running, the new one will be " +"skipped. By default, odoo never runs the same cron job in parallel. This " +"option is therefore set to True by default when job is run as a queue job." +msgstr "" + #. module: queue_job_cron #: model:ir.model.fields,field_description:queue_job_cron.field_ir_cron__channel_id msgid "Channel" msgstr "Kanal" +#. module: queue_job_cron +#: model:ir.model.fields,field_description:queue_job_cron.field_ir_cron__no_parallel_queue_job_run +msgid "No Parallel Queue Job Run" +msgstr "" + #. module: queue_job_cron #: model:ir.model.fields,field_description:queue_job_cron.field_ir_cron__run_as_queue_job msgid "Run As Queue Job" diff --git a/queue_job_cron/i18n/es.po b/queue_job_cron/i18n/es.po index 7d7f06c148..74366d89de 100644 --- a/queue_job_cron/i18n/es.po +++ b/queue_job_cron/i18n/es.po @@ -16,11 +16,24 @@ msgstr "" "Plural-Forms: nplurals=2; plural=n != 1;\n" "X-Generator: Weblate 4.17\n" +#. module: queue_job_cron +#: model:ir.model.fields,help:queue_job_cron.field_ir_cron__no_parallel_queue_job_run +msgid "" +"Avoid parallel run. If the cron job is already running, the new one will be " +"skipped. 
By default, odoo never runs the same cron job in parallel. This " +"option is therefore set to True by default when job is run as a queue job." +msgstr "" + #. module: queue_job_cron #: model:ir.model.fields,field_description:queue_job_cron.field_ir_cron__channel_id msgid "Channel" msgstr "Canal" +#. module: queue_job_cron +#: model:ir.model.fields,field_description:queue_job_cron.field_ir_cron__no_parallel_queue_job_run +msgid "No Parallel Queue Job Run" +msgstr "" + #. module: queue_job_cron #: model:ir.model.fields,field_description:queue_job_cron.field_ir_cron__run_as_queue_job msgid "Run As Queue Job" diff --git a/queue_job_cron/i18n/it.po b/queue_job_cron/i18n/it.po index ab9659c1dc..f5a06bb7d2 100644 --- a/queue_job_cron/i18n/it.po +++ b/queue_job_cron/i18n/it.po @@ -16,11 +16,24 @@ msgstr "" "Plural-Forms: nplurals=2; plural=n != 1;\n" "X-Generator: Weblate 4.17\n" +#. module: queue_job_cron +#: model:ir.model.fields,help:queue_job_cron.field_ir_cron__no_parallel_queue_job_run +msgid "" +"Avoid parallel run. If the cron job is already running, the new one will be " +"skipped. By default, odoo never runs the same cron job in parallel. This " +"option is therefore set to True by default when job is run as a queue job." +msgstr "" + #. module: queue_job_cron #: model:ir.model.fields,field_description:queue_job_cron.field_ir_cron__channel_id msgid "Channel" msgstr "Canale" +#. module: queue_job_cron +#: model:ir.model.fields,field_description:queue_job_cron.field_ir_cron__no_parallel_queue_job_run +msgid "No Parallel Queue Job Run" +msgstr "" + #. module: queue_job_cron #: model:ir.model.fields,field_description:queue_job_cron.field_ir_cron__run_as_queue_job msgid "Run As Queue Job" diff --git a/queue_job_cron/i18n/zh_CN.po b/queue_job_cron/i18n/zh_CN.po index 166d51606d..a764f6df3c 100644 --- a/queue_job_cron/i18n/zh_CN.po +++ b/queue_job_cron/i18n/zh_CN.po @@ -16,11 +16,24 @@ msgstr "" "Plural-Forms: nplurals=1; plural=0;\n" "X-Generator: Weblate 3.7.1\n" +#. 
module: queue_job_cron +#: model:ir.model.fields,help:queue_job_cron.field_ir_cron__no_parallel_queue_job_run +msgid "" +"Avoid parallel run. If the cron job is already running, the new one will be " +"skipped. By default, odoo never runs the same cron job in parallel. This " +"option is therefore set to True by default when job is run as a queue job." +msgstr "" + #. module: queue_job_cron #: model:ir.model.fields,field_description:queue_job_cron.field_ir_cron__channel_id msgid "Channel" msgstr "频道" +#. module: queue_job_cron +#: model:ir.model.fields,field_description:queue_job_cron.field_ir_cron__no_parallel_queue_job_run +msgid "No Parallel Queue Job Run" +msgstr "" + #. module: queue_job_cron #: model:ir.model.fields,field_description:queue_job_cron.field_ir_cron__run_as_queue_job msgid "Run As Queue Job"