"""Core execution logic shared between normal and subprocess execution modes."""

import logging
import time

_active_worker = None


def create_worker(server_instance):
    """Create worker backend. Returns NativeWorker or SubprocessWorker."""
    global _active_worker
    from comfy.cli_args import args

    server = WorkerServer(server_instance)

    if args.use_subprocess_workers:
        from comfy.worker_process import SubprocessWorker
        worker = SubprocessWorker(server, timeout=args.subprocess_timeout)
    else:
        from comfy.worker_native import NativeWorker
        worker = NativeWorker(server)

    _active_worker = worker
    return worker
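
# Usage sketch (illustrative; `prompt_server` is a stand-in for the caller's
# server object, not a name defined here): the backend is chosen once at
# startup, driven by args.use_subprocess_workers.
#
#     worker = create_worker(prompt_server)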


async def init_execution_environment():
    """Load nodes and custom nodes. Returns number of node types loaded."""
    import nodes
    from comfy.cli_args import args

    await nodes.init_extra_nodes(
        init_custom_nodes=(not args.disable_all_custom_nodes) or len(args.whitelist_custom_nodes) > 0,
        init_api_nodes=not args.disable_api_nodes
    )
    return len(nodes.NODE_CLASS_MAPPINGS)
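
# Startup sketch (illustrative): the coroutine is awaited once before the
# worker loop starts; the return value is only useful for logging.
#
#     count = await init_execution_environment()
#     logging.info(f"{count} node types loaded")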


def setup_progress_hook(server_instance, interrupt_checker):
    """Set up global progress hook. interrupt_checker must raise on interrupt."""
    import comfy.utils
    from comfy_execution.progress import get_progress_state
    from comfy_execution.utils import get_executing_context

    def hook(value, total, preview_image, prompt_id=None, node_id=None):
        ctx = get_executing_context()
        if ctx:
            prompt_id = prompt_id or ctx.prompt_id
            node_id = node_id or ctx.node_id

        interrupt_checker()

        prompt_id = prompt_id or server_instance.last_prompt_id
        node_id = node_id or server_instance.last_node_id

        get_progress_state().update_progress(node_id, value, total, preview_image)
        server_instance.send_sync("progress", {"value": value, "max": total, "prompt_id": prompt_id, "node": node_id}, server_instance.client_id)

    comfy.utils.set_progress_bar_global_hook(hook)
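
# Sketch of a minimal interrupt_checker (illustrative): any callable works as
# long as it raises while an interrupt is pending. `interrupt_requested` and
# `ExecutionInterrupted` are hypothetical names, not part of this module.
#
#     def interrupt_checker():
#         if interrupt_requested():
#             raise ExecutionInterrupted()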


class WorkerServer:
    """Protocol boundary: client_id, last_node_id, last_prompt_id, sockets_metadata, send_sync(), queue_updated()"""

    _WRITABLE = {'client_id', 'last_node_id', 'last_prompt_id'}

    def __init__(self, server):
        object.__setattr__(self, '_server', server)

    def __setattr__(self, name, value):
        if name in self._WRITABLE:
            setattr(self._server, name, value)
        else:
            raise AttributeError(f"WorkerServer does not accept attribute '{name}'")

    @property
    def client_id(self):
        return self._server.client_id

    @property
    def last_node_id(self):
        return self._server.last_node_id

    @property
    def last_prompt_id(self):
        return self._server.last_prompt_id

    @property
    def sockets_metadata(self):
        return self._server.sockets_metadata

    def send_sync(self, event, data, sid=None):
        self._server.send_sync(event, data, sid or self.client_id)

    def queue_updated(self):
        self._server.queue_updated()
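
# Illustrative sketch of the boundary: reads and the three writable fields are
# forwarded to the wrapped server; everything else is rejected.
#
#     proxy = WorkerServer(server)
#     proxy.client_id = "abc123"   # forwarded
#     proxy.loop = None            # raises AttributeError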


def interrupt_processing(value=True):
    # Guard against calls made before create_worker() has run.
    if _active_worker is not None:
        _active_worker.interrupt(value)


def _strip_sensitive(prompt):
    # Drop the sensitive field at index 5 from a queue item before it is
    # stored in history.
    return prompt[:5] + prompt[6:]
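
# Example (illustrative): given a 6-element queue item, the element at
# index 5 is dropped and the rest is preserved.
#
#     item = (0, "pid", {}, {}, [], {"auth_token": "..."})
#     _strip_sensitive(item)  # -> (0, "pid", {}, {}, [])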


def prompt_worker(q, worker):
    """Main prompt execution loop."""
    import execution

    server = worker.server_instance

    while True:
        # get() returns None on timeout, so flag handling below still runs
        # while the queue is idle.
        queue_item = q.get(timeout=worker.get_gc_timeout())
        if queue_item is not None:
            item, item_id = queue_item
            start_time = time.perf_counter()
            # Queue item layout: item[1] is the prompt id, item[2] the prompt,
            # item[3] and item[5] extra data, item[4] the outputs to execute.
            prompt_id = item[1]
            server.last_prompt_id = prompt_id

            extra_data = {**item[3], **item[5]}

            result = worker.execute_prompt(item[2], prompt_id, extra_data, item[4], server=server)
            worker.mark_needs_gc()

            q.task_done(
                item_id,
                result['history_result'],
                status=execution.PromptQueue.ExecutionStatus(
                    status_str='success' if result['success'] else 'error',
                    completed=result['success'],
                    messages=result['status_messages']
                ),
                process_item=_strip_sensitive
            )

            if server.client_id is not None:
                server.send_sync("executing", {"node": None, "prompt_id": prompt_id}, server.client_id)

            elapsed = time.perf_counter() - start_time
            if elapsed > 600:
                # Switch to HH:MM:SS formatting for runs longer than ten minutes.
                logging.info(f"Prompt executed in {time.strftime('%H:%M:%S', time.gmtime(elapsed))}")
            else:
                logging.info(f"Prompt executed in {elapsed:.2f} seconds")

        worker.handle_flags(q.get_flags())
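
# Usage sketch (illustrative; `q` and `worker` are the caller's queue and the
# object returned by create_worker): the loop blocks on the queue, so it is
# normally run on a dedicated thread.
#
#     threading.Thread(target=prompt_worker, args=(q, worker), daemon=True).start()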