
Commit c1bdc3e

Merge pull request #1768 from roboflow/fix/usage-collector-model-id
Usage collector model_id
2 parents (c2ce156 + 230ec7a), commit c1bdc3e

6 files changed: +25 -2 lines changed


inference/core/version.py

Lines changed: 1 addition & 1 deletion
@@ -1,4 +1,4 @@
-__version__ = "0.62.2"
+__version__ = "0.62.3"
 
 
 if __name__ == "__main__":

inference/models/clip/clip_model.py

Lines changed: 0 additions & 1 deletion
@@ -24,7 +24,6 @@
     CLIP_MODEL_ID,
     ONNXRUNTIME_EXECUTION_PROVIDERS,
     REQUIRED_ONNX_PROVIDERS,
-    TENSORRT_CACHE_PATH,
 )
 from inference.core.exceptions import OnnxProviderNotAvailable
 from inference.core.models.roboflow import OnnxRoboflowCoreModel

inference/models/sam2/segment_anything2.py

Lines changed: 2 additions & 0 deletions
@@ -42,6 +42,7 @@
 from inference.core.utils.image_utils import load_image_rgb
 from inference.core.utils.postprocess import masks2multipoly
 from inference.core.utils.torchscript_guard import _temporarily_disable_torch_jit_script
+from inference.usage_tracking.collector import usage_collector
 
 if DEVICE is None:
     DEVICE = "cuda:0" if torch.cuda.is_available() else "cpu"
@@ -176,6 +177,7 @@ def embed_image(
         safe_remove_from_dict(values=self.image_size_cache, key=cache_key)
         return embedding_dict, img_in.shape[:2], image_id
 
+    @usage_collector("model")
     def infer_from_request(self, request: Sam2InferenceRequest):
         """Performs inference based on the request type.
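As an aside on the change above: @usage_collector("model") only sees the wrapped call's arguments, so it has to bind them to parameter names and then look for identifying fields such as model_id. A minimal sketch of that decorator pattern, assuming hypothetical names (usage_collector_sketch, record_usage) rather than the actual roboflow/inference collector:

import functools
import inspect
from typing import Any, Callable, Dict, Optional


def record_usage(category: str, resource_id: Optional[str]) -> None:
    # Illustrative sink; the real collector batches and ships usage payloads.
    print(f"usage: category={category} resource={resource_id}")


def usage_collector_sketch(category: str) -> Callable:
    # Decorator factory: attribute each call of the wrapped method to a resource.
    def decorator(func: Callable) -> Callable:
        @functools.wraps(func)
        def wrapper(*args: Any, **kwargs: Any):
            # Bind positional args to parameter names so helpers like
            # get_request_resource_id_from_kwargs (changed below) can look up
            # "self", "request", etc. by name.
            bound = inspect.signature(func).bind(*args, **kwargs)
            bound.apply_defaults()
            func_kwargs: Dict[str, Any] = dict(bound.arguments)
            request = func_kwargs.get("request")
            resource_id = getattr(request, "model_id", None)
            record_usage(category, str(resource_id) if resource_id else None)
            return func(*args, **kwargs)

        return wrapper

    return decorator

With model_id exposed on the request (or on self), the decorated infer_from_request calls can be attributed to a concrete model instead of falling through with no resource id.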

inference/models/sam3/segment_anything3.py

Lines changed: 2 additions & 0 deletions
@@ -65,6 +65,7 @@
 )
 from inference.core.utils.image_utils import load_image_rgb
 from inference.core.utils.postprocess import masks2multipoly
+from inference.usage_tracking.collector import usage_collector
 
 
 def _to_numpy_masks(masks_any) -> np.ndarray:
@@ -355,6 +356,7 @@ def preproc_image(self, image: InferenceRequestImage) -> np.ndarray:
         np_image = load_image_rgb(image)
         return np_image
 
+    @usage_collector("model")
     def infer_from_request(self, request: Sam3InferenceRequest):
         # with self.sam3_lock:
         t1 = perf_counter()

inference/models/sam3/visual_segmentation.py

Lines changed: 2 additions & 0 deletions
@@ -51,6 +51,7 @@
 from inference.core.utils.image_utils import load_image_rgb
 from inference.core.utils.postprocess import masks2multipoly
 from inference.core.utils.torchscript_guard import _temporarily_disable_torch_jit_script
+from inference.usage_tracking.collector import usage_collector
 
 # from sam3.model.sam1_task_predictor import SAM3InteractiveImagePredictor
 # from sam3.sam3_video_model_builder import build_sam3_tracking_predictor
@@ -186,6 +187,7 @@ def embed_image(
         safe_remove_from_dict(values=self.image_size_cache, key=cache_key)
         return embedding_dict, img_in.shape[:2], image_id
 
+    @usage_collector("model")
     def infer_from_request(self, request: Sam2InferenceRequest):
         """Performs inference based on the request type.

inference/usage_tracking/decorator_helpers.py

Lines changed: 18 additions & 0 deletions
@@ -107,6 +107,12 @@ def get_request_resource_id_from_kwargs(func_kwargs: Dict[str, Any]) -> Optional
             if version_id:
                 return f"{dataset_id}/{version_id}"
             return str(dataset_id)
+        if hasattr(inference_request, "model_id"):
+            return str(inference_request.model_id)
+    if "request" in func_kwargs:
+        request = func_kwargs["request"]
+        if hasattr(request, "model_id"):
+            return str(request.model_id)
     if "dataset_id" in func_kwargs and "version_id" in func_kwargs:
         dataset_id = func_kwargs["dataset_id"]
         version_id = func_kwargs["version_id"]
@@ -119,6 +125,18 @@ def get_request_resource_id_from_kwargs(func_kwargs: Dict[str, Any]) -> Optional
         workflow_request = func_kwargs["workflow_request"]
         if hasattr(workflow_request, "workflow_id"):
            return str(workflow_request.workflow_id)
+    if "self" in func_kwargs:
+        _self = func_kwargs["self"]
+        if hasattr(_self, "dataset_id") and hasattr(_self, "version_id"):
+            dataset_id = _self.dataset_id
+            version_id = _self.version_id
+            if version_id:
+                return f"{dataset_id}/{version_id}"
+            return str(dataset_id)
+        if hasattr(_self, "model_id"):
+            return str(_self.model_id)
+        if hasattr(_self, "endpoint"):
+            return str(_self.endpoint)
     return None
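Read together, the two hunks above extend the resource-id fallback chain: model_id on the inference request, a bare request kwarg that carries model_id, and finally dataset_id/version_id, model_id, or endpoint attributes on the bound self. A condensed sketch that approximates (but is not) the helper's fallback order, with a made-up model id for the example:

from typing import Any, Dict, Optional


def resolve_resource_id(func_kwargs: Dict[str, Any]) -> Optional[str]:
    # 1) model_id carried by the request object (e.g. SAM2/SAM3 requests).
    request = func_kwargs.get("inference_request") or func_kwargs.get("request")
    if request is not None and getattr(request, "model_id", None):
        return str(request.model_id)
    # 2) attributes of the model instance ("self") the decorated method is bound to.
    _self = func_kwargs.get("self")
    if _self is not None:
        if hasattr(_self, "dataset_id") and hasattr(_self, "version_id"):
            version_id = _self.version_id
            return f"{_self.dataset_id}/{version_id}" if version_id else str(_self.dataset_id)
        if getattr(_self, "model_id", None):
            return str(_self.model_id)
        if getattr(_self, "endpoint", None):
            return str(_self.endpoint)
    return None


class _Request:
    model_id = "my-project/3"  # hypothetical model id


print(resolve_resource_id({"self": object(), "request": _Request()}))  # my-project/3

For the decorated SAM2/SAM3 infer_from_request calls, either the request's model_id or the model instance's own attributes can now supply the resource id.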

0 commit comments
