diff --git a/distribution/run.yaml b/distribution/run.yaml
index f266d6b..d49381c 100644
--- a/distribution/run.yaml
+++ b/distribution/run.yaml
@@ -93,5 +93,3 @@ registered_resources:
   tool_groups: []
 server:
   port: 8321
-telemetry:
-  enabled: true
diff --git a/src/llama_stack_provider_ragas/compat.py b/src/llama_stack_provider_ragas/compat.py
new file mode 100644
index 0000000..86e1ccf
--- /dev/null
+++ b/src/llama_stack_provider_ragas/compat.py
@@ -0,0 +1,110 @@
+"""
+Compatibility layer for llama_stack imports.
+
+This module provides backward compatibility by attempting to import from
+the legacy llama_stack package first, then falling back to the newer
+llama_stack_api package structure.
+"""
+
+# Provider datatypes and API definitions
+try:  # Legacy llama_stack layout
+    from llama_stack.apis.datatypes import Api
+    from llama_stack.providers.datatypes import (
+        BenchmarksProtocolPrivate,
+        InlineProviderSpec,
+        ProviderSpec,
+        RemoteProviderSpec,
+    )
+except (ImportError, ModuleNotFoundError):
+    # Newer llama_stack_api layout
+    from llama_stack_api import (
+        Api,
+        BenchmarksProtocolPrivate,
+        InlineProviderSpec,
+        ProviderSpec,
+        RemoteProviderSpec,
+    )
+
+# Benchmarks
+try:
+    from llama_stack.apis.benchmarks import Benchmark
+except (ImportError, ModuleNotFoundError):
+    from llama_stack_api import Benchmark
+
+# Common job types
+try:
+    from llama_stack.apis.common.job_types import Job, JobStatus
+except (ImportError, ModuleNotFoundError):
+    from llama_stack_api import Job, JobStatus
+
+# DatasetIO
+try:
+    from llama_stack.apis.datasetio import DatasetIO
+except (ImportError, ModuleNotFoundError):
+    from llama_stack_api import DatasetIO
+
+# Eval
+try:
+    from llama_stack.apis.eval import BenchmarkConfig, Eval, EvaluateResponse
+except (ImportError, ModuleNotFoundError):
+    from llama_stack_api import BenchmarkConfig, Eval, EvaluateResponse
+
+# Inference
+try:
+    from llama_stack.apis.inference import (
+        Inference,
+        OpenAICompletionRequestWithExtraBody,
+        OpenAIEmbeddingsRequestWithExtraBody,
+        SamplingParams,
+        TopPSamplingStrategy,
+    )
+except (ImportError, ModuleNotFoundError):
+    from llama_stack_api import (
+        Inference,
+        OpenAICompletionRequestWithExtraBody,
+        OpenAIEmbeddingsRequestWithExtraBody,
+        SamplingParams,
+        TopPSamplingStrategy,
+    )
+
+# Scoring
+try:
+    from llama_stack.apis.scoring import ScoringResult
+except (ImportError, ModuleNotFoundError):
+    from llama_stack_api import ScoringResult
+
+# Schema utils
+try:
+    from llama_stack.schema_utils import json_schema_type
+except (ImportError, ModuleNotFoundError):
+    from llama_stack_api import json_schema_type
+
+__all__ = [
+    # API and Provider types
+    "Api",
+    "BenchmarksProtocolPrivate",
+    "InlineProviderSpec",
+    "ProviderSpec",
+    "RemoteProviderSpec",
+    # Benchmarks
+    "Benchmark",
+    # Job types
+    "Job",
+    "JobStatus",
+    # DatasetIO
+    "DatasetIO",
+    # Eval
+    "BenchmarkConfig",
+    "Eval",
+    "EvaluateResponse",
+    # Inference
+    "Inference",
+    "OpenAICompletionRequestWithExtraBody",
+    "OpenAIEmbeddingsRequestWithExtraBody",
+    "SamplingParams",
+    "TopPSamplingStrategy",
+    # Scoring
+    "ScoringResult",
+    # Schema utils
+    "json_schema_type",
+]
diff --git a/src/llama_stack_provider_ragas/config.py b/src/llama_stack_provider_ragas/config.py
index a7b127b..2778940 100644
--- a/src/llama_stack_provider_ragas/config.py
+++ b/src/llama_stack_provider_ragas/config.py
@@ -1,6 +1,7 @@
-from llama_stack.schema_utils import json_schema_type
 from pydantic import BaseModel, Field, SecretStr
 
+from .compat import json_schema_type
+
 
 class RagasConfig(BaseModel):
     """Additional configuration parameters for Ragas evaluation."""
diff --git a/src/llama_stack_provider_ragas/inline/__init__.py b/src/llama_stack_provider_ragas/inline/__init__.py
index 778a3f1..df7b23c 100644
--- a/src/llama_stack_provider_ragas/inline/__init__.py
+++ b/src/llama_stack_provider_ragas/inline/__init__.py
@@ -1,7 +1,6 @@
 from typing import Any
 
-from llama_stack.apis.datatypes import Api
-
+from ..compat import Api
 from ..config import RagasProviderInlineConfig
 from .ragas_inline_eval import RagasEvaluatorInline
diff --git a/src/llama_stack_provider_ragas/inline/provider.py b/src/llama_stack_provider_ragas/inline/provider.py
index 3215a4b..3bd11f9 100644
--- a/src/llama_stack_provider_ragas/inline/provider.py
+++ b/src/llama_stack_provider_ragas/inline/provider.py
@@ -1,5 +1,4 @@
-from llama_stack.providers.datatypes import Api, InlineProviderSpec, ProviderSpec
-
+from ..compat import Api, InlineProviderSpec, ProviderSpec
 from ..constants import PROVIDER_TYPE
 
@@ -15,6 +14,5 @@ def get_provider_spec() -> ProviderSpec:
             Api.files,
             Api.benchmarks,
             Api.datasetio,
-            Api.telemetry,
         ],
     )
diff --git a/src/llama_stack_provider_ragas/inline/ragas_inline_eval.py b/src/llama_stack_provider_ragas/inline/ragas_inline_eval.py
index 6f68c7a..33e89ff 100644
--- a/src/llama_stack_provider_ragas/inline/ragas_inline_eval.py
+++ b/src/llama_stack_provider_ragas/inline/ragas_inline_eval.py
@@ -3,14 +3,6 @@
 import logging
 from typing import Any
 
-from llama_stack.apis.benchmarks import Benchmark
-from llama_stack.apis.common.job_types import Job, JobStatus
-from llama_stack.apis.datasetio import DatasetIO
-from llama_stack.apis.eval import BenchmarkConfig, Eval, EvaluateResponse
-from llama_stack.apis.inference import Inference
-from llama_stack.apis.scoring import ScoringResult
-from llama_stack.providers.datatypes import BenchmarksProtocolPrivate
-from llama_stack.schema_utils import json_schema_type
 from ragas import EvaluationDataset
 from ragas import evaluate as ragas_evaluate
 from ragas.metrics import (
@@ -22,6 +14,19 @@
 )
 from ragas.run_config import RunConfig
 
+from ..compat import (
+    Benchmark,
+    BenchmarkConfig,
+    BenchmarksProtocolPrivate,
+    DatasetIO,
+    Eval,
+    EvaluateResponse,
+    Inference,
+    Job,
+    JobStatus,
+    ScoringResult,
+    json_schema_type,
+)
 from ..config import RagasProviderInlineConfig
 from ..constants import METRIC_MAPPING
 from ..errors import RagasEvaluationError
diff --git a/src/llama_stack_provider_ragas/inline/wrappers_inline.py b/src/llama_stack_provider_ragas/inline/wrappers_inline.py
index 63e62f7..3bff775 100644
--- a/src/llama_stack_provider_ragas/inline/wrappers_inline.py
+++ b/src/llama_stack_provider_ragas/inline/wrappers_inline.py
@@ -3,15 +3,16 @@
 
 from langchain_core.language_models.llms import Generation, LLMResult
 from langchain_core.prompt_values import PromptValue
-from llama_stack.apis.inference import (
+from ragas.embeddings.base import BaseRagasEmbeddings
+from ragas.llms.base import BaseRagasLLM
+from ragas.run_config import RunConfig
+
+from ..compat import (
     OpenAICompletionRequestWithExtraBody,
     OpenAIEmbeddingsRequestWithExtraBody,
     SamplingParams,
     TopPSamplingStrategy,
 )
-from ragas.embeddings.base import BaseRagasEmbeddings
-from ragas.llms.base import BaseRagasLLM
-from ragas.run_config import RunConfig
 
 logger = logging.getLogger(__name__)
diff --git a/src/llama_stack_provider_ragas/remote/__init__.py b/src/llama_stack_provider_ragas/remote/__init__.py
index 87c1e35..7b60cc6 100644
--- a/src/llama_stack_provider_ragas/remote/__init__.py
+++ b/src/llama_stack_provider_ragas/remote/__init__.py
@@ -1,7 +1,6 @@
 from typing import Any
 
-from llama_stack.apis.datatypes import Api
-
+from ..compat import Api
 from ..config import RagasProviderRemoteConfig
 from .ragas_remote_eval import RagasEvaluatorRemote
diff --git a/src/llama_stack_provider_ragas/remote/kubeflow/components.py b/src/llama_stack_provider_ragas/remote/kubeflow/components.py
index b8a98c5..e3121d0 100644
--- a/src/llama_stack_provider_ragas/remote/kubeflow/components.py
+++ b/src/llama_stack_provider_ragas/remote/kubeflow/components.py
@@ -85,11 +85,11 @@ def run_ragas_evaluation(
     import logging
 
     import pandas as pd
-    from llama_stack.apis.inference import SamplingParams
     from ragas import EvaluationDataset, evaluate
     from ragas.dataset_schema import EvaluationResult
     from ragas.run_config import RunConfig
 
+    from llama_stack_provider_ragas.compat import SamplingParams
     from llama_stack_provider_ragas.constants import METRIC_MAPPING
     from llama_stack_provider_ragas.logging_utils import render_dataframe_as_table
     from llama_stack_provider_ragas.remote.wrappers_remote import (
diff --git a/src/llama_stack_provider_ragas/remote/provider.py b/src/llama_stack_provider_ragas/remote/provider.py
index 1a7419f..30263e0 100644
--- a/src/llama_stack_provider_ragas/remote/provider.py
+++ b/src/llama_stack_provider_ragas/remote/provider.py
@@ -1,9 +1,4 @@
-from llama_stack.providers.datatypes import (
-    Api,
-    ProviderSpec,
-    RemoteProviderSpec,
-)
-
+from ..compat import Api, ProviderSpec, RemoteProviderSpec
 from ..constants import PROVIDER_TYPE
 
@@ -26,6 +21,5 @@ def get_provider_spec() -> ProviderSpec:
             Api.files,
             Api.benchmarks,
             Api.datasetio,
-            Api.telemetry,
         ],
     )
diff --git a/src/llama_stack_provider_ragas/remote/ragas_remote_eval.py b/src/llama_stack_provider_ragas/remote/ragas_remote_eval.py
index 4bbcdd7..03f9946 100644
--- a/src/llama_stack_provider_ragas/remote/ragas_remote_eval.py
+++ b/src/llama_stack_provider_ragas/remote/ragas_remote_eval.py
@@ -4,14 +4,19 @@
 
 import pandas as pd
 import requests
-from llama_stack.apis.benchmarks import Benchmark
-from llama_stack.apis.common.job_types import Job, JobStatus
-from llama_stack.apis.eval import BenchmarkConfig, Eval, EvaluateResponse
-from llama_stack.apis.scoring import ScoringResult
-from llama_stack.providers.datatypes import BenchmarksProtocolPrivate
-from llama_stack.schema_utils import json_schema_type
 from pydantic import BaseModel
 
+from ..compat import (
+    Benchmark,
+    BenchmarkConfig,
+    BenchmarksProtocolPrivate,
+    Eval,
+    EvaluateResponse,
+    Job,
+    JobStatus,
+    ScoringResult,
+    json_schema_type,
+)
 from ..config import (
     KubeflowConfig,
     RagasConfig,
diff --git a/src/llama_stack_provider_ragas/remote/wrappers_remote.py b/src/llama_stack_provider_ragas/remote/wrappers_remote.py
index 7f4059e..8a410bc 100644
--- a/src/llama_stack_provider_ragas/remote/wrappers_remote.py
+++ b/src/llama_stack_provider_ragas/remote/wrappers_remote.py
@@ -2,7 +2,6 @@
 
 from langchain_core.language_models.llms import Generation, LLMResult
 from langchain_core.prompt_values import PromptValue
-from llama_stack.apis.inference import SamplingParams, TopPSamplingStrategy
 from llama_stack_client import AsyncLlamaStackClient, LlamaStackClient, omit
 from llama_stack_client.types.completion_create_response import CompletionCreateResponse
 from llama_stack_client.types.create_embeddings_response import CreateEmbeddingsResponse
@@ -10,6 +9,8 @@
 from ragas.llms.base import BaseRagasLLM
 from ragas.run_config import RunConfig
 
+from ..compat import SamplingParams, TopPSamplingStrategy
+
 logger = logging.getLogger(__name__)
diff --git a/tests/conftest.py b/tests/conftest.py
index cfcbee1..90b1c83 100644
--- a/tests/conftest.py
+++ b/tests/conftest.py
@@ -3,10 +3,10 @@
 
 import pytest
 from dotenv import load_dotenv
-from llama_stack.apis.inference import SamplingParams, TopPSamplingStrategy
 from llama_stack_client import LlamaStackClient
 from ragas import EvaluationDataset
 
+from llama_stack_provider_ragas.compat import SamplingParams, TopPSamplingStrategy
 from llama_stack_provider_ragas.config import (
     KubeflowConfig,
     RagasProviderInlineConfig,
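
As a usage illustration (not part of the patch): after this change, downstream modules pull llama_stack symbols from the compat layer instead of importing llama_stack directly, so the provider works with either package layout. The sketch below is a minimal, hypothetical example; the assertion-based check is an assumption about how one might verify the re-exported names, not code from this change.

# Hypothetical sketch: consumers import from the compat module and get the
# same names whether the legacy llama_stack packages or the newer
# llama_stack_api package is installed.
from llama_stack_provider_ragas import compat
from llama_stack_provider_ragas.compat import Api, SamplingParams

# Illustrative sanity check (assumption): every name listed in __all__ should
# resolve to an attribute on the module, regardless of which layout was used.
missing = [name for name in compat.__all__ if not hasattr(compat, name)]
assert not missing, f"compat layer failed to re-export: {missing}"

# Symbols resolved through the compat layer, e.g. the Api member used in the
# provider specs above.
print(Api.benchmarks, SamplingParams)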