Skip to content
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
2 changes: 1 addition & 1 deletion backend/openedx_ai_extensions/api/v1/workflows/views.py
Original file line number Diff line number Diff line change
Expand Up @@ -86,7 +86,7 @@ def _handle_request(self, request, method):
action=action, course_id=course_id, user=user, context=context
)

result = workflow.execute(body_data.get("user_query", {}))
result = workflow.execute(body_data.get("user_input", {}))

# TODO: this should go through a serializer so that every UI actuator receives a compatible object
request_id = body_data.get("requestId", "no-request-id")
Expand Down
2 changes: 2 additions & 0 deletions backend/openedx_ai_extensions/processors/__init__.py
Original file line number Diff line number Diff line change
Expand Up @@ -2,6 +2,7 @@
Processors module - handles data extraction and AI processing
"""

from .educator_assistant_processor import EducatorAssistantProcessor
from .llm_processor import LLMProcessor
from .openedx.openedx_processor import OpenEdXProcessor
from .openedx.submission_processor import SubmissionProcessor
Expand All @@ -12,4 +13,5 @@
"OpenEdXProcessor",
"ResponsesProcessor",
"SubmissionProcessor",
"EducatorAssistantProcessor"
]
100 changes: 100 additions & 0 deletions backend/openedx_ai_extensions/processors/content_libraries_utils.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,100 @@
"""
Utility functions and classes for content library operations.
"""
import logging
from uuid import uuid4

from django.db import transaction
from opaque_keys.edx.locator import LibraryLocatorV2

logger = logging.getLogger(__name__)


class ContentLibraryHelper:
    """
    Helper for content-library operations on a single library.

    Wraps the openedx ``content_libraries`` API to create blocks, set their
    OLX, and group them into a collection, acting on behalf of one user.
    """

    def __init__(self, library_key: str, user) -> None:
        """
        Args:
            library_key: Serialized ``LibraryLocatorV2`` string.
            user: Django user performing the operations (needs edit rights).
        """
        self.library_key = LibraryLocatorV2.from_string(library_key)
        self.user = user

    def create_collection_and_add_items(self, items, title, description="") -> str:
        """
        Create a collection and populate it with newly created blocks.

        Args:
            items: Iterable of dicts with ``category`` (block type) and
                ``data`` (OLX markup) keys.
            title: Title for the new collection.
            description: Optional collection description.

        Returns:
            The new collection key as a string.
        """
        # Atomic so a failure partway through leaves no orphaned
        # collection or half-created blocks behind.
        with transaction.atomic():
            collection = self.create_collection(title=title, description=description)

            opaque_keys = []
            for problem_info in items:

                block_data = {
                    "block_type": problem_info['category'],
                    "definition_id": str(uuid4()),
                    "can_stand_alone": True,
                }

                problem = self.create_block(block_data)
                self.modify_block_olx(usage_key=problem.usage_key, data=problem_info['data'])

                opaque_keys.append(problem.usage_key)

            self.update_library_collection_items(
                collection_key=collection.key,
                item_keys=opaque_keys
            )
        return str(collection.key)

    def create_block(self, data):
        """
        Create a library block from serialized block data.

        Args:
            data: Dict accepted by ``LibraryXBlockCreationSerializer``
                (``block_type``, ``definition_id``, ``can_stand_alone``).

        Returns:
            The created library-block metadata object.

        Raises:
            api.IncompatibleTypesError: If the block type cannot be added
                to this library.
        """
        # pylint: disable=import-error, import-outside-toplevel
        from openedx.core.djangoapps.content_libraries import api
        from openedx.core.djangoapps.content_libraries.rest_api import serializers

        serializer = serializers.LibraryXBlockCreationSerializer(data=data)
        serializer.is_valid(raise_exception=True)

        try:
            result = api.create_library_block(self.library_key, user_id=self.user.id, **serializer.validated_data)
        except api.IncompatibleTypesError as err:
            # Re-raise: swallowing the error would leave `result` unbound and
            # the `return` below would crash with UnboundLocalError, masking
            # the real failure. Propagating also rolls back the enclosing
            # transaction in create_collection_and_add_items.
            logger.error("Error creating library block: %s", err)
            raise

        return result

    def modify_block_olx(self, usage_key, data):
        """Replace the OLX of the library block identified by ``usage_key``."""
        # pylint: disable=import-error, import-outside-toplevel
        from openedx.core.djangoapps.content_libraries import api

        api.set_library_block_olx(usage_key, data)

    def create_collection(self, title, description=""):
        """
        Create a collection in the library and return it.

        Raises a permission error if the user cannot edit this library.
        """
        # pylint: disable=import-error, import-outside-toplevel
        from openedx.core.djangoapps.content_libraries import api, permissions

        content_library = api.require_permission_for_library_key(
            self.library_key, self.user, permissions.CAN_EDIT_THIS_CONTENT_LIBRARY
        )

        # Short random slug; collection keys must be unique within the library.
        key = str(uuid4().hex[:10])

        collection = api.create_library_collection(
            library_key=content_library.library_key,
            content_library=content_library,
            collection_key=key,
            title=title,
            description=description,
            created_by=self.user.id,
        )
        return collection

    def update_library_collection_items(self, collection_key, item_keys) -> None:
        """Associate the given block keys with the collection."""
        # pylint: disable=import-error, import-outside-toplevel
        from openedx.core.djangoapps.content_libraries import api

        api.update_library_collection_items(
            library_key=self.library_key,
            collection_key=collection_key,
            created_by=self.user.id,
            opaque_keys=item_keys
        )
Original file line number Diff line number Diff line change
@@ -0,0 +1,144 @@
"""
LLM Processing using LiteLLM for multiple providers
"""

import json
import logging
import os

from django.conf import settings
from litellm import completion

from .content_libraries_utils import ContentLibraryHelper

logger = logging.getLogger(__name__)


class EducatorAssistantProcessor:
    """
    AI/LLM processor for educator-assistant functions (e.g. quiz generation).

    Reads its provider configuration from ``settings.AI_EXTENSIONS`` under the
    profile named in the workflow config, and dispatches to the method named
    by the ``function`` config key via :meth:`process`.
    """

    def __init__(self, config=None, user=None, context=None):
        """
        Args:
            config: Full workflow config; only the sub-dict keyed by this
                class name is used.
            user: Django user on whose behalf library content is created.
            context: Course/unit context injected into prompts.
        """
        config = config or {}

        class_name = self.__class__.__name__
        self.config = config.get(class_name, {})
        self.user = user
        self.context = context

        self.config_profile = self.config.get("config", "default")

        # Extract API configuration once during initialization.
        profile_settings = settings.AI_EXTENSIONS[self.config_profile]
        self.api_key = profile_settings['API_KEY']
        self.model = profile_settings['LITELLM_MODEL']

        # Optional tuning parameters. Membership tests, not hasattr():
        # profile_settings is a dict, so hasattr() never matched and these
        # settings were silently ignored. This mirrors LLMProcessor.
        self.extra_params = {}
        if 'TIMEOUT' in profile_settings:
            self.extra_params['timeout'] = profile_settings['TIMEOUT']
        if 'TEMPERATURE' in profile_settings:
            self.extra_params['temperature'] = profile_settings['TEMPERATURE']
        if 'MAX_TOKENS' in profile_settings:
            self.extra_params['max_tokens'] = profile_settings['MAX_TOKENS']

        if not self.api_key:
            logger.error("AI API key not configured")

    def process(self, input_data):
        """Dispatch to the method named by the ``function`` config key."""
        function_name = self.config.get("function")
        # Raises AttributeError for an unknown/missing function name, which
        # surfaces misconfiguration loudly rather than failing silently.
        function = getattr(self, function_name)
        return function(input_data)

    def _call_completion_api(self, system_role):
        """
        Call the LiteLLM completion API with a single system message.

        Args:
            system_role: Full prompt text sent as the system message.

        Returns:
            On success: dict with ``response``, ``tokens_used``,
            ``model_used`` and ``status``. On failure: dict with only an
            ``error`` key — callers must check for it before reading
            ``response``.
        """
        try:
            if not self.api_key:
                return {"error": "AI API key not configured"}

            # Build completion parameters
            completion_params = {
                "model": self.model,
                "messages": [
                    {"role": "system", "content": system_role},
                ],
                "api_key": self.api_key,
            }

            # Add optional parameters only if configured
            if self.extra_params:
                completion_params.update(self.extra_params)

            response = completion(**completion_params)
            content = response.choices[0].message.content

            return {
                "response": content,
                "tokens_used": response.usage.total_tokens if response.usage else 0,
                "model_used": self.model,
                "status": "success",
            }

        except Exception as e:  # pylint: disable=broad-exception-caught
            logger.error("Error calling LiteLLM: %s", e)
            return {"error": f"AI processing failed: {str(e)}"}

    def generate_quiz_questions(self, input_data):
        """
        Generate quiz questions and store them in a new library collection.

        Args:
            input_data: Dict with ``library_id`` (LibraryLocatorV2 string),
                ``num_questions`` and optional ``extra_instructions``.

        Returns:
            On success: dict whose ``response`` is the authoring URL path of
            the created collection. On failure: dict with an ``error`` key.
        """
        lib_key_str = input_data.get('library_id')
        requested_questions = input_data.get('num_questions')
        extra_instructions = input_data.get('extra_instructions')

        prompt_file_path = os.path.join(
            os.path.dirname(os.path.dirname(__file__)),
            'prompts',
            'default_generate_quiz_questions.txt'
        )
        try:
            with open(prompt_file_path, "r", encoding="utf-8") as f:
                prompt = f.read()
        except OSError as e:
            logger.error("Error loading prompt template: %s", e)
            return {"error": "Failed to load prompt template."}

        # Fill template placeholders; str() guards against non-string values
        # (e.g. num_questions arriving as int or None — TODO confirm caller
        # always supplies it).
        prompt = prompt.replace("{{NUM_QUESTIONS}}", str(requested_questions))
        prompt = prompt.replace("{{CONTEXT}}", str(self.context))
        prompt = prompt.replace("{{EXTRA_INSTRUCTIONS}}", extra_instructions or "")

        result = self._call_completion_api(prompt)
        if "error" in result:
            # Failed API calls return only an "error" key; without this
            # check the json.loads below would raise KeyError.
            return result
        tokens_used = result.get("tokens_used", 0)

        library_helper = ContentLibraryHelper(library_key=lib_key_str, user=self.user)

        # The model occasionally emits malformed JSON; retry the completion
        # up to twice (3 parse attempts total) before giving up.
        response = None
        for attempt in range(3):
            try:
                response = json.loads(result['response'])
                break
            except json.JSONDecodeError:
                if attempt == 2:
                    return {
                        "error": "Failed to parse AI response as JSON after multiple attempts.",
                        "tokens_used": tokens_used,
                        "model_used": self.model,
                    }
                result = self._call_completion_api(prompt)
                if "error" in result:
                    return result
                tokens_used += result.get("tokens_used", 0)

        # Assumes the model returned an object with "collection" and "items"
        # keys per the prompt template — TODO confirm template contract.
        collection_key = library_helper.create_collection_and_add_items(
            title=response.get("collection", "AI Generated Questions"),
            description="AI-generated quiz questions",
            items=response["items"]
        )

        return {
            "response": f"authoring/library/{lib_key_str}/collection/{collection_key}",
            "tokens_used": tokens_used,
            "model_used": self.model,
            "status": "success",
        }
Original file line number Diff line number Diff line change
Expand Up @@ -24,9 +24,14 @@ def __init__(self, config=None, user_session=None):
# Extract API configuration once during initialization
self.api_key = settings.AI_EXTENSIONS[self.config_profile]["API_KEY"]
self.model = settings.AI_EXTENSIONS[self.config_profile]["LITELLM_MODEL"]
self.timeout = settings.AI_EXTENSIONS[self.config_profile]["TIMEOUT"]
self.temperature = settings.AI_EXTENSIONS[self.config_profile]["TEMPERATURE"]
self.max_tokens = settings.AI_EXTENSIONS[self.config_profile]["MAX_TOKENS"]

self.extra_params = {}
if "TIMEOUT" in settings.AI_EXTENSIONS[self.config_profile]:
self.extra_params["timeout"] = settings.AI_EXTENSIONS[self.config_profile]["TIMEOUT"]
if "TEMPERATURE" in settings.AI_EXTENSIONS[self.config_profile]:
self.extra_params["temperature"] = settings.AI_EXTENSIONS[self.config_profile]["TEMPERATURE"]
if "MAX_TOKENS" in settings.AI_EXTENSIONS[self.config_profile]:
self.extra_params["max_tokens"] = settings.AI_EXTENSIONS[self.config_profile]["MAX_TOKENS"]

if not self.api_key:
logger.error("AI API key not configured")
Expand Down
7 changes: 1 addition & 6 deletions backend/openedx_ai_extensions/processors/llm_processor.py
Original file line number Diff line number Diff line change
Expand Up @@ -43,12 +43,7 @@ def _call_completion_api(self, system_role, user_content):
}

# Add optional parameters only if configured
if self.temperature is not None:
completion_params["temperature"] = self.temperature
if self.max_tokens is not None:
completion_params["max_tokens"] = self.max_tokens
if self.timeout is not None:
completion_params["timeout"] = self.timeout
completion_params.update(self.extra_params)

response = completion(**completion_params)
content = response.choices[0].message.content
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -68,10 +68,7 @@ def _build_completion_params(self, system_role=None, context=None, user_query=No
]

# Add optional parameters only if configured
if self.temperature is not None:
completion_params["temperature"] = self.temperature
if self.max_tokens is not None:
completion_params["max_tokens"] = self.max_tokens
completion_params.update(self.extra_params)

return completion_params

Expand Down
Loading
Loading