Commit 047d019

Athena: Add Artemis competencies (#276)
1 parent ad7fca6 commit 047d019

10 files changed: +98 -31 lines changed


athena/athena/athena/endpoints.py

Lines changed: 18 additions & 2 deletions
@@ -3,14 +3,14 @@
 from fastapi import Depends, BackgroundTasks, Body
 from pydantic import ConfigDict, BaseModel, ValidationError
 from pydantic.alias_generators import to_camel
-from typing import TypeVar, Callable, List, Union, Any, Coroutine, Type, Optional
+from typing import TypeVar, Callable, List, Union, Any, Coroutine, Type
 
 from athena.app import app
 from athena.authenticate import authenticated
 from athena.metadata import with_meta
 from athena.module_config import get_dynamic_module_config_factory
 from athena.logger import logger
-from athena.schemas import Exercise, Submission, Feedback, LearnerProfile
+from athena.schemas import Exercise, Submission, Feedback, LearnerProfile, Competency
 from athena.storage import get_stored_submission_meta, get_stored_exercise_meta, get_stored_feedback_meta, \
     store_exercise, store_feedback, store_feedback_suggestions, store_submissions, get_stored_submissions
 
@@ -273,6 +273,8 @@ def feedback_provider(func: Union[
     Callable[[E, S, G, C, LearnerProfile], Coroutine[Any, Any, List[F]]],
     Callable[[E, S, G, C, LearnerProfile, S], List[F]],
     Callable[[E, S, G, C, LearnerProfile, S], Coroutine[Any, Any, List[F]]],
+    Callable[[E, S, G, C, LearnerProfile, S, List[Competency]], List[F]],
+    Callable[[E, S, G, C, LearnerProfile, S, List[Competency]], Coroutine[Any, Any, List[F]]],
 ]):
     """
     Provide feedback to the Assessment Module Manager.
@@ -318,13 +320,22 @@ def feedback_provider(func: Union[
     >>> @feedback_provider
     ... async def async_suggest_feedback_with_profile(exercise: Exercise, submission: Submission, module_config: Optional[dict], learner_profile: Optional[LearnerProfile], latest_submission: Optional[Submission]):
     ...     # suggest feedback here using module_config and learner_profile and return it as a list
+
+    With competencies (both synchronous and asynchronous forms):
+    >>> @feedback_provider
+    ... def sync_suggest_feedback_with_profile(exercise: Exercise, submission: Submission, module_config: Optional[dict], learner_profile: Optional[LearnerProfile], latest_submission: Optional[Submission], competencies: Optional[List[Competency]]):
+    ...     # suggest feedback here using module_config and learner_profile and return it as a list
+    >>> @feedback_provider
+    ... async def async_suggest_feedback_with_profile(exercise: Exercise, submission: Submission, module_config: Optional[dict], learner_profile: Optional[LearnerProfile], latest_submission: Optional[Submission], competencies: Optional[List[Competency]]):
+    ...     # suggest feedback here using module_config and learner_profile and return it as a list
     """
     exercise_type = inspect.signature(func).parameters["exercise"].annotation
     submission_type = inspect.signature(func).parameters["submission"].annotation
     module_config_type = inspect.signature(func).parameters["module_config"].annotation if "module_config" in inspect.signature(func).parameters else None
     is_graded_type = inspect.signature(func).parameters["is_graded"].annotation if "is_graded" in inspect.signature(func).parameters else None
     learner_profile_type = inspect.signature(func).parameters["learner_profile"].annotation if "learner_profile" in inspect.signature(func).parameters else None
     latest_submission_type = inspect.signature(func).parameters["latest_submission"].annotation if "latest_submission" in inspect.signature(func).parameters else None
+    competencies_type = inspect.signature(func).parameters["competencies"].annotation if "competencies" in inspect.signature(func).parameters else None
 
     @app.post("/feedback_suggestions", responses=module_responses)
     @authenticated
@@ -335,6 +346,7 @@ async def wrapper(
             isGraded: is_graded_type = Body(True, alias="isGraded"),
             learner_profile: learner_profile_type = Body(None, alias="learnerProfile"),
             latest_submission: latest_submission_type = Body(None, alias="latestSubmission"),
+            competencies: competencies_type = Body(None, alias="competencies"),
             module_config: module_config_type = Depends(get_dynamic_module_config_factory(module_config_type))):
 
         # Retrieve existing metadata for the exercise, submission and feedback
@@ -358,13 +370,17 @@ async def wrapper(
         if "latest_submission" in inspect.signature(func).parameters:
             kwargs["latest_submission"] = latest_submission
 
+        if "competencies" in inspect.signature(func).parameters:
+            kwargs["competencies"] = competencies
+
         # Filter out unexpected kwargs and log warnings
         accepted_params = set(inspect.signature(func).parameters.keys())
         all_possible_kwargs = {
             "module_config": module_config,
             "is_graded": isGraded,
             "learner_profile": learner_profile,
             "latest_submission": latest_submission,
+            "competencies": competencies,
         }
 
         # Only pass accepted kwargs
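
For module authors, the new parameter is opt-in: the wrapper above only forwards competencies when the provider's signature declares it. A minimal sketch of a provider that does, mirroring the docstring examples; the top-level feedback_provider import follows the existing module pattern and the empty body is illustrative, not part of this commit:

from typing import List, Optional

from athena import feedback_provider
from athena.schemas import Competency, LearnerProfile
from athena.text import Exercise, Feedback, Submission


@feedback_provider
async def suggest_feedback(
    exercise: Exercise,
    submission: Submission,
    module_config: Optional[dict],
    learner_profile: Optional[LearnerProfile],
    latest_submission: Optional[Submission],
    competencies: Optional[List[Competency]],
) -> List[Feedback]:
    # "competencies" arrives via the new request body field (alias "competencies")
    # and is only passed in because this signature declares the parameter.
    if competencies:
        ...  # tailor feedback suggestions to the instructor-defined competencies
    return []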

athena/athena/athena/schemas/__init__.py

Lines changed: 3 additions & 1 deletion
@@ -15,4 +15,6 @@
 from .modeling_submission import ModelingSubmission
 from .grading_criterion import GradingCriterion, StructuredGradingInstruction
 from .structured_grading_criterion import StructuredGradingCriterion
-from .learner_profile import LearnerProfile
+from .learner_profile import LearnerProfile
+from .competency import Competency
+from .competency_taxonomy import CompetencyTaxonomy

athena/athena/athena/schemas/competency.py

Lines changed: 26 additions & 0 deletions
@@ -0,0 +1,26 @@
+from abc import ABC
+
+from pydantic import Field, validator
+
+from .schema import Schema
+from .competency_taxonomy import CompetencyTaxonomy
+
+
+class Competency(Schema, ABC):
+    """A competency that is required by the lecturer to be mastered by the student, enhanced with module-specific metadata."""
+    id: int = Field(example=1)
+    title: str = Field("", description="The title of the competency.", example="Competency 1")
+    description: str = Field("", description="The description of the competency.", example="Competency 1 description")
+    taxonomy: CompetencyTaxonomy = Field(None, description="The taxonomy of the competency.")
+
+    meta: dict = Field(default_factory=dict, example={"internal_id": "5"})
+
+    @validator('taxonomy', pre=True)
+    def validate_taxonomy(cls, v):
+        """Validate and convert taxonomy to the correct format."""
+        if isinstance(v, str):
+            return CompetencyTaxonomy.from_any_case(v)
+        return v
+
+    class Config:
+        orm_mode = True
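
A quick sanity check of the new schema, assuming the pydantic v1-style API the file itself uses (parse_obj, validator); the payload values are illustrative:

from athena.schemas import Competency, CompetencyTaxonomy

# A competency as it might arrive from Artemis; the taxonomy is sent as a
# string and coerced to the enum by the pre-validator via from_any_case.
competency = Competency.parse_obj({
    "id": 1,
    "title": "Loop constructs",
    "description": "Apply loops to iterate over collections.",
    "taxonomy": "APPLY",
})
assert competency.taxonomy is CompetencyTaxonomy.APPLY
assert competency.meta == {}  # default_factory gives an empty meta dict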

athena/athena/athena/schemas/competency_taxonomy.py

Lines changed: 21 additions & 0 deletions
@@ -0,0 +1,21 @@
+from enum import Enum
+
+
+class CompetencyTaxonomy(Enum):
+    """The taxonomy of competencies that is used to structure the competencies."""
+    REMEMBER = "remember"
+    UNDERSTAND = "understand"
+    APPLY = "apply"
+    ANALYZE = "analyze"
+    EVALUATE = "evaluate"
+    CREATE = "create"
+
+    @classmethod
+    def from_any_case(cls, value):
+        """Convert any case to the correct enum value."""
+        if isinstance(value, str):
+            value_lower = value.lower()
+            for member in cls:
+                if member.value == value_lower:
+                    return member
+        raise ValueError(f"Invalid taxonomy value: {value}")
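
from_any_case makes the lookup case-insensitive, presumably because Artemis serializes taxonomy values as upper-case enum names while this enum stores them in lower case. A short sketch of the expected behaviour:

from athena.schemas import CompetencyTaxonomy

assert CompetencyTaxonomy.from_any_case("ANALYZE") is CompetencyTaxonomy.ANALYZE
assert CompetencyTaxonomy.from_any_case("Analyze") is CompetencyTaxonomy.ANALYZE

# Anything outside the known Bloom levels is rejected.
try:
    CompetencyTaxonomy.from_any_case("synthesize")
except ValueError as error:
    print(error)  # Invalid taxonomy value: synthesize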

athena/modules/text/module_text_llm/module_text_llm/__main__.py

Lines changed: 3 additions & 1 deletion
@@ -13,7 +13,7 @@
 )
 from athena.text import Exercise, Submission, Feedback
 from athena.logger import logger
-from athena.schemas import LearnerProfile
+from athena.schemas import LearnerProfile, Competency
 
 from module_text_llm.config import Configuration
 from module_text_llm.evaluation import get_feedback_statistics, get_llm_statistics
@@ -61,6 +61,7 @@ async def suggest_feedback(
     is_graded: bool,
     learner_profile: Optional[LearnerProfile] = None,
     latest_submission: Optional[Submission] = None,
+    competencies: Optional[List[Competency]] = None,
 ) -> List[Feedback]:
     logger.info(
         "suggest_feedback: %s suggestions for submission %d of exercise %d were requested, with approach: %s",
@@ -77,6 +78,7 @@ async def suggest_feedback(
         is_graded=is_graded,
         learner_profile=learner_profile,
         latest_submission=latest_submission,
+        competencies=competencies,
     )
 

athena/modules/text/module_text_llm/module_text_llm/approach_controller.py

Lines changed: 5 additions & 1 deletion
@@ -1,6 +1,6 @@
 from typing import Any, Dict, List, Optional
 from athena.text import Exercise, Submission, Feedback
-from athena.schemas.learner_profile import LearnerProfile
+from athena.schemas import LearnerProfile, Competency
 import inspect
 
 from module_text_llm.registry import APPROACH_IMPLEMENTATIONS
@@ -15,6 +15,7 @@ async def generate_suggestions(
     is_graded: bool,
     learner_profile: Optional[LearnerProfile] = None,
     latest_submission: Optional[Submission] = None,
+    competencies: Optional[List[Competency]] = None,
 ) -> List[Feedback]:
     implementation_func = APPROACH_IMPLEMENTATIONS.get(config.type)
     if implementation_func is None:
@@ -36,6 +37,9 @@ async def generate_suggestions(
     if "latest_submission" in sig.parameters:
         kwargs["latest_submission"] = latest_submission
 
+    if "competencies" in sig.parameters:
+        kwargs["competencies"] = competencies
+
     return await implementation_func(
         exercise,
         submission,
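
The controller keeps older approach implementations working by forwarding competencies only when the implementation's signature declares it. The same inspect-based pattern in isolation, with illustrative names that are not part of the module:

import inspect
from typing import Any, Callable, Coroutine, Dict


async def call_with_declared_kwargs(
    func: Callable[..., Coroutine[Any, Any, Any]], *args: Any, **optional: Any
) -> Any:
    """Forward only the optional keyword arguments that func actually declares."""
    sig = inspect.signature(func)
    kwargs: Dict[str, Any] = {k: v for k, v in optional.items() if k in sig.parameters}
    return await func(*args, **kwargs)


async def legacy_approach(exercise, submission):
    # Written before this commit: no "competencies" parameter, still callable
    # because the unknown keyword argument is filtered out above.
    return []

Calling await call_with_declared_kwargs(legacy_approach, exercise, submission, competencies=[]) therefore degrades gracefully: the extra keyword is simply dropped.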

athena/modules/text/module_text_llm/module_text_llm/default_approach/generate_suggestions.py

Lines changed: 11 additions & 3 deletions
@@ -1,7 +1,7 @@
 from typing import List, Optional
 
 from athena import emit_meta
-from athena.schemas import LearnerProfile
+from athena.schemas import LearnerProfile, Competency
 from athena.text import Exercise, Submission, Feedback
 from athena.logger import logger
 
@@ -44,6 +44,7 @@ def _prepare_analysis_prompt_input(
     exercise: Exercise,
     submission: Submission,
     latest_submission: Optional[Submission] = None,
+    competencies: Optional[List[Competency]] = None,
 ) -> dict:
     """Prepare input data for the submission analysis prompt."""
     return {
@@ -57,6 +58,7 @@ def _prepare_analysis_prompt_input(
         "submission": add_sentence_numbers(submission.text),
         "previous_submission": add_sentence_numbers(
             latest_submission.text) if latest_submission is not None else "Previous submission is not available.",
+        "competencies": competencies or [],
     }
 
 
@@ -243,7 +245,7 @@ async def _generate_graded_feedback(
 
     result = await predict_and_parse(
         model=config.model,
-        chat_prompt=chat_prompt,
+        chat_prompt=chat_prompt,
         prompt_input=prompt_input,
         pydantic_object=AssessmentModel,
         tags=[
@@ -321,6 +323,7 @@ async def generate_suggestions(
     is_graded: bool,
     learner_profile: Optional[LearnerProfile],
    latest_submission: Optional[Submission] = None,
+    competencies: Optional[List[Competency]] = None,
 ) -> List[Feedback]:
     """Generate feedback suggestions for a student submission using a two-step LLM approach."""
     if latest_submission is None:
@@ -337,11 +340,16 @@
             return []
         return _convert_to_feedback_objects(result, exercise, submission, is_graded)
 
+    if competencies is None:
+        logger.info("Competencies are not provided.")
+    else:
+        logger.info("Competencies are provided: %s", competencies)
+
     # Setup learner profile with fallbacks
     learner_profile = _setup_learner_profile(learner_profile, config)
 
     # Prepare input for submission analysis
-    prompt_input = _prepare_analysis_prompt_input(exercise, submission, latest_submission)
+    prompt_input = _prepare_analysis_prompt_input(exercise, submission, latest_submission, competencies)
 
     # Analyze the submission
     submission_analysis = await _analyze_submission(
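
The commit passes the raw list (or []) straight into the prompt input, so the {competencies} placeholder is filled with the list's default string representation. If a cleaner rendering were wanted, a hypothetical helper (not part of this commit) could pre-format the list before it reaches _prepare_analysis_prompt_input:

from typing import List

from athena.schemas import Competency


def format_competencies(competencies: List[Competency]) -> str:
    """Render instructor-defined competencies as prompt-friendly bullets (hypothetical)."""
    if not competencies:
        return "No required competencies were provided by the instructor."
    return "\n".join(
        f"- {c.title} ({c.taxonomy.value if c.taxonomy else 'no taxonomy'}): {c.description}"
        for c in competencies
    )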

athena/modules/text/module_text_llm/module_text_llm/default_approach/prompts/submission_analysis_prompt.py

Lines changed: 6 additions & 1 deletion
@@ -5,7 +5,12 @@
 You are an educational evaluator reviewing a student's progress on a text-based exercise.
 
 You will:
-1. Analyze the problem statement, sample solution, and grading instructions to extract core competencies required to solve the task.
+0. Observe if the required competencies are set by the instructor. If they are, move to step 2 and skip the step 1:
+
+Required Competencies:
+{competencies}
+
+1. Only if the required competencies are not provided, then analyze the problem statement, sample solution, and grading instructions to extract core competencies required to solve the task.
 - For each competency, define the expected cognitive level (e.g., Recall, Understand, Apply, Analyze, Evaluate, Create).
 - Reference the grading_instruction_id if relevant.
 

athena/modules/text/module_text_llm/module_text_llm/default_approach/schemas/__init__.py

Lines changed: 0 additions & 4 deletions
@@ -1,8 +1,6 @@
 from .feedback_models import FeedbackType, FeedbackModel, AssessmentModel
 from .analysis_models import (
     CompetencyStatus,
-    CognitiveLevel,
-    RequiredCompetency,
     CompetencyChange,
     EnhancedCompetencyEvaluation,
     SubmissionAnalysis,
@@ -13,8 +11,6 @@
     "FeedbackModel",
     "AssessmentModel",
     "CompetencyStatus",
-    "CognitiveLevel",
-    "RequiredCompetency",
     "CompetencyChange",
     "EnhancedCompetencyEvaluation",
     "SubmissionAnalysis",

athena/modules/text/module_text_llm/module_text_llm/default_approach/schemas/analysis_models.py

Lines changed: 5 additions & 18 deletions
@@ -1,7 +1,9 @@
-from pydantic import BaseModel, Field
 from typing import List, Optional, Literal
 from enum import Enum
 
+from athena.schemas import Competency
+from pydantic import BaseModel, Field
+
 
 class CompetencyStatus(str, Enum):
     NOT_ATTEMPTED = "Not Attempted"
@@ -10,21 +12,6 @@ class CompetencyStatus(str, Enum):
     CORRECT = "Correct"
 
 
-class CognitiveLevel(str, Enum):
-    RECALL = "Recall"
-    UNDERSTAND = "Understand"
-    APPLY = "Apply"
-    ANALYZE = "Analyze"
-    EVALUATE = "Evaluate"
-    CREATE = "Create"
-
-
-class RequiredCompetency(BaseModel):
-    description: str = Field(..., description="The skill or knowledge the student is expected to demonstrate.")
-    cognitive_level: Optional[CognitiveLevel] = Field(None, description="Bloom's Taxonomy level")
-    grading_instruction_id: Optional[int] = Field(None, description="Reference to grading instruction if applicable")
-
-
 class CompetencyChange(BaseModel):
     type: Literal["added", "removed", "modified", "unchanged"]
     is_positive: Optional[bool] = Field(None, description="Whether the change is positive (improvement)")
@@ -35,8 +22,8 @@ class CompetencyChange(BaseModel):
 
 
 class EnhancedCompetencyEvaluation(BaseModel):
-    competency: RequiredCompetency
-    status: CompetencyStatus
+    competency: Competency = Field(..., description="The competency that was evaluated")
+    status: CompetencyStatus = Field(..., description="The student's status for the competency")
     evidence: Optional[str] = Field(None, description="Quote or paraphrase from student's current submission")
     line_start: Optional[int] = Field(None, description="Start line number for evidence")
     line_end: Optional[int] = Field(None, description="End line number for evidence")
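
With the local RequiredCompetency and CognitiveLevel models removed, evaluations now reference the shared Competency schema directly. A minimal construction sketch, assuming no additional required fields beyond those visible in this hunk; the values are illustrative and CompetencyStatus.CORRECT is one of the members shown in the diff:

from athena.schemas import Competency, CompetencyTaxonomy
from module_text_llm.default_approach.schemas import (
    CompetencyStatus,
    EnhancedCompetencyEvaluation,
)

evaluation = EnhancedCompetencyEvaluation(
    competency=Competency(
        id=1,
        title="Loop constructs",
        description="Apply loops to iterate over collections.",
        taxonomy=CompetencyTaxonomy.APPLY,
    ),
    status=CompetencyStatus.CORRECT,
    evidence="The submission iterates over the dataset with a for loop.",
)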
