Skip to content

Commit 6003298

Browse files
committed
WIP fixing wrong imports on frontend
1 parent 4930994 commit 6003298

File tree

6 files changed

+96
-99
lines changed

6 files changed

+96
-99
lines changed

frontend/app/[locale]/(authenticated)/eval/[evaluationId]/edit/components/evaluation-edit-form-wizard.tsx

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -16,7 +16,7 @@ import { FormWizard, SubmitData } from "@/app/[locale]/components/form-wizard";
1616
import {
1717
evaluationsPatch,
1818
EvaluationUpdate,
19-
RagEvalBackendEvalEvaluationsModelsEvaluationResult,
19+
LlmEvalEvalEvaluationsModelsEvaluationResult,
2020
} from "@/app/client";
2121
import { EditOrigin } from "@/app/types/edit-origin";
2222
import { callApi } from "@/app/utils/backend-client/client";
@@ -32,7 +32,7 @@ const editSchema = z.object({
3232
const schemas = { default: editSchema };
3333

3434
export type EvaluationEditFormWizardProps = {
35-
evaluation: RagEvalBackendEvalEvaluationsModelsEvaluationResult;
35+
evaluation: LlmEvalEvalEvaluationsModelsEvaluationResult;
3636
origin?: EditOrigin;
3737
};
3838

frontend/app/[locale]/(authenticated)/eval/[evaluationId]/edit/page.test.tsx

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -12,7 +12,7 @@ import {
1212
evaluationsGet,
1313
evaluationsPatch,
1414
EvaluationStatus,
15-
RagEvalBackendEvalEvaluationsModelsEvaluationResult,
15+
LlmEvalEvalEvaluationsModelsEvaluationResult,
1616
} from "@/app/client";
1717
import {
1818
expectInputError,
@@ -29,7 +29,7 @@ vi.mock("@/app/client");
2929

3030
describe("Edit Evaluation Page", () => {
3131
const evaluationId = "123";
32-
const testEvaluation: RagEvalBackendEvalEvaluationsModelsEvaluationResult = {
32+
const testEvaluation: LlmEvalEvalEvaluationsModelsEvaluationResult = {
3333
id: evaluationId,
3434
name: "Evaluation Name",
3535
createdAt: "2022-01-01T00:00:00Z",

frontend/app/[locale]/(authenticated)/eval/[evaluationId]/results/[evaluationResultId]/components/evaluation-result-details.tsx

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -8,7 +8,7 @@ import {
88
PropertyList,
99
PropertyListItem,
1010
} from "@/app/[locale]/components/property-list";
11-
import { RagEvalBackendEvalEvaluateResultsRouterEvaluationResult as EvaluationResult } from "@/app/client";
11+
import { LlmEvalEvalEvaluateResultsRouterEvaluationResult as EvaluationResult } from "@/app/client";
1212

1313
const TextValue = ({ children }: PropsWithChildren) => {
1414
return <div className="whitespace-pre-wrap overflow-x-auto">{children}</div>;

frontend/app/[locale]/(authenticated)/eval/[evaluationId]/results/[evaluationResultId]/page.test.tsx

Lines changed: 87 additions & 90 deletions
Original file line numberDiff line numberDiff line change
@@ -6,8 +6,8 @@ import {
66
evaluationResultsGet,
77
evaluationsGet,
88
EvaluationStatus,
9-
RagEvalBackendEvalEvaluateResultsRouterEvaluationResult,
10-
RagEvalBackendEvalEvaluationsModelsEvaluationResult,
9+
LlmEvalEvalEvaluateResultsRouterEvaluationResult,
10+
LlmEvalEvalEvaluationsModelsEvaluationResult,
1111
TestCaseStatus,
1212
} from "@/app/client";
1313
import { expectValue } from "@/app/test-utils/details-page";
@@ -19,92 +19,89 @@ import Page from "./page";
1919
vi.mock("@/app/client");
2020

2121
describe("Evaluation Result Page", () => {
22-
const fullTestCase: RagEvalBackendEvalEvaluateResultsRouterEvaluationResult =
23-
{
24-
id: "tc1",
25-
actualOutput: "actual",
26-
expectedOutput: "expected",
27-
status: TestCaseStatus.SUCCESS,
28-
configurationId: "configId1",
29-
configurationName: "configName1",
30-
configurationVersion: "configVersion1",
31-
context: ["context"],
32-
input: "input",
33-
metaData: {},
34-
metricsData: [
35-
{
36-
id: "m1",
37-
name: "metric1",
38-
score: 0.5,
39-
error: null,
40-
evaluationModel: "evalModel1",
41-
reason: "reason1",
42-
success: true,
43-
threshold: 0.3,
44-
strictMode: true,
45-
},
46-
{
47-
id: "m2",
48-
name: "metric2",
49-
score: 0.6,
50-
error: "",
51-
evaluationModel: "",
52-
reason: "",
53-
success: false,
54-
threshold: 0.7,
55-
strictMode: false,
56-
},
57-
{
58-
id: "m3",
59-
name: "metric3",
60-
score: null,
61-
error: "error3",
62-
evaluationModel: null,
63-
reason: null,
64-
success: false,
65-
threshold: 0.7,
66-
strictMode: false,
67-
},
68-
],
69-
retrievalContext: ["retrievalContext"],
70-
error: null,
71-
};
72-
73-
const minimalTestCase: RagEvalBackendEvalEvaluateResultsRouterEvaluationResult =
74-
{
75-
id: "tc1",
76-
expectedOutput: "expected",
77-
actualOutput: null,
78-
status: TestCaseStatus.PENDING,
79-
configurationId: null,
80-
configurationName: null,
81-
configurationVersion: null,
82-
context: null,
83-
input: "input",
84-
metaData: null,
85-
metricsData: [],
86-
retrievalContext: null,
87-
error: null,
88-
};
89-
90-
const emptyTestCase: RagEvalBackendEvalEvaluateResultsRouterEvaluationResult =
91-
{
92-
id: "tc1",
93-
expectedOutput: "expected",
94-
actualOutput: "",
95-
status: TestCaseStatus.PENDING,
96-
configurationId: null,
97-
configurationName: null,
98-
configurationVersion: null,
99-
context: [],
100-
input: "input",
101-
metaData: {},
102-
metricsData: [],
103-
retrievalContext: [],
104-
error: null,
105-
};
106-
107-
const evaluation: RagEvalBackendEvalEvaluationsModelsEvaluationResult = {
22+
const fullTestCase: LlmEvalEvalEvaluateResultsRouterEvaluationResult = {
23+
id: "tc1",
24+
actualOutput: "actual",
25+
expectedOutput: "expected",
26+
status: TestCaseStatus.SUCCESS,
27+
configurationId: "configId1",
28+
configurationName: "configName1",
29+
configurationVersion: "configVersion1",
30+
context: ["context"],
31+
input: "input",
32+
metaData: {},
33+
metricsData: [
34+
{
35+
id: "m1",
36+
name: "metric1",
37+
score: 0.5,
38+
error: null,
39+
evaluationModel: "evalModel1",
40+
reason: "reason1",
41+
success: true,
42+
threshold: 0.3,
43+
strictMode: true,
44+
},
45+
{
46+
id: "m2",
47+
name: "metric2",
48+
score: 0.6,
49+
error: "",
50+
evaluationModel: "",
51+
reason: "",
52+
success: false,
53+
threshold: 0.7,
54+
strictMode: false,
55+
},
56+
{
57+
id: "m3",
58+
name: "metric3",
59+
score: null,
60+
error: "error3",
61+
evaluationModel: null,
62+
reason: null,
63+
success: false,
64+
threshold: 0.7,
65+
strictMode: false,
66+
},
67+
],
68+
retrievalContext: ["retrievalContext"],
69+
error: null,
70+
};
71+
72+
const minimalTestCase: LlmEvalEvalEvaluateResultsRouterEvaluationResult = {
73+
id: "tc1",
74+
expectedOutput: "expected",
75+
actualOutput: null,
76+
status: TestCaseStatus.PENDING,
77+
configurationId: null,
78+
configurationName: null,
79+
configurationVersion: null,
80+
context: null,
81+
input: "input",
82+
metaData: null,
83+
metricsData: [],
84+
retrievalContext: null,
85+
error: null,
86+
};
87+
88+
const emptyTestCase: LlmEvalEvalEvaluateResultsRouterEvaluationResult = {
89+
id: "tc1",
90+
expectedOutput: "expected",
91+
actualOutput: "",
92+
status: TestCaseStatus.PENDING,
93+
configurationId: null,
94+
configurationName: null,
95+
configurationVersion: null,
96+
context: [],
97+
input: "input",
98+
metaData: {},
99+
metricsData: [],
100+
retrievalContext: [],
101+
error: null,
102+
};
103+
104+
const evaluation: LlmEvalEvalEvaluationsModelsEvaluationResult = {
108105
id: "eval1",
109106
name: "evaluation",
110107
createdAt: "2021-01-01T00:00:00Z",
@@ -161,7 +158,7 @@ describe("Evaluation Result Page", () => {
161158
});
162159

163160
const expectTestCase = async (
164-
testCase: RagEvalBackendEvalEvaluateResultsRouterEvaluationResult,
161+
testCase: LlmEvalEvalEvaluateResultsRouterEvaluationResult,
165162
) => {
166163
await expectValue("EvaluationResultDetails.input", testCase.input);
167164
await expectValue(
@@ -213,7 +210,7 @@ describe("Evaluation Result Page", () => {
213210
};
214211

215212
const mockApi = (
216-
testCase: RagEvalBackendEvalEvaluateResultsRouterEvaluationResult,
213+
testCase: LlmEvalEvalEvaluateResultsRouterEvaluationResult,
217214
) => {
218215
vi.mocked(evaluationsGet).mockResolvedValue(
219216
successfulServiceResponse(evaluation),

frontend/app/[locale]/(authenticated)/eval/new/page.test.tsx

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -16,6 +16,7 @@ import {
1616
LlmEndpoint,
1717
llmEndpointsGet,
1818
llmEndpointsGetAll,
19+
LlmEvalEvalEvaluationsModelsEvaluationResult,
1920
Metric,
2021
MetricConfigurationRead,
2122
metricsGetAll,
@@ -25,7 +26,6 @@ import {
2526
qaCatalogGetAll,
2627
QaCatalogPreview,
2728
QaCatalogStatus,
28-
RagEvalBackendEvalEvaluationsModelsEvaluationResult,
2929
} from "@/app/client";
3030
import {
3131
expectComboBoxError,
@@ -278,7 +278,7 @@ describe("New Evaluation Page", () => {
278278
};
279279

280280
const mockEvaluationCreate = () => {
281-
const evaluation: RagEvalBackendEvalEvaluationsModelsEvaluationResult = {
281+
const evaluation: LlmEvalEvalEvaluationsModelsEvaluationResult = {
282282
id: "1",
283283
name: "Test Execution",
284284
createdAt: "2021-09-01T00:00:00Z",

frontend/app/[locale]/(authenticated)/qa-catalogs/generate/components/qa-catalog-generation-form-wizard.tsx

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -17,8 +17,8 @@ import {
1717
ActiveQaCatalogGeneratorType,
1818
qaCatalogCreateDataSourceConfig,
1919
qaCatalogGenerate,
20+
QaCatalogGenerationConfig,
2021
QaCatalogGenerationData,
21-
RagasQaCatalogGeneratorConfig,
2222
} from "@/app/client";
2323
import { callApi } from "@/app/utils/backend-client/client";
2424
import { useRouter } from "@/i18n/routing";
@@ -109,7 +109,7 @@ export const QACatalogGeneratorFormWizard = ({
109109
generatorType: data.configuration.type,
110110
});
111111

112-
const config: RagasQaCatalogGeneratorConfig = {
112+
const config: QaCatalogGenerationConfig = {
113113
...data.configuration.config,
114114
type: data.configuration.type,
115115
knowledgeGraphLocation: null,

0 commit comments

Comments (0)