From cd57d8d062f881619467b3d54dfcf282b63dfe97 Mon Sep 17 00:00:00 2001
From: Matthew Lenhard
Date: Fri, 16 May 2025 14:19:50 -0400
Subject: [PATCH] feat - tests + evals

---
 README.md          |  9 +++++++++
 package.json       |  3 ++-
 src/evals/evals.ts | 42 ++++++++++++++++++++++++++++++++++++++++++
 3 files changed, 53 insertions(+), 1 deletion(-)
 create mode 100644 src/evals/evals.ts

diff --git a/README.md b/README.md
index 757c4c1..eeec948 100644
--- a/README.md
+++ b/README.md
@@ -103,6 +103,15 @@
 }
 ```
+
+
+## Running evals
+
+The evals package loads an MCP client that then runs the index.ts file, so there is no need to rebuild between tests. You can load environment variables by prefixing the npx command. Full documentation can be found [here](https://www.mcpevals.io/docs).
+
+```bash
+OPENAI_API_KEY=your-key npx mcp-eval src/evals/evals.ts src/index.ts
+```

 ## Configuration on Claude/Windsurf/Cursor/Cline

 ```json
diff --git a/package.json b/package.json
index c669b36..26c811d 100644
--- a/package.json
+++ b/package.json
@@ -27,7 +27,8 @@
     "@vectorize-io/vectorize-client": "^0.1.3",
     "dotenv": "^16.4.7",
     "p-queue": "^8.0.1",
-    "shx": "^0.3.4"
+    "shx": "^0.3.4",
+    "mcp-evals": "^1.0.18"
   },
   "devDependencies": {
     "@types/jest": "^29.5.14",
diff --git a/src/evals/evals.ts b/src/evals/evals.ts
new file mode 100644
index 0000000..74a4c6a
--- /dev/null
+++ b/src/evals/evals.ts
@@ -0,0 +1,42 @@
+// evals.ts
+
+import { openai } from "@ai-sdk/openai";
+import { grade } from "mcp-evals";
+import type { EvalConfig, EvalFunction } from "mcp-evals";
+
+const retrieveEval: EvalFunction = {
+  name: 'retrieveEval',
+  description: 'Evaluates retrieving documents from the pipeline',
+  run: async () => {
+    const result = await grade(openai("gpt-4"), "Retrieve 4 documents about best practices for large language models.");
+    return JSON.parse(result);
+  }
+};
+
+const deepResearchEval: EvalFunction = {
+  name: 'Deep Research Tool Evaluation',
+  description: 'Evaluates the functionality of the deep research tool',
+  run: async () => {
+    const result = await grade(openai("gpt-4"), "Perform a deep research on the impact of microplastics in oceans and provide references. Use web search if necessary.");
+    return JSON.parse(result);
+  }
+};
+
+const extractEval: EvalFunction = {
+  name: 'extract tool evaluation',
+  description: 'Evaluates the text extraction and chunking functionality of the extract tool',
+  run: async () => {
+    const prompt = "Please extract the text from the following base64-encoded document: dGVzdCBkYXRh. The content type is application/pdf. Provide the extracted text.";
+    const result = await grade(openai("gpt-4"), prompt);
+    return JSON.parse(result);
+  }
+};
+
+const config: EvalConfig = {
+  model: openai("gpt-4"),
+  evals: [retrieveEval, deepResearchEval, extractEval]
+};
+
+export default config;
+
+export const evals = [retrieveEval, deepResearchEval, extractEval];