
학습할 내용:
- 지식 베이스 구축
- 관련 문서를 찾는 검색 step이 포함된 RAG 애플리케이션 만들기
- Weave로 검색 step 추적
- 컨텍스트 정밀도를 측정하기 위해 LLM 기반 평가자를 사용해 RAG 애플리케이션 평가
- 맞춤형 점수 함수 정의
사전 요구 사항
- W&B 계정
- Python 3.8+ 또는 Node.js 18+
- 필수 패키지가 설치되어 있어야 합니다:
- Python:
  pip install weave openai
- TypeScript:
  npm install weave openai
- Python:
- OpenAI API 키를 환경 변수로 설정해야 합니다
지식 베이스 구축하기
- Python
- TypeScript
from openai import OpenAI
import weave
from weave import Model
import numpy as np
import json
import asyncio
# Knowledge base: raw news articles that serve as the retrieval corpus.
articles = [
"Novo Nordisk and Eli Lilly rival soars 32 percent after promising weight loss drug results Shares of Denmarks Zealand Pharma shot 32 percent higher in morning trade, after results showed success in its liver disease treatment survodutide, which is also on trial as a drug to treat obesity. The trial “tells us that the 6mg dose is safe, which is the top dose used in the ongoing [Phase 3] obesity trial too,” one analyst said in a note. The results come amid feverish investor interest in drugs that can be used for weight loss.",
"Berkshire shares jump after big profit gain as Buffetts conglomerate nears $1 trillion valuation Berkshire Hathaway shares rose on Monday after Warren Buffetts conglomerate posted strong earnings for the fourth quarter over the weekend. Berkshires Class A and B shares jumped more than 1.5%, each. Class A shares are higher by more than 17% this year, while Class B has gained more than 18%. Berkshire was last valued at $930.1 billion, up from $905.5 billion where it closed on Friday, according to FactSet. Berkshire on Saturday posted fourth-quarter operating earnings of $8.481 billion, about 28 percent higher than the $6.625 billion from the year-ago period, driven by big gains in its insurance business. Operating earnings refers to profits from businesses across insurance, railroads and utilities. Meanwhile, Berkshires cash levels also swelled to record levels. The conglomerate held $167.6 billion in cash in the fourth quarter, surpassing the $157.2 billion record the conglomerate held in the prior quarter.",
"Highmark Health says its combining tech from Google and Epic to give doctors easier access to information Highmark Health announced it is integrating technology from Google Cloud and the health-care software company Epic Systems. The integration aims to make it easier for both payers and providers to access key information they need, even if its stored across multiple points and formats, the company said. Highmark is the parent company of a health plan with 7 million members, a provider network of 14 hospitals and other entities",
"Rivian and Lucid shares plunge after weak EV earnings reports Shares of electric vehicle makers Rivian and Lucid fell Thursday after the companies reported stagnant production in their fourth-quarter earnings after the bell Wednesday. Rivian shares sank about 25 percent, and Lucids stock dropped around 17 percent. Rivian forecast it will make 57,000 vehicles in 2024, slightly less than the 57,232 vehicles it produced in 2023. Lucid said it expects to make 9,000 vehicles in 2024, more than the 8,428 vehicles it made in 2023.",
"Mauritius blocks Norwegian cruise ship over fears of a potential cholera outbreak Local authorities on Sunday denied permission for the Norwegian Dawn ship, which has 2,184 passengers and 1,026 crew on board, to access the Mauritius capital of Port Louis, citing “potential health risks.” The Mauritius Ports Authority said Sunday that samples were taken from at least 15 passengers on board the cruise ship. A spokesperson for the U.S.-headquartered Norwegian Cruise Line Holdings said Sunday that 'a small number of guests experienced mild symptoms of a stomach-related illness' during Norwegian Dawns South Africa voyage.",
"Intuitive Machines lands on the moon in historic first for a U.S. company Intuitive Machines Nova-C cargo lander, named Odysseus after the mythological Greek hero, is the first U.S. spacecraft to soft land on the lunar surface since 1972. Intuitive Machines is the first company to pull off a moon landing — government agencies have carried out all previously successful missions. The company's stock surged in extended trading Thursday, after falling 11 percent in regular trading.",
"Lunar landing photos: Intuitive Machines Odysseus sends back first images from the moon Intuitive Machines cargo moon lander Odysseus returned its first images from the surface. Company executives believe the lander caught its landing gear sideways on the moon's surface while touching down and tipped over. Despite resting on its side, the company's historic IM-1 mission is still operating on the moon.",
]
def docs_to_embeddings(docs: list) -> list:
    """Embed each document with OpenAI's text-embedding-3-small model.

    Args:
        docs: list of document strings.

    Returns:
        A list of embedding vectors (one list[float] per input document).
    """
    # NOTE(review): the extracted snippet had lost all indentation and was
    # not valid Python; indentation restored here.
    openai = OpenAI()
    document_embeddings = []
    for doc in docs:
        response = (
            openai.embeddings.create(input=doc, model="text-embedding-3-small")
            .data[0]
            .embedding
        )
        document_embeddings.append(response)
    return document_embeddings
article_embeddings = docs_to_embeddings(articles) # Note: typically you only do this once per set of articles, then store the embeddings and metadata in a database
require('dotenv').config();
import { OpenAI } from 'openai';
import * as weave from 'weave';
/** A knowledge-base document; `embedding` is filled in by docsToEmbeddings. */
interface Article {
text: string;
embedding?: number[];
}
// Knowledge base: raw news articles that serve as the retrieval corpus.
const articles: Article[] = [
{
text: `Novo Nordisk and Eli Lilly rival soars 32 percent after promising weight loss drug results Shares of Denmarks Zealand Pharma shot 32 percent higher in morning trade, after results showed success in its liver disease treatment survodutide, which is also on trial as a drug to treat obesity. The trial tells us that the 6mg dose is safe, which is the top dose used in the ongoing [Phase 3] obesity trial too, one analyst said in a note. The results come amid feverish investor interest in drugs that can be used for weight loss.`
},
{
text: `Berkshire shares jump after big profit gain as Buffetts conglomerate nears $1 trillion valuation Berkshire Hathaway shares rose on Monday after Warren Buffetts conglomerate posted strong earnings for the fourth quarter over the weekend. Berkshires Class A and B shares jumped more than 1.5%, each. Class A shares are higher by more than 17% this year, while Class B has gained more than 18%. Berkshire was last valued at $930.1 billion, up from $905.5 billion where it closed on Friday, according to FactSet. Berkshire on Saturday posted fourth-quarter operating earnings of $8.481 billion, about 28 percent higher than the $6.625 billion from the year-ago period, driven by big gains in its insurance business. Operating earnings refers to profits from businesses across insurance, railroads and utilities. Meanwhile, Berkshires cash levels also swelled to record levels. The conglomerate held $167.6 billion in cash in the fourth quarter, surpassing the $157.2 billion record the conglomerate held in the prior quarter.`
},
{
text: `Highmark Health says its combining tech from Google and Epic to give doctors easier access to information Highmark Health announced it is integrating technology from Google Cloud and the health-care software company Epic Systems. The integration aims to make it easier for both payers and providers to access key information they need, even if its stored across multiple points and formats, the company said. Highmark is the parent company of a health plan with 7 million members, a provider network of 14 hospitals and other entities`
}
];
/**
 * Cosine similarity between two equal-length vectors: dot(a, b) / (|a| * |b|).
 * Returns NaN if either vector has zero magnitude.
 */
function cosineSimilarity(a: number[], b: number[]): number {
  let dot = 0;
  let sumSqA = 0;
  let sumSqB = 0;
  for (let i = 0; i < a.length; i++) {
    dot += a[i] * b[i];
    sumSqA += a[i] * a[i];
    sumSqB += b[i] * b[i];
  }
  return dot / (Math.sqrt(sumSqA) * Math.sqrt(sumSqB));
}
const docsToEmbeddings = weave.op(async function(docs: Article[]): Promise<Article[]> {
const openai = new OpenAI();
const enrichedDocs = await Promise.all(docs.map(async (doc) => {
const response = await openai.embeddings.create({
input: doc.text,
model: "text-embedding-3-small"
});
return {
...doc,
embedding: response.data[0].embedding
};
}));
return enrichedDocs;
});
RAG 앱 만들기
get_most_relevant_document를 weave.op() 데코레이터로 래핑하고 Model 클래스를 만듭니다. weave.init('<team-name>/rag-quickstart')를 호출해 나중에 확인할 수 있도록 함수의 모든 inputs와 outputs 추적을 시작합니다. 팀 이름을 지정하지 않으면 출력은 W&B 기본 team 또는 entity에 기록됩니다.
- Python
- TypeScript
from openai import OpenAI
import weave
from weave import Model
import numpy as np
import asyncio
@weave.op()
def get_most_relevant_document(query):
    """Retrieval step: embed the query and return the single article whose
    embedding has the highest cosine similarity to it.

    Relies on the module-level `articles` and `article_embeddings` globals.
    """
    # NOTE(review): the extracted snippet had lost all indentation and was
    # not valid Python; indentation restored here.
    openai = OpenAI()
    query_embedding = (
        openai.embeddings.create(input=query, model="text-embedding-3-small")
        .data[0]
        .embedding
    )
    # Cosine similarity between the query embedding and each document embedding.
    similarities = [
        np.dot(query_embedding, doc_emb)
        / (np.linalg.norm(query_embedding) * np.linalg.norm(doc_emb))
        for doc_emb in article_embeddings
    ]
    # Look up the index of the most similar document.
    most_relevant_doc_index = np.argmax(similarities)
    return articles[most_relevant_doc_index]
class RAGModel(Model):
    """RAG app: retrieves the most relevant article and answers from it."""

    # Pydantic-style fields on weave.Model.
    system_message: str
    model_name: str = "gpt-3.5-turbo-1106"

    @weave.op()
    def predict(self, question: str) -> dict:  # Note: `question` is used later to select data from the evaluation rows
        """Answer `question` grounded in the most relevant retrieved article.

        Returns:
            dict with 'answer' (the model's reply) and 'context' (the article used).
        """
        # NOTE(review): the extracted snippet had lost all indentation and was
        # not valid Python; indentation restored here.
        from openai import OpenAI
        context = get_most_relevant_document(question)
        client = OpenAI()
        query = f"""Use the following information to answer the subsequent question. If the answer cannot be found, write "I don't know."
Context:
\"\"\"
{context}
\"\"\"
Question: {question}"""
        response = client.chat.completions.create(
            model=self.model_name,
            messages=[
                {"role": "system", "content": self.system_message},
                {"role": "user", "content": query},
            ],
            temperature=0.0,  # deterministic answers
            response_format={"type": "text"},
        )
        answer = response.choices[0].message.content
        return {'answer': answer, 'context': context}
# Set the team and project name
weave.init('<team-name>/rag-quickstart')
model = RAGModel(
system_message="You are an expert in finance and answer questions related to finance, financial services, and financial markets. When responding based on provided information, be sure to cite the source."
)
# Smoke-test the app with a single question.
model.predict("What significant result was reported about Zealand Pharma's obesity trial?")
class RAGModel {
private openai: OpenAI;
private systemMessage: string;
private modelName: string;
private articleEmbeddings: Article[];
constructor(config: {
systemMessage: string;
modelName?: string;
articleEmbeddings: Article[];
}) {
this.openai = new OpenAI();
this.systemMessage = config.systemMessage;
this.modelName = config.modelName || "gpt-3.5-turbo-1106";
this.articleEmbeddings = config.articleEmbeddings;
this.predict = weave.op(this, this.predict);
}
async predict(question: string): Promise<{
answer: string;
context: string;
}> {
const context = await this.getMostRelevantDocument(question);
const response = await this.openai.chat.completions.create({
model: this.modelName,
messages: [
{ role: "system", content: this.systemMessage },
{ role: "user", content: `Use the following information to answer the subsequent question. If the answer cannot be found, write "I don't know."
Context:
"""
${context}
"""
Question: ${question}` }
],
temperature: 0
});
return {
answer: response.choices[0].message.content || "",
context
};
}
}
LLM Judge로 평가하기
점수 함수 정의하기
question은 행 딕셔너리에서 가져옵니다. output은 모델의 출력입니다. 모델 입력도 입력 매개변수에 따라 예시에서 가져오므로, 여기서 역시 question이 사용됩니다. 이 예제는 async 함수를 사용하므로 병렬로 빠르게 실행됩니다. async에 대한 간단한 소개가 필요하면 여기를 참고하세요.
- Python
- TypeScript
from openai import OpenAI
import weave
import asyncio
@weave.op()
async def context_precision_score(question, output):
    """LLM-judge scorer: asks GPT-4 whether the retrieved context was useful
    in arriving at the given answer.

    Args:
        question: the dataset row's `question` column (matched by name).
        output: the model's prediction dict with 'answer' and 'context' keys.

    Returns:
        dict with a boolean 'verdict' metric.
    """
    # Bug fix: this snippet's import block does not import json, but
    # json.loads is used below; import it locally so the snippet runs.
    import json

    context_precision_prompt = """Given question, answer and context verify if the context was useful in arriving at the given answer. Give verdict as "1" if useful and "0" if not with json output.
Output in only valid JSON format.
question: {question}
context: {context}
answer: {answer}
verdict: """
    client = OpenAI()
    prompt = context_precision_prompt.format(
        question=question,
        context=output['context'],
        answer=output['answer'],
    )
    response = client.chat.completions.create(
        model="gpt-4-turbo-preview",
        messages=[{"role": "user", "content": prompt}],
        response_format={"type": "json_object"},
    )
    # Parse the judge's JSON reply (named separately to avoid shadowing
    # the API response object, as the original did).
    verdict_json = json.loads(response.choices[0].message.content)
    return {
        "verdict": int(verdict_json["verdict"]) == 1,
    }
# Example questions used as the evaluation dataset (one row per dict).
questions = [
{"question": "What significant result was reported about Zealand Pharma's obesity trial?"},
{"question": "How much did Berkshire Hathaway's cash levels increase in the fourth quarter?"},
{"question": "What is the goal of Highmark Health's integration of Google Cloud and Epic Systems technology?"},
{"question": "What were Rivian and Lucid's vehicle production forecasts for 2024?"},
{"question": "Why was the Norwegian Dawn cruise ship denied access to Mauritius?"},
{"question": "Which company achieved the first U.S. moon landing since 1972?"},
{"question": "What issue did Intuitive Machines' lunar lander encounter upon landing on the moon?"}
]
# Run every question through the model and score each row with the LLM judge.
evaluation = weave.Evaluation(dataset=questions, scorers=[context_precision_score])
asyncio.run(evaluation.evaluate(model)) # Note: you need to define the model being evaluated
/**
 * LLM-judge scorer: asks GPT-4 whether the retrieved context was useful
 * in arriving at the given answer.
 */
const contextPrecisionScore = weave.op(async function(args: {
  datasetRow: QuestionRow;
  modelOutput: { answer: string; context: string; }
}): Promise<ScorerResult> {
  const openai = new OpenAI();
  // Bug fix: the original prompt was a truncated placeholder ("...") and
  // never included the question, context, or answer being judged.
  const prompt = `Given question, answer and context verify if the context was useful in arriving at the given answer. Give verdict as "1" if useful and "0" if not with json output.
Output in only valid JSON format.
question: ${args.datasetRow.question}
context: ${args.modelOutput.context}
answer: ${args.modelOutput.answer}
verdict: `;
  const response = await openai.chat.completions.create({
    model: "gpt-4-turbo-preview",
    messages: [{ role: "user", content: prompt }],
    response_format: { type: "json_object" }
  });
  const result = JSON.parse(response.choices[0].message.content || "{}");
  return {
    verdict: parseInt(result.verdict) === 1
  };
});
// Define the Evaluation with the question dataset and the LLM-judge scorer.
const evaluation = new weave.Evaluation({
dataset: createQuestionDataset(),
scorers: [contextPrecisionScore]
});
// Adapter op: maps a dataset row to the model's predict() input.
await evaluation.evaluate({
model: weave.op((args: { datasetRow: QuestionRow }) =>
model.predict(args.datasetRow.question)
)
});
선택 사항: Scorer 클래스 정의하기
LLMJudge 클래스를 만들어야 할 수 있습니다. Weave는 바로 사용할 수 있는 Scorer 클래스 목록을 제공하며, 맞춤형 Scorer를 쉽게 만들 수 있도록 지원합니다. 다음 예제는 맞춤형 class CorrectnessLLMJudge(Scorer)를 만드는 방법을 보여줍니다.
개략적으로 보면 맞춤형 Scorer를 만드는 단계는 매우 단순합니다:
1. `weave.flow.scorer.Scorer`를 상속하는 맞춤형 클래스를 정의합니다.
2. `score` 함수를 재정의하고, 함수의 각 call을 추적하려면 `@weave.op()`을 추가합니다.
   - 이 함수는 모델의 예측 결과가 전달될 `output` 인수를 정의해야 합니다. 모델이 "None"을 반환할 수 있다면 유형을 `Optional[dict]`로 정의하세요.
   - 나머지 인수는 일반 `Any` 또는 `dict`일 수도 있고, `weave.Evaluate` 클래스를 사용해 모델을 평가할 때 사용하는 데이터셋에서 특정 column을 선택할 수도 있습니다. 이 인수들의 이름은 column 이름과 정확히 같아야 하며, `preprocess_model_input`을 사용하는 경우에는 그 처리를 거친 뒤 단일 행의 키 이름과 정확히 일치해야 합니다.
3. 선택 사항: 집계 점수 계산을 사용자 지정하려면 `summarize` 함수를 재정의합니다. 기본적으로 맞춤형 함수를 정의하지 않으면 Weave는 `weave.flow.scorer.auto_summarize` 함수를 사용합니다.
   - 이 함수에는 `@weave.op()` decorator가 있어야 합니다.
- Python
- TypeScript
from weave import Scorer
class CorrectnessLLMJudge(Scorer):
    """Custom Scorer that uses a judge model to grade answer correctness."""

    # Prompt template handed to the judge model.
    prompt: str
    # Name of the judge model (OpenAI, HF, ...).
    model_name: str
    # Device the judge model runs on -- presumably "cpu"/"cuda"; confirm with get_model.
    device: str

    @weave.op()
    async def score(self, output: Optional[dict], query: str, answer: str) -> Any:
        """Score the correctness of a prediction by comparing pred, query, target.

        Args:
            output: dict provided by the model under evaluation (may be None).
            query: the question as defined in the dataset.
            answer: the ground-truth answer as defined in the dataset.

        Returns:
            A single dict {metric name: single evaluation value}.
        """
        # get_model is defined as a general model getter based on the
        # provided params (OpenAI, HF, ...).
        eval_model = get_model(
            model_name=self.model_name,
            prompt=self.prompt,  # bug fix: the original was missing this comma
            device=self.device,
        )
        # Async evaluation to speed things up -- it does not have to be async.
        grade = await eval_model.async_predict(
            {
                "query": query,
                "answer": answer,
                # Guard the annotated Optional case instead of crashing on None.
                "result": output.get("result") if output else None,
            }
        )
        # Output parsing -- could be made more robust with pydantic.
        evaluation = "incorrect" not in grade["text"].strip().lower()
        # Column name displayed in Weave.
        return {"correct": evaluation}

    @weave.op()
    def summarize(self, score_rows: list) -> Optional[dict]:
        """Aggregate all the scores the scoring function computed per row.

        Args:
            score_rows: list of dicts; each dict holds a metric and its score.

        Returns:
            A nested dict with the same structure as the input.
        """
        # If nothing is provided, the weave.flow.scorer.auto_summarize function is used
        # return auto_summarize(score_rows)
        valid_data = [x.get("correct") for x in score_rows if x.get("correct") is not None]
        count_true = valid_data.count(True)  # valid_data is already a list; no need to copy
        int_data = [int(x) for x in valid_data]
        sample_mean = np.mean(int_data) if int_data else 0
        sample_variance = np.var(int_data) if int_data else 0
        sample_error = np.sqrt(sample_variance / len(int_data)) if int_data else 0
        # The extra "correct" layer is not required, but it makes the structure
        # clearer in the UI.
        return {
            "correct": {
                "true_count": count_true,
                "true_fraction": sample_mean,
                "stderr": sample_error,
            }
        }
이 기능은 아직 TypeScript에서 사용할 수 없습니다.
Evaluation의 scorers 인수에 다음과 같이 전달합니다:
- Python
- TypeScript
evaluation = weave.Evaluation(dataset=questions, scorers=[CorrectnessLLMJudge()])
이 기능은 아직 TypeScript에서 사용할 수 없습니다.
전체 흐름 정리
1. LLM call과 검색 step 함수를 `weave.op()`으로 감싸기
2. (선택) `predict` 함수와 앱 정보를 포함한 `Model` 하위 클래스 만들기
3. 평가할 예제 수집하기
4. 단일 예제를 채점하는 `점수 함수` 만들기
5. `Evaluation` 클래스를 사용해 예제에 대한 평가 실행하기
Evaluation이 비동기로 실행되면서 OpenAI, Anthropic 등의 모델에서 요청 속도 제한이 트리거될 수 있습니다. 이를 방지하려면 병렬 worker 수를 제한하는 환경 변수를 설정하면 됩니다. 예: WEAVE_PARALLELISM=3.
전체 코드는 다음과 같습니다.
- Python
- TypeScript
from openai import OpenAI
import weave
from weave import Model
import numpy as np
import json
import asyncio
# Examples to use for evaluation (the knowledge base / retrieval corpus)
articles = [
"Novo Nordisk and Eli Lilly rival soars 32 percent after promising weight loss drug results Shares of Denmarks Zealand Pharma shot 32 percent higher in morning trade, after results showed success in its liver disease treatment survodutide, which is also on trial as a drug to treat obesity. The trial “tells us that the 6mg dose is safe, which is the top dose used in the ongoing [Phase 3] obesity trial too,” one analyst said in a note. The results come amid feverish investor interest in drugs that can be used for weight loss.",
"Berkshire shares jump after big profit gain as Buffetts conglomerate nears $1 trillion valuation Berkshire Hathaway shares rose on Monday after Warren Buffetts conglomerate posted strong earnings for the fourth quarter over the weekend. Berkshires Class A and B shares jumped more than 1.5%, each. Class A shares are higher by more than 17% this year, while Class B has gained more than 18%. Berkshire was last valued at $930.1 billion, up from $905.5 billion where it closed on Friday, according to FactSet. Berkshire on Saturday posted fourth-quarter operating earnings of $8.481 billion, about 28 percent higher than the $6.625 billion from the year-ago period, driven by big gains in its insurance business. Operating earnings refers to profits from businesses across insurance, railroads and utilities. Meanwhile, Berkshires cash levels also swelled to record levels. The conglomerate held $167.6 billion in cash in the fourth quarter, surpassing the $157.2 billion record the conglomerate held in the prior quarter.",
"Highmark Health says its combining tech from Google and Epic to give doctors easier access to information Highmark Health announced it is integrating technology from Google Cloud and the health-care software company Epic Systems. The integration aims to make it easier for both payers and providers to access key information they need, even if it's stored across multiple points and formats, the company said. Highmark is the parent company of a health plan with 7 million members, a provider network of 14 hospitals and other entities",
"Rivian and Lucid shares plunge after weak EV earnings reports Shares of electric vehicle makers Rivian and Lucid fell Thursday after the companies reported stagnant production in their fourth-quarter earnings after the bell Wednesday. Rivian shares sank about 25 percent, and Lucids stock dropped around 17 percent. Rivian forecast it will make 57,000 vehicles in 2024, slightly less than the 57,232 vehicles it produced in 2023. Lucid said it expects to make 9,000 vehicles in 2024, more than the 8,428 vehicles it made in 2023.",
"Mauritius blocks Norwegian cruise ship over fears of a potential cholera outbreak Local authorities on Sunday denied permission for the Norwegian Dawn ship, which has 2,184 passengers and 1,026 crew on board, to access the Mauritius capital of Port Louis, citing “potential health risks.” The Mauritius Ports Authority said Sunday that samples were taken from at least 15 passengers on board the cruise ship. A spokesperson for the U.S.-headquartered Norwegian Cruise Line Holdings said Sunday that 'a small number of guests experienced mild symptoms of a stomach-related illness' during Norwegian Dawns South Africa voyage.",
"Intuitive Machines lands on the moon in historic first for a U.S. company Intuitive Machines Nova-C cargo lander, named Odysseus after the mythological Greek hero, is the first U.S. spacecraft to soft land on the lunar surface since 1972. Intuitive Machines is the first company to pull off a moon landing — government agencies have carried out all previously successful missions. The company's stock surged in extended trading Thursday, after falling 11 percent in regular trading.",
"Lunar landing photos: Intuitive Machines Odysseus sends back first images from the moon Intuitive Machines cargo moon lander Odysseus returned its first images from the surface. Company executives believe the lander caught its landing gear sideways on the surface of the moon while touching down and tipped over. Despite resting on its side, the company's historic IM-1 mission is still operating on the moon.",
]
def docs_to_embeddings(docs: list) -> list:
    """Embed each document with OpenAI's text-embedding-3-small model.

    Args:
        docs: list of document strings.

    Returns:
        A list of embedding vectors (one list[float] per input document).
    """
    # NOTE(review): the extracted snippet had lost all indentation and was
    # not valid Python; indentation restored here.
    openai = OpenAI()
    document_embeddings = []
    for doc in docs:
        response = (
            openai.embeddings.create(input=doc, model="text-embedding-3-small")
            .data[0]
            .embedding
        )
        document_embeddings.append(response)
    return document_embeddings
article_embeddings = docs_to_embeddings(articles) # Note: typically you only do this once per set of articles, then store the embeddings and metadata in a database
# Add the decorator to the retrieval step
@weave.op()
def get_most_relevant_document(query):
    """Retrieval step: embed the query and return the single article whose
    embedding has the highest cosine similarity to it.

    Relies on the module-level `articles` and `article_embeddings` globals.
    """
    # NOTE(review): the extracted snippet had lost all indentation and was
    # not valid Python; indentation restored here.
    openai = OpenAI()
    query_embedding = (
        openai.embeddings.create(input=query, model="text-embedding-3-small")
        .data[0]
        .embedding
    )
    # Cosine similarity between the query embedding and each document embedding.
    similarities = [
        np.dot(query_embedding, doc_emb)
        / (np.linalg.norm(query_embedding) * np.linalg.norm(doc_emb))
        for doc_emb in article_embeddings
    ]
    # Look up the index of the most similar document.
    most_relevant_doc_index = np.argmax(similarities)
    return articles[most_relevant_doc_index]
# Create a Model subclass containing the app details and a predict function
# that generates the response
class RAGModel(Model):
    """RAG app: retrieves the most relevant article and answers from it."""

    # Pydantic-style fields on weave.Model.
    system_message: str
    model_name: str = "gpt-3.5-turbo-1106"

    @weave.op()
    def predict(self, question: str) -> dict:  # Note: `question` is used later to select data from the evaluation rows
        """Answer `question` grounded in the most relevant retrieved article.

        Returns:
            dict with 'answer' (the model's reply) and 'context' (the article used).
        """
        # NOTE(review): the extracted snippet had lost all indentation and was
        # not valid Python; indentation restored here.
        from openai import OpenAI
        context = get_most_relevant_document(question)
        client = OpenAI()
        query = f"""다음 정보를 사용하여 아래 질문에 답하세요. 답을 찾을 수 없는 경우 "모르겠습니다"라고 작성하세요.
Context:
\"\"\"
{context}
\"\"\"
Question: {question}"""
        response = client.chat.completions.create(
            model=self.model_name,
            messages=[
                {"role": "system", "content": self.system_message},
                {"role": "user", "content": query},
            ],
            temperature=0.0,  # deterministic answers
            response_format={"type": "text"},
        )
        answer = response.choices[0].message.content
        return {'answer': answer, 'context': context}
# Set the team and project name
weave.init('<team-name>/rag-quickstart')
model = RAGModel(
system_message="당신은 금융 전문가로서 금융, 금융 서비스, 금융 시장과 관련된 질문에 답합니다. 제공된 정보를 바탕으로 답변할 때는 반드시 출처를 인용하세요."
)
# Scoring function that uses the question and the output to produce a score
@weave.op()
async def context_precision_score(question, output):
    """LLM-judge scorer: asks GPT-4 whether the retrieved context was useful
    in arriving at the given answer.

    Args:
        question: the dataset row's `question` column (matched by name).
        output: the model's prediction dict with 'answer' and 'context' keys.

    Returns:
        dict with a boolean 'verdict' metric.
    """
    # NOTE(review): the extracted snippet had lost all indentation and was
    # not valid Python; indentation restored here.
    context_precision_prompt = """주어진 질문, 답변, 컨텍스트를 바탕으로 해당 컨텍스트가 답변 도출에 유용했는지 확인하세요. 유용한 경우 "1", 그렇지 않은 경우 "0"을 JSON 형식으로 출력하세요.
유효한 JSON 형식으로만 출력하세요.
question: {question}
context: {context}
answer: {answer}
verdict: """
    client = OpenAI()
    prompt = context_precision_prompt.format(
        question=question,
        context=output['context'],
        answer=output['answer'],
    )
    response = client.chat.completions.create(
        model="gpt-4-turbo-preview",
        messages=[{"role": "user", "content": prompt}],
        response_format={"type": "json_object"},
    )
    # Parse the judge's JSON reply (named separately to avoid shadowing
    # the API response object, as the original did).
    verdict_json = json.loads(response.choices[0].message.content)
    return {
        "verdict": int(verdict_json["verdict"]) == 1,
    }
# Example questions used as the evaluation dataset (one row per dict).
questions = [
{"question": "Zealand Pharma의 비만 임상시험에서 보고된 주요 결과는 무엇인가요?"},
{"question": "Berkshire Hathaway의 4분기 현금 보유액은 얼마나 증가했나요?"},
{"question": "Highmark Health의 Google Cloud 및 Epic Systems 기술 통합의 목표는 무엇인가요?"},
{"question": "Rivian과 Lucid의 2024년 차량 생산 전망은 어떻게 되나요?"},
{"question": "Norwegian Dawn 크루즈선이 모리셔스 입항을 거부당한 이유는 무엇인가요?"},
{"question": "1972년 이후 최초로 미국의 달 착륙을 달성한 회사는 어디인가요?"},
{"question": "Intuitive Machines의 달 착륙선이 달에 착륙할 때 어떤 문제가 발생했나요?"}
]
# Define the Evaluation object, passing the example questions and the scoring function
evaluation = weave.Evaluation(dataset=questions, scorers=[context_precision_score])
asyncio.run(evaluation.evaluate(model))
require('dotenv').config();
import { OpenAI } from 'openai';
import * as weave from 'weave';
/** A knowledge-base document; `embedding` is filled in by docsToEmbeddings. */
interface Article {
text: string;
embedding?: number[];
}
// Knowledge base: raw news articles that serve as the retrieval corpus.
const articles: Article[] = [
{
text: `Novo Nordisk and Eli Lilly rival soars 32 percent after promising weight loss drug results Shares of Denmarks Zealand Pharma shot 32 percent higher in morning trade, after results showed success in its liver disease treatment survodutide, which is also on trial as a drug to treat obesity. The trial tells us that the 6mg dose is safe, which is the top dose used in the ongoing [Phase 3] obesity trial too, one analyst said in a note. The results come amid feverish investor interest in drugs that can be used for weight loss.`
},
{
text: `Berkshire shares jump after big profit gain as Buffetts conglomerate nears $1 trillion valuation Berkshire Hathaway shares rose on Monday after Warren Buffetts conglomerate posted strong earnings for the fourth quarter over the weekend. Berkshires Class A and B shares jumped more than 1.5%, each. Class A shares are higher by more than 17% this year, while Class B has gained more than 18%. Berkshire was last valued at $930.1 billion, up from $905.5 billion where it closed on Friday, according to FactSet. Berkshire on Saturday posted fourth-quarter operating earnings of $8.481 billion, about 28 percent higher than the $6.625 billion from the year-ago period, driven by big gains in its insurance business. Operating earnings refers to profits from businesses across insurance, railroads and utilities. Meanwhile, Berkshires cash levels also swelled to record levels. The conglomerate held $167.6 billion in cash in the fourth quarter, surpassing the $157.2 billion record the conglomerate held in the prior quarter.`
},
{
text: `Highmark Health says its combining tech from Google and Epic to give doctors easier access to information Highmark Health announced it is integrating technology from Google Cloud and the health-care software company Epic Systems. The integration aims to make it easier for both payers and providers to access key information they need, even if its stored across multiple points and formats, the company said. Highmark is the parent company of a health plan with 7 million members, a provider network of 14 hospitals and other entities`
}
];
/**
 * Cosine similarity between two equal-length vectors: dot(a, b) / (|a| * |b|).
 * Returns NaN if either vector has zero magnitude.
 */
function cosineSimilarity(a: number[], b: number[]): number {
  let dot = 0;
  let sumSqA = 0;
  let sumSqB = 0;
  for (let i = 0; i < a.length; i++) {
    dot += a[i] * b[i];
    sumSqA += a[i] * a[i];
    sumSqB += b[i] * b[i];
  }
  return dot / (Math.sqrt(sumSqA) * Math.sqrt(sumSqB));
}
const docsToEmbeddings = weave.op(async function(docs: Article[]): Promise<Article[]> {
const openai = new OpenAI();
const enrichedDocs = await Promise.all(docs.map(async (doc) => {
const response = await openai.embeddings.create({
input: doc.text,
model: "text-embedding-3-small"
});
return {
...doc,
embedding: response.data[0].embedding
};
}));
return enrichedDocs;
});
// RAG app: plain TypeScript class (not weave.Model). predict() is rebound
// through weave.op in the constructor so every call is traced.
class RAGModel {
private openai: OpenAI;
private systemMessage: string;
private modelName: string;
private articleEmbeddings: Article[];
constructor(config: {
systemMessage: string;
modelName?: string;
articleEmbeddings: Article[];
}) {
this.openai = new OpenAI();
this.systemMessage = config.systemMessage;
this.modelName = config.modelName || "gpt-3.5-turbo-1106";
this.articleEmbeddings = config.articleEmbeddings;
// Wrap predict as a weave op with `this` preserved for tracing.
this.predict = weave.op(this, this.predict);
}
// Retrieval step: embed the query and return the most similar article's text.
private async getMostRelevantDocument(query: string): Promise<string> {
const queryEmbedding = await this.openai.embeddings.create({
input: query,
model: "text-embedding-3-small"
});
// Articles without an embedding score 0 and are effectively skipped.
const similarities = this.articleEmbeddings.map(doc => {
if (!doc.embedding) return 0;
return cosineSimilarity(queryEmbedding.data[0].embedding, doc.embedding);
});
const mostRelevantIndex = similarities.indexOf(Math.max(...similarities));
return this.articleEmbeddings[mostRelevantIndex].text;
}
// Generates an answer grounded in the retrieved context.
async predict(question: string): Promise<{
answer: string;
context: string;
}> {
const context = await this.getMostRelevantDocument(question);
const response = await this.openai.chat.completions.create({
model: this.modelName,
messages: [
{ role: "system", content: this.systemMessage },
{
role: "user",
content: `다음 정보를 사용하여 이후 질문에 답하세요. 답을 찾을 수 없는 경우 "모르겠습니다."라고 작성하세요.
Context:
"""
${context}
"""
Question: ${question}`
}
],
temperature: 0
});
return {
answer: response.choices[0].message.content || "",
context
};
}
}
/** Shape the scorer returns for each evaluated row. */
interface ScorerResult {
verdict: boolean;
}
/** One row of the evaluation dataset. */
interface QuestionRow {
question: string;
}
/** Builds the evaluation dataset of example questions (Korean). */
function createQuestionDataset(): weave.Dataset<QuestionRow> {
  const rows: QuestionRow[] = [
    { question: "Zealand Pharma의 비만 임상시험에서 보고된 주요 결과는 무엇인가요?" },
    { question: "Berkshire Hathaway의 현금 보유액은 4분기에 얼마나 증가했나요?" },
    { question: "Highmark Health의 Google Cloud 및 Epic Systems 기술 통합 목표는 무엇인가요?" }
  ];
  return new weave.Dataset<QuestionRow>({ id: 'rag-questions', rows });
}
const contextPrecisionScore = weave.op(async function(args: {
datasetRow: QuestionRow;
modelOutput: { answer: string; context: string; }
}): Promise<ScorerResult> {
const openai = new OpenAI();
const prompt = `주어진 질문, 답변, 컨텍스트를 바탕으로 해당 컨텍스트가 답변 도출에 유용했는지 검증하세요. 유용한 경우 "1", 그렇지 않은 경우 "0"을 JSON 형식으로 출력하세요.
유효한 JSON 형식으로만 출력하세요.
question: ${args.datasetRow.question}
context: ${args.modelOutput.context}
answer: ${args.modelOutput.answer}
verdict: `;
const response = await openai.chat.completions.create({
model: "gpt-4-turbo-preview",
messages: [{ role: "user", content: prompt }],
response_format: { type: "json_object" }
});
const result = JSON.parse(response.choices[0].message.content || "{}");
return {
verdict: parseInt(result.verdict) === 1
};
});
async function main() {
# 팀 및 프로젝트 이름을 설정하세요
await weave.init('<team-name>/rag-quickstart');
const articleEmbeddings = await docsToEmbeddings(articles);
const model = new RAGModel({
systemMessage: "당신은 금융 전문가로서 금융, 금융 서비스 및 금융 시장과 관련된 질문에 답변합니다. 제공된 정보를 바탕으로 응답할 때는 반드시 출처를 인용하세요.",
articleEmbeddings
});
const evaluation = new weave.Evaluation({
dataset: createQuestionDataset(),
scorers: [contextPrecisionScore]
});
const results = await evaluation.evaluate({
model: weave.op((args: { datasetRow: QuestionRow }) =>
model.predict(args.datasetRow.question)
)
});
console.log('평가 결과:', results);
}
// Run main only when this file is executed directly (CommonJS entry check).
if (require.main === module) {
main().catch(console.error);
}