Basic
Qualifire provides an SDK to help you integrate our services into your application. The SDK is available for the following languages:
- Node.js
- Python
Node.js
To use the Node.js SDK, install it using npm:
npm install qualifire
If apiKey is not provided, the SDK will look for a value in the environment variable QUALIFIRE_API_KEY.
Working with Text
You can also send parsed text to the SDK for evaluation:
// Import the Qualifire SDK client.
import { Qualifire } from "qualifire";
// If no apiKey option is passed, the SDK reads QUALIFIRE_API_KEY from the environment.
const qualifire = new Qualifire();
// Evaluate an input/output pair against the selected checks.
// NOTE(review): evaluate is presumably async — callers likely want to await the result; confirm against the SDK reference.
qualifire.evaluate({
input: "What is the capital of France?",
output: "Paris",
promptInjections: true, // detect prompt-injection attempts in the input
piiCheck: true, // detect personally identifiable information
hallucinationsCheck: true,
groundingCheck: true,
consistencyCheck: true,
assertions: ["don't give medical advice"], // free-text policy assertions to enforce
dangerousContentCheck: true,
harassmentCheck: true,
hateSpeechCheck: true,
sexualContentCheck: true,
});
Python
To use the Python SDK, install it using pip:
pip install qualifire
Then use it in your code:
import qualifire
# Create a client; api_key can be omitted if QUALIFIRE_API_KEY is set in the environment.
client = qualifire.client.Client(
api_key="YOUR API KEY",
)
# Option 1: run a pre-configured evaluation by its id.
res = client.invoke_evaluation(
input="what is the capital of France",
output="Paris",
evaluation_id='g2r8puzojwb8q6yi2f6x162a' # get this from the evaluations page
)
# ---- OR ----
# Option 2: run an ad-hoc evaluation, enabling individual checks inline.
res = client.evaluate(
input="what is the capital of France",
output="Paris",
prompt_injections=True,
pii_check=True,
hallucinations_check=True,
grounding_check=True,
consistency_check=True,
assertions=['don\'t give medical advice'],  # free-text policy assertions to enforce
dangerous_content_check=True,
harassment_check=True,
hate_speech_check=True,
sexual_content_check=True,
)
# ---- OR ----
# Option 3: evaluate a full chat transcript, including tool calls, and check
# whether the assistant selected the right tool with the right arguments.
# NOTE(review): LLMMessage / LLMToolCall / LLMToolDefinition must be imported
# from the qualifire package — confirm the exact import path in the SDK reference.
res = client.evaluate(
    messages=[
        LLMMessage(
            role="user",
            content="What is the weather tomorrow in New York?",
        ),
        LLMMessage(
            role="assistant",
            content="please run the following tool",  # fixed: comma was missing after this argument
            tool_calls=[
                # fixed: LLMToolCall was called with dict syntax ("id": ...),
                # which is invalid Python — use keyword arguments instead.
                LLMToolCall(
                    id="tool_call_id",
                    name="get_weather_forecast",
                    arguments={
                        "location": "New York, NY",
                        "date": "tomorrow",
                    },
                ),
            ],
        ),
    ],
    available_tools=[
        LLMToolDefinition(
            name="get_weather_forecast",
            description="Provides the weather forecast for a given location and date.",
            # JSON-Schema description of the tool's parameters.
            parameters={
                "type": "object",
                "properties": {
                    "location": {
                        "type": "string",
                        "description": "The city and state, e.g., San Francisco, CA",
                    },
                    "date": {
                        "type": "string",
                        "description": "The date for the forecast, e.g., tomorrow, or YYYY-MM-DD",
                    },
                },
                "required": [
                    "location",
                    "date",
                ],
            },
        ),
    ],
    tool_selection_quality_check=True,
)
If api_key is not provided, the SDK will look for a value in the environment variable QUALIFIRE_API_KEY.
Async evaluations
In some cases we wouldn't want to wait for the response of the evaluation; in those cases we can use the block=False parameter in the call:
import qualifire

# If api_key is not provided, the SDK reads QUALIFIRE_API_KEY from the environment.
client = qualifire.client.Client(
    api_key="YOUR API KEY",  # fixed: was a bare, undefined `api_key` name (NameError)
)

# block=False returns immediately instead of waiting for the evaluation result.
client.evaluate(
    "what is the capital of France",
    "Paris",
    block=False,
)
Instrumentation
The SDK provides a way to instrument your code with the init function:
import qualifire
from openai import OpenAI
# Configure Qualifire tracing
# init points the SDK at the Qualifire gateway so requests are traced.
qualifire.init(
gateway_url="https://proxy.qualifire.ai",
api_key="YOUR_QUALIFIRE_API_KEY"
)
# Route OpenAI traffic through the Qualifire proxy; the X-Qualifire-API-Key
# header authenticates each proxied request with Qualifire.
client = OpenAI(
api_key="OpenAI API Key",
base_url="https://proxy.qualifire.ai/api/providers/openai",
default_headers={
"X-Qualifire-API-Key": "YOUR API KEY",
}
)
# A normal OpenAI call — now proxied and traced by Qualifire.
res = client.chat.completions.create(
model="gpt-3.5-turbo",
messages=[
{
"role": "user",
"content": "tell me a joke",
},
],
)
You can also trace LangChain / LangGraph agents:
import qualifire
from langchain.chat_models import init_chat_model
from langgraph.prebuilt import create_react_agent

# Enable Qualifire tracing for the agent run.
qualifire.init(
    api_key="YOUR API KEY",
)

tools = ...  # your LangChain tools go here

# Route the model through the Qualifire proxy so calls are traced;
# the X-Qualifire-API-Key header authenticates with Qualifire.
llm = init_chat_model(
    "openai:gpt-4.1",
    api_key="Your OpenAI Api Key",
    base_url="https://proxy.qualifire.ai/api/providers/openai/",
    default_headers={
        "X-Qualifire-API-Key": "Your Qualifire Api Key"
    },
)

agent = create_react_agent(
    llm,
    tools,
    prompt="system prompt...",
)

question = "Tell me a joke"
for step in agent.stream(
    {"messages": [{"role": "user", "content": question}]},
    stream_mode="values",
):
    # fixed: the loop body must be indented under the for statement
    step["messages"][-1].pretty_print()
See the API Reference documentation for full details on all available parameters.