🦜 Langchain
Portkey adds core production capabilities to any Langchain app.
Using Portkey with Langchain is as simple as choosing which Portkey features you want, enabling them via `headers=Portkey.Config`, and passing that config in your LLM calls.

Python
```python
from langchain.llms import OpenAI
from langchain.agents import load_tools, initialize_agent, AgentType
from langchain.utilities import Portkey

llm = OpenAI(
    openai_api_key="OPENAI_API_KEY",
    headers=Portkey.Config(
        api_key="PORTKEY_API_KEY",
        trace_id="TRACE_ID",
    ),
)

# Tracing agent calls across different requests
tools = load_tools(["serpapi", "llm-math"], llm=llm)
agent = initialize_agent(tools, llm, agent=AgentType.ZERO_SHOT_REACT_DESCRIPTION)
agent.run("What was the high temperature in SF yesterday in Fahrenheit? What is that number raised to the .023 power?")
```
Node

```typescript
import { OpenAI } from "langchain/llms/openai";

const model = new OpenAI({
  modelName: "gpt-3.5-turbo-instruct",
  openAIApiKey: "OPENAI_API_KEY",
  configuration: {
    // Route all requests through the Portkey proxy
    basePath: "https://api.portkey.ai/v1/proxy",
    baseOptions: {
      headers: {
        "x-portkey-api-key": "<PORTKEY_API_KEY>",
        "x-portkey-mode": "proxy openai",
        "x-portkey-trace-id": "langchain_demo",
      },
    },
  },
});

async function main() {
  const res = await model.call("Describe the world as written by Herodotus.");
  console.log(res);
}

main();
```
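Passing a trace ID ties related requests together: in the agent example above, every intermediate LLM call the agent makes while reasoning and using tools is logged under the same trace in Portkey.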
All the Portkey features you can enable, along with their config keys:

| Feature | Config Key | Value (Type) | Required/Optional |
|---|---|---|---|
| API Key | api_key | API Key (string) | ✅ Required |
| Tracing Requests | trace_id | Custom string | ❔ Optional |
| Automatic Retries | retry_count | integer [1,2,3,4,5] | ❔ Optional |
| Enabling Cache | cache | simple OR semantic | ❔ Optional |
| Cache Force Refresh | cache_force_refresh | True | ❔ Optional |
| Set Cache Expiry | cache_age | integer (in seconds) | ❔ Optional |
| Add User | user | string | ❔ Optional |
| Add Organisation | organisation | string | ❔ Optional |
| Add Environment | environment | string | ❔ Optional |
| Add Prompt to Analytics | prompt | string | ❔ Optional |
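As an illustrative sketch (placeholder values; combine only the keys you need), several of these features can be enabled in a single config:

```python
from langchain.llms import OpenAI
from langchain.utilities import Portkey

# Sketch: enabling several optional Portkey features at once
headers = Portkey.Config(
    api_key="PORTKEY_API_KEY",   # required
    trace_id="langchain_demo",   # group related requests under one trace
    retry_count=3,               # automatically retry failed requests
    cache="semantic",            # or "simple" for exact-match caching
    cache_age=1800,              # cache expiry in seconds
    user="user_123",             # metadata for analytics
    environment="production",
)

llm = OpenAI(openai_api_key="OPENAI_API_KEY", headers=headers)
```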
Portkey also works with Azure OpenAI. Route requests through the proxy by prefixing your Azure resource endpoint with Portkey's proxy URL and setting the mode header to proxy azure-openai:

Python
```python
import os

# Point the OpenAI SDK at the Portkey proxy for your Azure resource
os.environ["OPENAI_API_TYPE"] = "azure"
os.environ["OPENAI_API_VERSION"] = "2023-03-15-preview"
os.environ["OPENAI_API_BASE"] = "https://api.portkey.ai/v1/proxy/RESOURCE_NAME.openai.azure.com/"
os.environ["OPENAI_API_KEY"] = "AZURE_API_KEY"

from langchain.llms import AzureOpenAI

llm = AzureOpenAI(
    headers={
        "x-portkey-api-key": "PORTKEY_API_KEY",
        "x-portkey-mode": "proxy azure-openai",
    },
    deployment_name="DEPLOYMENT_NAME",
    model_name="MODEL_NAME",
)

llm("Tell me a joke")
```
Node

```typescript
import { ChatOpenAI } from "langchain/chat_models/openai";

const model = new ChatOpenAI(
  {
    azureOpenAIApiKey: process.env.AZURE_OPENAI_API_KEY,
    azureOpenAIApiVersion: process.env.AZURE_OPENAI_API_VERSION,
    azureOpenAIApiInstanceName: process.env.AZURE_OPENAI_API_INSTANCE_NAME,
    azureOpenAIApiDeploymentName: process.env.AZURE_OPENAI_API_DEPLOYMENT_NAME,
    // Route Azure OpenAI traffic through the Portkey proxy
    azureOpenAIBasePath: `https://api.portkey.ai/v1/proxy/${process.env.AZURE_OPENAI_API_INSTANCE_NAME}.openai.azure.com/openai/deployments`,
  },
  {
    baseOptions: {
      headers: {
        "x-portkey-api-key": "<PORTKEY_API_KEY>",
        "x-portkey-mode": "proxy azure-openai",
      },
    },
  }
);

async function main() {
  const message = await model.invoke("Tell me a joke");
  console.log(message);
}

main();
```