For AI Agents
ActionKit for Tool Calling
Use ActionKit to provide your AI agent with hundreds of Integration Actions.
ActionKit is designed to give AI agents the ability to call out to integration logic as part of a prompt or an ongoing conversation with a user. The API exposes JSON Schema specs (sketched after the list below) that make it easy to provide your agent with capabilities including:
- Creating tasks in Jira based on action items an agent recognizes from a meeting transcript
- Querying real-time sales reports in Shopify when an agent is asked questions about online sales
- Creating a Google Docs draft to start a project based on a user prompt
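Each action is exposed as a function-style tool spec whose parameters are described with JSON Schema, so the specs can be passed straight to a tool-calling model. The sketch below shows the rough shape only; the action name and parameter schema are hypothetical, and the real specs come from your project's action catalog.
Illustrative shape of an ActionKit action spec
// Illustrative only: the action name and parameter schema below are hypothetical.
// Real specs are returned by the ActionKit actions endpoint shown in the examples that follow.
const exampleActionSpec = {
  type: "function",
  function: {
    name: "JIRA_CREATE_ISSUE", // hypothetical action name
    description: "Create an issue in a Jira project",
    parameters: {
      // JSON Schema describing the action's inputs
      type: "object",
      properties: {
        summary: { type: "string" },
      },
      required: ["summary"],
    },
  },
};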
Implementation Examples
Vercel AI SDK
Implementing ActionKit in AI SDK
import { generateText, jsonSchema, tool } from "ai";
import { openai } from "@ai-sdk/openai";
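// Fetch the catalog of ActionKit actions available to the authenticated user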
const response = await fetch(
"https://actionkit.useparagon.com/projects/<Project ID>/actions",
{
method: "GET",
headers: {
Authorization: `Bearer ${paragonUserToken}`,
},
}
);
const { actions, errors } = await response.json();
if (errors.length === 0) {
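  // Register each ActionKit action as an AI SDK tool, using its JSON Schema parameters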
await generateText({
model: openai("gpt-4o"),
tools: Object.fromEntries(
actions.map((action) => [
action.function.name,
tool({
          description: action.function.description,
          parameters: jsonSchema(action.function.parameters),
execute: async (params: any, { toolCallId }) => {
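            // Run the tool call by sending the action name and parameters to ActionKit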
try {
const response = await fetch(
`https://actionkit.useparagon.com/projects/<Project ID>/actions`,
{
method: "POST",
body: JSON.stringify({
                    action: action.function.name,
parameters: params,
}),
headers: {
                    Authorization: `Bearer ${paragonUserToken}`,
"Content-Type": "application/json",
},
}
);
const output = await response.json();
if (!response.ok) {
throw new Error(JSON.stringify(output, null, 2));
}
return output;
} catch (err) {
if (err instanceof Error) {
return { error: { message: err.message } };
}
return err;
}
},
}),
])
),
toolChoice: "auto",
temperature: 0,
system: "You are a helpful assistant. Be as concise as possible.",
prompt: "Help me create a new task in Jira.",
});
}
LangGraph / LangChain
Implementing ActionKit in LangChain
import json
import requests
from typing import Annotated, Any, TypedDict
from langchain.tools import BaseTool
from langchain.schema import HumanMessage
from langgraph.graph import StateGraph, START, END
from langgraph.graph.message import add_messages
from langgraph.prebuilt import ToolNode, tools_condition
from langchain_openai import ChatOpenAI
PARAGON_PROJECT_ID = ""
PARAGON_USER_TOKEN = ""
OPENAI_API_KEY = ""
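# LangChain tool that forwards a single ActionKit action call to the ActionKit API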
class ActionKitTool(BaseTool):
name: str
description: str
action_name: str
paragon_token: str
def _run(self, tool_input: str) -> str:
try:
params = json.loads(tool_input)
response = requests.post(
url=f"https://actionkit.useparagon.com/projects/{PARAGON_PROJECT_ID}/actions",
headers={
"Authorization": f"Bearer {self.paragon_token}",
"Content-Type": "application/json",
},
json={
"action": self.action_name,
"parameters": params,
},
timeout=30
)
data = response.json()
if not response.ok:
raise ValueError(json.dumps(data, indent=2))
return json.dumps(data)
except Exception as e:
return json.dumps({"error": {"message": str(e)}})
class State(TypedDict):
messages: Annotated[list, add_messages]
def main():
graph_builder = StateGraph(State)
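    # Fetch the ActionKit action catalog for this user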
url = f"https://actionkit.useparagon.com/projects/{PARAGON_PROJECT_ID}/actions"
headers = {"Authorization": f"Bearer {PARAGON_USER_TOKEN}"}
resp = requests.get(url, headers=headers)
json_resp = resp.json()
actions = json_resp.get("actions", [])
errors = json_resp.get("errors", [])
if not actions:
print("Failed to fetch Paragon actions or encountered errors:")
print(errors)
return
tools = []
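    # Wrap each ActionKit action in a LangChain tool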
for integration in actions:
integration_actions = actions.get(integration)
for action in integration_actions:
func_def = action["function"]
tool_name = func_def["name"]
tool_description = func_def["description"]
paragon_tool = ActionKitTool(
name=tool_name,
description=tool_description,
action_name=tool_name,
paragon_token=PARAGON_USER_TOKEN
)
tools.append(paragon_tool)
llm = ChatOpenAI(
openai_api_key=OPENAI_API_KEY,
model_name="o1"
)
def chatbot(state: State):
return {"messages": [llm.bind_tools(tools).invoke(state["messages"])]}
graph_builder.add_node("chatbot", chatbot)
tools_node = ToolNode(tools=tools)
graph_builder.add_node("tools", tools_node)
graph_builder.add_conditional_edges(
"chatbot",
tools_condition,
)
graph_builder.add_edge("tools", "chatbot")
graph_builder.add_edge(START, "chatbot")
graph = graph_builder.compile()
def stream_graph_updates(user_input: str):
for event in graph.stream({"messages": [{"role": "user", "content": user_input}]}):
for value in event.values():
print("Assistant:", value["messages"][-1].content)
while True:
try:
user_input = input("User: ")
if user_input.lower() in ["quit", "exit", "q"]:
print("Goodbye!")
break
stream_graph_updates(user_input)
except:
# fallback if input() is not available
user_input = "What do you know about LangGraph?"
print("User: " + user_input)
stream_graph_updates(user_input)
break
if __name__ == "__main__":
main()
Other implementations
If you’re not using TypeScript, you can pass the JSON Schema specs from the ActionKit API directly in the request to your LLM. Here is an example in Python using OpenAI’s client library:
Loading ActionKit tools for OpenAI client library
import json
import requests
from openai import OpenAI
client = OpenAI()
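# Fetch ActionKit actions as JSON Schema tool specs for the selected categories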
actions_url = f"https://actionkit.useparagon.com/projects/{project_id}/actions"
actions_auth_header = {
"Authorization": f"Bearer {user_token}"
}
get_actions_params = {
"format": "json_schema",
"categories": "crm,project_management"
}
response = requests.get(actions_url, params=get_actions_params, headers=actions_auth_header)
paragon_tools = response.json()
messages = [{"role": "user", "content": "Help me create a Jira ticket"}]
completion = client.chat.completions.create(
model="gpt-4o",
messages=messages,
tools=paragon_tools["actions"],
tool_choice="auto"
)
When passing the specs directly, you will also need to handle the model’s request to use a tool and route it to the ActionKit API:
Calling ActionKit tools with OpenAI client library
message = completion.choices[0].message

if message.tool_calls:
    # Append the assistant's tool call to the conversation history
    messages.append(message)

    tool_call = message.tool_calls[0]
    function_name = tool_call.function.name
    arguments = json.loads(tool_call.function.arguments)

    # Check if this tool uses ActionKit
    if any(tool["function"]["name"] == function_name for tool in paragon_tools["actions"]):
        run_actions_body = {
            "action": function_name,
            "parameters": arguments
        }

        # Run Action
        response = requests.post(actions_url, json=run_actions_body, headers=actions_auth_header)
        result = response.json()

        messages.append({
            "role": "tool",
            "tool_call_id": tool_call.id,
            "content": json.dumps(result)
        })

    # Return to chat with tool result
    completion = client.chat.completions.create(
        model="gpt-4o",
        messages=messages,
        tools=paragon_tools["actions"],
        tool_choice="auto"
    )