Commit 42504393 authored by Sanyam Bhutani, committed by GitHub

colab links fix (#790)

parents b9fc1069 b579bd04
%% Cell type:markdown id:7a4b75bb-d60a-41e3-abca-1ca0f0bf1201 tags:
<a href="https://colab.research.google.com/github/meta-llama/llama-recipes/blob/main/recipes/quickstart/agents/DeepLearningai_Course_Notebooks/AI_Agentic_Design_Patterns_with_AutoGen_L4_Tool_Use_and_Conversational_Chess.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
%% Cell type:markdown id:51581f90-911f-46ef-82dd-f3ca9c1d4b96 tags:
This notebook ports the DeepLearning.AI short course [AI Agentic Design Patterns with AutoGen Lesson 4 Tool Use and Conversational Chess](https://learn.deeplearning.ai/courses/ai-agentic-design-patterns-with-autogen/lesson/5/tool-use-and-conversational-chess) to using Llama 3.
You should take the course before or after going through this notebook to have a deeper understanding.
%% Cell type:code id:f9824ea5-3791-4638-a09d-43eb2c906d79 tags:
``` python
!pip install chess
!pip install pyautogen
```
%% Cell type:code id:a082a6dc-ceb1-4a3e-b3ae-afcb835de6da tags:
``` python
import chess
import chess.svg
from typing_extensions import Annotated
```
%% Cell type:code id:fbcdd9ea-f589-463d-a306-3fb3fcde770c tags:
``` python
board = chess.Board()
made_move = False
```
%% Cell type:code id:9d27858c-4a0b-40f6-bd58-01b19c33ab38 tags:
``` python
def get_legal_moves(
) -> Annotated[str, "A list of legal moves in UCI format"]:
    return "Possible moves are: " + ",".join(
        [str(move) for move in board.legal_moves]
    )
```
%% Cell type:code id:67742daa-9d9a-46b3-9466-beb96d535334 tags:
``` python
from IPython.display import SVG
def make_move(
    move: Annotated[str, "A move in UCI format."]
) -> Annotated[str, "Result of the move."]:
    move = chess.Move.from_uci(move)
    board.push_uci(str(move))
    global made_move
    made_move = True
    svg_str = chess.svg.board(
        board,
        arrows=[(move.from_square, move.to_square)],
        fill={move.from_square: "gray"},
        size=200
    )
    display(
        SVG(data=svg_str)
    )
    # Get the piece name.
    piece = board.piece_at(move.to_square)
    piece_symbol = piece.unicode_symbol()
    piece_name = (
        chess.piece_name(piece.piece_type).capitalize()
        if piece_symbol.isupper()
        else chess.piece_name(piece.piece_type)
    )
    return f"Moved {piece_name} ({piece_symbol}) from "\
        f"{chess.SQUARE_NAMES[move.from_square]} to "\
        f"{chess.SQUARE_NAMES[move.to_square]}."
```
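%% Cell type:markdown tags:
Before wiring these tools into agents, it helps to sanity-check them directly. The short cell below is an addition to the ported notebook: it calls `get_legal_moves` and `make_move` on a fresh board and then resets the `made_move` flag; the exact move list and the wording of the result may differ slightly from the comments.
%% Cell type:code tags:
``` python
# Optional sanity check of the two tools above (not part of the original course notebook).
board = chess.Board()
print(get_legal_moves())    # e.g. "Possible moves are: g1h3,g1f3,..."
print(make_move("e2e4"))    # renders the board SVG and returns something like "Moved pawn (♙) from e2 to e4."
made_move = False           # reset the flag so the agent game below starts clean
```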
%% Cell type:code id:e84508c0-0465-4be8-a97b-2e702265bcfb tags:
``` python
# base url from https://console.groq.com/docs/openai
config_list = [
    {
        "model": "llama3-70b-8192",
        "base_url": "https://api.groq.com/openai/v1",
        'api_key': 'your_groq_api_key', # get a free key at https://console.groq.com/keys
    },
]
```
%% Cell type:code id:86dbb782-61f0-4b61-aab5-41fd12c26f51 tags:
``` python
from autogen import ConversableAgent
# Player white agent
player_white = ConversableAgent(
    name="Player White",
    system_message="You are a chess player and you play as white. "
    "First call get_legal_moves(), to get a list of legal moves in UCI format. "
    "Then call make_move(move) to make a move. Finally, tell the proxy what you have moved and ask the black to move", # added "Finally..." to make the agents work
    llm_config={"config_list": config_list,
                "temperature": 0,
    },
)
```
%% Cell type:code id:1c57411c-183a-44ea-95ab-33c0e97feb74 tags:
``` python
# Player black agent
player_black = ConversableAgent(
    name="Player Black",
    system_message="You are a chess player and you play as black. "
    "First call get_legal_moves(), to get a list of legal moves in UCI format. "
    "Then call make_move(move) to make a move. Finally, tell the proxy what you have moved and ask the white to move", # added "Finally..." to make the agents work
    llm_config={"config_list": config_list,
                "temperature": 0,
    },)
```
%% Cell type:code id:60e5cb2d-4273-45a9-af40-0ffb1ada0009 tags:
``` python
def check_made_move(msg):
    global made_move
    if made_move:
        made_move = False
        return True
    else:
        return False
```
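%% Cell type:markdown tags:
The proxy agent defined in the next cell uses `check_made_move` as its termination check: it keeps auto-replying "Please make a move." until `make_move` has actually run and set the global flag. A tiny illustration of the flag hand-off (an addition, not from the course); note that `check_made_move` ignores the message content entirely.
%% Cell type:code tags:
``` python
# The function consumes the global flag that make_move() sets, regardless of the message passed in.
made_move = True
print(check_made_move({"content": "anything"}))  # True  -> the nested chat with the proxy ends
print(check_made_move({"content": "anything"}))  # False -> the proxy would keep prompting for a move
```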
%% Cell type:code id:be4c7b55-9d50-4aa8-ae4b-3b959ffbb298 tags:
``` python
board_proxy = ConversableAgent(
    name="Board Proxy",
    llm_config=False,
    is_termination_msg=check_made_move,
    default_auto_reply="Please make a move.",
    human_input_mode="NEVER",
)
```
%% Cell type:code id:e122875c-8bff-4212-8a1b-5f91d253fdd7 tags:
``` python
from autogen import register_function
```
%% Cell type:code id:20edcb8c-5b7b-438e-b476-1cb16d14ef62 tags:
``` python
for caller in [player_white, player_black]:
    register_function(
        get_legal_moves,
        caller=caller,
        executor=board_proxy,
        name="get_legal_moves",
        description="Call this tool to get all legal moves in UCI format.",
    )
    register_function(
        make_move,
        caller=caller,
        executor=board_proxy,
        name="make_move",
        description="Call this tool to make a move.",
    )
```
%% Cell type:code id:b254ea02-0a81-4e9f-91fa-788dead9ffb8 tags:
``` python
player_black.llm_config["tools"]
```
%% Cell type:code id:3715f56c-8ab8-4563-8f00-233beb3959b9 tags:
``` python
player_white.register_nested_chats(
    trigger=player_black,
    chat_queue=[
        {
            "sender": board_proxy,
            "recipient": player_white,
            "summary_method": "last_msg",
        }
    ],
)
player_black.register_nested_chats(
    trigger=player_white,
    chat_queue=[
        {
            "sender": board_proxy,
            "recipient": player_black,
            "summary_method": "last_msg",
        }
    ],
)
```
%% Cell type:code id:eda4f544-ab4c-4e9e-bceb-f93ad57c4026 tags:
``` python
board = chess.Board()
chat_result = player_black.initiate_chat(
    player_white,
    message="Let's play chess! Your move.",
    max_turns=3,
)
```
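%% Cell type:markdown tags:
After the short game above finishes, the resulting position can be inspected directly with `python-chess`. A minimal follow-up sketch (an addition, not from the course):
%% Cell type:code tags:
``` python
# Inspect the board state left behind by the agents' make_move() calls.
print(board)                  # ASCII rendering of the final position
print(board.fullmove_number)  # how far the game got
print(board.is_game_over())   # almost certainly False after only a few turns
```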
......
%% Cell type:markdown id:de56ee05-3b71-43c9-8cbf-6ad9b3233f38 tags:
<a href="https://colab.research.google.com/github/meta-llama/llama-recipes/blob/main/recipes/quickstart/agents/DeepLearningai_Course_Notebooks/AI_Agents_in_LangGraph_L1_Build_an_Agent_from_Scratch.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
%% Cell type:markdown id:16ba1896-6867-4c68-9951-b0aadb819782 tags:
This notebook ports the DeepLearning.AI short course [AI Agents in LangGraph Lesson 1 Build an Agent from Scratch](https://learn.deeplearning.ai/courses/ai-agents-in-langgraph/lesson/2/build-an-agent-from-scratch) to using Llama 3, with a bonus section that ports the agent-from-scratch code to LangGraph, introduced in [Lesson 2 LangGraph Components](https://learn.deeplearning.ai/courses/ai-agents-in-langgraph/lesson/3/langgraph-components) of the course.
You should take the course, especially the first two lessons, before or after going through this notebook, to have a deeper understanding.
%% Cell type:code id:9b168b57-6ff8-41d1-8f8f-a0c4a5ff108e tags:
``` python
!pip install groq
```
%% Cell type:code id:2c067d5f-c58c-47c0-8ccd-9a8710711bf7 tags:
``` python
import os
from groq import Groq
os.environ['GROQ_API_KEY'] = 'your_groq_api_key' # get a free key at https://console.groq.com/keys
```
%% Cell type:code id:5f7d8d95-36fb-4b14-bd28-99d305c0fded tags:
``` python
# a quick sanity test of calling Llama 3 70b on Groq
# see https://console.groq.com/docs/text-chat for more info
client = Groq()
chat_completion = client.chat.completions.create(
    model="llama3-70b-8192",
    messages=[{"role": "user", "content": "what are the words Charlotte wrote for the pig?"}]
)
print(chat_completion.choices[0].message.content)
```
%% Cell type:markdown id:f758c771-5afe-4008-9d7f-92a6f526778b tags:
### ReAct Agent from Scratch
%% Cell type:code id:c00c0479-0913-4a92-8991-fe5a9a936bdb tags:
``` python
client = Groq()
model = "llama3-8b-8192" # this model works with the prompt below only for the first, simpler example; you'll see how to modify the prompt to make it work for a more complicated question
#model = "llama3-70b-8192" # this model works with the prompt below for both example questions
class Agent:
    def __init__(self, system=""):
        self.system = system
        self.messages = []
        if self.system:
            self.messages.append({"role": "system", "content": system})
    def __call__(self, message):
        self.messages.append({"role": "user", "content": message})
        result = self.execute()
        self.messages.append({"role": "assistant", "content": result})
        return result
    def execute(self):
        completion = client.chat.completions.create(
            model=model,
            temperature=0,
            messages=self.messages)
        return completion.choices[0].message.content
```
%% Cell type:code id:f766fb44-e8c2-43db-af83-8b9053a334ef tags:
``` python
prompt = """
You run in a loop of Thought, Action, PAUSE, Observation.
At the end of the loop you output an Answer
Use Thought to describe your thoughts about the question you have been asked.
Use Action to run one of the actions available to you - then return PAUSE.
Observation will be the result of running those actions.
Your available actions are:
calculate:
e.g. calculate: 4 * 7 / 3
Runs a calculation and returns the number - uses Python so be sure to use floating point syntax if necessary
average_dog_weight:
e.g. average_dog_weight: Collie
returns average weight of a dog when given the breed
Example session:
Question: How much does a Bulldog weigh?
Thought: I should look the dogs weight using average_dog_weight
Action: average_dog_weight: Bulldog
PAUSE
You will be called again with this:
Observation: A Bulldog weights 51 lbs
You then output:
Answer: A bulldog weights 51 lbs
""".strip()
```
%% Cell type:code id:93ab1290-625b-4b69-be4d-210fca43a513 tags:
``` python
def calculate(what):
    return eval(what)
def average_dog_weight(name):
    if name in "Scottish Terrier":
        return("Scottish Terriers average 20 lbs")
    elif name in "Border Collie":
        return("a Border Collies average weight is 37 lbs")
    elif name in "Toy Poodle":
        return("a toy poodles average weight is 7 lbs")
    else:
        return("An average dog weights 50 lbs")
known_actions = {
    "calculate": calculate,
    "average_dog_weight": average_dog_weight
}
```
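%% Cell type:markdown tags:
Both actions can be exercised directly before handing them to the agent. A small check (an addition to the ported notebook):
%% Cell type:code tags:
``` python
# Direct calls to the two "tools" the ReAct loop will dispatch to.
print(known_actions["calculate"]("4 * 7 / 3"))               # 9.333...
print(known_actions["average_dog_weight"]("Border Collie"))  # a Border Collies average weight is 37 lbs
```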
%% Cell type:code id:52f900d9-15f0-4f48-9bf3-6165c70e4b42 tags:
``` python
abot = Agent(prompt)
```
%% Cell type:code id:f1b612c9-2a7d-4325-b36f-182899252538 tags:
``` python
result = abot("How much does a toy poodle weigh?")
print(result)
```
%% Cell type:code id:e27dda33-c76d-4a19-8aef-02ba5389e7a6 tags:
``` python
abot.messages
```
%% Cell type:code id:b6e85ca1-85af-43e3-a5ea-c5faf0935361 tags:
``` python
# manually call the external func (tool) for now
result = average_dog_weight("Toy Poodle")
```
%% Cell type:code id:a9263ac7-fa81-4c95-91c8-a6c0741ab7f8 tags:
``` python
result
```
%% Cell type:code id:cb309710-0693-422f-a739-38ca9455e497 tags:
``` python
next_prompt = "Observation: {}".format(result)
```
%% Cell type:code id:bd567e42-b5a9-4e4e-8807-38bb1d6c80a4 tags:
``` python
abot(next_prompt)
```
%% Cell type:code id:255bf148-bf85-40c5-b33e-d849a42c127b tags:
``` python
abot.messages
```
%% Cell type:code id:c286a6d5-b5b3-473b-bad6-aa6f1468e603 tags:
``` python
abot = Agent(prompt)
```
%% Cell type:code id:f5e13b6e-e68e-45c2-b688-a257b531e482 tags:
``` python
question = """I have 2 dogs, a border collie and a scottish terrier. \
What is their combined weight"""
abot(question)
```
%% Cell type:code id:049202f1-585f-42c3-8511-08eca7e5ed0e tags:
``` python
abot.messages
```
%% Cell type:code id:f086f19a-30fe-40ca-aafb-f1ce7c28982d tags:
``` python
next_prompt = "Observation: {}".format(average_dog_weight("Border Collie"))
print(next_prompt)
```
%% Cell type:code id:1747c78d-642d-4f57-81a0-27218eab3958 tags:
``` python
abot(next_prompt)
```
%% Cell type:code id:85809d8f-cd95-4e0a-acb7-9705817bea70 tags:
``` python
abot.messages
```
%% Cell type:code id:e77591fa-4e04-4eb6-8a40-ca26a71765f9 tags:
``` python
next_prompt = "Observation: {}".format(average_dog_weight("Scottish Terrier"))
print(next_prompt)
```
%% Cell type:code id:1f72b638-de07-4972-bbdb-8c8602f3d143 tags:
``` python
abot(next_prompt)
```
%% Cell type:code id:eb5bf29d-22f9-4c0d-aea6-7e9c99e71835 tags:
``` python
abot.messages
```
%% Cell type:code id:a67add73-b3c3-42be-9c54-f8a6ac828869 tags:
``` python
next_prompt = "Observation: {}".format(eval("37 + 20"))
print(next_prompt)
```
%% Cell type:code id:801fda04-9756-4ae4-9990-559216d38be8 tags:
``` python
abot(next_prompt)
```
%% Cell type:code id:56f7b9f4-289f-498d-8bc8-da9bb7365d52 tags:
``` python
abot.messages
```
%% Cell type:markdown id:155ee9b3-a4f9-43dd-b23e-0f268f72f198 tags:
### Automate the ReAct action execution
%% Cell type:code id:5b2196f8-88e6-4eb4-82b0-cf251a07e313 tags:
``` python
import re
# automate the action execution above to make the whole ReAct (Thought - Action - Observation) process fully automated
action_re = re.compile(r'^Action: (\w+): (.*)$') # regular expression to select the action
```
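%% Cell type:markdown tags:
To see what the regular expression extracts, here is a one-line check against a typical Action line (an addition to the notebook):
%% Cell type:code tags:
``` python
# The pattern captures the action name and its argument from a ReAct "Action:" line.
m = action_re.match("Action: average_dog_weight: Border Collie")
print(m.groups())  # ('average_dog_weight', 'Border Collie')
```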
%% Cell type:code id:ea5710d6-5d9a-46ff-a275-46311257d9fd tags:
``` python
def query(question, max_turns=5):
    i = 0
    bot = Agent(prompt) # set system prompt
    next_prompt = question
    while i < max_turns:
        i += 1
        result = bot(next_prompt)
        print(result)
        actions = [
            action_re.match(a)
            for a in result.split('\n')
            if action_re.match(a)
        ]
        if actions:
            # There is an action to run
            action, action_input = actions[0].groups()
            if action not in known_actions:
                raise Exception("Unknown action: {}: {}".format(action, action_input))
            print(" -- running {} {}".format(action, action_input))
            # key to making the agent process fully automated:
            # programmatically call the external func with the arguments returned by the LLM
            observation = known_actions[action](action_input)
            print("Observation:", observation)
            next_prompt = "Observation: {}".format(observation)
        else:
            return
```
%% Cell type:markdown id:232d0818-7580-424b-9538-1e2b1c15360b tags:
#### Using model "llama3-8b-8192", the code below will cause an invalid-syntax error, because the Action returned is `calculate: (average_dog_weight: Border Collie) + (average_dog_weight: Scottish Terrier)` instead of the expected `Action: average_dog_weight: Border Collie`.
%% Cell type:code id:bb0095f3-b3f1-48cf-b3fb-36049b6b91f0 tags:
``` python
question = """I have 2 dogs, a border collie and a scottish terrier. \
What is their combined weight"""
query(question)
```
%% Cell type:markdown id:952ffac8-5ec2-48f3-8049-d03c130dad0d tags:
#### Prompt engineering in action:
REPLACE "Use Thought to describe your thoughts about the question you have been asked. Use Action to run one of the actions available to you - then return PAUSE." with
"First, use Thought to describe your thoughts about the question you have been asked, and generate Action to run one of the actions available to you, then return PAUSE."
%% Cell type:code id:ec791ad6-b39a-4f46-b149-704c23d6c506 tags:
``` python
prompt = """
You run in a loop of Thought, Action, PAUSE, Observation.
At the end of the loop you output an Answer.
First, use Thought to describe your thoughts about the question you have been asked, and generate Action to run one of the actions available to you, then return PAUSE.
Observation will be the result of running those actions.
Your available actions are:
calculate:
e.g. calculate: 4 * 7 / 3
Runs a calculation and returns the number - uses Python so be sure to use floating point syntax if necessary
average_dog_weight:
e.g. average_dog_weight: Collie
returns average weight of a dog when given the breed
Example session:
Question: How much does a Bulldog weigh?
Thought: I should look the dogs weight using average_dog_weight
Action: average_dog_weight: Bulldog
PAUSE
You will be called again with this:
Observation: A Bulldog weights 51 lbs
You then output:
Answer: A bulldog weights 51 lbs
""".strip()
```
%% Cell type:code id:90bcf731-4d89-473b-98e1-53826da149f9 tags:
``` python
question = """I have 2 dogs, a border collie and a scottish terrier. \
What is their combined weight"""
query(question)
```
%% Cell type:markdown id:09d30a8f-3783-4ee5-a48e-7d89e22a508a tags:
### Bonus: Port the Agent Implementation to LangGraph
%% Cell type:code id:6b5ed82e-2d70-45ac-b026-904da211f81a tags:
``` python
!pip install langchain
!pip install langgraph
!pip install langchain_openai
!pip install langchain_community
!pip install httpx
!pip install langchain-groq
```
%% Cell type:code id:a8ed3b90-688e-4aa2-8e43-e951af29a57f tags:
``` python
from langgraph.graph import StateGraph, END
from typing import TypedDict, Annotated
import operator
from langchain_core.messages import AnyMessage, SystemMessage, HumanMessage, ToolMessage
from langchain_openai import ChatOpenAI
from langchain_community.tools.tavily_search import TavilySearchResults
```
%% Cell type:code id:c555f945-7db0-4dc9-9ea5-5632bf941fe4 tags:
``` python
from langchain_groq import ChatGroq
model = ChatGroq(temperature=0, model_name="llama3-8b-8192")
```
%% Cell type:code id:7755a055-fa1f-474f-b558-230cc5a67a33 tags:
``` python
from langchain_core.tools import tool
from langgraph.prebuilt import ToolNode
@tool
def calculate(what):
    """Runs a calculation and returns the number."""
    return eval(what)
@tool
def average_dog_weight(name):
    """Returns the average weight of a dog."""
    if name in "Scottish Terrier":
        return("Scottish Terriers average 20 lbs")
    elif name in "Border Collie":
        return("a Border Collies average weight is 37 lbs")
    elif name in "Toy Poodle":
        return("a toy poodles average weight is 7 lbs")
    else:
        return("An average dog weights 50 lbs")
```
%% Cell type:code id:4a003862-8fd2-45b1-8fe4-78d7cd5888d9 tags:
``` python
prompt = """
You run in a loop of Thought, Action, Observation.
At the end of the loop you output an Answer
Use Thought to describe your thoughts about the question you have been asked.
Use Action to run one of the actions available to you.
Observation will be the result of running those actions.
Your available actions are:
calculate:
e.g. calculate: 4 * 7 / 3
Runs a calculation and returns the number - uses Python so be sure to use floating point syntax if necessary
average_dog_weight:
e.g. average_dog_weight: Collie
returns average weight of a dog when given the breed
Example session:
Question: How much does a Bulldog weigh?
Thought: I should look the dogs weight using average_dog_weight
Action: average_dog_weight: Bulldog
You will be called again with this:
Observation: A Bulldog weights 51 lbs
You then output:
Answer: A bulldog weights 51 lbs
""".strip()
```
%% Cell type:code id:471c0aa7-547f-4d5f-9e99-73ef47101d41 tags:
``` python
class AgentState(TypedDict):
    messages: Annotated[list[AnyMessage], operator.add]
```
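%% Cell type:markdown tags:
The `operator.add` annotation tells LangGraph to merge each node's returned `messages` into the running state by concatenation instead of overwriting it. A minimal illustration of the reducer itself (an addition, not from the course; `AIMessage` is imported here only for the example):
%% Cell type:code tags:
``` python
from langchain_core.messages import AIMessage  # not imported above; used only for this illustration

old = [HumanMessage(content="How much does a Toy Poodle weigh?")]
new = [AIMessage(content="(a model reply would go here)")]
# operator.add on lists is plain concatenation, so each node's messages are appended to the state.
print([type(m).__name__ for m in operator.add(old, new)])  # ['HumanMessage', 'AIMessage']
```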
%% Cell type:code id:530e8a60-085a-4485-af03-bafc6b2c1d88 tags:
``` python
class Agent:
    def __init__(self, model, tools, system=""):
        self.system = system
        graph = StateGraph(AgentState)
        graph.add_node("llm", self.call_llm)
        graph.add_node("action", self.take_action)
        graph.add_conditional_edges(
            "llm",
            self.exists_action,
            {True: "action", False: END}
        )
        graph.add_edge("action", "llm")
        graph.set_entry_point("llm")
        self.graph = graph.compile()
        self.tools = {t.name: t for t in tools}
        self.model = model.bind_tools(tools)
    def exists_action(self, state: AgentState):
        result = state['messages'][-1]
        return len(result.tool_calls) > 0
    def call_llm(self, state: AgentState):
        messages = state['messages']
        if self.system:
            messages = [SystemMessage(content=self.system)] + messages
        message = self.model.invoke(messages)
        return {'messages': [message]}
    def take_action(self, state: AgentState):
        tool_calls = state['messages'][-1].tool_calls
        results = []
        for t in tool_calls:
            print(f"Calling: {t}")
            result = self.tools[t['name']].invoke(t['args'])
            results.append(ToolMessage(tool_call_id=t['id'], name=t['name'], content=str(result)))
        print("Back to the model!")
        return {'messages': results}
```
%% Cell type:code id:3db8dcea-d4eb-46df-bd90-55acd4c5520a tags:
``` python
abot = Agent(model, [calculate, average_dog_weight], system=prompt)
```
%% Cell type:code id:72c62e36-9321-40d2-86d8-b3c9caf3020f tags:
``` python
messages = [HumanMessage(content="How much does a Toy Poodle weigh?")]
result = abot.graph.invoke({"messages": messages})
result['messages'], result['messages'][-1].content
# the code above will cause an error because Llama 3 8B incorrectly returns an extra "calculate" tool call
```
%% Cell type:code id:56b4c622-b306-4aa3-84e6-4ccd6d6f272f tags:
``` python
# using Llama 3 70B fixes the error
model = ChatGroq(temperature=0, model_name="llama3-70b-8192")
abot = Agent(model, [calculate, average_dog_weight], system=prompt)
```
%% Cell type:code id:629ca375-979a-45d7-bad8-7240ae9ad844 tags:
``` python
# "Toy Poodle" is case-sensitive here - this can be fixed easily by modifying average_dog_weight
messages = [HumanMessage(content="How much does a Toy Poodle weigh?")]
result = abot.graph.invoke({"messages": messages})
result['messages'], result['messages'][-1].content
```
%% Cell type:code id:30e253ae-e742-4df8-92e6-fadfc3826003 tags:
``` python
messages = [HumanMessage(content="I have 2 dogs, a border collie and a scottish terrier. What are their average weights? Total weight?")]
result = abot.graph.invoke({"messages": messages})
```
%% Cell type:code id:238ec75c-4ff6-4561-bb0a-895530a61e47 tags:
``` python
result['messages'], result['messages'][-1].content
```
......
...@@ -45,7 +45,7 @@
},
"outputs": [],
"source": [
"import os\n",
"os.environ['GROQ_API_KEY'] = 'your_groq_api_key' # get a free key at https://console.groq.com/keys"
]
},
......
%% Cell type:markdown id:2ba1b4ef-3b96-4e7e-b5d0-155b839db73c tags:
<a href="https://colab.research.google.com/github/meta-llama/llama-recipes/blob/main/recipes/quickstart/agents/DeepLearningai_Course_Notebooks/Functions_Tools_and_Agents_with_LangChain_L1_Function_Calling.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
%% Cell type:markdown id:f91905c8-21ca-4d81-9614-b9c7344d08c3 tags:
This notebook ports the DeepLearning.AI short course [Functions, Tools and Agents with LangChain Lesson 1 OpenAI Function Calling](https://learn.deeplearning.ai/courses/functions-tools-agents-langchain/lesson/2/openai-function-calling) to using Llama 3.
You should take the course before or after going through this notebook to have a deeper understanding.
%% Cell type:code id:31bfe801-e24d-459b-8b3f-e91a34024368 tags:
``` python
!pip install groq
```
%% Cell type:code id:88659373-0deb-45eb-8934-0b02d70bd047 tags:
``` python
import json
# Example dummy function hard coded to return the same weather
# In production, this could be your backend API or an external API
def get_current_weather(location, unit="fahrenheit"):
    """Get the current weather in a given location"""
    weather_info = {
        "location": location,
        "temperature": "72",
        "unit": unit,
        "forecast": ["sunny", "windy"],
    }
    return json.dumps(weather_info)
known_functions = {
    "get_current_weather": get_current_weather
}
```
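%% Cell type:markdown tags:
Calling the dummy function directly shows the JSON string that the model's function call will eventually be routed to (a small addition for illustration):
%% Cell type:code tags:
``` python
# The stub always returns the same canned weather, serialized as a JSON string.
print(get_current_weather("Boston, MA"))
# {"location": "Boston, MA", "temperature": "72", "unit": "fahrenheit", "forecast": ["sunny", "windy"]}
```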
%% Cell type:code id:359a584a-5b26-4497-afb4-72b63027edb9 tags:
``` python
# https://console.groq.com/docs/tool-use#models
# Groq API endpoints support tool use for programmatic execution of specified operations through requests with explicitly defined
# operations. With tool use, Groq API model endpoints deliver structured JSON output that can be used to directly invoke functions.
from groq import Groq
import os
import json
client = Groq(api_key='your_groq_api_key') # get a free key at https://console.groq.com/keys
```
%% Cell type:code id:5cc17dc9-2827-4d39-a13d-a4ed5f53c8e6 tags:
``` python
functions = [
    {
        "name": "get_current_weather",
        "description": "Get the current weather in a given location",
        "parameters": {
            "type": "object",
            "properties": {
                "location": {
                    "type": "string",
                    "description": "The city and state, e.g. San Francisco, CA",
                },
                "unit": {"type": "string", "enum": ["celsius", "fahrenheit"]},
            },
            "required": ["location"],
        },
    }
]
tools = [
    {
        "type": "function",
        "function": {
            "name": "get_current_weather",
            "description": "Get the current weather in a given location",
            "parameters": {
                "type": "object",
                "properties": {
                    "location": {
                        "type": "string",
                        "description": "The city and state, e.g. San Francisco, CA",
                    },
                    "unit": {"type": "string", "enum": ["celsius", "fahrenheit"]},
                },
                "required": ["location"],
            },
        }
    }
]
```
%% Cell type:code id:5a64d28e-b169-4855-b3c2-d6722c56394c tags:
``` python
messages = [
    {
        "role": "user",
        "content": "What's the weather like in Boston?"
    }
]
```
%% Cell type:code id:a733c1e1-c7f2-4707-b1be-02179df0abc6 tags:
``` python
response = client.chat.completions.create(
    model="llama3-70b-8192",
    messages=messages,
    functions=functions,
    #tools=tools, # you can also replace functions with tools, as specified in https://console.groq.com/docs/tool-use
    max_tokens=4096,
    temperature=0
)
```
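%% Cell type:markdown tags:
The call above uses the older `functions` parameter to mirror the original course. The `tools` schema defined earlier works through the newer interface; the sketch below assumes the OpenAI-compatible `tool_choice` parameter and `tool_calls` response field described in the Groq tool-use docs, and is an addition to the notebook.
%% Cell type:code tags:
``` python
# Same request through the tools interface (sketch; the model may also answer without a tool call).
response_tools = client.chat.completions.create(
    model="llama3-70b-8192",
    messages=messages,
    tools=tools,
    tool_choice="auto",  # let the model decide whether to call the tool
    max_tokens=4096,
    temperature=0,
)
tool_call = response_tools.choices[0].message.tool_calls[0]
print(tool_call.function.name, tool_call.function.arguments)
```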
%% Cell type:code id:9058073d-cf91-4747-9860-7e2a1d774acf tags:
``` python
response
```
%% Cell type:code id:ffd4ed64-0436-499e-a7e5-4224833b72f3 tags:
``` python
response_message = response.choices[0].message
response_message
```
%% Cell type:code id:5458444a-a448-4c5b-b06c-47ab6cd25626 tags:
``` python
response_message.content
```
%% Cell type:code id:c669a048-1a3e-43e9-b98f-d0b6a3a0f4c0 tags:
``` python
response_message.function_call
```
%% Cell type:code id:27f3de5d-5110-486e-8b07-5086939d364d tags:
``` python
json.loads(response_message.function_call.arguments)
```
%% Cell type:code id:b69e6497-9e68-47d4-99ae-d45db6c1a8db tags:
``` python
args = json.loads(response_message.function_call.arguments)
```
%% Cell type:code id:f41a7162-9ce8-4353-827b-f6f3bb278218 tags:
``` python
get_current_weather(args)
```
%% Cell type:code id:bb0546f2-de55-417a-9b38-66787b673fb7 tags:
``` python
function_call = response.choices[0].message.function_call
function_call
```
%% Cell type:code id:0dd1fcf0-7105-4cad-82b5-22ce3b24fc07 tags:
``` python
function_call.name, function_call.arguments
```
%% Cell type:code id:e6d58efe-0ada-48a2-b12b-6bff948a2983 tags:
``` python
# by defining and using known_functions, we can programmatically call the function the model chose
function_response = known_functions[function_call.name](function_call.arguments)
```
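%% Cell type:markdown tags:
Note that the two calls above pass the raw arguments object straight through as the `location` parameter. An optional, stricter dispatch (not from the course) parses the JSON arguments and unpacks them as keyword arguments so `location` and, if present, `unit` land on the right parameters:
%% Cell type:code tags:
``` python
# Parse the model-produced JSON arguments and unpack them as keyword arguments.
parsed_args = json.loads(function_call.arguments)
function_response_strict = known_functions[function_call.name](**parsed_args)
print(function_response_strict)  # e.g. {"location": "Boston, MA", "temperature": "72", ...}
```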
%% Cell type:code id:cee6ca19-6924-4a7b-ba7f-7b1a33344ca0 tags:
``` python
function_response
```
%% Cell type:code id:8480be29-3326-4d95-8742-dff976a7ab2e tags:
``` python
# add the message returned by the tool and query the LLM again to get the final answer
messages.append(
    {
        "role": "function",
        "name": function_call.name,
        "content": function_response,
    }
)
```
%% Cell type:code id:4a42e35f-c601-4c14-8de5-bdbba01dc622 tags:
``` python
messages
```
%% Cell type:code id:f9a2d1ee-9e41-480a-a5cc-62c273d3a179 tags:
``` python
response = client.chat.completions.create(
    model="llama3-70b-8192",
    messages=messages,
    temperature=0
)
response.choices[0].message.content
```
%% Cell type:code id:54019c56-11cf-465a-a440-296081adee93 tags:
``` python
messages = [
    {
        "role": "user",
        "content": "hi!",
    }
]
```
%% Cell type:code id:922724ec-1744-4ccf-9a86-5f1823dce0e0 tags:
``` python
response = client.chat.completions.create(
    model="llama3-70b-8192",
    messages=messages,
    functions=functions,
    function_call="none", # default is "auto" (let the LLM decide whether to call a function); can also be "none" or a dict like {"name": "func_name"}
    temperature=0
)
```
%% Cell type:code id:04c3152a-f51b-45cb-a27c-0672337520b4 tags:
``` python
print(response)
```
%% Cell type:code id:582fac7c-0de7-420c-8150-038e74be4b9a tags:
``` python
response_message = response.choices[0].message
response_message
```
%% Cell type:code id:e3d62357-04c9-459c-b36a-89e58444ea63 tags:
``` python
messages = [
    {
        "role": "user",
        "content": "hi!",
    }
]
response = client.chat.completions.create(
    model="llama3-70b-8192",
    messages=messages,
    functions=functions,
    function_call="auto", # default is "auto" (let the LLM decide whether to call a function); can also be "none" or a dict like {"name": "func_name"}
    temperature=0
)
print(response)
```
%% Cell type:code id:632df69d-85bc-4e44-814c-7c1d2fe97228 tags:
``` python
messages = [
    {
        "role": "user",
        "content": "hi!",
    }
]
response = client.chat.completions.create(
    model="llama3-70b-8192",
    messages=messages,
    functions=functions,
    function_call="none", # default is "auto" (let the LLM decide whether to call a function); can also be "none" or a dict like {"name": "func_name"}
    temperature=0
)
print(response)
```
%% Cell type:code id:c773ab17-a620-44eb-877f-9e0bc23fb00b tags:
``` python
messages = [
    {
        "role": "user",
        "content": "What's the weather in Boston?",
    }
]
response = client.chat.completions.create(
    model="llama3-70b-8192",
    messages=messages,
    functions=functions,
    function_call="none", # default is "auto" (let the LLM decide whether to call a function); can also be "none" or a dict like {"name": "func_name"}
    temperature=0
)
print(response)
```
%% Cell type:code id:c4a8ee80-83ae-4189-837c-54bb9c93c315 tags:
``` python
messages = [
    {
        "role": "user",
        "content": "hi!",
    }
]
response = client.chat.completions.create(
    model="llama3-70b-8192",
    messages=messages,
    functions=functions,
    function_call={"name": "get_current_weather"}, # force the model to call this function; default is "auto", can also be "none"
    temperature=0
)
print(response)
```
%% Cell type:code id:daa5801a-2e71-4630-a8cd-7e84d1214f51 tags:
``` python
messages = [
    {
        "role": "user",
        "content": "What's the weather like in Boston!",
    }
]
response = client.chat.completions.create(
    model="llama3-70b-8192",
    messages=messages,
    functions=functions,
    function_call={"name": "get_current_weather"}, # force the model to call this function; default is "auto", can also be "none"
    temperature=0
)
print(response)
```
%% Cell type:code id:de5924d4-4225-48d1-a390-e44f3167d547 tags:
``` python
function_call = response.choices[0].message.function_call
function_call.name, function_call.arguments
```
%% Cell type:code id:fb9f3340-b905-47f3-a478-cf3d786faa1f tags:
``` python
args = json.loads(response.choices[0].message.function_call.arguments)
observation = known_functions[function_call.name](args)
```
%% Cell type:code id:3c31e9b5-99ed-46f3-8849-133c71ea87d4 tags:
``` python
observation
```
%% Cell type:code id:b73c550e-5aa2-49de-8422-0c3e706f1df4 tags:
``` python
messages.append(
    {
        "role": "function",
        "name": function_call.name,
        "content": observation,
    }
)
```
%% Cell type:code id:c60302f1-07e2-4f22-bd60-b54e1ea2e3db tags:
``` python
messages
```
%% Cell type:code id:a35f7f3d-4e39-4744-b5e3-2065e67eea28 tags:
``` python
response = client.chat.completions.create(
    model="llama3-70b-8192",
    messages=messages,
)
print(response)
```
%% Cell type:code id:7d4745e1-0477-4b6b-84de-9c82e0bc2452 tags:
``` python
response.choices[0].message.content
```
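%% Cell type:markdown tags:
The manual steps above (let the model pick a function, run it, append the result as a `function` message, then ask again) can be folded into one small helper. A hedged sketch that reuses the `client`, `functions`, and `known_functions` defined in this notebook; the helper name and flow are an addition, not part of the course:
%% Cell type:code tags:
``` python
def run_with_function_calling(user_content, model="llama3-70b-8192"):
    """One round of function calling: the model picks a function, we run it, the model answers."""
    msgs = [{"role": "user", "content": user_content}]
    first = client.chat.completions.create(
        model=model, messages=msgs, functions=functions, temperature=0
    )
    fc = first.choices[0].message.function_call
    if fc is None:  # the model answered directly, no function needed
        return first.choices[0].message.content
    result = known_functions[fc.name](**json.loads(fc.arguments))
    msgs.append({"role": "function", "name": fc.name, "content": result})
    final = client.chat.completions.create(model=model, messages=msgs, temperature=0)
    return final.choices[0].message.content

print(run_with_function_calling("What's the weather like in Boston?"))
```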
......