Langchain - Getting Started
This blog outlines the step-by-step process for getting started with a Langchain demo.
LLM Creation
Go to the Azure portal and create an Azure OpenAI service. Within that service, navigate to the Foundry portal and deploy a cost-effective gpt-4o-mini model (use version: 2024-07-18).
Simple Chat
Create chat.py
1from azure.identity import DefaultAzureCredential, get_bearer_token_provider
2from langchain_openai import AzureChatOpenAI
3from langchain_core.messages import SystemMessage, HumanMessage
4
5
def create_client(
    model_name: str = "gpt-4o-mini",
    *,
    azure_endpoint: str = "https://testazureopenairesource11.openai.azure.com/",
    api_version: str = "2024-12-01-preview",
) -> AzureChatOpenAI:
    """Create an Azure OpenAI chat client authenticated via Azure AD.

    Args:
        model_name: Deployed model name. Options: "gpt-4o-mini".
        azure_endpoint: Azure OpenAI resource endpoint URL. Defaults to the
            demo resource used throughout this post.
        api_version: Azure OpenAI REST API version.

    Returns:
        A configured ``AzureChatOpenAI`` instance.
    """
    # Bearer-token provider fetches AAD tokens on demand for the
    # Cognitive Services scope (no API key needed).
    # NOTE(review): the PowerShell credential is excluded — presumably to
    # skip a slow/unavailable credential probe; confirm this is intended.
    token_provider = get_bearer_token_provider(
        DefaultAzureCredential(exclude_powershell_credential=True),
        "https://cognitiveservices.azure.com/.default",
    )

    return AzureChatOpenAI(
        model=model_name,
        azure_endpoint=azure_endpoint,
        azure_ad_token_provider=token_provider,
        api_version=api_version,
    )
23
24
def test_connection() -> bool:
    """Smoke-test the Azure OpenAI connection.

    Sends a fixed probe prompt and reports whether the expected marker
    string came back. On any failure the error is printed and False is
    returned instead of raising.
    """
    probe = [
        SystemMessage(content="You are a helpful assistant."),
        HumanMessage(content="Reply with exactly: CONNECTION_OK"),
    ]
    try:
        answer = create_client().invoke(probe)
        return "CONNECTION_OK" in answer.content
    except Exception as err:
        # Boundary handler: this is a diagnostic helper, so we report
        # and degrade to False rather than propagate.
        print(f"❌ Azure OpenAI connection failed: {err}")
        return False
38
39
# Shared system prompt prepended by send_message() so every request
# carries the same assistant persona.
SYSTEM_MESSAGE = SystemMessage(content="You are a helpful assistant.")
41
42
def send_message(prompt: str, client: AzureChatOpenAI | None = None) -> str:
    """Send one user prompt and return the assistant's reply text.

    Args:
        prompt: The user message to send.
        client: Optional pre-built client; when omitted a fresh one is
            created for this call.

    Returns:
        The model's reply content as a string.
    """
    chat = client or create_client()
    reply = chat.invoke([SYSTEM_MESSAGE, HumanMessage(content=prompt)])
    return reply.content
48
49
def interactive_chat():
    """Run a terminal chat loop until the user enters blank, exit, or quit.

    One client is created up front and reused for every turn.
    """
    session = create_client()
    print("Type `exit` to quit.")
    while True:
        user_text = input("You: ").strip()
        if not user_text or user_text.lower() in ("exit", "quit"):
            return
        print("Assistant:", send_message(user_text, session))
59
60
def main():
    """Demo driver: connectivity check, a one-shot query, then interactive chat."""
    status = test_connection()
    print("✅ CONNECTION_OK" if status else "❌ connection failed")

    summary = send_message("Give me a 2-line summary of the Python logging module.")
    print("Assistant:", summary)

    interactive_chat()
69
70
# Run the demo only when executed as a script, not when imported.
if __name__ == "__main__":
    main()
Agent
Create a new Python file named create_agent.py. This file will demonstrate how a tool is triggered within an agent.
1from azure.identity import DefaultAzureCredential, get_bearer_token_provider
2from langchain.agents import create_agent
3from langchain_openai import AzureChatOpenAI
4
5
def get_weather(city: str) -> str:
    """Get weather for a given city."""
    # NOTE(review): the docstring above is presumably surfaced to the model
    # as the tool description by create_agent — keep it intact; verify.
    # Stub implementation: every city always reports sunny weather.
    return "It's always sunny in {}!".format(city)
9
10
def create_model(
    model_name: str = "gpt-4o-mini",
    *,
    azure_endpoint: str = "https://testazureopenairesource11.openai.azure.com/",
    api_version: str = "2024-12-01-preview",
) -> AzureChatOpenAI:
    """Create a LangChain Azure OpenAI chat model using Azure AD auth.

    Args:
        model_name: Deployed model name. Options: "gpt-4o-mini".
        azure_endpoint: Azure OpenAI resource endpoint URL. Defaults to the
            demo resource used throughout this post.
        api_version: Azure OpenAI REST API version.

    Returns:
        A configured ``AzureChatOpenAI`` instance.
    """
    # Bearer-token provider fetches AAD tokens on demand for the
    # Cognitive Services scope (no API key needed).
    # NOTE(review): the PowerShell credential is excluded — presumably to
    # skip a slow/unavailable credential probe; confirm this is intended.
    token_provider = get_bearer_token_provider(
        DefaultAzureCredential(exclude_powershell_credential=True),
        "https://cognitiveservices.azure.com/.default",
    )

    return AzureChatOpenAI(
        model=model_name,
        azure_endpoint=azure_endpoint,
        azure_ad_token_provider=token_provider,
        api_version=api_version,
    )
24
25
def run_agent(user_query: str) -> str:
    """Build a one-tool agent and return its final reply to ``user_query``.

    Args:
        user_query: The question to send to the agent.

    Returns:
        The text content of the agent's last message.
    """
    weather_agent = create_agent(
        model=create_model(),
        tools=[get_weather],
        system_prompt="You are a helpful assistant.",
    )

    outcome = weather_agent.invoke(
        {"messages": [{"role": "user", "content": user_query}]}
    )
    # The agent returns a state dict; the final answer is the last message.
    return outcome["messages"][-1].content
38
39
# Demo entry point: ask the agent a weather question when run as a script.
if __name__ == "__main__":
    prompt = "What is the weather in San Francisco?"
    print(f"User: {prompt}")
    print(f"Assistant: {run_agent(prompt)}")
Written by Binwei@Shanghai