import os
from langchain_community.llms import DeepInfra
from langchain.prompts import PromptTemplate
from langchain.chains import LLMChain
# NOTE(review): hardcoded placeholder token — replace with a real token, or
# better, export DEEPINFRA_API_TOKEN in the shell and delete this line so
# the secret never lives in source control.
os.environ["DEEPINFRA_API_TOKEN"] = "<your DeepInfra API token>"

# Configure the model and its sampling parameters up front, instead of
# mutating `model_kwargs` after construction (it is a declared field on
# the DeepInfra model, so it can be passed directly).
llm = DeepInfra(
    model_id="deepseek-ai/DeepSeek-V3",
    model_kwargs={
        "temperature": 0.7,         # sampling randomness
        "repetition_penalty": 1.2,  # discourage repeated tokens
        "max_new_tokens": 250,      # cap on generated length
        "top_p": 0.9,               # nucleus sampling cutoff
    },
)

# Basic (blocking) inference: returns the whole completion as one string.
print(llm.invoke("Who let the dogs out?"))

# Streaming inference: emit tokens as they arrive on a single line.
# The original `print(chunk)` put every streamed chunk on its own line.
for chunk in llm.stream("Who let the dogs out?"):
    print(chunk, end="", flush=True)
print()  # terminate the streamed line

# Chain a prompt template into the LLM using LCEL pipe syntax.
template = """Question: {question}
Answer: Let's think step by step."""
prompt = PromptTemplate(template=template, input_variables=["question"])
llm_chain = prompt | llm
# Pass inputs as a dict keyed by the template's input variable — explicit,
# and the form that also works for multi-variable prompts (a bare string
# is only accepted when the prompt has exactly one input variable).
print(llm_chain.invoke({"question": "Can penguins reach the North pole?"}))