I’m working on a program that automatically generates elementary Python functions from a user prompt supplied as a .txt file. The prompt also specifies how many code samples should be generated, and that’s the part I’m having trouble with.
I want it to generate all the code samples in a single invocation, which should be possible according to this:
https://learn.microsoft.com/en-us/azure/ai-services/openai/reference#request-body
And this: https://api.python.langchain.com/en/latest/chat_models/langchain_openai.chat_models.azure.AzureChatOpenAI.html#langchain_openai.chat_models.azure.AzureChatOpenAI.n
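For reference, the request-body docs list n as a supported parameter. My understanding is that with the plain openai SDK (outside LangChain), a sketch like the following would return multiple choices in one call (the deployment, key, and endpoint are the same placeholders as in my code below):
from openai import AzureOpenAI

client = AzureOpenAI(
    api_key='MY_KEY',
    azure_endpoint='MY_ENDPOINT',
    api_version='2023-12-01-preview',
)
response = client.chat.completions.create(
    model='gpt-35-16k',  # Azure deployment name
    messages=[{"role": "user", "content": "Implement sqrt_list(l:list) -> float"}],
    n=3,  # request three completions in a single call
    temperature=0.8,
)
# response.choices should contain three entries here
print([choice.message.content for choice in response.choices])
I’d like to get the same behavior through the LangChain chain.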
I’ve tried several approaches to get multiple completions (shown at the bottom of the code snippet below), and I even tried hard-coding n=3 in the initial declaration of the model, but I keep getting a single response.
I would appreciate any help with this.
from langchain_core.output_parsers import StrOutputParser
from langchain_core.prompts import ChatPromptTemplate, SystemMessagePromptTemplate, HumanMessagePromptTemplate
from langchain_openai import AzureChatOpenAI
import os
import re
def extract_vars(file_path):
    # Implemented with regex, so the implementation is not relevant here.
    # Tested it and the output is as expected.
    return {
        "function_name": function_name,
        "function_signature": function_signature,
        "description": description,
        "examples": examples,
        "temperatures": temperatures,
        "num_samples": num_samples,
        "unit_tests": unit_tests
    }
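# For illustration only, a hypothetical layout of the prompt .txt file that
# extract_vars parses (the real format isn't shown in this post):
#   function_signature: sqrt_list(l:list) -> float
#   description: A function named sqrt_list that ...
#   examples: sqrt_list([1,7,8]) -> 4.0
#   temperatures: 0.8
#   num_samples: 3
#   unit_tests: ...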
# The underlying LLM model
model = AzureChatOpenAI(
    azure_deployment='gpt-35-16k',
    api_key='MY_KEY',
    azure_endpoint='MY_ENDPOINT',
    api_version='2023-12-01-preview',
    max_tokens=1500,
)
# Output parser to convert the model's output to a string
parser = StrOutputParser()
# The prompt templates for the system and human messages
system_message_prompt = SystemMessagePromptTemplate.from_template(
    "You are an expert Python coder. "
    "You are asked to implement a function. "
    "You will be given the function signature, description and 0-3 examples. "
    "You need to provide the implementation of the function."
)
human_message_prompt = HumanMessagePromptTemplate.from_template("\n".join(
    [
        "signature: {function_signature};",
        "description: {description}.",
        "{examples}\n\n",
        "LLM Response: \n"
    ]
))
# The chat prompt template
messages = ChatPromptTemplate.from_messages([system_message_prompt, human_message_prompt])
# Used to pipeline the invocation process (instance of RunnableSequence)
chain = messages | model | parser
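# Sanity-check sketch (not part of the original flow): ChatPromptTemplate
# exposes format_messages, which can be used to inspect the rendered prompt:
# messages.format_messages(
#     function_signature="sqrt_list(l:list) -> float",
#     description="...",
#     examples="example: sqrt_list([1,7,8]) -> 4.0;",
# )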
def invoke_LLM(function_signature, description, examples, temperature, num_samples):
    """Invokes the LLM model with the given prompt information.

    Uses the function signature, description and examples to invoke the LLM
    model, sets it to the specified temperature, and generates the specified
    number of implementations. The implementations are then returned.
    """
    input_dict = {
        "function_signature": function_signature,
        "description": description,
        "examples": "\n".join([f"example: {example};" for example in examples])
    }
    config_dict = {
        "temperature": temperature,
        "n": num_samples
    }
    # Each approach below was tried on its own (with the others commented out);
    # all of them return a single string rather than num_samples completions.
    # Approach 1: pass the settings through invoke's config argument
    return chain.invoke(input=input_dict, config=config_dict)
    # Approach 2: bind the settings with with_config, unpacking the dict
    # return chain.with_config(**config_dict).invoke(input=input_dict)
    # Approach 3: bind the settings with with_config keyword arguments
    # return chain.with_config(temperature=temperature, n=num_samples).invoke(input=input_dict)
    # Approach 4: mutate the model's config directly
    # model.config["temperature"] = temperature
    # model.config["n"] = num_samples
    # return chain.invoke(input=input_dict)
Example for testing:
function_signature = "sqrt_list(l:list) -> float"
description = "A function named sqrt_list that takes a list of integers and returns the square root of the sum of that list, up to two decimal places."
examples = ["sqrt_list([1,7,8]) -> 4.0"]
temperature = 0.8
num_samples = 3
res = invoke_LLM(function_signature, description, examples, temperature, num_samples)
print(res)
# Expecting a list of 3 strings where each string is an implementation
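# A hypothetical illustration of the shape I expect back (contents made up):
# ["def sqrt_list(l):\n    ...",
#  "def sqrt_list(l):\n    ...",
#  "def sqrt_list(l):\n    ..."]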