Experimenting with using a language model to improve the input prompt, sending that improved prompt to the model as the actual prompt, and returning the result. It’s a bit of a play on the “critique” approach. Some of the outputs were interesting, but I need a better way to evaluate the results.
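The script below uses the pre-1.0 openai Python SDK, where chat calls go through openai.ChatCompletion.create. That method was removed in openai>=1.0; for reference, the equivalent call on the newer SDK looks roughly like this (a minimal sketch, not part of the script itself):

from openai import OpenAI

client = OpenAI()  # picks up OPENAI_API_KEY from the environment
completion = client.chat.completions.create(
    model="gpt-3.5-turbo-16k",
    temperature=1.0,
    messages=[{"role": "user", "content": "your prompt here"}],
)
print(completion.choices[0].message.content)

The script: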

import sys
import openai

MODEL = "gpt-3.5-turbo-16k"

IMPROVER_PROMPT = """
You are an expert prompt writer for a language model. Please convert the user's message into an effective prompt that will be sent to a language model to produce a helpful and useful response.

Output the improved prompt only.
"""

def generate_improved_prompt(prompt: str) -> str:
    # Ask the model to rewrite the raw user prompt into a sharper one.
    completion = openai.ChatCompletion.create(
        model=MODEL,
        temperature=1.0,
        messages=[
            {
                "role": "system",
                "content": IMPROVER_PROMPT,
            },
            {
                "role": "user",
                "content": prompt,
            },
        ],
    )
    return completion.choices[0].message.content

def generate_completion(prompt: str) -> str:
    # Send a prompt straight to the model and return the text of the reply.
    completion = openai.ChatCompletion.create(
        model=MODEL,
        temperature=1.0,
        messages=[
            {
                "role": "user",
                "content": prompt,
            },
        ],
    )
    return completion.choices[0].message.content

def main():
    prompt = ' '.join(sys.argv[1:])
    if not prompt:
        sys.exit("Usage: pass the prompt as command-line arguments")
    standard_result = generate_completion(prompt)
    print("Standard completion:")
    print(standard_result)
    improved_prompt = generate_improved_prompt(prompt)
    print("\nImproved prompt:")
    print(improved_prompt)
    improved_result = generate_completion(improved_prompt)
    print("\nImproved completion:")
    print(improved_result)
    return improved_result

if __name__ == "__main__":
    main()
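To try it (assuming the script is saved as improve_prompt.py, a hypothetical filename, and OPENAI_API_KEY is set in the environment):

python improve_prompt.py "give me three interesting facts about pelicans"

It prints the standard completion, the rewritten prompt, and the completion for the rewritten prompt, so the two outputs can be compared side by side.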