How do I get ChatGPT responses to highlight the code in the terminal?
In a previous post, I played around with ChatGPT to query PDFs stored in Google Drive.
I use the Things app as my task manager, and I sometimes play around with its AppleScript support to move things around. No pun intended.
The makers of the app provide this handy PDF manual for all things AppleScript + Things. I wanted to see if I could get ChatGPT to query it and help answer questions like "how do I add a new todo?" or "how do I add a todo to a specific project?".
I was able to do that beautifully.
But now I wanted to see if I could get the returned code highlighted in the terminal.
So, I edited the code.
Basically, this was two steps -
- Have ChatGPT return the code wrapped in backticks, along with the programming language
- Have Python recognize the backticks, and use the termcolor library to highlight the code (there's a quick sketch of this step right after this list)
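Here's that second step in isolation - a minimal sketch, where the sample answer and the AppleScript snippet inside it are made up purely for illustration. termcolor's colored() just wraps text in ANSI escape codes, which the terminal renders as color:

import re
from termcolor import colored

# A made-up answer of the kind the prompt asks ChatGPT to return -
# AppleScript fenced in backticks, tagged with the language
sample = '''```applescript
tell application "Things3"
    set newToDo to make new to do with properties {name:"Buy milk"}
end tell
```'''

# Wrap each AppleScript keyword in ANSI color codes so the terminal prints it in red
for keyword in ['tell', 'end', 'set', 'to', 'new', 'with', 'properties']:
    sample = re.sub(r'\b{}\b'.format(keyword), colored(keyword, 'red'), sample)

print(sample)

And here's the full, updated script -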
from langchain.chat_models import ChatOpenAI
from langchain.chains import RetrievalQA
from langchain.document_loaders import GoogleDriveLoader
from langchain.text_splitter import RecursiveCharacterTextSplitter
from langchain.embeddings import OpenAIEmbeddings
from langchain.vectorstores import Chroma
from langchain.prompts import PromptTemplate
import re
from termcolor import colored
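# The Google Drive folder that holds the Things AppleScript guide PDF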
folder_id = "YOUR_GOOGLE_DRIVE_FOLDER_ID"
loader = GoogleDriveLoader(
    folder_id=folder_id,
    recursive=False
)
docs = loader.load()
text_splitter = RecursiveCharacterTextSplitter(
    chunk_size=4000, chunk_overlap=0, separators=[" ", ",", "\n"]
)
texts = text_splitter.split_documents(docs)
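# Embed the chunks and index them in Chroma so relevant sections can be retrieved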
embeddings = OpenAIEmbeddings()
db = Chroma.from_documents(texts, embeddings)
retriever = db.as_retriever()
prompt_template = """Use the following pieces of context to answer the question at the end. If you don't know the answer, just say that you don't know, don't try to make up an answer. Recognize any AppleScript in the output, and include the code in backticks along with the programming language to enable syntax highlighting.
{context}
Question: {question}
Answer:"""
PROMPT = PromptTemplate(
    template=prompt_template, input_variables=["context", "question"]
)
llm = ChatOpenAI(temperature=0, model_name="gpt-3.5-turbo")
# qa = RetrievalQA.from_chain_type(llm=llm, chain_type="stuff", retriever=retriever)
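# Pass the custom prompt to the chain so answers come back with backtick-fenced, language-tagged code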
chain_type_kwargs = {"prompt": PROMPT}
qa = RetrievalQA.from_chain_type(llm=llm, chain_type="stuff", retriever=retriever, chain_type_kwargs=chain_type_kwargs)
def highlight_code_blocks(text):
    # Find fenced code blocks (anything between triple backticks)
    pattern = r'```([^`]+)```'
    colored_text = text

    def highlight_keywords(match):
        # Color common AppleScript keywords inside the code block red
        code_block = match.group(1)
        keywords = ['tell', 'end', 'set', 'to', 'new', 'with', 'properties']
        for keyword in keywords:
            code_block = re.sub(r'\b{}\b'.format(keyword), colored(keyword, 'red'), code_block)
        return "```" + code_block + "```"

    colored_text = re.sub(pattern, highlight_keywords, colored_text)
    return colored_text
# Simple loop: ask a question, highlight any AppleScript in the answer, print it
while True:
    query = input("> ")
    answer = qa.run(query)
    highlighted_answer = highlight_code_blocks(answer)
    print(highlighted_answer)
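With that in place, any of the listed AppleScript keywords (tell, end, set, and so on) inside a backtick-fenced block in the answer get printed in red, while the backtick fences themselves are left in the output.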