# local_llm.py

import requests
import configparser
# Endpoint of the locally hosted LLM server (OpenAI-style chat completions API).
URL = "http://127.0.0.1:5000/v1/chat/completions"

# Every request is sent as a JSON body.
HEADERS = {"Content-Type": "application/json"}

# Running conversation transcript, shared and mutated by local_llm_call().
HISTORY = []
12
def local_llm_call(prompt):
    """Send *prompt* to the local LLM chat endpoint and return its reply.

    Appends the user prompt and the assistant's reply to the module-level
    HISTORY list, so conversation context accumulates across calls. The
    reply is also printed to stdout.

    Args:
        prompt: The user message to send to the model.

    Returns:
        The assistant's reply text.

    Raises:
        requests.HTTPError: if the server responds with a 4xx/5xx status.
        requests.Timeout: if the server does not answer within 60 seconds.
    """
    HISTORY.append({"role": "user", "content": prompt})
    data = {
        "mode": "chat",
        "character": "Example",
        "messages": HISTORY,
    }
    # Call local API for fallacy detection or other tasks.
    # timeout guards against a hung local server blocking this call forever.
    # verify=False is kept from the original (for local self-signed HTTPS
    # setups); it is ignored for plain-http URLs like the default above.
    response = requests.post(URL, headers=HEADERS, json=data,
                             verify=False, timeout=60)
    # Fail loudly on an error status instead of raising an opaque KeyError
    # when the error body lacks a 'choices' key.
    response.raise_for_status()
    assistant_message = response.json()['choices'][0]['message']['content']
    HISTORY.append({"role": "assistant", "content": assistant_message})
    print(assistant_message)
    return assistant_message
25
26
if __name__ == '__main__':
    # Manual smoke test: ask the model to continue a numbered recipe.
    demo_prompt = "In order to make homemade bread, follow these steps:\n1)"
    local_llm_call(demo_prompt)