Skip to content

Ai

get_completions(messages, stream=False, response_model=None, max_tokens=1000, temperature=0, top_p=1, seed=100, full_response=False, client=None)

Returns a response from the azure openai model.

Parameters:

Name Type Description Default
messages list
required
stream bool
False
response_model BaseModel
None
max_tokens int
1000
temperature int
0
top_p int
1
seed int
100
full_response bool
False
client
None

Returns:

Name Type Description
response str | BaseModel | None

str | BaseModel | None :

Source code in src/ml/ai.py
 8
 9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
def get_completions(
    messages: list,
    stream: bool = False,
    response_model: type[BaseModel] | None = None,  # Use Instructor library
    max_tokens: int = 1000,
    temperature: float = 0,
    top_p: float = 1,
    seed: int = 100,
    full_response: bool = False,
    client=None,
) -> str | BaseModel | None:
    """Return a response from the Azure OpenAI chat model.

    Args:
        messages: Chat messages in the OpenAI format (list of role/content dicts).
        stream: Streaming flag; only ``False`` is supported (``True`` raises).
        response_model: Optional Pydantic model class for structured output
            via the Instructor library.
        max_tokens: Maximum number of tokens to generate.
        temperature: Sampling temperature.
        top_p: Nucleus-sampling probability mass.
        seed: Seed for best-effort deterministic sampling.
        full_response: If ``True``, return the raw API response object instead
            of only the message content.
        client: Optional OpenAI client; defaults to the module-level
            ``chat_client``.

    Returns:
        The message content as ``str``, a ``response_model`` instance when one
        is requested, the raw response object when ``full_response`` is True,
        or ``None`` when the API call fails.

    Raises:
        NotImplementedError: If ``stream`` is ``True``.
    """
    if not client:
        client = chat_client

    input_dict = {
        "model": chat_model_name,
        "messages": messages,
        "max_tokens": max_tokens,
        "temperature": temperature,
        "top_p": top_p,
        "seed": seed,
        "stream": stream,
    }
    if response_model:
        # If you use local models instead of openai models, the response_model
        # feature may not work.
        # Wrap the resolved client (was: chat_client, which silently ignored a
        # caller-supplied client).
        client = instructor.from_openai(client, mode=instructor.Mode.JSON)
        input_dict["response_model"] = response_model

    if stream:
        raise NotImplementedError("Stream is not supported right now. Please set stream to False.")

    try:
        response = client.chat.completions.create(**input_dict)
    except Exception as e:  # broad by design: any API failure maps to a logged None
        logger.exception(f"Error in chat GPT: {e}")
        logger.error("chat GPT response: None")
        return None

    if full_response or response_model:
        return response
    else:
        return response.choices[0].message.content

get_rag_response(user_input)

Return the response after running RAG.

Parameters:

Name Type Description Default
user_input
required

Returns:

Name Type Description
response
Source code in src/ml/ai.py
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
def get_rag_response(user_input):
    """Return the chat model's answer after running RAG over the user input.

    Retrieves related documents via Azure AI Search, prepends them as context
    to the user's question, and asks the chat model to answer.

    Args:
        user_input: The user's question, as free text.

    Returns:
        response: The model's answer (``str``), or ``None`` if the completion
            call failed (see ``get_completions``).
    """
    logger.info("Running RAG")

    context = get_related_document_ai_search(user_input)
    formatted_user_input = f"question :{user_input}, \n\n contexte : \n{context}."
    logger.info(f"RAG - final formatted prompt: {formatted_user_input}")

    response = get_completions(
        messages=[
            {
                "role": "system",
                "content": "Tu est un chatbot qui répond aux questions.",
            },
            {"role": "user", "content": formatted_user_input},
        ],
    )
    return response

run_azure_ai_search_indexer()

Run the azure ai search index.

Returns:

Name Type Description
res

response

Source code in src/ml/ai.py
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
def run_azure_ai_search_indexer():
    """Trigger a run of the Azure AI Search indexer.

    Issues a POST to the search service's indexer "run" endpoint, using the
    indexer name, endpoint, and API key from ``settings``.

    Returns:
        res: The ``requests.Response`` from the run request; the caller is
            responsible for checking ``res.status_code``.
    """
    headers = {
        "Content-Type": "application/json",
        "api-key": settings.AZURE_SEARCH_API_KEY,
    }
    params = {"api-version": "2024-07-01"}
    url = f"{settings.AZURE_SEARCH_SERVICE_ENDPOINT}/indexers('{settings.AZURE_SEARCH_INDEXER_NAME}')/run"

    # requests has no default timeout; without one a stalled service would
    # block this call forever.
    res = requests.post(url=url, headers=headers, params=params, timeout=30)
    logger.debug(f"run_azure_ai_search_index response: {res.status_code}")
    return res