
Ai

get_completions(messages, stream=False, response_model=None, max_tokens=1000, temperature=0, top_p=1, seed=100, full_response=False, client=None)

Returns a response from the Azure OpenAI model.

Parameters:

Name            Type                    Description                                           Default
messages        list                    Chat messages to send to the model.                   required
stream          bool                    Whether to stream the response (not supported yet).   False
response_model  type[BaseModel] | None  Pydantic model for structured output (Instructor).    None
max_tokens      int                     Maximum number of tokens to generate.                 1000
temperature     float                   Sampling temperature.                                 0
top_p           float                   Nucleus-sampling probability mass.                    1
seed            int                     Seed for reproducible sampling.                       100
full_response   bool                    If True, return the raw API response object.          False
client                                  Azure OpenAI client to use for the request.           None

Returns:

Name      Type                    Description
response  str | BaseModel | None  The generated reply text, or a parsed response_model instance.

Source code in src/ml/ai.py
def get_completions(
    messages: list,
    stream: bool = False,
    response_model: type[BaseModel] | None = None,  # Use Instructor library
    max_tokens: int = 1000,
    temperature: float = 0,
    top_p: float = 1,
    seed: int = 100,
    full_response: bool = False,
    client=None,
) -> str | BaseModel | None:
    """Returns a response from the Azure OpenAI model.

    Args:
        messages: Chat messages to send to the model.
        stream: Whether to stream the response (not supported yet).
        response_model: Pydantic model for structured output (Instructor).
        max_tokens: Maximum number of tokens to generate.
        temperature: Sampling temperature.
        top_p: Nucleus-sampling probability mass.
        seed: Seed for reproducible sampling.
        full_response: If True, return the raw API response object.
        client: Azure OpenAI client to use for the request.

    Returns:
        response: str | BaseModel | None
    """
    input_dict = {
        "model": "aasasa",  # NOTE: placeholder; should be the Azure OpenAI deployment name
        "messages": messages,
        "max_tokens": max_tokens,
        "temperature": temperature,
        "top_p": top_p,
        "seed": seed,
        "stream": stream,
    }
    # if response_model:
    #     # if you use local models instead of openai models, the response_model feature may not work
    #     client = instructor.from_openai(chat_client, mode=instructor.Mode.JSON)
    #     input_dict["response_model"] = response_model

    if stream:
        raise NotImplementedError("Stream is not supported right now. Please set stream to False.")

    # Send the request and return either the raw response or just the reply text.
    response = client.chat.completions.create(**input_dict)
    if full_response:
        return response
    return response.choices[0].message.content
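
Example: a minimal usage sketch. The AzureOpenAI client construction and the prompt below are illustrative, not part of the module:

from openai import AzureOpenAI

client = AzureOpenAI(
    azure_endpoint="https://<your-resource>.openai.azure.com",  # placeholder endpoint
    api_key="<your-api-key>",  # placeholder key
    api_version="2024-02-01",
)

answer = get_completions(
    messages=[
        {"role": "system", "content": "You are a helpful assistant."},
        {"role": "user", "content": "Summarize RAG in one sentence."},
    ],
    client=client,
)
print(answer)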

get_rag_response(user_input)

Return the response after running RAG.

Parameters:

Name        Type  Description                     Default
user_input  str   The user's question to answer.  required

Returns:

Name      Type  Description
response  str   The model's answer produced by get_completions.
Source code in src/ml/ai.py
def get_rag_response(user_input):
    """Return the response after running RAG.

    Args:
        user_input: The user's question to answer.

    Returns:
        response: The model's answer produced by get_completions.
    """
    logger.info("Running RAG")

    context = get_related_document_ai_search(user_input)
    formatted_user_input = f"question : {user_input}\n\ncontexte :\n{context}"
    logger.info(f"RAG - final formatted prompt: {formatted_user_input}")

    response = get_completions(
        messages=[
            {
                "role": "system",
                "content": "Tu es un chatbot qui répond aux questions.",
            },
            {"role": "user", "content": formatted_user_input},
        ],
    )
    return response
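
Example: a minimal usage sketch (the question is illustrative; get_related_document_ai_search and the module's client configuration are assumed to be set up):

answer = get_rag_response("Quels sont les horaires d'ouverture ?")
print(answer)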

run_azure_ai_search_indexer()

Run the Azure AI Search indexer.

Returns:

Name  Type      Description
res   Response  The HTTP response from the indexer run request.

Source code in src/ml/ai.py
def run_azure_ai_search_indexer():
    """Run the Azure AI Search indexer.

    Returns:
        res: The HTTP response from the indexer run request.
    """
    headers = {
        "Content-Type": "application/json",
        "api-key": settings.AZURE_SEARCH_API_KEY,
    }
    params = {"api-version": "2024-07-01"}
    url = f"{settings.AZURE_SEARCH_SERVICE_ENDPOINT}/indexers('{settings.AZURE_SEARCH_INDEXER_NAME}')/run"

    res = requests.post(url=url, headers=headers, params=params, timeout=30)  # timeout avoids hanging indefinitely
    logger.debug(f"run_azure_ai_search_indexer response: {res.status_code}")
    return res
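
Example: a minimal sketch of triggering the indexer and polling its status. The status route is the standard Azure AI Search REST endpoint; the settings names mirror those used above:

res = run_azure_ai_search_indexer()
res.raise_for_status()  # a 202 Accepted means the run was queued

# Poll the companion status endpoint to see how the run went.
status = requests.get(
    f"{settings.AZURE_SEARCH_SERVICE_ENDPOINT}/indexers('{settings.AZURE_SEARCH_INDEXER_NAME}')/status",
    headers={"api-key": settings.AZURE_SEARCH_API_KEY},
    params={"api-version": "2024-07-01"},
    timeout=30,
).json()
print(status["lastResult"]["status"])  # e.g. "inProgress" or "success"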