File size: 2,261 Bytes
210a49b
 
 
798fa73
210a49b
 
 
 
798fa73
210a49b
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
798fa73
210a49b
 
 
 
 
 
798fa73
210a49b
 
 
 
 
 
 
798fa73
210a49b
 
 
 
798fa73
210a49b
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
# Default config for a ChatWithDemonstrationsFlow: instantiated via the module's
# instantiate_from_default_config hook (see _target_ below). Per the description,
# it answers questions sequentially using demonstration examples.
_target_: flow_modules.aiflows.ChatWithDemonstrationsFlowModule.ChatWithDemonstrationsFlow.instantiate_from_default_config
name: "SimpleQA_Flow_with_Demonstrations"
description: "A sequential flow that answers questions with demonstrations"

input_interface: # Connector between the "input data" and the Flow
  - "questions"
output_interface: # Connector between the Flow's output and the caller
  - "answer"

subflows_config:
  demonstration_flow:
    name: "proxy of DemonstrationsAtomicFlow"
    description: "A flow that answers questions with demonstrations"
    # Few-shot demonstrations: each entry pairs a query with its expected response.
    data:
      - query_data:
          query: "What is the capital of Turkey?"
        response_data:
          # Fixed: was "Istambul" — a misspelling of Istanbul, which is in any
          # case not the capital; the capital of Turkey is Ankara.
          response: "Ankara, my sir."
      - query_data:
          query: "what is the capital of Germany?"
        response_data:
          response: "Berlin, my sir."
    # Template used to render each demonstration query.
    query_prompt_template:
      template: |2-
        Answer the following question: {{query}}
      input_variables:
        - "query"
    # Template used to render each demonstration response.
    response_prompt_template:
      template: |2-
        {{response}}
      input_variables:
        - "response"

  chat_flow:
    name: "proxy SimpleQA_Flow"
    description: "A flow that answers questions"

    # Input keys this flow expects (before initialization).
    input_interface_non_initialized:
      - "question"

    # LLM backend configuration.
    backend:
      _target_: aiflows.backends.llm_lite.LiteLLMBackend
      # NOTE(review): "???" looks like OmegaConf's mandatory-value marker, i.e.
      # the caller must supply api_infos — confirm against the loader used.
      api_infos: ???
      # Per-provider model identifiers.
      model_name:
        openai: "gpt-3.5-turbo"
        azure: "azure/gpt-4"

      # Generation parameters.
      n: 1
      max_tokens: 3000
      temperature: 0.3
      top_p: 0.2
      frequency_penalty: 0
      presence_penalty: 0

    # Retry policy for backend API calls.
    n_api_retries: 6
    wait_time_between_retries: 20

    # System prompt: fixed text, no template variables.
    system_message_prompt_template:
      _target_: aiflows.prompt_template.JinjaPrompt
      template: |2-
        You are a helpful chatbot that truthfully answers questions. Answer in a similar way to your previous replies.
      input_variables: []
      partial_variables: {}

    # First human message: wraps the incoming question.
    init_human_message_prompt_template:
      _target_: aiflows.prompt_template.JinjaPrompt
      template: |2-
        Answer the following question: {{question}}
      input_variables:
        - "question"
      partial_variables: {}