"""A simple script to run a Flow that can be used for development and debugging."""

import os

from aiflows.backends.api_info import ApiInfo
from aiflows.utils.general_helpers import read_yaml_file, quick_load_api_keys

from aiflows import logging
from aiflows.flow_cache import CACHING_PARAMETERS, clear_cache

from aiflows.utils import serving
from aiflows.workers import run_dispatch_worker_thread
from aiflows.messages import FlowMessage
from aiflows.interfaces import KeyInterface
from aiflows.utils.colink_utils import start_colink_server

CACHING_PARAMETERS.do_caching = False  # Caching is disabled here; set to True to enable it
# clear_cache() # Uncomment this line to clear the cache

logging.set_verbosity_debug()


dependencies = [
    {"url": "aiflows/ControllerExecutorFlowModule", "revision": os.getcwd()}
]

from aiflows import flow_verse
flow_verse.sync_dependencies(dependencies)
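# sync_dependencies fetches the flow module declared above; since the revision
# points at os.getcwd(), flow_verse syncs the local copy of
# ControllerExecutorFlowModule in the working directory rather than a remote revision.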
if __name__ == "__main__":

    # 1. ~~~~~ Set up a colink server ~~~~
    cl = start_colink_server()
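    # start_colink_server() brings up a local CoLink server and returns the client
    # handle (cl) that all the serving and worker calls below are bound to.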


    #2. ~~~~~Load flow config~~~~~~
    root_dir = "."
    cfg_path = os.path.join(root_dir, "demo.yaml")
    cfg = read_yaml_file(cfg_path)
    
    #2.1 ~~~ Set the API information ~~~
    # OpenAI backend
    api_information = [ApiInfo(backend_used="openai",
                               api_key=os.getenv("OPENAI_API_KEY"))]
    # # Azure backend
    # api_information = [ApiInfo(backend_used="azure",
    #                            api_base=os.getenv("AZURE_API_BASE"),
    #                            api_key=os.getenv("AZURE_OPENAI_KEY"),
    #                            api_version=os.getenv("AZURE_API_VERSION"))]
    
    
    quick_load_api_keys(cfg, api_information, key="api_infos")
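    # quick_load_api_keys injects the ApiInfo objects above into the loaded config
    # wherever an "api_infos" entry appears, so the flow's LLM backend can
    # authenticate at runtime.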

    
    #3. ~~~~ Serve The Flow ~~~~
    
    
    # serving.recursive_serve_flow(
    #     cl = cl,
    #     flow_class_name="flow_modules.aiflows.ControllerExecutorFlowModule.WikiSearchAtomicFlow",
    #     flow_endpoint="WikiSearchAtomicFlow",
    # )
    
    # serving.serve_flow(
    #     cl = cl,
    #     flow_class_name="flow_modules.aiflows.ControllerExecutorFlowModule.ControllerAtomicFlow",
    #     flow_endpoint="ControllerAtomicFlow",
    # )
    
    serving.recursive_serve_flow(
        cl=cl,
        flow_class_name="flow_modules.aiflows.ControllerExecutorFlowModule.ControllerExecutorFlow",
        flow_endpoint="ControllerExecutorFlow",
    )
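    # recursive_serve_flow registers the composite flow under the
    # "ControllerExecutorFlow" endpoint; the recursive variant should also take
    # care of serving the subflows it depends on, which is presumably why the
    # standalone serve calls above are left commented out.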

    #4. ~~~~~Start A Worker Thread~~~~~
    run_dispatch_worker_thread(cl)
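    # The dispatch worker runs in a background thread and executes the messages
    # addressed to flows served on this colink instance; without a worker, the
    # request sent below would sit unprocessed.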

    #5. ~~~~~Mount the flow and get its proxy~~~~~~
    proxy_flow = serving.get_flow_instance(
        cl=cl,
        flow_endpoint="ControllerExecutorFlow",
        user_id="local",
        config_overrides=cfg,
    )
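    # get_flow_instance mounts an instance of the served flow (applying cfg as
    # config overrides) and returns a proxy object used below to exchange
    # messages with it.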
    #6. ~~~ Get the data ~~~
    data = {
        "id": 0,
        "goal": "Answer the following question: What is the profession and date of birth of Michael Jordan?",
    }
   
    
    # Option 1: use the FlowMessage class
    input_message = FlowMessage(
        data=data,
    )

    # Option 2: use the proxy_flow
    # input_message = proxy_flow.package_input_message(data=data)
    
    #7. ~~~ Run inference ~~~
    future = proxy_flow.get_reply_future(input_message)
    
    # Uncomment this line if you would like to get the full message back:
    # reply_message = future.get_message()
    reply_data = future.get_data()
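    # get_reply_future returns a future for the flow's reply; get_data() blocks
    # until the reply arrives and returns only its data payload, whereas
    # get_message() (above) would return the full FlowMessage.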
    
    # ~~~ Print the output ~~~
    print("~~~~~~Reply~~~~~~")
    print(reply_data)
    
    
    #8. ~~~~ (Optional) apply output interface on reply ~~~~
    # output_interface = KeyInterface(
    #     keys_to_rename={"api_output": "answer"},
    # )
    # print("Output: ", output_interface(reply_data))
    
    
    #9. ~~~~~Optional: Unserve Flow~~~~~~
    # serving.delete_served_flow(cl, "FlowModule")