#include "config.h"
|
|
#include "jsondata.h"
|
|
#include "server.h"
|
|
#include "zapi.h"
|
|
#include <iostream>
|
|
#include <memory>
|
|
#include <string>
|
|
|
|
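// Look up the API key in the environment variable whose name is given in the
// config; returns an empty string if the variable is not set.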
std::string get_key(const std::string& env_key)
{
    const char* v = std::getenv(env_key.c_str());
    if (v) {
        return std::string(v);
    }
    return "";
}

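// Build a masked preview of the key for logging: the first 10 characters plus
// "...". Keys that are empty or no longer than 10 characters yield "NULL",
// which main() treats as invalid.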
std::string show_api(const std::string& api)
{
    if (api.empty()) {
        return "NULL";
    }
    if (api.size() > 10) {
        return api.substr(0, 10) + "...";
    } else {
        return "NULL";
    }
}

int main(int argc, char* argv[])
{
    std::cout << "Version: 0.9.0, config name: openai.ini" << std::endl;
    if (argc < 2) {
        std::cout << "usage: pass the listening port as the first argument." << std::endl;
        return 0;
    }

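    // On Windows, switch the console code page to UTF-8 so multi-byte model
    // output prints correctly.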
#ifdef _WIN32
    system("chcp 65001");
#endif

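    // Take the listening port from the first argument. Note that std::stoi
    // throws if the argument is not numeric.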
    int port = std::stoi(argv[1]);

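    // Load settings from openai.ini; bail out if the file cannot be parsed.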
    Configuration config;
    if (!ConfigSet::parse_config(config, "openai.ini")) {
        std::cerr << "parse config failed." << std::endl;
        return -1;
    }

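    // Resolve the API key from the environment and echo the loaded settings.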
    auto key = get_key(config.api_env_key);
    std::cout << "api_env_key:" << config.api_env_key << std::endl;
    std::cout << "assistant_name:" << config.assistant_name << std::endl;
    std::cout << "base_url:" << config.base_url << std::endl;
    std::cout << "model_name:" << config.model_name << std::endl;
    std::cout << "max_tokens:" << config.max_tokens << std::endl;
    std::cout << "api:" << show_api(key) << std::endl;

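    // Reject keys that show_api could not mask (empty or too short).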
    if (show_api(key) == "NULL") {
        std::cerr << "api key is invalid." << std::endl;
        return -1;
    }

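    // Create the OpenAI client and point it at the configured endpoint.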
    auto api = std::make_shared<COpenAI>();
    api->set_base(config.base_url, key);

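    // JSON request/response helper plus the asio-driven server bound to the
    // chosen port.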
    auto json = std::make_shared<CJsonOper>(config.user_name, config.model_name, config.assistant_name);
    asio::io_context io_context;
    Server server(io_context, port);

    // Smoke test of the API round trip (kept disabled):
    // auto req_str = json->format_request("Hello, please introduce yourself.");
    // std::string out;
    // if (!api->post(req_str, out)) {
    //     std::cout << "post error!" << std::endl;
    //     return -2;
    // }

    // std::cout << "post success!" << std::endl;
    // auto re = json->parse(out);
    // std::string use = "tokens consumed this request: " + std::to_string(re.prompt_tokens) + "+" + std::to_string(re.completion_tokens)
    //                   + "=" + std::to_string(re.total_tokens);

    // std::cout << use << std::endl;
    // std::cout << re.reasoning_content << std::endl;
    // std::cout << re.message_content << std::endl;
    // std::cout << "success." << std::endl;

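    // Hand the client and JSON helper to the server, then run the asio event
    // loop until it is stopped.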
    server.set_worker(api, json);
    server.set_token(config.max_tokens);
    server.start();
    io_context.run();

    return 0;
}