A hint recipe for VRM_AI's OpenAI_API.exe (for people who want to extend it themselves)

VRM_AI, the software introduced in the article above that lets you chat with an AI in the form of a 3D model, uses Unity C# for the GUI and Python for the part that talks to the AI. Put simply:
The part you see: "D:\desktop\VRM_AI_v0.3\VRM_AI.exe"
The brains: "D:\desktop\VRM_AI_v0.3\OpenAI_API\OpenAI_API.exe"
Each executable plays that respective role.

This post publishes the recipe for that "D:\desktop\VRM_AI_v0.3\OpenAI_API\OpenAI_API.exe".
Frankly, if you just want to use VRM_AI as-is, you do not need to read this article. But anyone who wants to build an AITuber on top of VRM_AI will have to write their own script, and I hope this serves as a starting point. So, without further ado, here is the recipe.

OpenAI_API.py

from flask import Flask, request
import openai
import ChatGPT
import json
import os
import configparser
import atexit
import sys
import pathlib

app = Flask("__init__")

@app.route("/", methods=['POST'])
def post():
	text = request.form['inputtext']
	# Resolve everything relative to the exe/script location (see the note after the scripts)
	dpath = os.path.dirname(sys.argv[0])
	parentspath = pathlib.Path(sys.argv[0]).resolve().parents[1]
	CONFIG_PATH = str(parentspath) + '/config.ini'
	CONFIG_PARSER = configparser.ConfigParser()
	CONFIG_PARSER.read(CONFIG_PATH,'utf-8')
	CONFIG = CONFIG_PARSER['OpenAI_API']

	if text == "voice_to_text":
		# "voice_to_text" is a special keyword: transcribe the recorded wav with Whisper instead of chatting
		wav_path = dpath + '/wav/input.wav'
		API_Key = CONFIG['API_Key']
		openai.api_key = API_Key
		audio_file= open(wav_path, "rb")
		transcript = openai.Audio.transcribe("whisper-1", audio_file)
		res  = transcript["text"]
		res = res.encode('utf-8')
		return res
	else:
		# Chat mode: build the prompt and hand it to ChatGPT.completion()
		CharactorName = CONFIG['CharactorName']
		log_path = dpath + '/log/log.json'
		setting_path = str(parentspath) + '/Charactor_settings.txt'
		inputtext = CharactorName + "." + text
		is_file = os.path.isfile(log_path)
	
		# Charactor_settings.txt holds the system prompt (the character persona)
		with open(setting_path, 'r', encoding='utf-8') as f:
			system_settings = f.read()

		if is_file:
			# Continue the conversation from the saved log
			with open(log_path, 'r', encoding='utf-8') as log_open:
				log = json.load(log_open)
			res = ChatGPT.completion(inputtext, system_settings, log)
		else:
			# First message of the session: start with an empty history
			res = ChatGPT.completion(inputtext, system_settings, [])
		return res

parentspath = pathlib.Path(sys.argv[0]).resolve().parents[1]
dpath = os.path.dirname(sys.argv[0])
CONFIG_PATH = str(parentspath) + '/config.ini'
CONFIG_PARSER = configparser.ConfigParser()
CONFIG_PARSER.read(CONFIG_PATH,'utf-8')
CONFIG = CONFIG_PARSER['OpenAI_API']
host = CONFIG['host']
port = CONFIG['port']
# Delete any conversation log left over from a previous session
log_path = dpath + '/log/log.json'
if os.path.isfile(log_path):
	os.remove(log_path)

app.run(host, port)
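
By the way, you can poke at this Flask server on its own, the same way the Unity side does, by POSTing a form field named inputtext. Below is a minimal test-client sketch; the requests library and the 127.0.0.1:8000 address are my own assumptions here, so use whatever host and port your config.ini actually specifies.

import requests

URL = "http://127.0.0.1:8000/"  # placeholder; match the host/port in config.ini

# Chat mode: anything other than "voice_to_text" is treated as a chat message
reply = requests.post(URL, data={"inputtext": "こんにちは"})
print(reply.text)  # plain text, e.g. "Happy,0.8,..." when the emotion format parses

# Speech-to-text mode: the server transcribes OpenAI_API/wav/input.wav with Whisper
transcript = requests.post(URL, data={"inputtext": "voice_to_text"})
print(transcript.text)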

ChatGPT.py

import openai
import json
import sys
import os
import configparser
import pathlib

def completion(new_message_text:str, settings_text:str = '', past_messages:list = []):
    
    # Resolve paths relative to the exe/script location, same as in OpenAI_API.py
    dpath = os.path.dirname(sys.argv[0])
    parentspath = pathlib.Path(sys.argv[0]).resolve().parents[1]
    CONFIG_PATH = str(parentspath) + '/config.ini'
    CONFIG_PARSER = configparser.ConfigParser()
    CONFIG_PARSER.read(CONFIG_PATH,'utf-8')
    CONFIG = CONFIG_PARSER['OpenAI_API']
    API_Key = CONFIG['API_Key']
    log_path = dpath + '/log/log.json'
    CharactorName = CONFIG['CharactorName']

    openai.api_key = API_Key
    
    # On the first turn, prepend the character settings as the system prompt
    if len(past_messages) == 0 and len(settings_text) != 0:
        system = {"role": "system", "content": settings_text}
        past_messages.append(system)
    new_message = {"role": "user", "content": new_message_text}
    past_messages.append(new_message)

    # Persist the history so the next request can pick the conversation up again
    with open(log_path, mode='w', encoding='utf-8') as file:
        json.dump(past_messages, file, ensure_ascii=False)

    result = openai.ChatCompletion.create(
        model="gpt-3.5-turbo",
        stop = ["]","。"],  # cut the reply at a closing bracket or a Japanese full stop
        max_tokens = 80,    # keep replies short
        messages=past_messages
    )

    response_message = {"role": "assistant", "content": result.choices[0].message.content}
    past_messages.append(response_message)

    # Strip characters (and finally the character's name) that would get in the way of
    # the "Emotion,Weight,Message" parsing below or of the spoken output
    response_message_text = result.choices[0].message.content
    for token in ['\n', '"', ' ', '[', ']', '「', '」', '【', '】', '：', ':', '#', CharactorName]:
        response_message_text = response_message_text.replace(token, '')

    # The reply is expected in the form "Emotion,Weight,Message"; map the Japanese
    # emotion word to the VRM expression name before returning it to the Unity side
    res = response_message_text.split(',')
    if len(res) == 3:
        Emo = res[0]
        Emo_Weight = res[1]
        Message = res[2]
        if '悲' in Emo:
            Emo = "Sad"
        if '怒' in Emo:
            Emo = "Angry"
        if '驚' in Emo:
            Emo = "Surprised"
        if '幸' in Emo:
            Emo = "Happy"
        if '穏' in Emo:
            Emo = "Relaxed"
        response_message_text = Emo +","+ Emo_Weight +","+ Message

    print(response_message_text)

    return response_message_text
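
To make the flow of completion() a bit more concrete, here is roughly how the Flask side drives it. The persona text, the character name "Alice" and the sample reply are invented placeholders: the real persona lives in Charactor_settings.txt and the real name in config.ini.

import ChatGPT

# Placeholder persona: the real one lives in Charactor_settings.txt. The parsing above
# assumes it instructs the model to answer as "Emotion,Weight,Message".
system_settings = "You are a catgirl maid. Always answer in the form Emotion,Weight,Message."
inputtext = "Alice." + "おはよう"   # CharactorName + "." + user text, as built in OpenAI_API.py

# First turn: no log.json yet, so the history starts empty
res = ChatGPT.completion(inputtext, system_settings, [])

# If the model follows the format, res comes back like "Happy,0.8,おはようございます",
# ready to be split into a VRM expression, its weight, and the line to speak
print(res)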

The heart of these two scripts is the following pair of lines:

parentspath = pathlib.Path(sys.argv[0]).resolve().parents[1]
dpath = os.path.dirname(sys.argv[0])

When the script is built into an exe, the way you obtain the paths of external files such as config.ini changes, and these two lines are what deal with that. See:
https://xn--kazunaka-1s4gofop3l.com/flask-pyinstaller/#outline__4
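
To see why those two lines matter, here is how they resolve against the example folder layout from the top of this post. A PyInstaller onefile build unpacks itself into a temporary folder at run time, so __file__ would point into that temp folder, while sys.argv[0] keeps pointing at the exe itself.

import os
import pathlib
import sys

# When running the built exe, sys.argv[0] is the path of the exe:
#   D:\desktop\VRM_AI_v0.3\OpenAI_API\OpenAI_API.exe
dpath = os.path.dirname(sys.argv[0])
# -> D:\desktop\VRM_AI_v0.3\OpenAI_API      (wav/ and log/ sit next to the exe)

parentspath = pathlib.Path(sys.argv[0]).resolve().parents[1]
# -> D:\desktop\VRM_AI_v0.3                 (config.ini and Charactor_settings.txt sit here)

print(dpath, parentspath)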

Incidentally, this exe is built with PyInstaller; here is the command I used at the time:

python -m PyInstaller OpenAI_API.py --onefile --collect-data openai --collect-data aiohttp --collect-data multidict --collect-data attr --collect-data yarl --collect-data idna --collect-data async_timeout --collect-data http --collect-data charset_normalizer --collect-data aiosignal --collect-data frozenlist --collect-data requests --collect-data urllib3 --collect-data certifi --collect-data importlib

The OpenAI_API.spec generated during the build is as follows.
OpenAI_API.spec

# -*- mode: python ; coding: utf-8 -*-
from PyInstaller.utils.hooks import collect_data_files

datas = []
datas += collect_data_files('openai')
datas += collect_data_files('aiohttp')
datas += collect_data_files('multidict')
datas += collect_data_files('attr')
datas += collect_data_files('yarl')
datas += collect_data_files('idna')
datas += collect_data_files('async_timeout')
datas += collect_data_files('http')
datas += collect_data_files('charset_normalizer')
datas += collect_data_files('aiosignal')
datas += collect_data_files('frozenlist')
datas += collect_data_files('requests')
datas += collect_data_files('urllib3')
datas += collect_data_files('certifi')
datas += collect_data_files('importlib')


block_cipher = None


a = Analysis(
    ['OpenAI_API.py'],
    pathex=[],
    binaries=[],
    datas=datas,
    hiddenimports=[],
    hookspath=[],
    hooksconfig={},
    runtime_hooks=[],
    excludes=[],
    win_no_prefer_redirects=False,
    win_private_assemblies=False,
    cipher=block_cipher,
    noarchive=False,
)
pyz = PYZ(a.pure, a.zipped_data, cipher=block_cipher)

exe = EXE(
    pyz,
    a.scripts,
    a.binaries,
    a.zipfiles,
    a.datas,
    [],
    name='OpenAI_API',
    debug=False,
    bootloader_ignore_signals=False,
    strip=False,
    upx=True,
    upx_exclude=[],
    runtime_tmpdir=None,
    console=True,
    disable_windowed_traceback=False,
    argv_emulation=False,
    target_arch=None,
    codesign_identity=None,
    entitlements_file=None,
)

And that is it: a recipe dump with no explanation whatsoever...!
