Merge pull request 'raliz' (#1) from raliz into master

Reviewed-on: #1
This commit is contained in:
zjt 2024-07-03 10:57:35 +08:00
commit 38c01b93d3
13 changed files with 35976 additions and 3648 deletions

View File

@ -5,7 +5,12 @@ const nextConfig = {
},
eslint: {
ignoreDuringBuilds: true,
}
},
webpack: (config) => {
config.resolve.fallback = { fs: false };
return config;
},
};
export default nextConfig;

1807
package-lock.json generated

File diff suppressed because it is too large Load Diff

View File

@ -23,5 +23,10 @@
"postcss": "^8",
"tailwindcss": "^3.4.1",
"typescript": "^5"
},
"browser": {
"fs": false,
"os": false,
"path": false
}
}

View File

@ -2,6 +2,10 @@
@tailwind components;
@tailwind utilities;
* {
box-sizing: border-box;
}
:root {
--foreground-rgb: 0, 0, 0;
--background-start-rgb: 214, 219, 220;

View File

@ -1,16 +1,60 @@
"use client"
import dynamic from 'next/dynamic';
import { useEffect, useRef, useState } from 'react';
import '@/deps/live2d.min.js'
import useVoice2Txt from "@/hooks/useVoice2txt";
import useTxt2Voice from '@/hooks/useTxt2Voice';
import axios from 'axios';
import * as PIXI from 'pixi.js';
import { Live2DModel } from 'pixi-live2d-display/cubism2';
"use client";
import { useEffect, useRef, useState } from "react";
import "@/deps/cubism5.js";
import useVoice2Txt from "@/hooks/useVoice2txt";
import useTxt2Voice from "@/hooks/useTxt2Voice";
import axios from "axios";
import * as PIXI from "pixi.js";
import { Live2DModel } from "pixi-live2d-display/cubism4";
import { useSearchParams } from "next/navigation";
import useRequest from "@/hooks/use-openai-request";
import txt2Voice from "@/hooks/txt2VoiceAPI";
// Fake list: hard-coded feature entries rendered in the options column
// (id/title plus a remote icon URL). Placeholder data — TODO replace with API data.
const opList = [
  {
    id: 1,
    title: "幸运转盘",
    src: "http://picdown.jchysoft.com/uiIcon/xingyunzhuanpan.png",
  },
  {
    id: 2,
    title: "领券中心",
    src: "http://picdown.jchysoft.com/uiIcon/lingjuanzhongxin.png",
  }, // bjt's typo in the asset filename, deliberately kept as-is to match the uploaded file
  {
    id: 3,
    title: "0元抽",
    src: "http://picdown.jchysoft.com/uiIcon/0yuanchou.png",
  },
  {
    id: 4,
    title: "欧宝赏",
    src: "http://picdown.jchysoft.com/uiIcon/oubanshang.png",
  }, // bjt's typo ("oubanshang" vs pinyin "oubaoshang") — kept to match the asset name
];
//import "@/deps/live2dcubismcore.min.js"
export default function Home() {
const [useVoice, setUseVoice] = useState(false);
const query = useSearchParams();
const characterId = query.get("id");
const token = query.get("token");
localStorage.setItem("token", token || "");
const { complete, completion: data, isLoading, abort } = useRequest();
const voice2txt = (txt: string) => {
fetch("sharkapiBaseUrl/voice/txt2voice", {
method: "POST",
headers: {
"Content-Type": "application/json",
"Authorization": `${token}`,
},
body: JSON.stringify({
txt: txt,
}),
})
}
function draggable(model: any) {
model.buttonMode = true;
model.on("pointerdown", (e: any) => {
@ -35,8 +79,6 @@ export default function Home() {
foreground.alpha = 0.2;
model.addChild(foreground);
checkbox("Model Frames", (checked: any) => (foreground.visible = checked));
}
function addHitAreaFrames(model: any) {
@ -46,11 +88,11 @@ export default function Home() {
model.addChild(hitAreaFrames);
checkbox("Hit Area Frames", (checked: any) => (hitAreaFrames.visible = checked));
} catch (err) {
}
checkbox(
"Hit Area Frames",
(checked: any) => (hitAreaFrames.visible = checked)
);
} catch (err) { }
}
function checkbox(name: any, onChange: any) {
@ -74,81 +116,48 @@ export default function Home() {
onChange(checkbox.checked);
}
const send = (inputText: string) => {
setResponse(inputText)
setResponse(inputText);
if (!inputText) return;
console.log(inputText)
let data = JSON.stringify({
"messages": [
{
"content": `回答用户的问题,尽可能简短。`,
"role": "system"
},
{
"content": inputText,
"role": "user"
}
],
"model": "deepseek-chat",
"frequency_penalty": 0,
"max_tokens": 2048,
"presence_penalty": 0,
"stop": null,
"stream": false,
"temperature": 1,
"top_p": 1
});
let config = {
method: 'post',
maxBodyLength: Infinity,
url: 'https://api.deepseek.com/chat/completions',
headers: {
'Content-Type': 'application/json',
'Accept': 'application/json',
'Authorization': 'Bearer sk-dd24ae704e8d4939aeed8f050d04d36b'
},
data: data
complete(1, [], [{
content: inputText,
role: "user",
}]);
};
try {
axios(config)
.then((response) => {
console.log(`response`, response);
console.log(response.data);
typeof speak !== 'undefined' && speak(response.data.choices[0].message.content) || model!.motion('tap_body');;
setResponse(response.data.choices[0].message.content);
})
.catch((error) => {
setResponse(error!.toString());
console.log(error);
});
} catch (error) {
setResponse(error!.toString());
console.log(error);
useEffect(() => {
(async () => {
if (data) {
setResponse(data);
if (typeof speak !== "undefined" && isLoading === false) {
const base64Voice = "data:audio/mp3;base64," + await txt2Voice(data, 4);
const audio = document.createElement("audio");
audio.src = base64Voice;
audio.play();
} else {
model!.motion("tap_body");
}
}
})();
}
}, [data, isLoading]);
const { start, end, text, isListening, error } = useVoice2Txt({
lang: 'cmn-Hans-CN',
continuous: false
lang: "cmn-Hans-CN",
continuous: false,
});
const {
isSpeaking,
speak,
stop
} = useTxt2Voice();
const { isSpeaking, speak, stop } = useTxt2Voice();
const [inputText, setInputText] = useState("");
const isMouthOpen = useRef(false);
const [response, setResponse] = useState("");
const [response, setResponse] = useState("来和我聊天吧~");
useEffect(() => {
console.log(text, error)
console.log(text, error);
if (!text) return;
send(text);
if (error) {
setResponse(error);
return;
}
// 先叉了这里防止消息展示error
// if (error) {
// setResponse(error);
// return;
// }
}, [text, error]);
const [model, setModel] = useState<Live2DModel>();
useEffect(() => {
@ -158,21 +167,24 @@ export default function Home() {
}, [isSpeaking]);
useEffect(() => {
if (!isListening && !isSpeaking) { }
if (!isListening && !isSpeaking) {
}
}, [isListening]);
useEffect(() => {
// expose PIXI to window so that this plugin is able to
// reference window.PIXI.Ticker to automatically update Live2D models
//@ts-ignore
typeof window !== 'undefined' && (window.PIXI = PIXI);
typeof window !== "undefined" && (window.PIXI = PIXI);
(async function () {
const app = new PIXI.Application({
view: document.getElementById('canvas') as HTMLCanvasElement,
backgroundAlpha: 1
view: document.getElementById("canvas") as HTMLCanvasElement,
backgroundAlpha: 0,
});
const model = await Live2DModel.from('https://cdn.jsdelivr.net/gh/guansss/pixi-live2d-display/test/assets/shizuku/shizuku.model.json');
const model = await Live2DModel.from(
"https://cdn.jsdelivr.net/gh/guansss/pixi-live2d-display/test/assets/haru/haru_greeter_t03.model3.json"
);
app.stage.addChild(model);
const scaleX = (innerWidth * 0.4) / model.width;
@ -187,36 +199,117 @@ export default function Home() {
addFrame(model);
addHitAreaFrames(model);
setModel(model);
model.on('hit', (hitAreas) => {
if (hitAreas.includes('body')) {
model.motion('tap_body');
model.motion('speak')
model.on("hit", (hitAreas) => {
if (hitAreas.includes("body")) {
model.motion("tap_body");
model.motion("speak");
}
});
console.log("ok")
})();
}, [])
return (
}, []);
<main>
{
typeof window !== 'undefined'
&& typeof window.Live2D !== 'undefined'
&& (<div className='flex w-full flex-col h-full items-center justify-center relative'>
<canvas className='w-full h-full' id="canvas"></canvas>
<div className='absolute right-[20vw] top-4 bg-white rounded w-[20vw] h-auto text-sm text-black'>{response ? response : "请输入文字和我聊天吧"}</div>
<div id="control"></div>
<button onClick={start}></button>
<button onClick={end}></button>
<input className='text-black' value={inputText} onChange={(e) => {
return (
<main className="w-full h-full bg-blue-200">
{typeof window !== "undefined" &&
typeof window.Live2DCubismCore !== "undefined" && (
<div className="flex w-full flex-col h-full items-center justify-center relative text-white">
{/* live2d */}
<canvas className="w-full " id="canvas"></canvas>
{/* 旧叉 */}
{/* <div className="absolute right-[20vw] top-4 bg-orange-300 rounded w-[20vw] h-auto text-sm">
{response ? response : "请输入文字和我聊天吧"}
</div> */}
{/* 角色信息 */}
<div className="absolute left-[4vw] top-4 bg-orange-300 rounded-full w-[36vw] h-auto text-sm flex flex-nowrap px-2 py-1 gap-2">
<div className="w-10 h-10 rounded-full bg-white"></div>
<div className="text-xl"></div>
</div>
{/* 消息泡泡 */}
<div className="absolute left-1/2 -translate-x-1/2 top-[10vh] bg-black bg-opacity-30 rounded-2xl w-[60vw] h-auto p-2 whitespace-normal break-words">
{response ? response : "请输入文字和我聊天吧"}
</div>
{/* 选项 */}
<div className="absolute right-0 top-[20vh] w-[16vw] flex flex-col flex-nowrap items-center gap-10">
{opList.map((item) => (
<div
key={item.id}
className="bg-orange-300 rounded-l-full w-full h-auto text-sm flex flex-col flex-nowrap items-center pl-1 py-1"
>
<div className="w-6 h-6 overflow-hidden">
<img
// 这图片中图标和空白的比例太邪门了
className="w-full h-full scale-[2.5]"
src={item.src}
alt={item.title}
/>
</div>
<div className="text-xs">{item.title}</div>
</div>
))}
</div>
{/* 旧叉 */}
{/* <div id="control"></div> */}
{/* <button onClick={start}></button>
<button onClick={end}></button> */}
{/* 输入框 */}
<div className="absolute w-[90vw] h-10 bottom-2 left-1/2 -translate-x-1/2 bg-white rounded-xl flex flex-nowrap items-center justify-evenly gap-2 px-2 py-1">
<button
className="w-6 h-6 overflow-hidden"
onClick={() => {
// 切换语音/键盘
useVoice ? setUseVoice(false) : setUseVoice(true);
}}
>
<img
className="w-full h-full scale-[2.5]"
src={`http://picdown.jchysoft.com/uiIcon/${useVoice ? "jianpan" : "yuyin"
}.png`}
alt="输入方式"
/>
</button>
{useVoice ? (
<button
onMouseDown={start}
onMouseUp={end}
onTouchStart={start}
onTouchEnd={end}
className="h-full bg-gray-300 flex-1 text-center rounded active:bg-gray-500 focus:outline-none select-none"
>
</button>
) : (
<input
className="h-full text-black rounded flex-1 bg-gray-200 pl-2"
value={inputText}
onChange={(e) => {
setInputText(e.target.value);
console.log(e.target.value)
}}></input>
<button onClick={() => {
console.log(e.target.value);
}}
></input>
)}
<button
className="w-6 h-6 overflow-hidden"
onClick={() => {
send(inputText);
setInputText("");
}}></button>
</div>)
}
}}
>
<img
className="w-full h-full scale-[2.5]"
src="http://picdown.jchysoft.com/uiIcon/fasong.png"
alt="发送按钮"
/>
</button>
</div>
</div>
)}
</main>
);
}

View File

@ -17,7 +17,7 @@ export default function RootLayout({
<head>
{/* <script src="http://publicjs.supmiao.com/live2dcubismcore.min.js"></script> */}
</head>
<body>{children}</body>
<body className="w-screen h-screen">{children}</body>
</html>
);
}

12
src/consts/consts.ts Normal file
View File

@ -0,0 +1,12 @@
/**
 * Centralized service endpoint configuration.
 * Imported as the default export and read via its properties
 * (e.g. `GlobalData.apiBaseUrl + '/voice/txt2voice'`).
 */
const GlobalData = {
  // REST API root for the shark AI service.
  apiBaseUrl: 'https://sharkai.data.vaala.tech/v1',
  // IM (instant messaging) service root.
  imBaseUrl: 'http://localhost:3000/',
  // Streaming chat completions WebSocket endpoint.
  chatBaseUrl: 'wss://chatserver.data.vaala.tech/chat/completions',
  // Object-storage (OSS) asset root.
  ossUrl: 'http://localhost:3000/oss/',
};

export default GlobalData;

17882
src/deps/cubism5.js Normal file

File diff suppressed because it is too large Load Diff

File diff suppressed because one or more lines are too long

22
src/hooks/txt2VoiceAPI.ts Normal file
View File

@ -0,0 +1,22 @@
import GlobalData from "@/consts/consts";
/**
 * Converts text to speech via the shark API.
 *
 * @param txt    Text to synthesize.
 * @param person Optional voice-persona id (server-defined selection).
 * @returns Base64-encoded audio payload (`voiceInBase64`) from the response body.
 * @throws Error when the HTTP request fails (non-2xx status).
 */
export default async function txt2Voice(txt: string, person?: number): Promise<string> {
  // Token is stashed in localStorage by the page (from the `token` query param).
  const token = localStorage.getItem('token');
  const url = GlobalData.apiBaseUrl + '/voice/txt2voice';
  const res = await fetch(url, {
    method: 'POST',
    headers: {
      'Content-Type': 'application/json',
      'Authorization': token || ""
    },
    body: JSON.stringify({
      txt: txt,
      person: person
    })
  });
  // Fail fast on transport/HTTP errors instead of crashing below on a
  // missing `data.body` when the server returns an error document.
  if (!res.ok) {
    throw new Error(`txt2voice request failed: ${res.status} ${res.statusText}`);
  }
  const data = await res.json();
  return data.body.voiceInBase64 as string;
}

View File

@ -0,0 +1,109 @@
import { useEffect, useRef, useState } from "react";
import GlobalData from "../consts/consts";
type AIRequest = () => {
  isLoading: boolean;
  completion: string | null;
  error: string | null;
  complete: (promptId: number, args: string[], messages?: any[]) => void;
  abort: () => void;
}

/**
 * React hook wrapping the streaming chat WebSocket endpoint.
 *
 * `complete(promptId, args, messages?)` opens a socket and streams delta
 * tokens into `completion`; `isLoading` is true while a request is in
 * flight; `abort()` closes the active socket. When `messages` is provided
 * it is sent verbatim; otherwise the server-side prompt template
 * `promptId` is used with `args`.
 */
const useRequest: AIRequest = () => {
  // Accumulated streamed text. The ref avoids a re-render per token;
  // `data` mirrors it as state so consumers re-render on updates.
  const completion = useRef<string>("");
  const [data, setData] = useState<string>("");
  // Last error, serialized to a string.
  const [error, setError] = useState<string | null>(null);
  // True while a completion is streaming.
  const [isLoading, setLoading] = useState(false);
  // The active socket, if any.
  const openai = useRef<WebSocket | null>(null)

  // Close the in-flight socket (if any) and clear the loading flag.
  const abort = () => {
    if (openai.current) {
      openai.current.close();
      openai.current = null;
      setLoading(false);
    }
  }

  async function complete(promptId: number, args: string[], messages?: any[]) {
    const token = localStorage.getItem("token");
    try {
      setError("");
      setData("");
      completion.current = "";

      // The two request shapes differ only in the message payload, so build
      // it once instead of duplicating the whole socket setup per branch.
      const payload = messages
        ? {
          model: 'deepseek-chat-sharkplus',
          stream: true,
          temperature: 0.7,
          messages: messages,
        }
        : {
          model: 'deepseek-chat-sharkplus',
          stream: true,
          temperature: 0.7,
          messages: [],
          promptTemplateID: promptId,
          args: args,
        };

      // Capture the socket in a local so callbacks never dereference a
      // possibly-nulled ref (abort() may clear it concurrently).
      const ws = new WebSocket(`${GlobalData.chatBaseUrl}?token=${token}&X-AI-Provider=sharkplus`);
      openai.current = ws;

      // Shared teardown for both the finish_reason and "[DONE]" cases.
      const finish = () => {
        setLoading(false);
        ws.close();
        if (openai.current === ws) {
          openai.current = null;
        }
      };

      // Registering before open is safe: messages only arrive after open.
      ws.onmessage = (e) => {
        if (e.data.toString().trim() != '"[DONE]"') {
          const chunk = JSON.parse(e.data);
          if (chunk.finish_reason) {
            finish();
          } else {
            completion.current += chunk.choices[0]?.delta?.content || "";
            setData(completion.current);
          }
        } else {
          finish();
        }
      };

      ws.onopen = () => {
        console.log('连接成功');
        ws.send(JSON.stringify(payload));
      };

      setLoading(true);
    } catch (err) {
      setError(JSON.stringify(err));
    }
  }

  return { complete, completion: data, error, isLoading, abort };
}

export default useRequest;

View File

@ -1,4 +1,4 @@
import { useEffect, useState } from "react";
import { useEffect, useState, useMemo } from "react";
type Options = {
lang?: 'cmn-Hans-CN' | 'en-US' | 'ja-JP',
continuous?: boolean,
@ -22,7 +22,8 @@ function useVoice2Txt(options: Options): Voice2Txt {
const [isListening, setIsListening] = useState(false);
const [error, setError] = useState<string | null>(null);
//@ts-ignore
const recognition = new webkitSpeechRecognition() || new SpeechRecognition();
const recognition = useMemo(() => new webkitSpeechRecognition() || new SpeechRecognition(), []);
useEffect(() => {
for (let key in options) {
recognition[key] = options[key];
}
@ -31,6 +32,19 @@ function useVoice2Txt(options: Options): Voice2Txt {
} else {
console.log(recognition);
}
recognition.onresult = function (event) {
setIsListening(false);
setText(event.results[0][0].transcript)
console.log("转换完成", event)
console.log(event.results[0][0].transcript)
}
//@ts-ignore
recognition.onerror = (e) => {
setError(e)
}
}, [])
function start() {
if (isListening) return;
setIsListening(true);
@ -48,17 +62,6 @@ function useVoice2Txt(options: Options): Voice2Txt {
}
//@ts-ignore
// 当调用recognition的stop的时候会触发此对象的onresult事件然后我们在这里获取我们的转换结果。
recognition.onresult = function (event) {
setIsListening(false);
setText(event.results[0][0].transcript)
console.log("转换完成", event)
console.log(event.results[0][0].transcript)
}
//@ts-ignore
recognition.onerror = (e) => {
setError(e)
}
return { text, start, end, isListening, error }
}

514
yarn.lock

File diff suppressed because it is too large Load Diff