Comments (2)
Never supported. You can use proxy tools such as WireGuard or Clash etc.
from freegpt35.
Thank you very much! I've added the proxy and it worked.
const express = require("express");
const bodyParser = require("body-parser");
const axios = require("axios");
const https = require("https");
const { encode } = require("gpt-3-encoder");
const { randomUUID, randomInt, createHash } = require("crypto");
const { config } = require("dotenv");
//add proxy
const HttpsProxyAgent = require('https-proxy-agent');
const HttpProxyAgent = require('http-proxy-agent');
config();
// Constants for the server and API configuration
//const port = process.env.SERVER_PORT || 3040;
const port = process.env.SERVER_PORT || 3004;
const baseUrl = "https://chat.openai.com";
// Restored as a template literal — the pasted source had lost its backticks.
const apiUrl = `${baseUrl}/backend-api/conversation`;
const refreshInterval = 60000; // Interval to refresh token in ms
const errorWait = 120000; // Wait time in ms after an error
// Always pass a radix to parseInt; falls back to 5 retries when unset/invalid.
const newSessionRetries = parseInt(process.env.NEW_SESSION_RETRIES, 10) || 5;
const userAgent = process.env.USER_AGENT || "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/123.0.0.0 Safari/537.36";
// Initialize global variables to store the session token and device ID
let token;
let oaiDeviceId;
// Function to wait for a specified duration
const wait = (ms) => new Promise((resolve) => setTimeout(resolve, ms));
/**
 * Generates an OpenAI-style completion id: the given prefix followed by
 * 28 random alphanumeric characters.
 *
 * @param {string} [prefix="cmpl-"] - Id prefix (e.g. "chatcmpl-").
 * @returns {string} prefix + 28 random [A-Za-z0-9] characters.
 */
function GenerateCompletionId(prefix = "cmpl-") {
  const characters = "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789";
  const length = 28;
  // Build the random suffix separately instead of mutating the parameter.
  let suffix = "";
  for (let i = 0; i < length; i++) {
    suffix += characters.charAt(Math.floor(Math.random() * characters.length));
  }
  return prefix + suffix;
}
/**
 * Converts an async iterable of byte/string chunks into an async iterable
 * of complete SSE "data: ..." lines (with the EOL trimmed).
 * Terminates permanently once the "data: [DONE]" sentinel is seen.
 *
 * @param {AsyncIterable<Buffer|string>} chunksAsync - Raw response chunks.
 * @yields {string} Each full line that starts with "data: ".
 */
async function* chunksToLines(chunksAsync) {
  let buffered = "";
  for await (const chunk of chunksAsync) {
    const bufferChunk = Buffer.isBuffer(chunk) ? chunk : Buffer.from(chunk);
    buffered += bufferChunk;
    let eolIndex;
    while ((eolIndex = buffered.indexOf("\n")) >= 0) {
      // line includes the EOL; trim it off before inspecting.
      const line = buffered.slice(0, eolIndex + 1).trimEnd();
      // Consume the line from the buffer before acting on it.
      buffered = buffered.slice(eolIndex + 1);
      // FIX: the original used `break`, which only exited the inner while
      // loop — the sentinel stayed in the buffer and was re-matched on every
      // later chunk. `return` ends the generator cleanly at end-of-stream.
      if (line === "data: [DONE]") return;
      if (line.startsWith("data: ")) yield line;
    }
  }
}
/**
 * Strips the SSE "data: " prefix from each incoming line, yielding the raw
 * payload string (usually JSON).
 *
 * @param {AsyncIterable<string>} linesAsync - Lines starting with "data: ".
 * @yields {string} The line content after the prefix.
 */
async function* linesToMessages(linesAsync) {
  // FIX: the original sliced by `"data :".length` — a typo that only worked
  // because "data :" and "data: " happen to both be 6 characters long.
  const prefix = "data: ";
  for await (const line of linesAsync) {
    yield line.substring(prefix.length);
  }
}
/**
 * Adapts a raw streaming HTTP response body into an async iterable of
 * SSE message payloads: chunks -> complete "data:" lines -> payload strings.
 *
 * @param {AsyncIterable<Buffer|string>} data - Streaming response body.
 * @yields {string} Individual message payloads.
 */
async function* StreamCompletion(data) {
  const lines = chunksToLines(data);
  yield* linesToMessages(lines);
}
//add proxy
// Create proxy agents for HTTP and HTTPS protocols
const httpProxy = process.env.HTTP_PROXY || 'http://127.0.0.1:10811';
const httpsProxy = process.env.HTTPS_PROXY || 'http://127.0.0.1:10811';
const httpProxyAgent = new HttpProxyAgent(httpProxy);
// Agent options (including rejectUnauthorized) belong on the proxy agent
// itself; they are forwarded to tls.connect for the tunneled connection.
const httpsProxyAgent = new HttpsProxyAgent(httpsProxy, {
  rejectUnauthorized: false, // NOTE: disables TLS certificate verification
});
// Setup axios instance for API requests with predefined configurations
const axiosInstance = axios.create({
  // FIX: `new https.Agent({ proxy: ... })` silently ignores the proxy —
  // https.Agent has no `proxy` option. Pass the proxy agent directly.
  httpsAgent: httpsProxyAgent,
  httpAgent: httpProxyAgent,
  headers: {
    // Restored "*/*" — the asterisks were stripped by markdown rendering.
    accept: "*/*",
    "accept-language": "en-US,en;q=0.9",
    "cache-control": "no-cache",
    "content-type": "application/json",
    "oai-language": "en-US",
    origin: baseUrl,
    pragma: "no-cache",
    referer: baseUrl,
    "sec-ch-ua": '"Google Chrome";v="123", "Not:A-Brand";v="8", "Chromium";v="123"',
    "sec-ch-ua-mobile": "?0",
    "sec-ch-ua-platform": '"Windows"',
    "sec-fetch-dest": "empty",
    "sec-fetch-mode": "cors",
    "sec-fetch-site": "same-origin",
    "user-agent": userAgent,
  },
});
/**
 * Brute-forces an OpenAI sentinel "proof of work" token.
 * Builds a browser-fingerprint-like config array, then searches for a nonce
 * whose sha3-512(seed + base64(config)) hex prefix satisfies the difficulty.
 *
 * @param {string} seed - Proof-of-work seed supplied by the session endpoint.
 * @param {string} diff - Difficulty string; hash hex prefix must compare <= it.
 * @param {string} userAgent - User agent string embedded in the config.
 * @returns {string} "gAAAAAB"-prefixed token, or a fixed fallback token.
 */
function generateProofToken(seed, diff, userAgent) {
  const cores = [8, 12, 16, 24];
  const screens = [3000, 4000, 6000];
  const core = cores[randomInt(0, cores.length)];
  const screen = screens[randomInt(0, screens.length)];
  // Fake a US-Eastern timestamp (UTC minus 8h, relabeled GMT-0500).
  const now = new Date(Date.now() - 8 * 3600 * 1000);
  const parseTime = now.toUTCString().replace("GMT", "GMT-0500 (Eastern Time)");
  const config = [core + screen, parseTime, 4294705152, 0, userAgent];
  const diffLen = diff.length / 2;
  for (let i = 0; i < 100000; i++) {
    config[3] = i; // nonce
    const jsonData = JSON.stringify(config);
    const base = Buffer.from(jsonData).toString("base64");
    const hashValue = createHash("sha3-512")
      .update(seed + base)
      .digest();
    // Lexicographic comparison of the hex prefix against the difficulty.
    if (hashValue.toString("hex").substring(0, diffLen) <= diff) {
      const result = "gAAAAAB" + base;
      return result;
    }
  }
  // Fallback when no nonce is found. FIX: the pasted source encoded the
  // literal string "${seed}" — the template-literal backticks were lost.
  const fallbackBase = Buffer.from(`${seed}`).toString("base64");
  return "gAAAAABwQ8Lk5FbGpA2NcR9dShT6gYjU7VxZ4D" + fallbackBase;
}
// Function to get a new session ID and token from the OpenAI API
/**
 * Requests a fresh anonymous sentinel session (token + proof-of-work params)
 * from the chat-requirements endpoint, retrying up to `newSessionRetries`
 * times with a short backoff. Updates the module-level `token` and
 * `oaiDeviceId` on success.
 *
 * @param {number} [retries=0] - Current retry attempt (internal).
 * @returns {Promise<object|null>} Session data with `deviceId`, or null when
 *   all retries are exhausted.
 */
async function getNewSession(retries = 0) {
  let newDeviceId = randomUUID();
  try {
    const response = await axiosInstance.post(
      // Restored template literal — backticks were lost in the paste.
      `${baseUrl}/backend-anon/sentinel/chat-requirements`,
      {},
      {
        headers: { "oai-device-id": newDeviceId },
      }
    );
    let session = response.data;
    session.deviceId = newDeviceId;
    console.log(
      `System: Successfully refreshed session ID and token. ${
        !token ? "(Now it's ready to process requests)" : ""
      }`
    );
    oaiDeviceId = newDeviceId;
    token = session.token;
    return session;
  } catch (error) {
    // Brief pause before retrying; give up (null) after newSessionRetries.
    await wait(500);
    return retries < newSessionRetries ? getNewSession(retries + 1) : null;
  }
}
// Middleware to enable CORS and handle pre-flight requests
/**
 * Express middleware that allows cross-origin requests from any origin and
 * short-circuits OPTIONS pre-flight requests with a 200 response.
 *
 * @param {object} req - Express request.
 * @param {object} res - Express response.
 * @param {Function} next - Next middleware in the chain.
 */
function enableCORS(req, res, next) {
  // Restored "*" wildcards — the asterisks were stripped by markdown
  // rendering; empty-string values would block all cross-origin callers.
  res.header("Access-Control-Allow-Origin", "*");
  res.header("Access-Control-Allow-Headers", "*");
  res.header("Access-Control-Allow-Methods", "GET, POST, OPTIONS");
  if (req.method === "OPTIONS") {
    return res.status(200).end();
  }
  next();
}
// Middleware to handle chat completions
/**
 * Express handler for POST /v1/chat/completions.
 * Obtains a fresh anonymous session, forwards the messages to the OpenAI
 * backend conversation endpoint, and converts the SSE reply back into the
 * OpenAI chat-completion wire format — streamed chunks when
 * `req.body.stream` is truthy, otherwise a single buffered JSON response.
 *
 * All template literals below were restored; the pasted source had lost
 * its backticks (a syntax error as published).
 *
 * @param {object} req - Express request with an OpenAI-style body.
 * @param {object} res - Express response.
 */
async function handleChatCompletion(req, res) {
  console.log(
    "Request:",
    `${req.method} ${req.originalUrl}`,
    `${req.body?.messages?.length ?? 0} messages`,
    req.body.stream ? "(stream-enabled)" : "(stream-disabled)"
  );
  try {
    let session = await getNewSession();
    if (!session) {
      res.write(
        JSON.stringify({
          status: false,
          error: {
            message:
              "Error getting a new session, please try again later, if the issue persists, please open an issue on the GitHub repository.",
            type: "invalid_request_error",
          },
        })
      );
      return res.end();
    }
    // Solve the sentinel proof-of-work challenge for this session.
    let proofToken = generateProofToken(
      session.proofofwork.seed,
      session.proofofwork.difficulty,
      userAgent
    );
    // Translate the OpenAI-style messages into the backend conversation body.
    const body = {
      action: "next",
      messages: req.body.messages.map((message) => ({
        author: { role: message.role },
        content: { content_type: "text", parts: [message.content] },
      })),
      parent_message_id: randomUUID(),
      model: "text-davinci-002-render-sha",
      timezone_offset_min: -180,
      suggestions: [],
      history_and_training_disabled: true,
      conversation_mode: { kind: "primary_assistant" },
      websocket_request_id: randomUUID(),
    };
    let promptTokens = 0;
    let completionTokens = 0;
    // Count prompt tokens up front for the usage block.
    for (let message of req.body.messages) {
      promptTokens += encode(message.content).length;
    }
    const response = await axiosInstance.post(apiUrl, body, {
      responseType: "stream",
      headers: {
        "oai-device-id": session.deviceId,
        "openai-sentinel-chat-requirements-token": session.token,
        "openai-sentinel-proof-token": proofToken,
      },
    });
    // Set the response headers based on the request type
    if (req.body.stream) {
      res.setHeader("Content-Type", "text/event-stream");
      res.setHeader("Cache-Control", "no-cache");
      res.setHeader("Connection", "keep-alive");
    } else {
      res.setHeader("Content-Type", "application/json");
    }
    let fullContent = "";
    let requestId = GenerateCompletionId("chatcmpl-");
    let created = Math.floor(Date.now() / 1000); // Unix timestamp in seconds
    let finish_reason = null;
    let error;
    for await (const message of StreamCompletion(response.data)) {
      // Skip heartbeat detection (bare timestamp lines).
      if (message.match(/^\d{4}-\d{2}-\d{2} \d{2}:\d{2}:\d{2}.\d{6}$/))
        continue;
      const parsed = JSON.parse(message);
      if (parsed.error) {
        error = `Error message from OpenAI: ${parsed.error}`;
        finish_reason = "stop";
        break;
      }
      let content = parsed?.message?.content?.parts[0] ?? "";
      let status = parsed?.message?.status ?? "";
      // The backend may echo an input message back; suppress those echoes.
      for (let message of req.body.messages) {
        if (message.content === content) {
          content = "";
          break;
        }
      }
      switch (status) {
        case "in_progress":
          finish_reason = null;
          break;
        case "finished_successfully":
          let finish_reason_data =
            parsed?.message?.metadata?.finish_details?.type ?? null;
          switch (finish_reason_data) {
            case "max_tokens":
              finish_reason = "length";
              break;
            case "stop":
            default:
              finish_reason = "stop";
          }
          break;
        default:
          finish_reason = null;
      }
      if (content === "") continue;
      // The stream sends cumulative content; subtract what was already sent.
      let completionChunk = content.replace(fullContent, "");
      completionTokens += encode(completionChunk).length;
      if (req.body.stream) {
        let response = {
          id: requestId,
          created: created,
          object: "chat.completion.chunk",
          model: "gpt-3.5-turbo",
          choices: [
            {
              delta: {
                content: completionChunk,
              },
              index: 0,
              finish_reason: finish_reason,
            },
          ],
        };
        res.write(`data: ${JSON.stringify(response)}\n\n`);
      }
      fullContent = content.length > fullContent.length ? content : fullContent;
    }
    if (req.body.stream) {
      // Final (possibly empty or error-bearing) chunk to close the stream.
      res.write(
        `data: ${JSON.stringify({
          id: requestId,
          created: created,
          object: "chat.completion.chunk",
          model: "gpt-3.5-turbo",
          choices: [
            {
              delta: {
                content: error ?? "",
              },
              index: 0,
              finish_reason: finish_reason,
            },
          ],
        })}\n\n`
      );
    } else {
      // Buffered mode: one complete chat.completion payload with usage.
      res.write(
        JSON.stringify({
          id: requestId,
          created: created,
          model: "gpt-3.5-turbo",
          object: "chat.completion",
          choices: [
            {
              finish_reason: finish_reason,
              index: 0,
              message: {
                content: error ?? fullContent,
                role: "assistant",
              },
            },
          ],
          usage: {
            prompt_tokens: promptTokens,
            completion_tokens: completionTokens,
            total_tokens: promptTokens + completionTokens,
          },
        })
      );
    }
    res.end();
  } catch (error) {
    if (!res.headersSent) res.setHeader("Content-Type", "application/json");
    res.write(
      JSON.stringify({
        status: false,
        error: {
          message:
            "An error occurred. Please try again. Additionally, ensure that your request complies with OpenAI's policy.",
          type: "invalid_request_error",
        },
      })
    );
    res.end();
  }
}
// Initialize Express app and use middlewares
const app = express();
app.use(bodyParser.json());
app.use(enableCORS);
// Route to handle POST requests for chat completions
app.post("/v1/chat/completions", handleChatCompletion);
// 404 handler for unmatched routes (restored template literal — the pasted
// source had lost its backticks)
app.use((req, res) =>
  res.status(404).send({
    status: false,
    error: {
      message: `The requested endpoint (${req.method.toLocaleUpperCase()} ${req.path}) was not found. https://github.com/missuo/FreeGPT35`,
      type: "invalid_request_error",
    },
  })
);
// Start the server and the session ID refresh loop.
// Template literals restored (backticks lost in the paste); the banner emoji
// were GBK-mojibake (e.g. "馃挕") and have been restored to their UTF-8 forms.
app.listen(port, async () => {
  console.log(`💡 Server is running at http://localhost:${port}`);
  console.log();
  console.log(`🔗 Local Base URL: http://localhost:${port}/v1`);
  console.log(`🔗 Local Endpoint: http://localhost:${port}/v1/chat/completions`);
  console.log();
  console.log("📝 Original TS Source By: Pawan.Krd");
  console.log("📝 Modified By: Vincent");
  console.log();
  // Background loop: refresh the anonymous session token forever, backing
  // off for `errorWait` ms after a failure.
  setTimeout(async () => {
    while (true) {
      try {
        await getNewSession();
        await wait(refreshInterval);
      } catch (error) {
        console.error("Error refreshing session ID, retrying in 2 minute...");
        console.error("If this error persists, your country may not be supported yet.");
        console.error("If your country was the issue, please consider using a U.S. VPN.");
        await wait(errorWait);
      }
    }
  }, 0);
});
from freegpt35.
Related Issues (20)
- Can't get session ID HOT 1
- 建议增加登录选项
- 使用不了报错 HOT 14
- 可以打个广告吗?基于此技术,做了一个免费的al聊天应用, 和一些杰出的人沟通,获取智慧 HOT 1
- message": "The requested endpoint (GET /v1/chat/completions) was not found. please make sure what are you doing now HOT 1
- 突然又可以用了~感謝~~
- 希望可以添加对tts的支持, Whisper api
- 出现这样的问题Error message from OpenAI: Our systems have detected unusual activity from your device. Please refresh your browser and try again later.
- 不支持OPTIONS请求,导致nextchat跟freegpt不是同一个域名的时候,有跨域拦截问题
- Error getting a new session HOT 10
- docker部署后无法访问 HOT 4
- Request: POST /v1/chat/completions 11 messages (stream-enabled) HOT 4
- 我已经部署了PawanOsman的gpt,这个和你的项目有什么区别
- 现在这个项目是不能用了,对吧
- 人工智能+网络运维数智化转型
- 上午还好好,下午就不行了 HOT 7
- 请问和文章结尾的PawanOsman/ChatGPT有啥区别
- 请问这个是什么问题?
- confused about setup
- Is this project no longer available on Chinese mainland? HOT 4
Recommend Projects
-
React
A declarative, efficient, and flexible JavaScript library for building user interfaces.
-
Vue.js
🖖 Vue.js is a progressive, incrementally-adoptable JavaScript framework for building UI on the web.
-
Typescript
TypeScript is a superset of JavaScript that compiles to clean JavaScript output.
-
TensorFlow
An Open Source Machine Learning Framework for Everyone
-
Django
The Web framework for perfectionists with deadlines.
-
Laravel
A PHP framework for web artisans
-
D3
Bring data to life with SVG, Canvas and HTML. 📊📈🎉
-
Recommend Topics
-
javascript
JavaScript (JS) is a lightweight interpreted programming language with first-class functions.
-
web
Some thing interesting about web. New door for the world.
-
server
A server is a program made to process requests and deliver data to clients.
-
Machine learning
Machine learning is a way of modeling and interpreting data that allows a piece of software to respond intelligently.
-
Visualization
Some thing interesting about visualization, use data art
-
Game
Some thing interesting about game, make everyone happy.
Recommend Org
-
Facebook
We are working to build community through open source technology. NB: members must have two-factor auth.
-
Microsoft
Open source projects and samples from Microsoft.
-
Google
Google ❤️ Open Source for everyone.
-
Alibaba
Alibaba Open Source for everyone
-
D3
Data-Driven Documents codes.
-
Tencent
China tencent open source team.
from freegpt35.