Compare commits

...

7 Commits

Author SHA1 Message Date
bhattaraijay05
458d810b2b v1.1.4 2023-05-06 23:38:30 +05:30
bhattaraijay05
3a7c45e3a9 added local model support 2023-05-05 19:10:13 +05:30
bhattaraijay05
5702725005 added local model runner 2023-05-05 18:57:44 +05:30
bhattaraijay05
b5df983bb6 v1.1.3 2023-05-05 13:06:37 +05:30
bhattaraijay05
679642e866 changed models path 2023-05-05 13:05:12 +05:30
bhattaraijay05
7f5d34afea Merge branch 'multiple-models' 2023-05-05 12:52:34 +05:30
Jay
168436e9d2
Merge pull request #51 from ohmplatform/docker-app
Docker app
2023-04-19 14:15:27 +05:30
6 changed files with 39 additions and 9 deletions

View File

@@ -26,6 +26,15 @@ allows users to run alpaca models on their local machine.
- [Yarn](https://classic.yarnpkg.com/en/docs/install/#windows-stable)
- [Git](https://git-scm.com/downloads)
# If you want to run the project
```sh
git clone --recursive https://github.com/ohmplatform/FreedomGPT.git freedom-gpt
cd freedom-gpt
yarn install
yarn start:prod
```
# If you want to contribute to the project
## Working with the repository

View File

@@ -6,6 +6,7 @@ import type { ForgeConfig } from "@electron-forge/shared-types";
import * as dotenv from "dotenv";
import { mainConfig } from "./webpack.main.config";
import { rendererConfig } from "./webpack.renderer.config";
dotenv.config();
const config: ForgeConfig = {
@@ -14,7 +15,7 @@ const config: ForgeConfig = {
process.platform === "win32"
? "./src/appicons/icons/win/icon.ico"
: "./src/appicons/icons/mac/ico",
extraResource: "./src/models",
extraResource: "./models",
osxSign: {
identity: "Developer ID Application: Age of AI, LLC (TS4W464GMN)",
optionsForFile: () => {
@@ -52,8 +53,11 @@ const config: ForgeConfig = {
{
name: "FreedomGPT",
setupIcon: "./src/appicons/icons/win/icon.ico",
certificateFile: process.env.WINDOWS_PFX_FILE as string,
certificatePassword: process.env.WIN_CERTIFICATE_PASSWORD as string,
certificateFile: process.env["WINDOWS_PFX_FILE"],
certificatePassword: process.env["WINDOWS_PFX_PASSWORD"],
owners: "Age of AI, LLC",
authors: "Age of AI, LLC",
copyright: "Age of AI, LLC",
},
["win32"]
),

View File

@@ -1,7 +1,7 @@
{
"name": "freedomgpt",
"productName": "freedomgpt",
"version": "1.1.2",
"version": "1.1.4",
"description": "Our goal is to illustrate that AI Safety cannot be achieved through censorship. Let information flow freely, no matter how discomforting - because the truth shall set us free.",
"main": ".webpack/main",
"scripts": {

View File

@@ -19,6 +19,9 @@ const io = new Server(server, {
},
});
const usePackaged =
process.env.npm_lifecycle_event === "start:prod" ? true : false;
const homeDir = app.getPath("home");
const DEFAULT_MODEL_LOCATION = homeDir + "/FreedomGPT";
@@ -37,11 +40,25 @@ let program: import("child_process").ChildProcessWithoutNullStreams = null;
const deviceisWindows = process.platform === "win32";
const CHAT_APP_LOCATION = app.isPackaged
? process.resourcesPath + "/models/llama/main"
: deviceisWindows
? process.cwd() + "/llama.cpp/build/bin/Release/main"
: process.cwd() + "/llama.cpp/main";
// const CHAT_APP_LOCATION = app.isPackaged
// ? process.resourcesPath + "/models/llama/main"
// : deviceisWindows
// ? process.cwd() + "/llama.cpp/build/bin/Release/main"
// : process.cwd() + "/llama.cpp/main";
const isDev: boolean = app.isPackaged ? false : true;
const CHAT_APP_LOCATION = deviceisWindows
? isDev
? usePackaged
? process.cwd() + "/src/model/windows/main"
: process.cwd() + "/llama.cpp/build/bin/Release/main"
: process.resourcesPath + "/models/llama/main"
: isDev
? usePackaged
? process.cwd() + "/src/model/mac/main"
: process.cwd() + "/llama.cpp/main"
: process.resourcesPath + "/models/llama/main";
io.on("connection", (socket) => {
const totalRAM = os.totalmem() / 1024 ** 3;

BIN
src/model/mac/main Executable file

Binary file not shown.

BIN
src/model/windows/main.exe Normal file

Binary file not shown.