Add files via upload

This commit is contained in:
ThatLinuxFan 2024-01-02 18:12:25 -06:00 committed by GitHub
parent 367455706c
commit 9ad1b91540
No known key found for this signature in database
GPG key ID: 4AEE18F83AFDEB23
5 changed files with 377 additions and 2 deletions

21
LICENSE Normal file
View file

@ -0,0 +1,21 @@
MIT License
Copyright (c) 2024 The AI Brain Project
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.

View file

@ -1,2 +1,77 @@
# ai-brain # The AI Brain Project
updates for the ai brain
The AI Brain Project is a Python program that demonstrates how to use OpenAI's GPT-3 language model to upload your brain to the cloud. The program asks the user for a name and generates random actions every 5 seconds, along with a random sentence using the GPT-3 model based on certain context. The program also allows the user to load custom context from a file. Think of it as [HLE](https://en.wikipedia.org/wiki/UltraHLE) for [brain emulation/mind uploading](https://en.wikipedia.org/wiki/Mind_uploading).
## Installation
To install the program, follow these steps:
Clone the repository:
```
git clone https://github.com/yourusername/ai-brain-project.git
```
Install the required Python packages:
```
pip install -r requirements.txt
```
Set up your OpenAI API key and name using the OpenAI Secret Manager. See the section on "Using the OpenAI Secret Manager" for more information.
Run the program:
```
python main.py
```
## Usage
When you run the program, it will ask you for a name. Enter your name and press Enter. The program will then start generating actions and GPT-3 sentences every 5 seconds.
You can also load custom context from a file by placing a file named custom-context.txt in the same directory as main.py. The program will automatically load the context from the file if it exists.
## Using the OpenAI Secret Manager
To keep your OpenAI API key and name secure, you can use the OpenAI Secret Manager to store them securely. To do this, follow these steps:
Create an account on the OpenAI website if you haven't already done so.
Log in to the OpenAI website and go to the "API keys" page.
Click "Generate new API key" to create a new API key.
Install the openai_secret_manager Python package:
```
pip install openai_secret_manager
```
Import the openai_secret_manager module in your Python code:
```
import openai_secret_manager
```
Use the openai_secret_manager.get_secret function to retrieve your API key and name (already in the code):
```
secrets = openai_secret_manager.get_secret("my_app_name")
openai.api_key = secrets["api_key"]
name = secrets["name"]
```
In the OpenAI Secret Manager, create a new secret with the name my_app_name and your API key and name as the values. You can also add other keys if needed.
Creating a new secret in the OpenAI Secret Manager
Save the secret in the OpenAI Secret Manager.
## License
The AI Brain Project is licensed under the MIT License. See the LICENSE file for more information.
## Possible future implementations (for people who would like to contribute!)
* A semi-decentralized API for the AI using GUN-DB?
* A ROBLOX Metaverse based on the API?

View file

@ -0,0 +1,4 @@
# One-time setup for the AI Brain project.
# NOTE(review): piping curl straight into sh executes remote code unverified.
curl https://ollama.ai/install.sh | sh
# Python dependencies used by main.py.
pip3 install -r requirements.txt
# Pull the two models referenced by the program.
ollama pull sparksammy/samai
ollama pull llama2

273
main.py Normal file
View file

@ -0,0 +1,273 @@
import asyncio
import random
import os
import subprocess
import datetime
import sys
import requests
import subprocess
from fastapi import FastAPI
from typing import Union
app = FastAPI()

# Start the local ollama server in the background.
# BUG FIX: the original used subprocess.run, which blocks until "ollama serve"
# exits -- none of the code below it ever ran. Popen returns immediately.
ollama_server = subprocess.Popen(["ollama", "serve"])

from langchain.callbacks.manager import CallbackManager
from langchain.callbacks.streaming_stdout import StreamingStdOutCallbackHandler
from langchain_community.llms import Ollama

# replace model with the downloaded model you want to use
llm = Ollama(
    model="sparksammy/samai",
    callback_manager=CallbackManager([StreamingStdOutCallbackHandler()]),
)

# Most recently generated context sentence; refreshed by createContext().
completeContext = ""

# Define URLs for requirements.txt and main.py
#REQS_URL = 'https://raw.githubusercontent.com/The-AI-Brain/ai-brain/main/requirements.txt'
#MAIN_URL = 'https://raw.githubusercontent.com/The-AI-Brain/ai-brain/main/main.py'

# Define paths for local requirements.txt and main.py files
REQS_PATH = 'requirements.txt'
MAIN_PATH = 'main.py'

# NOTE(review): unused and shadows typing.List; kept so any external
# reference to this module-level name keeps working.
List = {}
# Check for updates
def check_updates():
    """Self-update from the upstream repo, then re-exec this script.

    Downloads the remote requirements.txt; if it differs from the local
    copy, reinstalls requirements, fetches the latest main.py, overwrites
    the local file, and restarts the interpreter on the new code.
    """
    # BUG FIX: the original referenced module-level REQS_URL / MAIN_URL,
    # which are commented out above -- the first line raised NameError.
    reqs_url = 'https://raw.githubusercontent.com/The-AI-Brain/ai-brain/main/requirements.txt'
    main_url = 'https://raw.githubusercontent.com/The-AI-Brain/ai-brain/main/main.py'
    # Download remote requirements.txt file
    remote_reqs = requests.get(reqs_url, timeout=30).text
    # Compare local and remote requirements.txt files
    with open(REQS_PATH, 'r') as f:
        local_reqs = f.read()
    if local_reqs != remote_reqs:
        # Install updated requirements
        subprocess.run(['pip', 'install', '-r', REQS_PATH])
        # Download updated main.py file
        remote_main = requests.get(main_url, timeout=30).text
        # Write updated main.py file
        with open(MAIN_PATH, 'w') as f:
            f.write(remote_main)
        # Restart under the same interpreter. BUG FIX: os.execv(sys.argv[0], ...)
        # requires the script itself to be executable; exec'ing sys.executable
        # works for "python main.py" invocations too.
        os.execv(sys.executable, [sys.executable] + sys.argv)
# Pool of emotion words. NOTE(review): not referenced anywhere in the
# visible code -- presumably intended for future context generation; confirm
# before removing.
emotions = [
"happy", "sad", "angry", "surprised", "disgusted", "fearful",
"excited", "nostalgic", "hopeful", "anxious", "relaxed", "curious",
"confused", "amused", "bored", "ecstatic", "exhausted", "grateful",
"guilty", "embarrassed", "envious", "proud", "ashamed", "content",
"depressed", "fascinated", "frustrated", "inspired", "irritated",
"jealous", "lonely", "melancholic", "optimistic", "overwhelmed",
"peaceful", "playful", "reflective", "remorseful", "restless",
"satisfied", "sympathetic", "tense", "terrified", "triumphant",
"uncomfortable", "vulnerable", "wistful", "yearning", "zealous"
]
# Array of human actions. createContext() picks one at random to build the
# "what the brain just did" context sentence fed to the LLM.
actions = [
"walked the dog",
"cooked dinner",
"read a book",
"went swimming",
"played soccer",
"listened to music",
"watched a movie",
"painted a picture",
"wrote a story",
"rode a bike",
"danced in the rain",
"visited a museum",
"went on a road trip",
"went to a concert",
"built a sandcastle",
"went to the beach",
"played video games",
"climbed a mountain",
"played with a pet",
"went for a run",
"did yoga",
"went camping",
"visited a new city",
"went to a party",
"took a nap",
"had a picnic",
"played a musical instrument",
"tried a new food",
"went on a hike",
"took a bath",
"visited a friend",
"went to a theme park",
"went to a zoo",
"went to a sporting event",
"went to a play",
"went to a comedy show",
"went to a ballet",
"went to a musical",
"went to a poetry reading",
"went to a book club meeting",
"went to a cooking class",
"went to a painting class",
"went to a wine tasting",
"went to a beer festival",
"went to a farmers' market",
"went to a flea market",
"went shopping",
"went to a garage sale",
"went to a thrift store",
"volunteered at a charity",
"went to a political rally",
"went to a religious service",
"attended a wedding",
"attended a funeral",
"graduated from school",
"started a new job",
"retired from a job",
"got married",
"got divorced",
"had a baby",
"raised a child",
"adopted a pet",
"moved to a new city",
"bought a house",
"rented an apartment",
"remodeled a home",
"gardened",
"landscaped a yard",
"went on a cruise",
"went on a safari",
"went on a skiing trip",
"went on a snowboarding trip",
"went on a fishing trip",
"went on a hunting trip",
"went on a scuba diving trip",
"went on a surfing trip",
"went on a kayaking trip",
"went on a canoeing trip",
"went on a rafting trip",
"went on a hot air balloon ride",
"went on a helicopter ride",
"went on a plane ride",
"went on a train ride",
"went on a road trip",
"went skydiving",
"went bungee jumping",
"went zip lining",
"went rock climbing",
"went to a spa",
"got a massage",
"got a facial",
"got a manicure",
"got a pedicure",
"went to a chiropractor",
"went to a physical therapist",
"went to a dentist",
"went to a doctor",
"got surgery",
"recovered from an illness",
"overcame an addiction",
"learned a new skill",
"learned a new language",
"took a class",
"ate",
"played the piano",
"went for a walk"
]
# Array of places for the actions. Paired with a random action by
# createContext(); the pairing is independent, so combinations may be
# nonsensical ("went swimming" + "in a laboratory") by design.
places = [
"in the park",
"at home",
"in the library",
"on the beach",
"in the movie theater",
"at the doctor's office",
"at school",
"at the spa",
"at the airport",
"at the gym",
"in a cafe",
"in a museum",
"in a grocery store",
"in a restaurant",
"at a concert",
"at a stadium",
"in a hospital",
"in a church",
"in a mosque",
"in a temple",
"in a theater",
"in a nightclub",
"in a casino",
"at a zoo",
"at a theme park",
"at a water park",
"in a shopping mall",
"in a department store",
"at a gas station",
"in a parking lot",
"in a hotel",
"in a motel",
"in a hostel",
"in a campground",
"in a forest",
"on a mountain",
"in a desert",
"in a valley",
"by a river",
"by a lake",
"at sea",
"in the ocean",
"in a cave",
"at a train station",
"at a bus station",
"at a subway station",
"at a ferry terminal",
"at a harbor",
"in a space station",
"in a laboratory"
]
# Build a fresh context sentence from a random action/place pair.
def createContext():
    """Generate a new context sentence, store it module-wide, and return it.

    Returns:
        str: a sentence of the form: you just did "<action>" and did it "<place>".
    """
    # BUG FIX: the original assigned to a *local* completeContext (so the
    # module-level value read by main() never changed) and then returned the
    # undefined name `complete`, raising NameError.
    global completeContext
    action = random.choice(actions)
    place = random.choice(places)
    completeContext = f"you just did \"{action}\" and did it \"{place}\""
    return completeContext
@app.get("/context")
async def get_emote():
    """Return a freshly generated context sentence.

    BUG FIX: createContext() is a plain (sync) function returning str;
    `await`-ing its result raised TypeError in the original.
    """
    return createContext()
@app.get("/chatin/{chatText}")
async def get_chatin(chatText: str):
    """Send *chatText* through the chat pipeline and return the transcript.

    BUG FIX: main() is a plain (sync) function returning str; `await`-ing
    its result raised TypeError in the original.
    """
    return main(chatText)
# Main function to run the program
def main(chatin):
    """Send one user message to the LLM and return the two transcript lines.

    Args:
        chatin: raw user text; prefixed with "Guest:" before being sent.

    Returns:
        str: "You: <input>\\n<bot>: <reply>".
    """
    #check_updates()
    chatin = "Guest:" + chatin
    # Get response from ollama
    message = llm(f"(Additional context for reply: {completeContext}), reply to this: {chatin}")
    # BUG FIX: the original printed/returned `name`, which is never defined
    # anywhere in the module (NameError on first call), and the return string
    # used JS-style `${name}` placeholders that leaked literal '$' characters.
    bot_name = "AI Brain"
    # Print ollama response and what it said
    print(f"You: {chatin}")
    print(f"{bot_name}: {message}")
    return f"You: {chatin}\n{bot_name}: {message}"
# Run the cli function
def cli():
    """Minimal REPL: read a line, hand it to main(), repeat until EOF/Ctrl-C."""
    while True:
        try:
            toChat = input("brain@localhost:~$ ")
        except (EOFError, KeyboardInterrupt):
            # BUG FIX: the original crashed with a traceback on Ctrl-C/Ctrl-D.
            break
        main(toChat)

# BUG FIX: the original called cli() unconditionally at import time, so the
# blocking REPL also ran when an ASGI server (uvicorn) imported this module
# to serve the FastAPI app. Guarding keeps `python main.py` behavior intact.
if __name__ == "__main__":
    cli()

2
requirements.txt Normal file
View file

@ -0,0 +1,2 @@
# Packages imported by main.py; requests and langchain-community were missing.
fastapi
langchain
langchain-community
requests