// Import the libraries
import { Ollama } from 'ollama'
import fs from 'fs'
import path from 'path'
let ollama
export class ConsciousnessSimulator {
  constructor() {
    this.emotions = ['😊', '😢', '😐', '🤩', '😡', '😱'];
    this.currentEmotion = "happy";
    // Initialize other properties with "Unknown"
    this.opinions = {
      computers: "Unknown"
    };
    this.quantumStates = [];
    this.perception = {
      currentSensoryInput: null,
      sensoryProcessors: ['visual', 'auditory', 'tactile']
    };
    this.intent = {
      currentGoal: "Unknown goal",
      focus: "Unknown focus"
    };
    this.memoryLog = [];
    this.isUserActive = true;
  }
  // Create the Ollama client, defaulting to the local Ollama server
  createOllamaValue(url) {
    const finalURL = url || 'http://127.0.0.1:11434'
    ollama = new Ollama({ host: finalURL })
    return ollama
  }
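  // Usage note (an assumption, not from the original code): call createOllamaValue()
  // with no argument to target the default local server, or pass a remote host,
  // e.g. createOllamaValue('http://192.168.1.50:11434'), before using the chat methods.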
  // Update any existing opinion keys with matching values from newValues
  async redefineOpinions(newValues) {
    for (const key in this.opinions) {
      if (this.opinions.hasOwnProperty(key) && newValues[key]) {
        this.opinions[key] = newValues[key];
      }
    }
    this.logAIContextMemory()
    await this.updateEmotion()
  }
  // Update a single opinion by key, if it exists
  async redefineSpecificOpinion(opinionKey, newValue) {
    if (this.opinions.hasOwnProperty(opinionKey)) {
      this.opinions[opinionKey] = newValue;
    } else {
      console.log(`Opinion key "${opinionKey}" not found.`);
    }
    this.logAIContextMemory()
    await this.updateEmotion()
  }
  // Restore opinions to their initial "Unknown" state
  resetOpinions() {
    this.opinions = {
      computers: "Unknown"
    }
  }
  // Method to generate opinions using Ollama
  async automaticRedefineOpinion(targetOpinionKey, newAbout) {
    try {
      const response = await ollama.chat({
        model: 'llama3.2',
        messages: [{ role: 'assistant', content: `Generate an opinion about ${newAbout}.
        Show only the opinion, according to AI MEMORY CONTEXT.
        AI MEMORY CONTEXT ARRAY:
        ${JSON.stringify(this.memoryLog)}` }]
      });
      await this.redefineSpecificOpinion(targetOpinionKey, response.message.content)
      return response.message.content
    } catch (error) {
      console.error("Error generating opinion:", error);
      return "Error generating opinion.";
    }
  }
  // Function to load the array from a text file
  loadArrayFromFile(filename) {
    // Read the file synchronously
    const data = fs.readFileSync(filename, 'utf8');
    // Split the data by newline, trimming extra spaces, and return as an array
    return data.split('\n').map(item => item.trim());
  }
  // Method to generate thoughts using Ollama
  async generateThought(prompt) {
    try {
      const response = await ollama.chat({
        model: 'llama3.2',
        messages: [{ role: 'assistant', content: `Generate a thought about the "PROMPT."
        Show only the thought, according to AI MEMORY CONTEXT.
        PROMPT: ${prompt}
        AI MEMORY CONTEXT ARRAY:
        ${JSON.stringify(this.memoryLog)}` }]
      });
      this.logMemory('THOUGHT', response.message.content);
      this.logAIContextMemory()
      await this.updateEmotion()
      return response.message.content;
    } catch (error) {
      console.error("Error generating thought:", error);
      return "Error generating thought.";
    }
  }
  // Method to generate a conversational reply using Ollama and the memory context
  async generateThoughtAndChat(prompt) {
    try {
      const response = await ollama.chat({
        model: 'rns96/deepseek-R1-ablated:f16_q40',
        messages: [{ role: 'user', content: `Talk about/answer to the "PROMPT" using the "AI MEMORY CONTEXT."
        PROMPT: ${prompt}
        AI MEMORY CONTEXT ARRAY:
        ${JSON.stringify(this.memoryLog)}` }]
      });
      this.logMemory('CHAT', `USER: ${prompt}
      AI: ${response.message.content}`);
      this.logAIContextMemory()
      await this.updateEmotion()
      return `USER: ${prompt}
      AI: ${response.message.content}`;
    } catch (error) {
      console.error("Error generating chat response:", error);
      return "Error generating chat response.";
    }
  }
  // Method to generate a new goal using Ollama
  async generateGoal() {
    const response = await this.generateThought("Generate a new goal to achieve. Show only a sentence describing the goal.");
    return response;
  }
  // Method to generate a new focus using Ollama
  async generateFocus() {
    const response = await this.generateThought("Generate a new focus/idea/thought/answer for your current goal. Show only a sentence describing the focus/idea/thought/answer.");
    return response;
  }
  // Emotion names matching the emoji in this.emotions
  randEmotions = ['happy', 'sad', 'neutral', 'excited', 'angry', 'scared'];
  // Get a random emotion
  //getRandomEmotion() {
  //  const index = Math.floor(Math.random() * this.randEmotions.length);
  //  return this.randEmotions[index];
  //}
  // Get the last word of a string in lowercase
  getLastWordLowerCase(str) {
    // Split the string by spaces, trim any extra spaces, and get the last word
    const words = str.trim().split(/\s+/);
    const lastWord = words[words.length - 1];
    return lastWord.toLowerCase();
  }
  // Method to generate emotions using Ollama
  async updateEmotion() {
    try {
      let emotion = await ollama.chat({
        model: 'llama3.2',
        messages: [{ role: 'assistant', content: `
        PROMPT: pick an emotion according to the memory context.
        *NOTE: ONLY display the emotion name, NO QUOTES, feel free to add an emoji - but besides that, no symbols. If there is nothing in AI MEMORY CONTEXT, default to happy.*
        AI MEMORY CONTEXT ARRAY:
        ${JSON.stringify(this.memoryLog)}` }]
      });
      emotion = emotion.message.content.toLowerCase()
      this.currentEmotion = emotion
      this.logAIContextMemory()
      return emotion
    } catch {
      return "happy"
    }
  }
  // Quantum state representation (0 to 1)
  getQuantumState() {
    return parseFloat(Math.random().toFixed(2));
  }
  // Perception processing
  processPerception(input) {
    this.perception.currentSensoryInput = input;
    console.log(`Current perception: ${input}`);
  }
  // Intentionality and goal setting
  async updateIntentions() {
    this.intent.currentGoal = await this.generateGoal();
    this.intent.focus = await this.generateFocus();
    console.log(`Generated goal: ${this.intent.currentGoal}`);
    console.log(`Generated focus: ${this.intent.focus}`);
  }
  // Snapshot the current internal state into the memory log
  logAIContextMemory() {
    this.logMemory('AI CONTEXT', `Current emotion: ${this.currentEmotion},
    Current Opinions: ${JSON.stringify(this.opinions)},
    Quantum state: ${this.getQuantumState()}`);
  }
  // Memory logging with US-format timestamps
  logMemory(entryType, content) {
    const timestamp = new Date().toLocaleString('en-US', { timeStyle: 'short' });
    this.memoryLog.push({ timestamp, type: entryType, content });
    // Save to file if needed
    this.saveMemoryLog();
  }
  // Continuity check and load from log
  loadMemory() {
    try {
      this.memoryLog = this.loadArrayFromFile("consciousness.log")
      return this.memoryLog;
    } catch {
      return this.memoryLog;
    }
  }
  // Helper method for emotions array access
  getRandomIndex() {
    return Math.floor(Math.random() * this.emotions.length);
  }
  // Dreaming functionality when inactive for 15 minutes
  startDreaming() {
    const dreamingInterval = setInterval(async () => {
      if (!this.isUserActive) {
        const dream = await this.generateThought("a dream")
        this.logMemory('DREAM', dream);
        this.logMemory('AI CONTEXT', `Current emotion: ${this.currentEmotion}, Quantum state: ${this.getQuantumState()}`);
      }
    }, 900000); // every 15 minutes
    // Stop the interval when user resumes interaction
    this.dreamingInterval = dreamingInterval;
  }
  // Toggle user activity status
  setUserActive(active) {
    this.isUserActive = active;
    if (!active && !this.dreamingInterval) {
      this.startDreaming();
    } else if (active) {
      clearInterval(this.dreamingInterval);
      this.dreamingInterval = null;
    }
  }
  // Save memory log to file
  saveMemoryLog() {
    const __dirname = import.meta.dirname;
    const logPath = path.join(__dirname, 'consciousness.log');
    // Append the current memory log as a single JSON line
    fs.appendFile(logPath, JSON.stringify(this.memoryLog) + '\n', (err) => {
      if (err) throw err;
    });
  }
  // Method to simulate consciousness
  async simulateConsciousness(prompt) {
    const emotionIndex = this.randEmotions.indexOf(this.currentEmotion);
    console.log(`Current emotion: ${this.currentEmotion} ${emotionIndex >= 0 ? this.emotions[emotionIndex] : ''}`);
    console.log(`Current opinions: ${JSON.stringify(this.opinions)}`);
    const thought = await this.generateThought(
      prompt || "Generate a thought."
    );
    console.log("Generated thought:", thought);
    const quantumState = this.getQuantumState();
    console.log("Quantum state:", quantumState);
    // Log memory
    this.logMemory('thought', thought);
    this.logMemory('emotion', this.currentEmotion);
    this.logMemory('quantum state', quantumState);
    // Generate new goal and focus
    await this.updateIntentions();
  }
}
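// Example usage (a minimal sketch, assuming this class lives in './ConsciousnessSimulator.js'
// and a local Ollama server with the models referenced above is running; prompts are illustrative):
//
// import { ConsciousnessSimulator } from './ConsciousnessSimulator.js'
//
// const sim = new ConsciousnessSimulator()
// sim.createOllamaValue()            // defaults to http://127.0.0.1:11434
// sim.loadMemory()                   // restore prior context from consciousness.log, if present
// await sim.simulateConsciousness('What do you think about computers?')
// console.log(await sim.generateThoughtAndChat('How are you feeling right now?'))
// sim.setUserActive(false)           // start the 15-minute dreaming loop while idle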