Ollama CrewAI Prompt Example for stock and product analysis using the Llama 3 8B model
Ollama CrewAI Prompt Example for stock and product analysis using the Llama 3 8B model
start_quest.sh
read products.txt
The contents of that file are the prompt.
- Code:
#!/bin/bash
# Launcher for questV3.py: prepares an isolated Python environment and runs
# the script, forwarding a single optional prompt-file argument.

echo ""

# Bootstrap the virtual environment on first run only.
[ -d "venv" ] || python -m venv venv

# Enter the environment for the duration of this script.
source venv/bin/activate

# One-time setup steps, left disabled by default:
#pip install --upgrade pip
#pip install -r requirements.txt

# With exactly one argument, run in batch mode on that file;
# otherwise drop into interactive mode.
if [ $# -eq 1 ]; then
    python questV3.py "$1"
else
    python questV3.py
fi

# Leave the virtual environment.
deactivate
read products.txt
The contents of that file are the prompt.
- Code:
Analyze product and stock data to identify profitable opportunities.
Compare current data with historical data to find the most profitable new products and stocks.
Allow for optional focus on specific categories or sectors, if provided.
Try using the internet without using API keys or 3rd-party scrapers: only use the Brave browser, and copy and paste the text (emulating Ctrl+C and Ctrl+V) into a file in the files folder inside the project's folder. Record as much data as you can, manually grabbing results from searches and closing any cookie boxes that may appear (for the Google search page, for example, and others like it)! When you have the top results, start analysis and comparisons and find the next product to invest in (find the top 3 best investments / stocks as well as an extra
- Code:
#
# (c)J~Net 2024
#
# ./start_quest.sh
#
# https://jnet.forumotion.com/t2016-rag-with-recovery-project-builder-crewai-ollama#3108
import sys
import os
import time
from crewai import Agent, Task
from dotenv import load_dotenv
from langchain_openai import ChatOpenAI
import logging
# Load environment variables (e.g. an API key expected by langchain_openai)
# from a local .env file, if present.
load_dotenv()
# Chat client pointed at a local Ollama server's OpenAI-compatible endpoint.
# NOTE(review): "crewai-llama3:8b" looks like a custom Ollama model tag
# built from a Modelfile — confirm it exists on the local Ollama install.
llm=ChatOpenAI(
    model="crewai-llama3:8b",
    # model="crewai-dolphin-llama3",
    base_url="http://localhost:11434/v1"
)
# Configure logging: only errors, written to script.log in the working dir.
logging.basicConfig(filename='script.log', level=logging.ERROR)
# Ensure the 'files' directory exists (step outputs and transcripts go here).
os.makedirs('files', exist_ok=True)
def read_existing_steps():
    """Load previously saved step outputs from the 'files' directory.

    Returns:
        tuple[list[str], str]: the contents of every ``stepN.txt`` file
        (in sorted filename order) and the contents of
        ``latest_output.txt``, or ``""`` if that file is absent.
    """
    steps = []
    for filename in sorted(os.listdir('files')):
        if filename.startswith('step') and filename.endswith('.txt'):
            # Bug fix: interpolate the directory entry into the path; the
            # original opened the literal path 'files/(unknown)' and crashed.
            with open(f'files/{filename}', 'r') as file:
                steps.append(file.read())
    latest_output = ""
    if os.path.exists('files/latest_output.txt'):
        with open('files/latest_output.txt', 'r') as file:
            latest_output = file.read()
    return steps, latest_output
def save_step_result(step_number, result):
    """Persist one task result as ``files/step<step_number>.txt``."""
    path = f"files/step{step_number}.txt"
    with open(path, 'w') as out:
        out.write(result)
def execute_task_chain(content, agent, start_step=0):
    """Run each non-blank line of *content* as a CrewAI task.

    Resumes at *start_step* (lines handled by earlier runs are skipped);
    step files are numbered from ``start_step + 1``, with blank lines still
    consuming a number so the count stays aligned with the source lines.
    Failures are logged and printed, then the chain continues.
    """
    lines = content.splitlines()
    step_number = start_step
    for raw_line in lines[start_step:]:
        step_number += 1
        description = raw_line.strip()
        if not description:
            continue
        try:
            task = Task(
                description=description,
                expected_output='No specific output expected',
                agent=agent,
            )
            result = task.execute()
            print(result)
            # Append to the rolling transcript, then record the step file.
            with open('files/latest_output.txt', 'a') as f:
                f.write(result + '\n')
            save_step_result(step_number, result)
        except Exception as e:
            logging.error(f"Error occurred during task execution: {e}", exc_info=True)
            print(f"Error occurred during task execution: {e}")
def generate_filename(base_name):
    """Return the first unused path of the form ``files/<base_name>-<n>``."""
    suffix = 1
    candidate = f"files/{base_name}-{suffix}"
    while os.path.exists(candidate):
        suffix += 1
        candidate = f"files/{base_name}-{suffix}"
    return candidate
def create_file(filename, content):
    """Write *content* to *filename* and return a human-readable status.

    Returns a success message on success; on any failure the exception is
    logged and an error message string is returned instead of raising.
    """
    try:
        with open(filename, 'w') as file:
            file.write(content)
        # Bug fix: interpolate the actual filename; the original returned
        # the literal placeholder "(unknown)".
        return f"File '{filename}' created successfully."
    except Exception as e:
        logging.error(f"Error occurred while creating the file: {e}", exc_info=True)
        return f"An error occurred while creating the file: {e}"
def create_agent(role, goal, backstory):
    """Build the CrewAI agent, preferring a goal persisted in goal.txt.

    When goal.txt exists, its stripped contents override the *goal*
    argument, so a goal saved by a previous run carries over.
    """
    if os.path.exists('goal.txt'):
        with open('goal.txt', 'r') as goal_file:
            goal = goal_file.read().strip()
    return Agent(
        role=role,
        goal=goal,
        backstory=backstory,
        allow_delegation=False,
        verbose=True,
        llm=llm,
    )
def main():
    """Interactive command loop.

    Supported commands:
      - ``read <file>``: use the file's contents as a task chain
      - ``create file <name> with content <text>``: write a new file
      - ``exit``: quit
      - anything else: run the input as a single ad-hoc agent task
    """
    existing_steps, latest_output = read_existing_steps()
    # Resume numbering after any step files left by a previous run.
    start_step = len(existing_steps)
    while True:
        prompt = input("Enter your command (type 'exit' to quit): ").strip()
        if prompt.lower() == 'exit':
            break
        # A fresh agent is built on every iteration; create_agent re-reads
        # goal.txt, so a goal written by an earlier 'read' command sticks.
        general_agent = create_agent(
            role='Requirements Manager',
            goal="""Handle any user input commands including reading a file
and creating files for apps based on user input. For file reading,
read the contents and display them. For file creation,
generate a filename based on user input or a pattern and create
the file with the specified content. Respond as if
you were an expert developer providing file creation services
on demand.""",
            backstory="""You are an expert developer with a strong background
in software engineering. You provide high quality,
thorough, and efficient file creation services based on
user requirements."""
        )
        if prompt.lower().startswith('read '):
            start_time = time.time()
            filename = prompt[len('read '):].strip()
            content = read_file(filename)
            # Set the new goal description to the content read from the file
            general_agent.goal = content.strip()
            # Write the updated goal to goal.txt
            create_file('goal.txt', general_agent.goal)
            execute_task_chain(content, general_agent, start_step)
            end_time = time.time()
            elapsed_time = end_time - start_time
            elapsed_hours, rem = divmod(elapsed_time, 3600)
            elapsed_minutes, elapsed_seconds = divmod(rem, 60)
            print(f"Execution time: {int(elapsed_hours)}h {int(elapsed_minutes)}m {int(elapsed_seconds)}s")
        elif prompt.lower().startswith('create file '):
            # Split once on the literal separator; everything after it is content.
            parts = prompt[len('create file '):].split(' with content ', 1)
            if len(parts) == 2:
                filename, content = parts
                # generate_filename appends "-<n>" to avoid overwriting.
                filename = generate_filename(filename.strip())
                result = create_file(filename, content.strip())
                print(result)
                # Save result to latest_output.txt
                with open('files/latest_output.txt', 'a') as f:
                    f.write(result + '\n')
            else:
                print("Invalid command format. Use: create file <filename> with content <content>")
        else:
            # Execute dynamic task based on user input
            task_description = prompt
            try:
                task = Task(description=task_description.strip(), agent=general_agent)
                result = task.execute()
                print(result)
                # Save result to latest_output.txt
                with open('files/latest_output.txt', 'a') as f:
                    f.write(result + '\n')
            except Exception as e:
                logging.error(f"Error occurred during task execution: {e}", exc_info=True)
                print(f"Error occurred during task execution: {e}")
def read_file(filename):
    """Return the contents of *filename* as a string.

    On FileNotFoundError the error is logged and the error message string
    itself is returned rather than raising, so callers always get a str.
    """
    try:
        with open(filename, 'r') as file:
            content = file.read()
        return content
    except FileNotFoundError:
        # Bug fix: interpolate the filename; the original logged and
        # returned the literal placeholder "(unknown)".
        logging.error(f"File '{filename}' not found.")
        return f"File '{filename}' not found."
if __name__ == "__main__":
    # Batch mode: one CLI argument names a prompt file whose lines become
    # the task chain. With no argument, fall through to the interactive loop.
    if len(sys.argv) > 1:
        start_time = time.time()
        filename = sys.argv[1]
        content = read_file(filename)
        existing_steps, latest_output = read_existing_steps()
        # Resume numbering after any step files left by a previous run.
        start_step = len(existing_steps)
        general_agent = create_agent(
            role='Requirements Manager',
            goal="""Handle any user input commands including reading a file
and creating files for apps based on user input. For file reading,
read the contents and display them. For file creation,
generate a filename based on user input or a pattern and create
the file with the specified content. Respond as if
you were an expert developer providing file creation services
on demand.""",
            backstory="""You are an expert developer with a strong background
in software engineering. You provide high quality,
thorough, and efficient file creation services based on
user requirements."""
        )
        execute_task_chain(content, general_agent, start_step)
        end_time = time.time()
        elapsed_time = end_time - start_time
        elapsed_hours, rem = divmod(elapsed_time, 3600)
        elapsed_minutes, elapsed_seconds = divmod(rem, 60)
        print(f"Execution time: {int(elapsed_hours)}h {int(elapsed_minutes)}m {int(elapsed_seconds)}s")
    else:
        main()
Re: Ollama CrewAI Prompt Example for stock and product analysis using the Llama 3 8B model
Updated and model file setups
app.box.com/s/756gkfy7hbhq0vfrt3ovz82tedlehjdq
app.box.com/s/756gkfy7hbhq0vfrt3ovz82tedlehjdq
Similar topics
» Create an ollama model file for CrewAI Dynamically
» CrewAI Ollama StoryTime AI Python Example
» RAG With Recovery Project Builder CrewAI & Ollama
» CrewAI Quest V5
» Full JSON Config for vs code and ollama
» CrewAI Ollama StoryTime AI Python Example
» RAG With Recovery Project Builder CrewAI & Ollama
» CrewAI Quest V5
» Full JSON Config for vs code and ollama
Permissions in this forum:
You cannot reply to topics in this forum