Skip to content

Commit

Permalink
feat: add db & storage
Browse files Browse the repository at this point in the history
  • Loading branch information
octadion committed Feb 28, 2024
1 parent c308d3e commit d1a6bbe
Show file tree
Hide file tree
Showing 3 changed files with 92 additions and 9 deletions.
20 changes: 19 additions & 1 deletion .github/workflows/build-and-deploy.yml
Original file line number Diff line number Diff line change
Expand Up @@ -17,6 +17,15 @@ on:
env:
REGISTRY: registry.digitalocean.com/visionllm #registry.digitalocean.com/visionllm
IMAGE_NAME: vision-llm
DB_HOST: ${{ secrets.DB_HOST }}
DB_PORT: ${{ secrets.DB_PORT }}
DB_NAME: ${{ secrets.DB_NAME }}
DB_USER: ${{ secrets.DB_USER }}
DB_PASS: ${{ secrets.DB_PASS }}
S3_REGION: ${{ secrets.S3_REGION }}
S3_ENDPOINT: ${{ secrets.S3_ENDPOINT }}
S3_KEY: ${{ secrets.S3_KEY }}
S3_SECRET: ${{ secrets.S3_SECRET }}

#3
jobs:
Expand Down Expand Up @@ -55,7 +64,7 @@ jobs:
username: ${{ secrets.USERNAME }}
key: ${{ secrets.SSHKEY }}
passphrase: ${{ secrets.PASSPHRASE }}
envs: IMAGE_NAME,REGISTRY,{{ secrets.DIGITALOCEAN_ACCESS_TOKEN }},GITHUB_SHA,DB_HOST,DB_PORT,DB_NAME,DB_USER,DB_PASS,S3_REGION,S3_ENDPOINT,S3_KEY,S3_SECRET
script: |
# Login to registry
docker login -u ${{ secrets.DIGITALOCEAN_ACCESS_TOKEN }} -p ${{ secrets.DIGITALOCEAN_ACCESS_TOKEN }} registry.digitalocean.com
Expand All @@ -68,4 +77,13 @@ jobs:
-p 8001:8001 \
--restart always \
--name $(echo $IMAGE_NAME) \
--env DB_HOST=$(echo $DB_HOST) \
--env DB_PORT=$(echo $DB_PORT) \
--env DB_NAME=$(echo $DB_NAME) \
--env DB_USER=$(echo $DB_USER) \
--env DB_PASS=$(echo $DB_PASS) \
--env S3_REGION=$(echo $S3_REGION) \
--env S3_ENDPOINT=$(echo $S3_ENDPOINT) \
--env S3_KEY=$(echo $S3_KEY) \
--env S3_SECRET=$(echo $S3_SECRET) \
$(echo $REGISTRY)/$(echo $IMAGE_NAME):$(echo $GITHUB_SHA | head -c7)
76 changes: 69 additions & 7 deletions main.py
Original file line number Diff line number Diff line change
Expand Up @@ -7,14 +7,37 @@
from langchain import PromptTemplate, LLMChain
from huggingface_hub import hf_hub_download
from gtts import gTTS
import psycopg2
import boto3
from datetime import datetime
from dotenv import load_dotenv

# Load DB/S3 credentials from a local .env file (mirrors the env vars the
# deploy workflow injects into the container).
load_dotenv()
app = Flask(__name__)
# NOTE(review): hardcoded secret key checked into source — should come from
# the environment in production; confirm before deploying.
app.config['SECRET_KEY'] = 'visionllm'
app.config['VIDEO_UPLOAD_FOLDER'] = 'data/videos'
app.config['IMAGE_UPLOAD_FOLDER'] = 'data/images'

# Single module-level PostgreSQL connection shared by all request handlers.
# NOTE(review): psycopg2 connections are not safe for concurrent use by
# multiple threads sharing one cursor-less connection — confirm the server
# runs single-threaded or add per-request connections/pooling.
conn = psycopg2.connect(
    host=os.getenv("DB_HOST"),
    port=os.getenv("DB_PORT"),
    dbname=os.getenv("DB_NAME"),
    user=os.getenv("DB_USER"),
    password=os.getenv("DB_PASS")
)
# S3-compatible object storage client (DigitalOcean Spaces, per the URLs
# built in the handlers below).
s3 = boto3.client('s3',
                  region_name=os.getenv("S3_REGION"),
                  endpoint_url=os.getenv("S3_ENDPOINT"),
                  aws_access_key_id=os.getenv("S3_KEY"),
                  aws_secret_access_key=os.getenv("S3_SECRET"))

# Cross-request state shared between the routes below: detection results,
# the last LLM response, and the row ids of the most recently inserted
# image/audio/text records (linked together by /save_prediction).
objects = None
response = None
image_id = None
audio_id = None
video_id = None
text_id = None

def generate_frames(path_x = ''):
yolo_output = video_detection(path_x)
for detection_, objects_detected in yolo_output:
Expand Down Expand Up @@ -49,20 +72,33 @@ def video():

@app.route('/image', methods = ['POST'])
def image():
    """Accept an uploaded image, save it locally, mirror it to S3, record
    its public URL in the ``images`` table, and stream detection frames.

    Side effects: sets the module-level ``image_id`` to the new row's id.
    """
    global image_id
    img = request.files['file']
    filename = secure_filename(img.filename)
    # Compute the local path once instead of re-joining it three times.
    local_path = os.path.join(app.config['IMAGE_UPLOAD_FOLDER'], filename)
    img.save(local_path)
    with open(local_path, "rb") as data:
        s3.upload_fileobj(data, "results", "images/" + filename)
    image_url = "https://visionllm.sgp1.digitaloceanspaces.com/results/images/" + filename
    cur = conn.cursor()
    try:
        cur.execute("INSERT INTO images (data) VALUES (%s) RETURNING id", (image_url,))
        image_id = cur.fetchone()[0]
        conn.commit()
    finally:
        # Bug fix: the original never closed the cursor if the INSERT raised,
        # leaking a cursor per failed request.
        cur.close()
    return Response(generate_frames_image(path_x=local_path), mimetype='multipart/x-mixed-replace; boundary=frame')

@app.route('/get_objects', methods = ['GET'])
def get_objects():
    """Persist the most recent detection result to the ``texts`` table and
    return it as JSON.

    Side effects: sets the module-level ``text_id`` to the new row's id.
    """
    # Bug fix: the original had two global statements ("global objects" then
    # "global objects, text_id"); one declaration covers both names.
    global objects, text_id
    cur = conn.cursor()
    try:
        cur.execute("INSERT INTO texts (object) VALUES (%s) RETURNING id", (objects,))
        text_id = cur.fetchone()[0]
        conn.commit()
    finally:
        # Close the cursor even if the INSERT raises (was leaked on error).
        cur.close()
    return jsonify(result=objects)

@app.route('/llm')
def llm():
global response
global objects
global response, objects
n_gpu_layers = -1
llm = LlamaCpp(
streaming = True,
Expand Down Expand Up @@ -90,15 +126,41 @@ def llm():

@app.route('/get_response', methods = ['GET'])
def get_response():
    """Attach the latest LLM response to the texts row created by
    ``/get_objects`` and return the response as JSON."""
    # Bug fix: the original declared "global response" twice; one statement
    # covering both names is sufficient.
    global response, text_id
    cur = conn.cursor()
    try:
        cur.execute("UPDATE texts SET response = %s WHERE id = %s", (response, text_id))
        conn.commit()
    finally:
        # Close the cursor even if the UPDATE raises (was leaked on error).
        cur.close()
    return jsonify(result=response)

@app.route('/gtts')
def gtts():
    """Synthesize the latest LLM response to speech, upload the WAV to S3,
    record its URL in the ``audios`` table, and return the audio file.

    Side effects: sets the module-level ``audio_id`` to the new row's id.
    """
    # Bug fix: a stale early "return send_file('./runs/test/output_audio.wav')"
    # (and a fixed, always-overwritten filename) made everything below it dead
    # code; only the timestamped save/upload/insert path is kept. Also
    # collapsed the duplicated "global response" declarations into one.
    global response, audio_id
    speech = gTTS(text=response, lang='en', slow=False)
    timestamp = datetime.now().strftime("%Y%m%d%H%M%S")
    filename = "output_audio_" + timestamp + ".wav"
    local_path = './runs/test/' + filename
    speech.save(local_path)
    with open(local_path, "rb") as data:
        s3.upload_fileobj(data, "results", "audios/" + filename)
    audio_url = "https://visionllm.sgp1.digitaloceanspaces.com/results/audios/" + filename
    cur = conn.cursor()
    try:
        cur.execute("INSERT INTO audios (data) VALUES (%s) RETURNING id", (audio_url,))
        audio_id = cur.fetchone()[0]
        conn.commit()
    finally:
        # Close the cursor even if the INSERT raises (was leaked on error).
        cur.close()
    return send_file(local_path, mimetype='audio/wav')

@app.route('/save_prediction')
def save_prediction():
    """Link the most recent image, text, and audio rows into a row of the
    ``predictions`` table.

    Returns an error payload if any of the three ids has not been set yet
    (i.e. the corresponding route has not run in this process).
    """
    global image_id, audio_id, text_id
    # Guard clause first. Bug fix: the original opened a cursor before this
    # check and never closed it on the error branch, leaking one cursor per
    # failed call.
    if image_id is None or text_id is None or audio_id is None:
        return jsonify(result="Error: image_id, text_id, or audio_id is None")
    cur = conn.cursor()
    try:
        cur.execute("INSERT INTO predictions (image_id, text_id, audio_id) VALUES (%s, %s, %s)", (image_id, text_id, audio_id))
        conn.commit()
    finally:
        cur.close()
    return jsonify(result="Prediction saved successfully")

if __name__ == "__main__":
app.run(host='0.0.0.0', port='8001')
5 changes: 4 additions & 1 deletion requirements.txt
Original file line number Diff line number Diff line change
Expand Up @@ -8,4 +8,7 @@ torch
accelerate
llama-cpp-python==0.2.6
gtts
flask
psycopg2
boto3
python-dotenv

0 comments on commit d1a6bbe

Please sign in to comment.