Commit
Merge pull request #42 from CS3219-AY2425S1/yitong/d5-frontend
Add AI Hint Function
yitong241 authored Nov 12, 2024
2 parents 9cfb85e + c1eaaf9 commit 8d2883a
Showing 21 changed files with 920 additions and 44 deletions.
Binary file modified .DS_Store
Binary file not shown.
Binary file modified backend/.DS_Store
Binary file not shown.
103 changes: 103 additions & 0 deletions backend/ai-hint-service/.gitignore
@@ -0,0 +1,103 @@
# Python
__pycache__/
*.py[cod]
*$py.class
*.so
.Python
build/
develop-eggs/
dist/
downloads/
eggs/
.eggs/
lib/
lib64/
parts/
sdist/
var/
wheels/
*.egg-info/
.installed.cfg
*.egg
MANIFEST

# Virtual Environment
.env
.venv
env/
venv/
ENV/
env.bak/
venv.bak/

# FastAPI specific
.env
.env.local
.env.*.local

# IDE - VSCode
.vscode/
*.code-workspace
.history/

# IDE - PyCharm
.idea/
*.iml
*.iws
.idea_modules/

# IDE - Jupyter Notebook
.ipynb_checkpoints
*.ipynb

# Logs
logs/
*.log
log.txt

# Database
*.db
*.sqlite
*.sqlite3

# Coverage reports
htmlcov/
.tox/
.nox/
.coverage
.coverage.*
.cache
nosetests.xml
coverage.xml
*.cover
*.py,cover
.hypothesis/
.pytest_cache/
cover/

# macOS
.DS_Store
.AppleDouble
.LSOverride
._*

# Windows
Thumbs.db
ehthumbs.db
Desktop.ini
$RECYCLE.BIN/

# Linux
*~
.fuse_hidden*
.directory
.Trash-*
.nfs*

# Project specific
data/
temp/
config.local.py
local_settings.py

24 changes: 24 additions & 0 deletions backend/ai-hint-service/Dockerfile
@@ -0,0 +1,24 @@
# backend/ai-hint-service/Dockerfile

FROM python:3.9-slim

# Set environment variables
ENV PYTHONDONTWRITEBYTECODE=1
ENV PYTHONUNBUFFERED=1

# Set work directory
WORKDIR /app

# Install dependencies
COPY requirements.txt .
RUN pip install --upgrade pip
RUN pip install -r requirements.txt

# Copy application code
COPY ./app /app/app

# Expose port
EXPOSE 8000

# Command to run the application
CMD ["uvicorn", "app.main:app", "--host", "0.0.0.0", "--port", "8000"]
2 changes: 2 additions & 0 deletions backend/ai-hint-service/app/.env.sample
@@ -0,0 +1,2 @@
OPENAI_API_KEY="API_KEY"
QUESTION_SERVICE_URL="http://question:3002"
37 changes: 37 additions & 0 deletions backend/ai-hint-service/app/main.py
@@ -0,0 +1,37 @@
from fastapi import FastAPI
from fastapi.middleware.cors import CORSMiddleware
from dotenv import load_dotenv

from .routes import hint, code_analysis, ai_answer

load_dotenv()

app = FastAPI(
title="AI Hint Service",
description="Provides AI-generated hints, code complexity analysis, and model answers.",
version="1.0.0",
)


origins = [
"*",
]

app.add_middleware(
CORSMiddleware,
    allow_origins=origins,  # origins is ["*"], so all origins are allowed
allow_credentials=True,
allow_methods=["*"],
allow_headers=["*"],
)

# Include routers
app.include_router(hint.router, prefix="/api/hint", tags=["Hint"])
app.include_router(code_analysis.router, prefix="/api/code-analysis", tags=["Code Analysis"])
app.include_router(ai_answer.router, prefix="/api/ai_answer", tags=["Model Answer"])

# Root endpoint
@app.get("/")
def read_root():
return {"message": "AI Hint Service is up and running."}
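With the routers mounted under /api/hint, /api/code-analysis, and /api/ai_answer, the whole surface can be exercised against a local instance (for example, uvicorn app.main:app --port 8000 run from backend/ai-hint-service). A minimal client sketch, assuming OPENAI_API_KEY is set, the question service is reachable, and a question with ID 1 exists; the snippet sent for analysis is made up:

import requests

BASE_URL = "http://localhost:8000"  # assumed local uvicorn instance

# Root endpoint: simple liveness message
print(requests.get(f"{BASE_URL}/").json())

# Hint for question 1 (GET /api/hint/{question_id})
print(requests.get(f"{BASE_URL}/api/hint/1").json()["hint"])

# Complexity analysis (POST /api/code-analysis/)
analysis = requests.post(
    f"{BASE_URL}/api/code-analysis/",
    json={"code": "def total(n):\n    return sum(range(n))", "language": "python"},
).json()
print(analysis["analysis"])

# Model answer (POST /api/ai_answer/)
answer = requests.post(
    f"{BASE_URL}/api/ai_answer/",
    json={"question_id": 1, "language": "python"},
).json()
print(answer["ai_answer"])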
37 changes: 37 additions & 0 deletions backend/ai-hint-service/app/routes/ai_answer.py
@@ -0,0 +1,37 @@
import os
from typing import Optional

import requests
from fastapi import APIRouter, HTTPException

from ..services.openai_service import generate_ai_answer
from ..schemas.ai_answer import AiAnswerRequest, AiAnswerResponse

router = APIRouter()

@router.post("/", response_model=AiAnswerResponse)
async def get_ai_answer(request: AiAnswerRequest):
"""
Generate a model answer for the given question ID.
"""
    # Fetch the question description from the question service
question_description = fetch_question_description(request.question_id)
if not question_description:
raise HTTPException(status_code=404, detail="Question not found.")

try:
ai_answer = generate_ai_answer(question_description, language=request.language)
print(ai_answer)
return AiAnswerResponse(ai_answer=ai_answer)
except Exception as e:
raise HTTPException(status_code=500, detail=str(e))

def fetch_question_description(question_id: int) -> Optional[str]:
    # Fetch the question description from the question service
    QUESTION_SERVICE_URL = os.getenv("QUESTION_SERVICE_URL", "http://question:3002")
try:
response = requests.get(f"{QUESTION_SERVICE_URL}/{question_id}")
if response.status_code == 200:
data = response.json()
return data.get("description")
else:
return None
    except Exception:
return None
16 changes: 16 additions & 0 deletions backend/ai-hint-service/app/routes/code_analysis.py
@@ -0,0 +1,16 @@
from fastapi import APIRouter, HTTPException
from ..services.openai_service import analyze_code_complexity
from ..schemas.code_analysis import CodeAnalysisRequest, CodeAnalysisResponse

router = APIRouter()

@router.post("/", response_model=CodeAnalysisResponse)
async def get_code_analysis(request: CodeAnalysisRequest):
"""
Analyze the complexity of the provided code.
"""
try:
result = analyze_code_complexity(request.code, request.language)
return CodeAnalysisResponse(**result)
except Exception as e:
raise HTTPException(status_code=500, detail=str(e))
40 changes: 40 additions & 0 deletions backend/ai-hint-service/app/routes/hint.py
@@ -0,0 +1,40 @@
from fastapi import APIRouter, HTTPException
from ..services.openai_service import generate_hint
from ..schemas.hint import HintResponse
from typing import Optional
import requests
import os

router = APIRouter()

@router.get("/{question_id}", response_model=HintResponse)
async def get_hint(question_id: int):
"""
Generate a hint for the given question ID.
"""
question_description = fetch_question_description(question_id)
if not question_description:
raise HTTPException(status_code=404, detail="Question not found.")

try:
hint = generate_hint(question_description)
return HintResponse(hint=hint)
except Exception as e:
raise HTTPException(status_code=500, detail=str(e))

def fetch_question_description(question_id: int) -> Optional[str]:
    # Fetch the question description from the question service's API
    QUESTION_SERVICE_URL = os.getenv("QUESTION_SERVICE_URL", "http://question:3002")
try:
link = f"{QUESTION_SERVICE_URL}/{question_id}"
print(link)
response = requests.get(link)
if response.status_code == 200:
data = response.json()
return data.get("description")
else:
return None
    except Exception:
return None
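fetch_question_description only needs the question service to answer GET {QUESTION_SERVICE_URL}/{question_id} with a JSON body containing a description field. When working on this route without the real question service, a throwaway stub is enough. A minimal sketch (file name and sample text are made up); run it with uvicorn stub_question_service:stub --port 3002 and point QUESTION_SERVICE_URL at http://localhost:3002:

# stub_question_service.py -- stand-in for the question service during local testing
from fastapi import FastAPI

stub = FastAPI()

@stub.get("/{question_id}")
def get_question(question_id: int):
    # Only the "description" field is read by fetch_question_description.
    return {"description": f"Sample description for question {question_id}"}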
8 changes: 8 additions & 0 deletions backend/ai-hint-service/app/schemas/ai_answer.py
@@ -0,0 +1,8 @@
from pydantic import BaseModel

class AiAnswerRequest(BaseModel):
question_id: int
language: str

class AiAnswerResponse(BaseModel):
ai_answer: str
9 changes: 9 additions & 0 deletions backend/ai-hint-service/app/schemas/code_analysis.py
@@ -0,0 +1,9 @@
from pydantic import BaseModel

class CodeAnalysisRequest(BaseModel):
code: str
language: str # e.g., "python", "cpp", "java"

class CodeAnalysisResponse(BaseModel):
# complexity: str
analysis: str
4 changes: 4 additions & 0 deletions backend/ai-hint-service/app/schemas/hint.py
@@ -0,0 +1,4 @@
from pydantic import BaseModel

class HintResponse(BaseModel):
hint: str
49 changes: 49 additions & 0 deletions backend/ai-hint-service/app/services/openai_service.py
@@ -0,0 +1,49 @@
import openai

# Chat model used for every request; the client reads OPENAI_API_KEY from the
# environment, which app.main loads via python-dotenv.
model = 'gpt-3.5-turbo-0125'

def generate_hint(question_description: str) -> str:
prompt = f"Provide a concise hint to achieve the most efficient time complexity for the following programming problem:\n\n{question_description}\n\nHint:"
completion = openai.chat.completions.create(
model=model,
messages=[
{"role": "system", "content": "You are a helpful assistant."},
{"role": "user", "content": prompt}
]
)

hint = completion.choices[0].message.content
return hint

def analyze_code_complexity(code: str, language: str) -> dict:
prompt = f"Analyze the following {language} code for its time and space complexity. Provide a detailed explanation.\n\nCode:\n{code}\n\nAnalysis:"
print(prompt)
completion = openai.chat.completions.create(
model=model,
messages=[
{"role": "system", "content": "You are a helpful assistant."},
{"role": "user", "content": prompt}
]
)
analysis = completion.choices[0].message.content
# if "O(" in analysis:
# start = analysis.find("O(")
# end = analysis.find(")", start) + 1
# complexity = analysis[start:end]
# else:
# complexity = "Complexity could not be determined."
# return {"complexity": complexity, "analysis": analysis}
return {"analysis": analysis}

def generate_ai_answer(question_description: str, language: str) -> str:
prompt = f"Provide a complete and optimized {language} solution to achieve the most efficient time complexity for the following programming problem:\n\n{question_description}\n\nSolution:"
print(prompt)
completion = openai.chat.completions.create(
model=model,
messages=[
{"role": "system", "content": "You are a helpful assistant."},
{"role": "user", "content": prompt}
]
)
ai_answer = completion.choices[0].message.content
return ai_answer
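All three helpers call the OpenAI API directly, so unit tests are simplest with that call stubbed out. A pytest sketch (pytest is assumed as a dev dependency; it is not in requirements.txt) that swaps openai.chat for a fake so generate_hint can be tested offline; it relies on the service looking up openai.chat at call time, which the code above does:

# test_openai_service.py
from types import SimpleNamespace

import openai

from app.services.openai_service import generate_hint


def _fake_completion(content: str):
    # Mirror the attributes the service reads: completion.choices[0].message.content
    message = SimpleNamespace(content=content)
    return SimpleNamespace(choices=[SimpleNamespace(message=message)])


def test_generate_hint_returns_model_content(monkeypatch):
    fake_chat = SimpleNamespace(
        completions=SimpleNamespace(
            create=lambda **kwargs: _fake_completion("Use a hash map for O(n) lookups.")
        )
    )
    monkeypatch.setattr(openai, "chat", fake_chat)

    hint = generate_hint("Find two numbers that sum to a target.")
    assert hint == "Use a hash map for O(n) lookups."

Running pytest from backend/ai-hint-service, so that the app package is importable, is assumed.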
6 changes: 6 additions & 0 deletions backend/ai-hint-service/requirements.txt
@@ -0,0 +1,6 @@
fastapi
uvicorn
openai
pydantic
python-dotenv
requests
8 changes: 8 additions & 0 deletions docker-compose.yml
@@ -75,6 +75,13 @@ services:
ports:
- "3005:3005"

ai-hint-service:
build: ./backend/ai-hint-service
ports:
- "3006:8000"
depends_on:
- question # Assuming 'question' service is needed to fetch question descriptions

frontend:
build: ./frontend
env_file:
@@ -87,3 +94,4 @@ services:
- question
- collaboration
- rabbitmq
- ai-hint-service # Ensure frontend waits for ai-hint-service
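The new service publishes the container's port 8000 on host port 3006, which is also the address the frontend's REACT_APP_AI_HINT_URL sample below points at. A quick way to confirm the mapping after docker compose up, as a small sketch run from the host machine:

import requests

# 3006 on the host maps to uvicorn's port 8000 inside the ai-hint-service container
response = requests.get("http://localhost:3006/")
print(response.status_code, response.json())  # expect 200 and the "up and running" message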
1 change: 1 addition & 0 deletions frontend/.env.sample
@@ -14,4 +14,5 @@ REACT_APP_USER_SERVICE_URL=http://localhost/api/users/
REACT_APP_MATCHING_SERVICE_URL=http://localhost:3003
REACT_APP_COMMUNICATION_SERVICE_URL=http://localhost:3004
REACT_APP_COLLABORATION_SERVICE_URL=http://localhost:3005
REACT_APP_AI_HINT_URL=http://localhost:3006
REACT_APP_VIDEO_SERVICE_PORT=9000