19 changes: 18 additions & 1 deletion .gitignore
@@ -1,5 +1,22 @@
# Python
__pycache__/
*.pyc
*.pyo
*.pyd

# Virtual environment
.venv/

# Build artifacts
wordgrid_solver/target/
wordgrid_solver/wheels/

# Output files
output/
*.pdf
*.svg

*.pptx
*.cpython
*.pyc
test*
_*
61 changes: 52 additions & 9 deletions README.md
@@ -31,17 +31,60 @@ README.md # Documentation
```

## 🚀 Getting Started
1. Place your word list in `Words/words.txt` (200 words per topic).
2. Run the script:
```bash
python main.py
```
3. Voilà! Your puzzle book is ready in PDF format.

### Prerequisites

* Python 3.8+
* Rust toolchain (including `cargo`)
* `maturin` (can be installed via pip)

### Building and Running

1. **Clone the repository:**
```bash
git clone <repository-url>
cd BOOP
```

2. **Create and activate a Python virtual environment:**
```bash
python -m venv .venv
source .venv/bin/activate # On Windows use `.venv\Scripts\activate`
```

3. **Install Python dependencies:**
```bash
pip install -r requirements.txt
```

4. **Build the Rust extension:**
Navigate to the Rust project directory and build the extension using Maturin. This compiles the Rust code and installs it into your active Python environment (a quick import check follows this list).
```bash
cd wordgrid_solver/wordgrid_solver
maturin develop
cd ../.. # Return to the root directory
```

5. **Run the FastAPI server:**
```bash
python -m uvicorn api:app --host 0.0.0.0 --port 8000 --reload
```
The API will be available at `http://localhost:8000`; a minimal client sketch is shown after this list.

6. **(Optional) Generate a standalone puzzle book PDF (Original CLI functionality):**
* Place your word list in `Words/words.txt` (the original workflow expects 200 words per topic).
* Run the script:
```bash
python main_arg.py # Assuming main_arg.py is the entry point for PDF generation
```
* Your puzzle book PDF will be generated.
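
If step 4 succeeded, the extension is importable from the active virtual environment. A minimal sanity check, assuming the compiled module is exposed as `wordgrid_solver` (matching the crate directory name):

```python
# Assumption: the maturin-built package is named `wordgrid_solver`;
# adjust the import if the crate exposes a different module name.
import wordgrid_solver

print("wordgrid_solver imported OK")
```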
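
With the server from step 5 running, a job can be driven end to end over HTTP. This is a minimal client sketch against the `/generate` and `/status/{job_id}` endpoints defined in `api.py` (the word list and size are arbitrary example values):

```python
import time

import httpx  # pip install httpx

BASE = "http://localhost:8000"

# Submit a generation job; the API responds immediately with a job id.
resp = httpx.post(f"{BASE}/generate",
                  json={"wordlist": ["RUST", "PYTHON", "FASTAPI"], "size": 15})
job_id = resp.json()["job_id"]

# Poll until the background task reports completion or failure.
while True:
    status = httpx.get(f"{BASE}/status/{job_id}").json()
    if status.get("status") in ("completed", "failed"):
        break
    time.sleep(0.5)

# On success, "result" holds URLs for the puzzle and solution SVGs.
print(status)
```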

## 📖 How It Works
1. The word list is processed into categorized JSON using `rawWordToJSON.py`.
2. Puzzles are generated with specific rules for Normal, Hard, and Bonus modes.
3. Pages are styled and compiled into a professionally designed book format.

* **API (`api.py`)**: Provides endpoints (`/generate`, `/status/{job_id}`) to request puzzle generation and check job status. Uses background tasks for non-blocking generation.
* **Rust Extension (`wordgrid_solver`)**: Handles the computationally intensive task of finding word placements in the grid, optimized for performance.
* **Puzzle Generation (`generatePuzzle.py`)**: Orchestrates the puzzle creation using the Rust solver and generates SVG output.
* **PDF Generation (`main_arg.py`, `index.py`, `appendImage.py`)**: Contains the original logic for creating a complete PDF book (if using the CLI approach).
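
For programmatic use without the HTTP layer, the solver can be called directly. The sketch below mirrors the call made in `api.py`; the positional signature (base filename, word list, width, height, mask type, background image, page number) is inferred from that call and should be verified against `generatePuzzle.py`:

```python
import os

from generatePuzzle import create_puzzle_and_solution

# The API creates one output folder per job; do the same here.
os.makedirs("output/puzzles/demo", exist_ok=True)

# Writes puzzle.svg and its solution puzzleS.svg next to the base filename,
# matching the paths the API serves from /output/puzzles/<job_id>/.
create_puzzle_and_solution(
    "output/puzzles/demo/puzzle",    # base filename, no extension
    ["RUST", "PYTHON", "MATURIN"],   # word list
    15,                              # grid width
    15,                              # grid height
    None,                            # mask_type
    None,                            # background_image
    None,                            # page_number
)
```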

## 📖 Puzzle Types
- **Normal Puzzle**: A 13x13 word search.
121 changes: 121 additions & 0 deletions api.py
@@ -0,0 +1,121 @@
from fastapi import FastAPI, BackgroundTasks
from fastapi.middleware.cors import CORSMiddleware
from fastapi.staticfiles import StaticFiles
from pydantic import BaseModel
from typing import List, Optional, Dict
import uuid
import os
import json
import shutil
import asyncio

from generatePuzzle import create_puzzle_and_solution
from index import create_title_page

app = FastAPI()


# CORS
app.add_middleware(
    CORSMiddleware,
    allow_origins=["*"],
    allow_credentials=True,
    allow_methods=["*"],
    allow_headers=["*"],
)

# Output directories
os.makedirs('output', exist_ok=True)
os.makedirs('output/puzzles', exist_ok=True)

# In-memory job store
puzzle_jobs = {}

class PuzzleRequest(BaseModel):
    wordlist: List[str]
    size: int = 15
    mask_type: Optional[str] = None
    book_name: str = "Where's Word-o"

class JobResponse(BaseModel):
    job_id: str
    status: str

async def generate_puzzle_task(job_id: str, params: Dict):
    job_folder = f"output/puzzles/{job_id}"
    os.makedirs(job_folder, exist_ok=True)

    try:
        puzzle_jobs[job_id]["status"] = "processing"

        # Generation parameters
        puzzle_filename = f"{job_folder}/puzzle"
        wordlist = params["wordlist"]
        size = params["size"]
        mask_type = params["mask_type"]

        # Run the CPU-bound generation in a thread pool so the event loop stays responsive
        loop = asyncio.get_running_loop()
        await loop.run_in_executor(
            None,
            create_puzzle_and_solution,
            puzzle_filename,
            wordlist,
            size,
            size,
            mask_type,
            None,  # background_image
            None   # page_number
        )

        puzzle_jobs[job_id]["status"] = "completed"
        puzzle_jobs[job_id]["result"] = {
            "puzzle_url": f"/output/puzzles/{job_id}/puzzle.svg",
            "solution_url": f"/output/puzzles/{job_id}/puzzleS.svg"
        }

    except Exception as e:
        puzzle_jobs[job_id]["status"] = "failed"
        puzzle_jobs[job_id]["error"] = str(e)

@app.post("/generate", response_model=JobResponse)
async def generate_puzzle(request: PuzzleRequest, background_tasks: BackgroundTasks):
    job_id = str(uuid.uuid4())

    puzzle_jobs[job_id] = {
        "status": "queued",
        "params": request.dict()
    }

    # Start generation in the background
    background_tasks.add_task(generate_puzzle_task, job_id, request.dict())

    return {"job_id": job_id, "status": "queued"}

@app.get("/status/{job_id}")
async def get_status(job_id: str):
    if job_id not in puzzle_jobs:
        return {"status": "not_found"}

    return puzzle_jobs[job_id]

# Serve generated files statically
app.mount("/output", StaticFiles(directory="output"), name="output")

@app.delete("/jobs/{job_id}")
async def delete_job(job_id: str):
    if job_id in puzzle_jobs:
        # Remove the job data
        del puzzle_jobs[job_id]

        # Remove the job files
        job_folder = f"output/puzzles/{job_id}"
        if os.path.exists(job_folder):
            shutil.rmtree(job_folder)

        return {"status": "deleted"}
    return {"status": "not_found"}

if __name__ == "__main__":
    import uvicorn
    uvicorn.run("api:app", host="0.0.0.0", port=8000, reload=True)
124 changes: 124 additions & 0 deletions benchmark.py
@@ -0,0 +1,124 @@
import asyncio
import httpx
import time
import argparse
import os
from typing import List, Dict, Any

API_BASE_URL = "http://127.0.0.1:8000"  # Assuming the API runs locally on port 8000

async def poll_status(client: httpx.AsyncClient, job_id: str, timeout: int = 120) -> Dict[str, Any]:
    """Polls the job status endpoint until completion or timeout."""
    start_time = time.time()
    while time.time() - start_time < timeout:
        try:
            response = await client.get(f"{API_BASE_URL}/status/{job_id}")
            response.raise_for_status()  # Raise an exception for bad status codes
            data = response.json()
            if data.get("status") in ["completed", "failed"]:
                return data
        except httpx.RequestError as e:
            print(f"Error polling job {job_id}: {e}")
            # Decide if you want to retry or fail immediately
            await asyncio.sleep(1)  # Wait a bit before retrying on network errors
        except Exception as e:
            print(f"Unexpected error polling job {job_id}: {e}")
            return {"status": "failed", "error": f"Polling error: {e}"}

        await asyncio.sleep(0.5)  # Wait before polling again

    print(f"Timeout waiting for job {job_id}")
    return {"status": "failed", "error": "Timeout"}


async def run_single_job(client: httpx.AsyncClient, payload: Dict[str, Any]) -> float:
    """Sends a generation request and waits for completion, returning duration."""
    start_time = time.time()
    job_id = None
    try:
        response = await client.post(f"{API_BASE_URL}/generate", json=payload)
        response.raise_for_status()
        job_data = response.json()
        job_id = job_data.get("job_id")

        if not job_id:
            print("Failed to get job_id from response")
            return -1.0  # Indicate failure

        final_status = await poll_status(client, job_id)

        if final_status.get("status") != "completed":
            print(f"Job {job_id} failed or timed out: {final_status.get('error', 'Unknown error')}")
            return -1.0  # Indicate failure

        # Optional: Cleanup
        # await client.delete(f"{API_BASE_URL}/jobs/{job_id}")

    except httpx.RequestError as e:
        print(f"Request failed: {e}")
        return -1.0  # Indicate failure
    except Exception as e:
        print(f"An unexpected error occurred for job {job_id or 'unknown'}: {e}")
        return -1.0  # Indicate failure
    finally:
        # Ensure cleanup even if polling fails but job_id was obtained
        if job_id:
            try:
                await client.delete(f"{API_BASE_URL}/jobs/{job_id}")
            except Exception as cleanup_err:
                print(f"Error cleaning up job {job_id}: {cleanup_err}")

    end_time = time.time()
    return end_time - start_time


async def run_benchmark(num_concurrent: int):
    """Runs the benchmark with N concurrent requests."""
    payload = {
        "wordlist": ["BENCHMARK", "TEST", "CONCURRENT", "FASTAPI", "PYTHON", "ASYNCIO", "HTTPX", "PARALLEL"],
        "size": 15  # Adjust size if needed; larger size = longer generation
    }

    print(f"Starting benchmark with {num_concurrent} concurrent requests...")
    start_total_time = time.time()

    async with httpx.AsyncClient(timeout=150.0) as client:  # Increase client timeout
        tasks = [run_single_job(client, payload) for _ in range(num_concurrent)]
        results = await asyncio.gather(*tasks)

    end_total_time = time.time()
    total_duration = end_total_time - start_total_time

    successful_jobs = [duration for duration in results if duration > 0]
    failed_jobs = len(results) - len(successful_jobs)
    num_successful = len(successful_jobs)

    print("\n--- Benchmark Results ---")
    print(f"Total concurrent requests: {num_concurrent}")
    print(f"Successful jobs: {num_successful}")
    print(f"Failed/Timed out jobs: {failed_jobs}")

    if num_successful > 0:
        average_latency = sum(successful_jobs) / num_successful
        throughput = num_successful / total_duration if total_duration > 0 else 0
        print(f"Total time for all requests: {total_duration:.2f} seconds")
        print(f"Average job latency (for successful jobs): {average_latency:.2f} seconds")
        print(f"Throughput: {throughput:.2f} jobs/second")
    else:
        print("No jobs completed successfully.")
        print(f"Total time elapsed: {total_duration:.2f} seconds")

    print("-------------------------")


async def main():
    parser = argparse.ArgumentParser(description="Run API benchmark.")
    parser.add_argument("-n", "--num-concurrent", type=int, default=4,
                        help="Number of concurrent requests to send.")
    args = parser.parse_args()

    await run_benchmark(args.num_concurrent)

if __name__ == "__main__":
    asyncio.run(main())
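
With the API already listening on `http://127.0.0.1:8000`, run e.g. `python benchmark.py -n 8` to send eight concurrent requests; each client submits a job via `/generate`, polls `/status/{job_id}` until it finishes, and cleans up with `DELETE /jobs/{job_id}`.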