feat: full, status, job
- full: saturate a resource (CPU or RAM) for a specified duration
- status: get the current status of each resource
- job: background job that simulates normal controller behavior
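A rough way to exercise the new endpoints once the consumer service is running; the base URL below is an assumption, not part of this commit:

```python
# Hypothetical smoke test; assumes the consumer API is reachable at localhost:8000.
import json
from urllib.request import urlopen

BASE = "http://localhost:8000"

print(urlopen(f"{BASE}/full/cpu/duration/5").read().decode())  # burn CPU for 5 s in the background
print(urlopen(f"{BASE}/full/ram/duration/5").read().decode())  # fill memory for 5 s in the background
print(urlopen(f"{BASE}/full/enque/10").read().decode())        # enqueue 10 mock queries
print(json.loads(urlopen(f"{BASE}/status").read()))            # aggregated CPU/memory/queue statistics
```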
jason810496 committed Jan 23, 2024
1 parent 2ccf23a commit 35ad4eb
Showing 8 changed files with 126 additions and 13 deletions.
31 changes: 24 additions & 7 deletions consumer/src/api/full.py
@@ -1,6 +1,10 @@
from fastapi import APIRouter, Depends, HTTPException, status
from fastapi import APIRouter, status, BackgroundTasks
from fastapi.responses import JSONResponse
from typing import List

from services.full import create_full_cpu_process, create_full_ram_process
from services.job import enqueue as enqueue_task


router = APIRouter(
prefix="/full",
Expand All @@ -9,15 +13,28 @@


@router.get("/cpu/duration/{duration}", status_code=status.HTTP_200_OK)
async def full_cpu(cpu_rate: int, duration: int):
return {"cpu_rate": cpu_rate, "duration": duration}
async def full_cpu(duration: int, background_tasks: BackgroundTasks):
background_tasks.add_task(create_full_cpu_process, duration)
return JSONResponse(
status_code=status.HTTP_200_OK,
content={"full_type": "cpu", "duration": duration, "message": f"full cpu in {duration} seconds"},
)


@router.get("/mem/duration/{duration}", status_code=status.HTTP_200_OK)
async def full_cpu(mem_rate: int, duration: int):
return {"mem_rate": mem_rate, "duration": duration}
@router.get("/ram/duration/{duration}", status_code=status.HTTP_200_OK)
async def full_cpu(duration: int, background_tasks: BackgroundTasks):
background_tasks.add_task(create_full_rem_process, duration)
return JSONResponse(
status_code=status.HTTP_200_OK,
content={"full_type": "mem", "duration": duration, "message": f"full memory in {duration} seconds"},
)


@router.get("/enque/{num}", status_code=status.HTTP_200_OK)
async def enque(num: int):
return {"num": num}
enqueue_task(num)

return JSONResponse(
status_code=status.HTTP_200_OK,
content={"message": f"enque {num}"},
)
4 changes: 3 additions & 1 deletion consumer/src/api/state.py
@@ -2,9 +2,11 @@
from typing import List


# archive
router = APIRouter(
prefix="/state",
tags=["State"],
deprecated=True,
)


@@ -13,7 +15,7 @@ async def state_cpu(rate: str, duration: int):
    return {"state": rate, "duration": duration}


@router.get("/mem/{rate}/duration/{duration}", status_code=status.HTTP_200_OK)
@router.get("/ram/{rate}/duration/{duration}", status_code=status.HTTP_200_OK)
async def state_mem(rate: str, duration: int):
return {"state": rate, "duration": duration}

23 changes: 21 additions & 2 deletions consumer/src/api/status.py
@@ -12,11 +12,30 @@
)

# schemas
from schema.statistics import HardwareResponse
from schema.statistics import HardwareResponse, StatisticsResponse


router = APIRouter(prefix="/status", tags=["Status"])

@router.get("", status_code=status.HTTP_200_OK, response_model=StatisticsResponse)
async def all_status():
"""
- cpu: cpu status
- percent: cpu usage percentage
- usage: cpu usage percentage
- mem: memory status
- percent: memory usage percentage
- usage: memory usage in **GB**
- remain_count: remain query count
- avg_exe_time: average execution time
"""
return StatisticsResponse(
cpu=HardwareResponse(percent=get_cpu_percent(), usage=get_cpu_usage()),
mem=HardwareResponse(percent=get_mem_percent(), usage=get_mem_usage()),
remain_count=get_remain_count(),
avg_exe_time=get_avg_exe_time(),
)


@router.get("/cpu", status_code=status.HTTP_200_OK, response_model=HardwareResponse)
async def status_cpu():
@@ -27,7 +46,7 @@ async def status_cpu():
    return HardwareResponse(percent=get_cpu_percent(), usage=get_cpu_usage())


@router.get("/mem", status_code=status.HTTP_200_OK, response_model=HardwareResponse)
@router.get("/ram", status_code=status.HTTP_200_OK, response_model=HardwareResponse)
async def status_mem():
"""
- percent: memory usage percentage
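For reference, the aggregated `/status` response follows `StatisticsResponse`; a minimal sketch of its shape, with made-up numbers:

```python
# Illustrative only: the field layout of GET /status (values are invented).
from schema.statistics import HardwareResponse, StatisticsResponse

sample = StatisticsResponse(
    cpu=HardwareResponse(percent=37.5, usage=1.5),
    mem=HardwareResponse(percent=62.0, usage=4.9),  # memory usage is reported in GB
    remain_count=12,
    avg_exe_time=0.8,
)
print(sample.json())  # pydantic v1; on pydantic v2 use sample.model_dump_json()
```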
7 changes: 7 additions & 0 deletions consumer/src/schema/statistics.py
@@ -4,3 +4,10 @@
class HardwareResponse(BaseModel):
    percent: float
    usage: float


class StatisticsResponse(BaseModel):
    cpu: HardwareResponse
    mem: HardwareResponse
    remain_count: int
    avg_exe_time: float
Empty file removed consumer/src/services/cpu.py
Empty file.
60 changes: 60 additions & 0 deletions consumer/src/services/full.py
@@ -0,0 +1,60 @@
import time
import multiprocessing
from core.config import get_settings

settings = get_settings()

def full_cpu_in_duration(duration: int):
    now = time.time()
    print("time", now)
    print(f"full cpu in {duration} seconds")

    i = 0
    while True:
        if time.time() > now + duration:
            break
        i += 1
    print("time", time.time())

def create_full_cpu_process(duration: int):
    '''
    create `settings.full_cpu_process_count` processes to saturate the CPU for `duration` seconds
    '''

    for _ in range(settings.full_cpu_process_count):
        p = multiprocessing.Process(target=full_cpu_in_duration, args=(duration,))
        p.start()


"""
reference:
https://stackoverflow.com/questions/6317818/eat-memory-using-python
"""
def full_ram_in_duration(duration: int):
    now = time.time()
    print("time", now)
    print(f"full ram in {duration} seconds")

    cnt = 0
    a = bytearray(settings.full_ram_byte)
    while True:
        if time.time() > now + duration:
            break

        try:
            a = a + bytearray(settings.full_ram_byte)
            cnt += 1
        except MemoryError:
            break

    if time.time() < now + duration:
        time.sleep(now + duration - time.time())

    # in GB
    print(f"Allocated {cnt} GB in {duration} seconds")
    print("time", time.time())

def create_full_ram_process(duration: int):
    p = multiprocessing.Process(target=full_ram_in_duration, args=(duration,))
    p.start()
    return p
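A minimal sketch for trying the burner functions locally, assuming `core.config` resolves; the `__main__` guard matters because `multiprocessing` may re-import the module under the spawn start method:

```python
# Hypothetical local smoke test, not part of this commit.
from services.full import create_full_cpu_process, create_full_ram_process

if __name__ == "__main__":
    create_full_cpu_process(3)       # spawns full_cpu_process_count busy-loop processes for 3 s
    p = create_full_ram_process(3)   # returns the spawned memory-filling process
    p.join()
```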
14 changes: 11 additions & 3 deletions consumer/src/services/job.py
@@ -1,27 +1,33 @@
import random
import asyncio

from core.config import get_settings
from state.instance import Sleep, Counter, TotalQueryCount, AvgExecutionTime

settings = get_settings()

def check_enque():
    return random.randint(0, 1)


def check_deque():
    return random.randint(0, 1)


def get_enque_num():
    return random.randint(1, 100)
    return random.randint(settings.enqueue_lower_bound, settings.enqueue_upper_bound)


def get_deque_num():
    return random.randint(1, 100)
    return random.randint(settings.dequeue_lower_bound, settings.dequeue_upper_bound)


def enqueue(num: int):
    Counter.increase(num)
    TotalQueryCount.increase(num)
    AvgExecutionTime.add_time(random.randint(1, 100), TotalQueryCount.get_count())
    execution_time = random.randint(settings.execute_lower_bound, settings.execute_upper_bound)
    AvgExecutionTime.add_time(execution_time, TotalQueryCount.get_count())


def dequeue(num: int):
    Counter.decrease(num)
@@ -54,6 +60,8 @@ async def mock_behavior_task(*args, **kwargs):
    else:
        print("no deque")

    await asyncio.sleep(settings.job_interval)


class MockBehaviorBackgroundClass:
    def __init__(self, *args, **kwargs) -> None:
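The services above read several fields from `core.config`, which is not part of this diff. A plausible sketch of those settings, with assumed defaults (the old hard-coded `randint(1, 100)` suggests the bounds; the "GB" print suggests a 1 GiB chunk):

```python
# Hypothetical core/config.py sketch; field names come from the code above, values are guesses.
from functools import lru_cache
from pydantic import BaseSettings  # pydantic v1 style; on v2 this moves to pydantic-settings


class Settings(BaseSettings):
    # services/full.py
    full_cpu_process_count: int = 4      # busy-loop processes per /full/cpu request
    full_ram_byte: int = 1024 ** 3       # allocation chunk size (assumed 1 GiB)
    # services/job.py
    enqueue_lower_bound: int = 1
    enqueue_upper_bound: int = 100
    dequeue_lower_bound: int = 1
    dequeue_upper_bound: int = 100
    execute_lower_bound: int = 1
    execute_upper_bound: int = 100
    job_interval: float = 1.0            # seconds between mock enqueue/dequeue rounds


@lru_cache()
def get_settings() -> Settings:
    return Settings()
```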
Empty file removed consumer/src/services/ram.py
Empty file.
