diff --git a/.github/workflows/server-integration.yml b/.github/workflows/server-integration.yml
new file mode 100644
index 0000000..be986b2
--- /dev/null
+++ b/.github/workflows/server-integration.yml
@@ -0,0 +1,49 @@
+name: Server Integration Test
+on:
+  push:
+    branches:
+      - 'main'
+      - 'cp2-server*'
+  pull_request:
+    branches:
+      - 'main'
+env:
+  CARGO_TERM_COLOR: always
+
+jobs:
+  build:
+    defaults:
+      run:
+        shell: bash
+        working-directory: ./server
+    runs-on: ubuntu-latest
+    env:
+      TEST_ROOT: ${{ github.workspace }}/server/tests
+      SERVER_ROOT: ${{ github.workspace }}/server
+    steps:
+      - uses: actions/checkout@v4
+      - name: Setup BATS testing framework
+        uses: mig4/setup-bats@v1.2.0
+      - name: Run mock S3 server
+        run: docker run -p 6333:80 -v ${{ github.workspace }}/server/tests/test_s3_files:/usr/share/nginx/html -d nginx
+      - name: Setup redis
+        uses: shogo82148/actions-setup-redis@v1
+        with:
+          auto-start: false
+      - name: Start redis node 1
+        run: redis-server ${{ github.workspace }}/server/redis.conf --port 6379 --cluster-config-file node1.conf &
+      - name: Start redis node 2
+        run: redis-server ${{ github.workspace }}/server/redis.conf --port 6380 --cluster-config-file node2.conf &
+      - name: Start redis node 3
+        run: redis-server ${{ github.workspace }}/server/redis.conf --port 6381 --cluster-config-file node3.conf &
+      - name: Create redis cluster
+        run: redis-cli --cluster create localhost:6379 localhost:6380 localhost:6381 --cluster-replicas 0 --cluster-yes
+      - name: Build
+        run: cargo build --verbose
+      - name: Run
+        run: |
+          REDIS_PORT=6379 cargo run &
+          REDIS_PORT=6380 cargo run &
+          REDIS_PORT=6381 cargo run &
+      - name: Test get file
+        run: bats ${{ github.workspace }}/server/tests/server-integration.bats
diff --git a/.gitignore b/.gitignore
index 88a0133..5cfe5ae 100644
--- a/.gitignore
+++ b/.gitignore
@@ -19,4 +19,5 @@ Cargo.lock
 
 # local parquet file location for client
 client/parquet_files/
-bench_files
\ No newline at end of file
+bench_files
+.vscode
diff --git a/Cargo.toml b/Cargo.toml
index e6187e9..036295e 100644
--- a/Cargo.toml
+++ b/Cargo.toml
@@ -1,8 +1,7 @@
-[package]
-name = "cache2"
-version = "0.1.0"
-edition = "2021"
+[workspace]
+members = [
+    "server",
+    "client"
+]
 
-# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
-
-[dependencies]
+resolver = "2"
diff --git a/README.md b/README.md
index 0578e9b..dd36bb2 100644
--- a/README.md
+++ b/README.md
@@ -1,4 +1,93 @@
 # 15721-s24-cache2
 
 15-721 Spring 2024 - Cache #2
-To run benchmark, simple run bench.sh
\ No newline at end of file
+
+# LRU Cache Server
+
+This server implements a Least Recently Used (LRU) caching mechanism, providing a simple interface for fetching files from a simulated S3 storage and managing them within an LRU cache. The server is built using Rust and the Rocket framework.
+
+## Features
+
+- **Health Check**: Verify the server's health.
+- **Fetch File**: Retrieve files, either served from the cache or fetched from "S3" and then cached.
+- **Cache Stats**: Get statistics about the current state of the cache.
+- **Set Cache Size**: Adjust the maximum size of the cache dynamically.
+
+## Getting Started
+
+### Prerequisites
+
+- Rust and Cargo (latest stable version recommended)
+- Rocket Framework
+- Docker
+
+### Installation
+
+1. Clone the repository:
+   ```sh
+   git clone git@github.com:cmu-db/15721-s24-cache2.git
+   cd 15721-s24-cache2/server
+   ```
+
+2. Build the project:
+   ```sh
+   docker build -t istziio .
+   ```
+
+3. Run the server:
+   ```sh
+   docker compose up -d
+   ```
+
+> [!IMPORTANT]
+> While under development, the server cluster can be accessed ONLY from within its Docker network. The client must join the same Docker bridge network for the redirection to work correctly.
+
+### Example
+
+```sh
+$ docker exec -it server-servernode_1-1 /bin/bash
+root@node1:/data> apt-get update && apt-get install curl -y
+root@node1:/data> curl -L http://node2:8000/s3/test1.txt # make sure the -L flag is set so curl auto-follows the redirect to the correct node
+```
+
+
+## Usage
+
+### Health Check
+
+- **Endpoint**: `GET /`
+- **Description**: Checks if the server is running.
+- **CURL Command**:
+  ```sh
+  curl http://localhost:8000/
+  ```
+
+### Fetch File
+
+- **Endpoint**: `GET /s3/<file_name>`
+- **Description**: Retrieves a file from the cache, or fetches it from the simulated S3 storage if not present in the cache. An error is returned if the file does not exist.
+- **CURL Command**:
+  ```sh
+  curl http://localhost:8000/s3/<file_name>
+  ```
+
+### Cache Stats
+
+- **Endpoint**: `GET /stats`
+- **Description**: Returns statistics about the cache, such as current size, maximum size, and number of entries.
+- **CURL Command**:
+  ```sh
+  curl http://localhost:8000/stats
+  ```
+
+### Set Cache Size
+
+- **Endpoint**: `POST /size/<new_size>`
+- **Description**: Adjusts the maximum size of the cache.
+- **CURL Command**:
+  ```sh
+  curl -X POST http://localhost:8000/size/<new_size>
+  ```
+
+## Benchmark
+
+To run the benchmark, simply run `bench.sh`
\ No newline at end of file
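For a quick end-to-end check of the endpoints documented above, the sequence below should work against a running cluster. This is a sketch: it assumes the Docker setup from the Installation section is up, port 8000 is published as in `compose.yaml`, and `test1.txt` exists in the mock S3 volume (the `/size/<new_size>` route is currently commented out in `main.rs`, so it is omitted here).

```sh
curl http://localhost:8000/                 # health check, prints "Healthy"
curl -L http://localhost:8000/s3/test1.txt  # -L follows any cross-node redirect
curl http://localhost:8000/stats            # per-shard cache statistics
```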
diff --git a/client/src/kv_store.rs b/client/src/kv_store.rs
index 185bfaa..da5cfb0 100644
--- a/client/src/kv_store.rs
+++ b/client/src/kv_store.rs
@@ -1,11 +1,11 @@
 // In src/kv_store.rs
 
-use tokio::net::TcpStream;
-use tokio::io::{self, AsyncWriteExt, BufReader};
-use tokio::io::AsyncBufReadExt;
-use std::sync::Mutex;
 use anyhow::Result;
 use log::info;
+use std::sync::Mutex;
+use tokio::io::AsyncBufReadExt;
+use tokio::io::{self, AsyncWriteExt, BufReader};
+use tokio::net::TcpStream;
 
 pub struct KVStore {
     address: String,
     port: u16,
@@ -43,7 +43,7 @@ impl KVStore {
         if let Some(stream) = conn.as_mut() {
             stream.write_all(message.as_bytes()).await?;
             stream.flush().await?;
-            
+
             // Use a loop to read lines from the server.
             let mut response = String::new();
             let mut reader = BufReader::new(stream);
diff --git a/server/.gitignore b/server/.gitignore
index ed768f3..a454471 100644
--- a/server/.gitignore
+++ b/server/.gitignore
@@ -1,2 +1,5 @@
 /target/
-Cargo.lock
\ No newline at end of file
+Cargo.lock
+cache
+output.log
+*.swp
\ No newline at end of file
diff --git a/server/Cargo.toml b/server/Cargo.toml
index deecce0..ba4eb48 100644
--- a/server/Cargo.toml
+++ b/server/Cargo.toml
@@ -1,12 +1,24 @@
 [package]
-name = "echo_server"
+name = "istziio_server_node"
 version = "0.1.0"
 edition = "2018"
 
 # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
+[lib]
+path = "src/lib.rs"
+
+[[bin]]
+name = "istziio_server_node"
+path = "src/main.rs"
 
 [dependencies]
 tokio = { version = "1", features = ["full"] }
 log = "0.4"
 env_logger = "0.11.1"
+fern = "0.5"
 clap = "3"
+rocket = { version = "0.5.0", features = ["json"] }
+redis = { version = "0.24.0", features = ["cluster"] }
+reqwest = "0.11"
+chrono = "0.4"
+url = "2.5"
\ No newline at end of file
diff --git a/server/Dockerfile b/server/Dockerfile
new file mode 100644
index 0000000..1d2e116
--- /dev/null
+++ b/server/Dockerfile
@@ -0,0 +1,30 @@
+FROM rust:1.76.0-bookworm AS chef
+# We only pay the installation cost once,
+# it will be cached from the second build onwards
+RUN cargo install cargo-chef
+
+WORKDIR app
+
+FROM chef AS planner
+COPY . .
+RUN cargo chef prepare --recipe-path recipe.json
+
+FROM chef AS builder
+COPY --from=planner /app/recipe.json recipe.json
+# Build dependencies - this is the caching Docker layer!
+RUN cargo chef cook --release --recipe-path recipe.json
+
+# Build application
+COPY . .
+ENV ROCKET_PROFILE=development
+RUN cargo install --path .
+
+# We do not need the Rust toolchain to run the binary!
+FROM redis:7.2.4
+RUN apt-get update && apt-get -y install curl
+COPY --from=builder /usr/local/cargo/bin/* /usr/local/bin
+COPY docker-entrypoint.sh redis.conf Rocket.toml /usr/local/bin/
+RUN chmod 755 /usr/local/bin/docker-entrypoint.sh
+
+CMD ["/usr/local/bin/docker-entrypoint.sh"]
+
diff --git a/server/Rocket.toml b/server/Rocket.toml
new file mode 100644
index 0000000..ece9dd0
--- /dev/null
+++ b/server/Rocket.toml
@@ -0,0 +1,29 @@
+[default]
+address = "0.0.0.0"
+port = 8000
+workers = 2
+keep_alive = 5
+read_timeout = 5
+write_timeout = 5
+log = "normal"
+limits = { forms = 32768 }
+
+[staging]
+address = "0.0.0.0"
+port = 8000
+workers = 2
+keep_alive = 5
+read_timeout = 5
+write_timeout = 5
+log = "normal"
+limits = { forms = 32768 }
+
+[release]
+address = "0.0.0.0"
+port = 8000
+workers = 2
+keep_alive = 5
+read_timeout = 5
+write_timeout = 5
+log = "critical"
+limits = { forms = 32768 }
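Rocket 0.5 selects among the profiles above via the `ROCKET_PROFILE` environment variable (the Dockerfile pins `ROCKET_PROFILE=development`, and `compose.yaml` points `ROCKET_CONFIG` at this file). A hedged sketch of overriding the profile for a direct local run, assuming the binary was installed with `cargo install --path .`:

```sh
# Run one node with the release profile from this config file.
ROCKET_CONFIG=server/Rocket.toml ROCKET_PROFILE=release istziio_server_node
```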
diff --git a/server/compose.yaml b/server/compose.yaml
new file mode 100644
index 0000000..32851c9
--- /dev/null
+++ b/server/compose.yaml
@@ -0,0 +1,74 @@
+services:
+  servernode_1:
+    hostname: node1
+    image: istziio
+    build: .
+    ports:
+      - "8000:8000"
+      - "6379:6379"
+      - "16379:16379"
+    networks:
+      - cluster-network
+    environment:
+      - REDIS_PORT=6379
+      - S3_ENDPOINT=http://mocks3
+      - ROCKET_CONFIG=/usr/local/bin/Rocket.toml
+      - ROCKET_ENV=development
+
+  servernode_2:
+    hostname: node2
+    image: istziio
+    build: .
+    ports:
+      - "8001:8000"
+      - "6380:6379"
+      - "16380:16379"
+    networks:
+      - cluster-network
+    environment:
+      - REDIS_PORT=6379
+      - S3_ENDPOINT=http://mocks3
+      - ROCKET_CONFIG=/usr/local/bin/Rocket.toml
+      - ROCKET_ENV=development
+
+  servernode_3:
+    hostname: node3
+    image: istziio
+    build: .
+    ports:
+      - "8002:8000"
+      - "6381:6379"
+      - "16381:16379"
+    networks:
+      - cluster-network
+    environment:
+      - REDIS_PORT=6379
+      - S3_ENDPOINT=http://mocks3
+      - ROCKET_CONFIG=/usr/local/bin/Rocket.toml
+      - ROCKET_ENV=development
+
+  clustersetup:
+    image: redis
+    depends_on:
+      - servernode_1
+      - servernode_2
+      - servernode_3
+    restart: "no"
+    entrypoint: [ "bash", "-c", "yes \"yes\" | redis-cli --cluster create node1:6379 node2:6379 node3:6379 --cluster-replicas 0" ]
+    networks:
+      - cluster-network
+
+  mocks3:
+    image: nginx
+    ports:
+      - 8080:80
+    networks:
+      - cluster-network
+    volumes:
+      - type: bind
+        source: ./S3
+        target: /usr/share/nginx/html
+
+networks:
+  # The presence of these objects is sufficient to define them
+  cluster-network:
+    driver: bridge
diff --git a/server/debug.sh b/server/debug.sh
new file mode 100644
index 0000000..fc3e8e1
--- /dev/null
+++ b/server/debug.sh
@@ -0,0 +1,17 @@
+#!/bin/bash
+
+# Stop and remove containers, networks
+docker-compose down || { echo "docker-compose down failed"; exit 1; }
+
+# Build your Rust project
+cargo build || { echo "cargo build failed"; exit 1; }
+
+# Build your Docker image
+docker build -t istziio . || { echo "docker build failed"; exit 1; }
+
+# Start up your Docker containers in the background
+docker-compose up -d || { echo "docker-compose up -d failed"; exit 1; }
+
+# Execute a bash shell inside your specified container
+# Note: You might need to adjust the container name based on your docker-compose settings
+docker exec -it server-servernode_1-1 /bin/bash || { echo "docker exec failed"; exit 1; }
diff --git a/server/docker-entrypoint.sh b/server/docker-entrypoint.sh
new file mode 100644
index 0000000..6a85b56
--- /dev/null
+++ b/server/docker-entrypoint.sh
@@ -0,0 +1,4 @@
+#!/bin/sh
+echo "port ${REDIS_PORT}" >> /usr/local/bin/redis.conf
+redis-server /usr/local/bin/redis.conf &
+/usr/local/bin/istziio_server_node
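Once `docker compose up -d` has run, the one-shot `clustersetup` container should have joined the three Redis instances into a single cluster. A quick way to verify, sketched here with the container name Compose generates for this project (the same one `debug.sh` uses):

```sh
docker compose up -d
docker exec -it server-servernode_1-1 redis-cli -p 6379 cluster info    # expect cluster_state:ok
docker exec -it server-servernode_1-1 redis-cli -p 6379 cluster shards  # three masters, no replicas
```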
diff --git a/server/readme.md b/server/readme.md
new file mode 100644
index 0000000..c1c7c3a
--- /dev/null
+++ b/server/readme.md
@@ -0,0 +1,122 @@
+# ISTZIIO
+
+## Server
+To run a Redis server, use Docker to start a container:
+`docker run --name=redis-devel --publish=6379:6379 --hostname=redis --restart=on-failure --detach redis:latest`
+
+## Detailed Design
+### Client-Server Flow
+```mermaid
+sequenceDiagram
+    participant Client
+    participant RedisServer
+    participant RedisCluster
+    participant FileSystem
+
+    Client->>RedisServer: new(addrs)
+    RedisServer->>RedisCluster: Connect to Redis Cluster
+    RedisCluster->>RedisServer: Connection established
+
+    Client->>RedisServer: get_myid()
+    alt myid is not cached
+        RedisServer->>RedisCluster: Run 'redis-cli cluster myid'
+        RedisCluster->>RedisServer: Return myid
+    end
+
+    Client->>RedisServer: location_lookup(uid)
+    RedisServer->>RedisCluster: Determine which slot uid belongs to
+    RedisCluster->>RedisServer: Return node information
+    RedisServer->>Client: Return endpoint or None
+
+    Client->>RedisServer: get_file(uid)
+    alt File is cached
+        RedisServer->>FileSystem: Retrieve file from local cache
+        FileSystem->>RedisServer: Return file
+    else File is not cached
+        RedisServer->>RedisCluster: Fetch from S3
+        RedisCluster->>FileSystem: Store file in local cache
+        FileSystem->>RedisServer: Return file
+    end
+
+    Client->>RedisServer: set_file_cache_loc(uid, loc)
+    RedisServer->>RedisCluster: Set cache location in Redis
+    RedisCluster->>RedisServer: Acknowledge
+
+    Client->>RedisServer: remove_file(uid)
+    RedisServer->>RedisCluster: Remove cache location from Redis
+    RedisCluster->>RedisServer: Acknowledge
+
+    Client->>RedisServer: import_keyslot(keyslots)
+    loop For each keyslot
+        RedisServer->>RedisCluster: Set keyslot to NODE
+        RedisCluster->>RedisServer: Acknowledge
+    end
+
+    Client->>RedisServer: migrate_keyslot_to(keyslots, node_id)
+    loop For each keyslot
+        RedisServer->>RedisCluster: Get keys in slot
+        RedisCluster->>RedisServer: Return keys
+        RedisServer->>RedisCluster: Delete keys
+        RedisCluster->>RedisServer: Acknowledge
+        RedisServer->>RedisCluster: Set keyslot to new NODE
+        RedisCluster->>RedisServer: Acknowledge
+    end
+
+    Client->>RedisServer: yield_keyslots(p)
+    RedisServer->>RedisCluster: Get shard info
+    RedisCluster->>RedisServer: Return shard info
+    RedisServer->>Client: Return keyslots to be migrated
+```
+
+### Dive inside DiskCache
+```mermaid
+flowchart TB
+    start(Get File) -->|Convert uid to string| lockCache(Lock Cache)
+    lockCache -->|Location Lookup| checkRedirect{Is Redirect Needed?}
+    checkRedirect -->|Yes| redirect[Redirect to S3 URL]
+    checkRedirect -->|No| checkCache{Is File in Cache?}
+    checkCache -->|Yes| fileInCache[Use Cached File]
+    checkCache -->|No| fetchS3[Fetch File from S3]
+    fetchS3 -->|Fetch Success| setCacheLoc[Set File Cache Location]
+    fetchS3 -->|Fetch Fail| notFound[Return 'Not Found']
+    setCacheLoc -->|Set Location Success| updateAccess[Update Access Order]
+    updateAccess -->|Update Complete| openFile[Open File from Cache]
+
+    %% Detail of fetching file from S3 and ensuring capacity
+    subgraph fetchFromS3[ ]
+        fetchS3 -->|Fetch File| ensureCap[Ensure Capacity]
+        ensureCap -->|Ensure Complete| createFile[Create File in Cache]
+        createFile -->|File Created| setSize[Set File Size]
+        setSize --> setCacheLoc
+    end
+
+    %% Detail of ensuring capacity
+    subgraph ensureCapacity[ ]
+        ensureCap -->|Check Capacity| evictionCheck{Is Eviction Needed?}
+        evictionCheck -->|No| createFile
+        evictionCheck -->|Yes| evictFile[Evict Least Used File]
+        evictFile --> removeMeta[Remove File From Metadata]
+        removeMeta -->|Continue Checking| evictionCheck
+    end
+
+    %% Detail of updating access order
+    subgraph updateOrder[ ]
+        updateAccess --> removeOld[Remove Old Access Record]
+        removeOld --> addNew[Add New Access Record]
+        addNew --> openFile
+    end
+```
+
+### Metadata Migration
+```mermaid
+flowchart TD
+    A[Begin Key Migration Process] --> B[Determine Key Slots to Migrate Using yield_keyslots]
+    B --> C[Identify Target Nodes for Migration]
+    C --> D{For Each Key Slot}
+    D --> E[Use migrate_keyslot_to to Migrate Key Slot to Target Node]
+    E --> F[Target Node Receives Key Slot]
+    F --> G[Use import_keyslot on Target Node to Update Ownership]
+    G --> H{More Key Slots?}
+    H -->|Yes| D
+    H -->|No| I[Migration Process Complete]
+```
\ No newline at end of file
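The migration flow in the diagrams above boils down to a handful of Redis Cluster commands, which `redis.rs` wraps in `yield_keyslots`, `migrate_keyslot_to`, and `import_keyslot`. A sketch of the raw commands, using a placeholder slot number and node ID:

```sh
redis-cli -c -p 6379 cluster countkeysinslot 1234        # lightly loaded slots are migrated first
redis-cli -c -p 6379 cluster getkeysinslot 1234 10000    # keys whose cache entries will be dropped
redis-cli -c -p 6379 cluster setslot 1234 node <target-node-id>   # hand the slot over
```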
diff --git a/server/redis.conf b/server/redis.conf
new file mode 100644
index 0000000..1d78381
--- /dev/null
+++ b/server/redis.conf
@@ -0,0 +1,3 @@
+cluster-enabled yes
+cluster-node-timeout 5000
+appendonly yes
diff --git a/server/src/cache.rs b/server/src/cache.rs
new file mode 100644
index 0000000..adb8b1f
--- /dev/null
+++ b/server/src/cache.rs
@@ -0,0 +1,279 @@
+// cache.rs
+use chrono::{self, Utc};
+use log::{debug, error, info};
+use rocket::{fs::NamedFile, response::Redirect};
+use std::collections::{HashMap, VecDeque};
+use std::fs;
+use std::io::{ErrorKind, Result as IoResult, Write};
+use std::net::IpAddr;
+use std::path::Path;
+use std::path::PathBuf;
+use std::sync::Arc;
+use tokio::sync::{Mutex, RwLock, RwLockReadGuard};
+use tokio::time::{timeout, Duration};
+use url::Url;
+
+use crate::redis::RedisServer;
+use crate::util::hash;
+
+// Constants
+const SHARD_COUNT: u64 = 3;
+pub const PORT_OFFSET_TO_WEB_SERVER: u16 = 20000;
+// Cache Structures -----------------------------------------------------------
+
+pub struct ConcurrentDiskCache {
+    cache_dir: PathBuf,
+    max_size: u64,
+    s3_endpoint: String,
+    shards: Vec<Arc<Mutex<DiskCache>>>,
+    pub redis: Arc<RwLock<RedisServer>>,
+}
+
+pub struct DiskCache {
+    cache_dir: PathBuf,
+    max_size: u64,
+    current_size: u64,
+    s3_endpoint: String,
+    access_order: VecDeque<String>,
+}
+
+// DiskCache Implementation ---------------------------------------------------
+
+impl DiskCache {
+    pub fn new(cache_dir: PathBuf, max_size: u64, s3_endpoint: String) -> Arc<Mutex<Self>> {
+        let current_size = 0; // Start with an empty cache for simplicity
+        Arc::new(Mutex::new(Self {
+            cache_dir,
+            max_size,
+            current_size,
+            s3_endpoint,
+            access_order: VecDeque::new(),
+        }))
+    }
+
+    pub async fn get_file(
+        cache: Arc<Mutex<Self>>,
+        uid: PathBuf,
+        redis_read: &RwLockReadGuard<'_, RedisServer>,
+    ) -> Result<NamedFile, Redirect> {
+        let uid_str = uid.into_os_string().into_string().unwrap();
+        let file_name: PathBuf;
+        let mut cache = cache.lock().await;
+        let redirect = redis_read.location_lookup(uid_str.clone()).await;
+        if let Some((x, p)) = redirect {
+            let mut url = Url::parse("http://localhost").unwrap();
+            let address: IpAddr = x.parse().unwrap();
+            if address.is_loopback() {
+                url.set_host(Some("localhost")).unwrap();
+            } else {
+                url.set_ip_host(address).unwrap();
+            }
+            url.set_port(Some(p + PORT_OFFSET_TO_WEB_SERVER)).unwrap();
+            url.set_path(&format!("s3/{}", &uid_str)[..]);
+            debug!("tell client to redirect to {}", url.to_string());
+            return Err(Redirect::to(url.to_string()));
+        }
+        if let Some(redis_res) = redis_read.get_file(uid_str.clone()).await {
+            debug!("{} found in cache", &uid_str);
+            file_name = redis_res;
+        } else {
+            match cache.get_s3_file_to_cache(&uid_str, &redis_read).await {
+                Ok(cache_file_name) => {
+                    debug!("{} fetched from S3", &uid_str);
+                    file_name = cache_file_name;
+                    let _ = redis_read
+                        .set_file_cache_loc(uid_str.clone(), file_name.clone())
+                        .await;
+                }
+                Err(e) => {
+                    info!("{}", e.to_string());
+                    return Err(Redirect::to("/not_found_on_this_disk"));
+                }
+            }
+        }
+        let file_name_str = file_name.to_str().unwrap_or_default().to_string();
+        debug!("get_file: {}", file_name_str);
+        cache.update_access(&file_name_str);
+        let cache_file_path = cache.cache_dir.join(file_name);
+        return NamedFile::open(cache_file_path)
+            .await
+            .map_err(|_| Redirect::to("/not_found_on_this_disk"));
+    }
+
+    async fn get_s3_file_to_cache(
+        &mut self,
+        s3_file_name: &str,
+        redis_read: &RwLockReadGuard<'_, RedisServer>,
+    ) -> IoResult<PathBuf> {
+        // Load from "S3", simulate adding to cache
+        let s3_file_path = Path::new(&self.s3_endpoint).join(s3_file_name);
+        let resp = reqwest::get(s3_file_path.into_os_string().into_string().unwrap())
+            .await
+            .map_err(|e| std::io::Error::new(std::io::ErrorKind::Other, e))?;
+
+        // Check if the file was not found in S3
+        if resp.status() == reqwest::StatusCode::NOT_FOUND {
+            return Err(std::io::Error::new(
+                ErrorKind::NotFound,
+                "File not found in S3",
+            ));
+        }
+
+        // Ensure the response status is successful, otherwise return an error
+        if !resp.status().is_success() {
+            return Err(std::io::Error::new(
+                ErrorKind::Other,
+                format!(
+                    "Failed to fetch file from S3 with status: {}",
+                    resp.status()
+                ),
+            ));
+        }
+        let file = resp.bytes().await.unwrap(); // [TODO] error handling
+        self.ensure_capacity(redis_read).await;
+        fs::File::create(Path::new(&self.cache_dir).join(s3_file_name))?.write_all(&file[..])?;
+        let file_size = 1; // Assume each file has size 1 for simplicity
+        self.current_size += file_size;
+        self.access_order.push_back(String::from(s3_file_name));
+        return Ok(Path::new("").join(s3_file_name));
+    }
+
+    async fn ensure_capacity(&mut self, redis_read: &RwLockReadGuard<'_, RedisServer>) {
+        // Trigger eviction if the cache is full or over its capacity
+        while self.current_size >= self.max_size && !self.access_order.is_empty() {
+            if let Some(evicted_file_name) = self.access_order.pop_front() {
+                let evicted_path = self.cache_dir.join(&evicted_file_name);
+                match fs::metadata(&evicted_path) {
+                    Ok(metadata) => {
+                        let _file_size = metadata.len();
+                        if let Ok(_) = fs::remove_file(&evicted_path) {
+                            // Ensure the cache size is reduced by the actual size of the evicted file
+                            self.current_size -= 1;
+                            let _ = redis_read.remove_file(evicted_file_name.clone()).await;
+
+                            info!("Evicted file: {}", evicted_file_name);
+                        } else {
+                            eprintln!("Failed to delete file: {}", evicted_path.display());
+                        }
+                    }
+                    Err(e) => eprintln!(
+                        "Failed to get metadata for file: {}. Error: {}",
+                        evicted_path.display(),
+                        e
+                    ),
+                }
+            }
+        }
+    }
+
+    // Update a file's position in the access order
+    fn update_access(&mut self, file_name: &String) {
+        self.access_order.retain(|x| x != file_name);
+        self.access_order.push_back(file_name.clone());
+    }
+}
+
+// ConcurrentDiskCache Implementation -----------------------------------------
+
+impl ConcurrentDiskCache {
+    pub fn new(
+        cache_dir: PathBuf,
+        max_size: u64,
+        s3_endpoint: String,
+        redis_addrs: Vec<String>,
+    ) -> Self {
+        let _ = std::fs::create_dir_all(cache_dir.clone());
+        let shard_max_size = max_size / SHARD_COUNT as u64;
+        let redis_server = RedisServer::new(redis_addrs).unwrap();
+        let redis = Arc::new(RwLock::new(redis_server));
+        let shards = (0..SHARD_COUNT)
+            .map(|_| DiskCache::new(cache_dir.clone(), shard_max_size, s3_endpoint.clone()))
+            .collect::<Vec<_>>();
+
+        Self {
+            cache_dir,
+            max_size,
+            s3_endpoint,
+            shards,
+            redis,
+        }
+    }
+
+    pub async fn get_file(&self, uid: PathBuf) -> Result<NamedFile, Redirect> {
+        let uid = uid.into_os_string().into_string().unwrap();
+        // Use read lock for read operations
+        let redis_read = self.redis.read().await; // Acquiring a read lock
+        if !redis_read.mapping_initialized {
+            drop(redis_read); // Drop read lock before acquiring write lock
+
+            let mut redis_write = self.redis.write().await; // Acquiring a write lock
+            if let Err(e) = redis_write.update_slot_to_node_mapping().await {
+                eprintln!("Error updating slot-to-node mapping: {:?}", e);
+                return Err(Redirect::to("/error_updating_mapping"));
+            }
+            redis_write.get_myid();
+            redis_write.mapping_initialized = true;
+            drop(redis_write);
+            debug!("Initialization complete, dropped Redis write lock");
+        } else {
+            drop(redis_read);
+        }
+        let redis_read = self.redis.read().await;
+        let shard_index = hash(&uid) % self.shards.len(); // Hash UID to select a shard
+        let shard = &self.shards[shard_index];
+        // Debug message showing shard selection
+        debug!("Selected shard index: {} for uid: {}", shard_index, &uid);
+        let result = DiskCache::get_file(shard.clone(), uid.into(), &redis_read).await;
+        drop(redis_read);
+        debug!("{}", self.get_stats().await);
+        result
+    }
+
+    pub async fn get_stats(&self) -> String {
+        let current_time = chrono::Utc::now();
+        let mut stats_summary = format!("Cache Stats at {}\n", current_time.to_rfc3339());
+        stats_summary.push_str(&format!(
+            "{:<15} | {:<12} | {:<12} | {:<10} | {}\n",
+            "Shard", "Curr Size", "% Used", "Total Files", "Files"
+        ));
+        stats_summary.push_str(&"-".repeat(80));
+        stats_summary.push('\n');
+
+        for (index, shard) in self.shards.iter().enumerate() {
+            match tokio::time::timeout(std::time::Duration::from_secs(5), shard.lock()).await {
+                Ok(shard_guard) => {
+                    let files_in_shard: Vec<_> = shard_guard.access_order.iter().collect();
+                    let total_files = files_in_shard.len();
+                    let used_capacity_pct =
+                        (shard_guard.current_size as f64 / shard_guard.max_size as f64) * 100.0;
+                    stats_summary.push_str(&format!(
+                        "{:<15} | {:<12} | {:<12.2} | {:<10} | {:?}\n",
+                        format!("Shard {}", index),
+                        shard_guard.current_size,
+                        used_capacity_pct,
+                        total_files,
+                        files_in_shard
+                    ));
+                }
+                Err(_) => {
+                    stats_summary
+                        .push_str(&format!("Timeout while trying to lock shard {}\n", index));
+                }
+            }
+        }
+
+        stats_summary
+    }
+    /*
+    pub async fn set_max_size(cache: Arc<Mutex<DiskCache>>, new_size: u64) {
+        let mut cache = cache.lock().await;
+        cache.max_size = new_size;
+        // Optionally trigger capacity enforcement immediately
+        Self::ensure_capacity(&mut *cache).await;
+    }
+
+    pub async fn scale_out(cache: Arc<Mutex<DiskCache>>) {
+        let mut cache = cache.lock().await;
+        let to_move = cache.redis.yield_keyslots(0.01).await;
+        debug!("These slots are to move: {:?}", to_move);
+    }
+    */
+}
diff --git a/server/src/client_connection.rs b/server/src/client_connection.rs
deleted file mode 100644
index 546a171..0000000
--- a/server/src/client_connection.rs
+++ /dev/null
@@ -1,54 +0,0 @@
-// In src/client_connection.rs
-
-use crate::db::Database;
-use log::info;
-use tokio::net::TcpStream;
-use tokio::io::{self, AsyncReadExt, AsyncWriteExt};
-use std::sync::Arc;
-
-pub struct ClientConnection {
-    socket: TcpStream,
-    db: Arc<Database>,
-}
-
-impl ClientConnection {
-    pub fn new(socket: TcpStream, db: Arc<Database>) -> Self {
-        Self { socket, db }
-    }
-
-    pub async fn handle_client(&mut self) -> io::Result<()> {
-        let mut buffer = [0; 1024];
-        let peer = self.socket.peer_addr()?;
-        info!("Client connected from {}", peer);
-
-        loop {
-            let nbytes = self.socket.read(&mut buffer).await?;
-            if nbytes == 0 { break; }
-
-            let request = String::from_utf8_lossy(&buffer[..nbytes]);
-            let parts: Vec<&str> = request.trim().splitn(3, ' ').collect();
-            match parts.as_slice() {
-                ["get", key] => {
-                    if let Some(value) = self.db.get(key) {
-                        let response = format!("{}\n", value);
-                        self.socket.write_all(response.as_bytes()).await?;
-                    } else {
-                        self.socket.write_all(b"not found\n").await?;
-                    }
-                }
-                ["put", key, value] => {
-                    info!("put {}, {}", key, value);
-                    self.db.put(key.to_string(), value.to_string());
-                    self.socket.write_all(b"ok\n").await?;
-                    info!("ack back to client");
-                }
-                _ => {
-                    self.socket.write_all(b"error: invalid command\n").await?;
-                }
-            }
-        }
-
-        Ok(())
-    }
-}
-
diff --git a/server/src/db.rs b/server/src/db.rs
deleted file mode 100644
index 3b63c8b..0000000
--- a/server/src/db.rs
+++ /dev/null
@@ -1,26 +0,0 @@
-// In src/db.rs
-
-use std::collections::HashMap;
-use std::sync::Mutex;
-
-pub struct Database {
-    store: Mutex<HashMap<String, String>>,
-}
-
-impl Database {
-    pub fn new() -> Self {
-        Database {
-            store: Mutex::new(HashMap::new()),
-        }
-    }
-
-    pub fn get(&self, key: &str) -> Option<String> {
-        let store = self.store.lock().unwrap();
-        store.get(key).cloned()
-    }
-
-    pub fn put(&self, key: String, value: String) {
-        let mut store = self.store.lock().unwrap();
-        store.insert(key, value);
-    }
-}
diff --git a/server/src/lib.rs b/server/src/lib.rs
new file mode 100644
index 0000000..54e7329
--- /dev/null
+++ b/server/src/lib.rs
@@ -0,0 +1,3 @@
+pub mod cache;
+pub mod redis;
+pub mod util;
diff --git a/server/src/main.rs b/server/src/main.rs
index b13fd9c..18c5c13 100644
--- a/server/src/main.rs
+++ b/server/src/main.rs
@@ -1,52 +1,142 @@
-// In src/main.rs
+#[macro_use]
+extern crate rocket;
+extern crate fern;
+#[macro_use]
+extern crate log;
 
+use rocket::{fs::NamedFile, response::Redirect, serde::json::Json, State};
+use std::path::PathBuf;
 use std::sync::Arc;
-use tokio::{io, net::TcpListener};
-use log::{error, info, LevelFilter};
-use env_logger::Builder;
-use clap::{App, Arg};
-
-mod client_connection;
-mod db;
-
-use client_connection::ClientConnection;
-use db::Database;
-
-#[tokio::main]
-async fn main() -> io::Result<()> {
-    let matches = App::new("KVStore Server")
-        .arg(Arg::new("port")
-            .long("port")
-            .takes_value(true)
-            .default_value("7878")
-            .help("Sets the server port"))
-        .arg(Arg::new("address")
-            .long("address")
-            .takes_value(true)
-            .default_value("127.0.0.1")
-            .help("Sets the IP address"))
-        .get_matches();
-
-    let port = matches.value_of("port").unwrap().parse::<u16>().expect("Invalid port number");
-    let address = matches.value_of("address").unwrap();
-
-    Builder::new().filter_level(LevelFilter::Info).init();
-
-    let db = Arc::new(Database::new());
-    let listener = TcpListener::bind(format!("{}:{}", address, port)).await?;
-
-    info!("Server running on {}:{}", address, port);
-
-    loop {
-        let (socket, _) = listener.accept().await?;
-        let db_clone: Arc<Database> = db.clone();
-        tokio::spawn(async move {
-            let mut client = ClientConnection::new(socket, db_clone);
-            if let Err(e) = client.handle_client().await {
-                error!("Failed to handle client: {}", e);
-            }
-        });
-    }
+use tokio::sync::Mutex;
+
+use istziio_server_node::cache::{self, ConcurrentDiskCache};
+use istziio_server_node::util::KeyslotId;
+
+fn setup_logger() -> Result<(), fern::InitError> {
+    fern::Dispatch::new()
+        .format(|out, message, record| {
+            out.finish(format_args!(
+                "[{}][{}] {}",
+                record.target(),
+                record.level(),
+                message
+            ))
+        })
+        .level(log::LevelFilter::Debug)
+        .chain(std::io::stdout())
+        .chain(fern::log_file("output.log")?)
+        .apply()?;
+    Ok(())
+}
+
+#[get("/")]
+fn health_check() -> &'static str {
+    "Healthy\n"
+}
+
+#[get("/stats")]
+async fn cache_stats(cache: &State<Arc<ConcurrentDiskCache>>) -> String {
+    cache.get_stats().await
+}
+
+#[get("/s3/<uid>")]
+async fn get_file(
+    uid: PathBuf,
+    cache: &State<Arc<ConcurrentDiskCache>>,
+) -> Result<NamedFile, Redirect> {
+    cache.inner().get_file(uid).await
+}
+
+/*
+#[get("/scale-out")]
+async fn scale_out(cache: &State<Arc<Mutex<DiskCache>>>) -> &'static str {
+    DiskCache::scale_out(cache.inner().clone()).await;
+    "success"
 }
+
+#[get("/keyslots/yield/<p>")]
+async fn yield_keyslots(
+    p: f64,
+    cache_guard: &State<Arc<Mutex<ConcurrentDiskCache>>>,
+) -> Json<Vec<KeyslotId>> {
+    let cache_mutex = cache_guard.inner().clone();
+    let mut cache = cache_mutex.lock().await;
+    let keyslots = cache.redis.yield_keyslots(p).await;
+    Json(keyslots)
+}
+
+#[post(
+    "/keyslots/import",
+    format = "application/json",
+    data = "<keyslots_json>"
+)]
+async fn import_keyslots(
+    keyslots_json: Json<Vec<KeyslotId>>,
+    cache_guard: &State<Arc<Mutex<ConcurrentDiskCache>>>,
+) {
+    let cache_mutex = cache_guard.inner().clone();
+    let mut cache = cache_mutex.lock().await;
+    cache.redis.import_keyslot(keyslots_json.into_inner()).await;
+}
+
+#[post(
+    "/keyslots/migrate_to/<node_id>",
+    format = "application/json",
+    data = "<keyslots_json>"
+)]
+async fn migrate_keyslots_to(
+    keyslots_json: Json<Vec<KeyslotId>>,
+    node_id: String,
+    cache_guard: &State<Arc<Mutex<ConcurrentDiskCache>>>,
+) {
+    let cache_mutex = cache_guard.inner().clone();
+    let cache = cache_mutex.lock().await;
+    cache
+        .redis
+        .migrate_keyslot_to(keyslots_json.into_inner(), node_id)
+        .await;
+}
+
+#[post("/size/<new_size>")]
+async fn set_cache_size(new_size: u64, cache: &State<Arc<Mutex<DiskCache>>>) -> &'static str {
+    DiskCache::set_max_size(cache.inner().clone(), new_size).await;
+    "Cache size updated"
+}
+*/
+#[launch]
+fn rocket() -> _ {
+    let _ = setup_logger();
+    let _ = std::fs::create_dir_all("/data/cache");
+    let redis_port = std::env::var("REDIS_PORT")
+        .unwrap_or(String::from("6379"))
+        .parse::<u16>()
+        .unwrap();
+    let rocket_port = cache::PORT_OFFSET_TO_WEB_SERVER + redis_port;
+    let cache_dir = std::env::var("CACHE_DIR").unwrap_or(format!("./cache_{}", rocket_port));
+    let s3_endpoint = std::env::var("S3_ENDPOINT").unwrap_or(String::from("http://0.0.0.0:6333"));
+    let cache_manager = Arc::new(ConcurrentDiskCache::new(
+        PathBuf::from(cache_dir),
+        6,
+        s3_endpoint,
+        vec![format!("redis://0.0.0.0:{}", redis_port)],
+    )); // [TODO] make the args configurable from env
+    rocket::build()
+        .configure(rocket::Config::figment().merge(("port", rocket_port)))
+        .manage(cache_manager)
+        .mount(
+            "/",
+            routes![
+                health_check,
+                get_file,
+                cache_stats,
+                /*
                set_cache_size,
                scale_out,
                yield_keyslots,
                migrate_keyslots_to,
                import_keyslots
                */
+            ],
+        )
+}
diff --git a/server/src/redis.rs b/server/src/redis.rs
new file mode 100644
index 0000000..f2866b1
--- /dev/null
+++ b/server/src/redis.rs
@@ -0,0 +1,348 @@
+//redis.rs
+use log::debug;
+use redis::Commands;
+use std::{
+    cmp::Reverse,
+    collections::{BinaryHeap, HashMap},
+    path::PathBuf,
+};
+
+use crate::util::{FileUid, KeyslotId};
+
+#[derive(Eq)]
+struct RedisKeyslot {
+    /* This struct is used when the cluster scales out. During scale-out, some
+     * keyslots are chosen to be migrated to other nodes, and the number of keys
+     * stored in a keyslot implies the number of cached files that must be
+     * deleted and handed over. To reduce the loss of cached files during such a
+     * migration, we want to find the keyslots that contain the fewest keys to
+     * hand over.
+     */
+    pub id: KeyslotId,
+    pub key_cnt: i64,
+}
+
+impl Ord for RedisKeyslot {
+    fn cmp(&self, other: &Self) -> std::cmp::Ordering {
+        self.key_cnt.cmp(&other.key_cnt)
+    }
+}
+
+impl PartialOrd for RedisKeyslot {
+    fn partial_cmp(&self, other: &Self) -> Option<std::cmp::Ordering> {
+        Some(self.cmp(other))
+    }
+}
+
+impl PartialEq for RedisKeyslot {
+    fn eq(&self, other: &Self) -> bool {
+        self.key_cnt == other.key_cnt
+    }
+}
+
+#[derive(Debug, Clone)]
+pub struct NodeInfo {
+    pub node_id: String,
+    pub endpoint: String,
+    pub port: u16,
+}
+
+pub struct RedisServer {
+    pub client: redis::cluster::ClusterClient,
+    pub myid: String,
+    pub slot_to_node_mapping: HashMap<KeyslotId, NodeInfo>,
+    pub mapping_initialized: bool,
+}
+
+impl RedisServer {
+    pub fn new(addrs: Vec<String>) -> Result<Self, redis::RedisError> {
+        let client = redis::cluster::ClusterClient::new(addrs)?;
+        let server = RedisServer {
+            client,
+            myid: String::from(""),
+            slot_to_node_mapping: HashMap::new(),
+            mapping_initialized: false,
+        };
+        Ok(server)
+    }
+
+    async fn own_slots_from_shards_info(
+        &self,
+        shards_info: Vec<Vec<redis::Value>>,
+    ) -> Result<Vec<[KeyslotId; 2]>, String> {
+        let mut shard_iter = shards_info.iter();
+        let myid = &self.myid;
+        loop {
+            if let Some(shard_info) = shard_iter.next() {
+                if let redis::Value::Bulk(nodes_info) = &shard_info[3] {
+                    if let redis::Value::Bulk(fields) = &nodes_info[0] {
+                        let mut node_id = String::from("");
+                        if let redis::Value::Data(x) = &fields[1] {
+                            node_id = String::from_utf8(x.to_vec()).unwrap();
+                        }
+                        if node_id == *myid {
+                            if let redis::Value::Bulk(slot_range) = &shard_info[1] {
+                                let mut own_slot_ranges = Vec::new();
+                                for i in (0..slot_range.len()).step_by(2) {
+                                    if let (redis::Value::Int(low), redis::Value::Int(high)) =
+                                        (&slot_range[i], &slot_range[i + 1])
+                                    {
+                                        let low_id = *low as KeyslotId;
+                                        let high_id = *high as KeyslotId;
+                                        own_slot_ranges.push([low_id, high_id]);
+                                        debug!("this node has slot {} to {}", low_id, high_id);
+                                    }
+                                }
+                                return Ok(own_slot_ranges);
+                            }
+                        }
+                    }
+                }
+            } else {
+                debug!("This id is not found in the cluster");
+                return Err(String::from("This id is not found in the cluster"));
+            }
+        }
+    }
+
+    pub fn get_myid(&mut self) -> &String {
+        // self.myid cannot be determined at the instantiation moment because the cluster is formed
+        // via an external script running redis-cli command. This is a workaround to keep cluster
+        // id inside the struct.
+        let redis_port = std::env::var("REDIS_PORT")
+            .unwrap_or(String::from("6379"))
+            .parse::<u16>()
+            .unwrap();
+        if self.myid.len() == 0 {
+            let result = std::process::Command::new("redis-cli")
+                .arg("-c")
+                .arg("-p")
+                .arg(redis_port.to_string())
+                .arg("cluster")
+                .arg("myid")
+                .output()
+                .expect("redis command failed to start");
+            self.myid = String::from_utf8(result.stdout).unwrap();
+            self.myid = String::from(self.myid.trim());
+        }
+        &self.myid
+    }
+
+    // Function to update the slot-to-node mapping
+    pub async fn update_slot_to_node_mapping(&mut self) -> Result<(), ()> {
+        let mut conn = self.client.get_connection().unwrap();
+        let shards = redis::cmd("CLUSTER")
+            .arg("SHARDS")
+            .query::<Vec<Vec<redis::Value>>>(&mut conn)
+            .unwrap();
+        let mut new_mapping: HashMap<KeyslotId, NodeInfo> = HashMap::new();
+
+        for shard_info in shards {
+            if let [_, redis::Value::Bulk(slot_ranges), _, redis::Value::Bulk(nodes_info)] =
+                &shard_info[..]
+            {
+                if let Some(redis::Value::Bulk(node_info)) = nodes_info.first() {
+                    // Initialize variables to hold id and endpoint
+                    let mut node_id = String::new();
+                    let mut endpoint = String::new();
+                    let mut port: u16 = 0;
+
+                    // Iterate through the node_info array
+                    let mut iter = node_info.iter();
+                    while let Some(redis::Value::Data(key)) = iter.next() {
+                        if let Ok(key_str) = std::str::from_utf8(key) {
+                            debug!("key_str: {}", key_str);
+                            // Match the key to decide what to do with the value
+                            match key_str {
+                                "id" => {
+                                    if let Some(redis::Value::Data(value)) = iter.next() {
+                                        node_id = String::from_utf8(value.clone())
+                                            .expect("Invalid UTF-8 for node_id");
+                                        debug!("Node ID: {}", node_id);
+                                    }
+                                }
+                                "ip" => {
+                                    if let Some(redis::Value::Data(value)) = iter.next() {
+                                        endpoint = String::from_utf8(value.clone())
+                                            .expect("Invalid UTF-8 for endpoint");
+                                        debug!("Endpoint: {}", endpoint);
+                                    }
+                                }
+                                "port" => {
+                                    if let Some(redis::Value::Int(x)) = iter.next() {
+                                        port = *x as u16;
+                                        debug!("Port: {}", port);
+                                    }
+                                }
+                                _ => {
+                                    iter.next();
+                                } // Ignore other keys
+                            }
+                        }
+                    }
+
+                    // Check if we have both id and endpoint
+                    if !node_id.is_empty() && !endpoint.is_empty() {
+                        for slots in slot_ranges.chunks(2) {
+                            if let [redis::Value::Int(start), redis::Value::Int(end)] = slots {
+                                for slot in *start..=*end {
+                                    let info = NodeInfo {
+                                        node_id: node_id.clone(),
+                                        endpoint: endpoint.clone(),
+                                        port: port.clone(),
+                                    };
+                                    new_mapping.insert(slot as KeyslotId, info);
+                                }
+                            }
+                        }
+                    }
+                }
+            }
+        }
+
+        if new_mapping.is_empty() {
+            debug!("No slots were found for any nodes. The mapping might be incorrect.");
+            return Err(());
+        }
+
+        self.slot_to_node_mapping = new_mapping;
+        debug!(
+            "Updated slot-to-node mapping: {:?}",
+            self.slot_to_node_mapping
+        );
+        Ok(())
+    }
+
+    // Location lookup function that uses the updated mapping
+    pub async fn location_lookup(&self, uid: FileUid) -> Option<(String, u16)> {
+        let slot = self.which_slot(uid).await;
+        debug!("Looking up location for slot: {}", slot);
+
+        self.slot_to_node_mapping
+            .get(&slot)
+            .map(|node_info| {
+                if node_info.node_id == self.myid {
+                    debug!("Slot {} is local to this node", slot);
+                    None // If the slot is local, we do not need to redirect.
+                } else {
+                    debug!(
+                        "Redirecting slot {} to node ID {} at {}:{}",
+                        slot, node_info.node_id, node_info.endpoint, node_info.port
+                    );
+                    Some((node_info.endpoint.clone(), node_info.port.clone()))
+                }
+            })
+            .flatten()
+    }
+
+    pub async fn get_file(&self, uid: FileUid) -> Option<PathBuf> {
+        let mut conn = self.client.get_connection().unwrap();
+        conn.get(uid).map(|u: String| PathBuf::from(u)).ok()
+    }
+
+    pub async fn set_file_cache_loc(&self, uid: FileUid, loc: PathBuf) -> Result<(), ()> {
+        let mut conn = self.client.get_connection().unwrap();
+        let loc_str = loc.into_os_string().into_string().unwrap();
+        debug!("try to set key [{}], value [{}] in redis", &uid, &loc_str);
+        let _ = conn.set::<String, String, ()>(uid.clone(), loc_str); // [TODO] Error handling
+        Ok(())
+    }
+
+    pub async fn remove_file(&self, uid: FileUid) -> Result<(), ()> {
+        let mut conn = self.client.get_connection().unwrap();
+        debug!("remove key [{}] in redis", &uid);
+        let _ = conn.del::<String, ()>(uid); // [TODO] Error handling
+        Ok(())
+    }
+
+    async fn which_slot(&self, uid: FileUid) -> KeyslotId {
+        let mut conn = self.client.get_connection().unwrap();
+        let keyslot = redis::cmd("CLUSTER")
+            .arg("KEYSLOT")
+            .arg(uid)
+            .query::<KeyslotId>(&mut conn)
+            .unwrap();
+        keyslot
+    }
+
+    pub async fn import_keyslot(&self, keyslots: Vec<KeyslotId>) {
+        let mut conn = self.client.get_connection().unwrap();
+        for keyslot in keyslots.iter() {
+            let _ = std::process::Command::new("redis-cli") // TODO: error handling
+                .arg("-c")
+                .arg("cluster")
+                .arg("setslot")
+                .arg(keyslot.to_string())
+                .arg("NODE")
+                .arg(&self.myid)
+                .output()
+                .expect("redis command setslot failed to start");
+            if let Ok(_) = redis::cmd("CLUSTER") // TODO: error handling
+                .arg("SETSLOT")
+                .arg(keyslot)
+                .arg("NODE")
+                .arg(&self.myid)
+                .query::<()>(&mut conn)
+            {}
+        }
+    }
+
+    pub async fn migrate_keyslot_to(&self, keyslots: Vec<KeyslotId>, destination_node_id: String) {
+        let mut conn = self.client.get_connection().unwrap();
+        for keyslot in keyslots.iter() {
+            while let Ok(keys_to_remove) = redis::cmd("CLUSTER")
+                .arg("GETKEYSINSLOT")
+                .arg(keyslot)
+                .arg(10000)
+                .query::<Vec<String>>(&mut conn)
+            {
+                if keys_to_remove.len() == 0 {
+                    break;
+                }
+                let _ = redis::cmd("DEL").arg(keys_to_remove).query::<()>(&mut conn);
+            }
+            let _ = std::process::Command::new("redis-cli") // TODO: error handling
+                .arg("-c")
+                .arg("cluster")
+                .arg("setslot")
+                .arg(keyslot.to_string())
+                .arg("NODE")
+                .arg(destination_node_id.clone())
+                .output()
+                .expect("redis command setslot failed to start");
+        }
+    }
+
+    pub async fn yield_keyslots(&self, p: f64) -> Vec<KeyslotId> {
+        let mut conn = self.client.get_connection().unwrap();
+        let shards_info = redis::cmd("CLUSTER")
+            .arg("SHARDS")
+            .query::<Vec<Vec<redis::Value>>>(&mut conn)
+            .unwrap();
+        let mut slot_ranges = self.own_slots_from_shards_info(shards_info).await.unwrap();
+        let mut own_slot_cnt = 0;
+        let mut heap = BinaryHeap::new();
+        while let Some([low, high]) = slot_ranges.pop() {
+            own_slot_cnt += (high - low) + 1;
+            for keyslot_id in low..(high + 1) {
+                let key_cnt = redis::cmd("CLUSTER")
+                    .arg("COUNTKEYSINSLOT")
+                    .arg(keyslot_id)
+                    .query::<i64>(&mut conn)
+                    .unwrap();
+                heap.push(Reverse(RedisKeyslot {
+                    id: keyslot_id,
+                    key_cnt: key_cnt,
+                }));
+            }
+        }
+        let migrate_slot_cnt = (own_slot_cnt as f64 * p).floor() as i64;
+        let mut result = Vec::new();
+        let top_slot = &heap.peek().unwrap().0;
+        debug!(
+            "the least loaded key slot: {} ({} keys)",
+            top_slot.id, top_slot.key_cnt
+        );
+        debug!(
+            "{} of this node's {} slots = {} slots to migrate",
+            p, own_slot_cnt, migrate_slot_cnt
+        );
+        for _ in 0..migrate_slot_cnt {
+            if let Some(keyslot) = heap.pop() {
+                result.push(keyslot.0.id);
+            } else {
+                break;
+            }
+        }
+        debug!("These are the slots to be migrated: {:?}", result);
+        result
+    }
+}
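To see where the metadata for a given file lives, the same cluster commands `RedisServer` issues can be run by hand. A sketch against a node's Redis on port 6379; the key is the file uid, and the value (written by `set_file_cache_loc`) is the cached file name:

```sh
redis-cli -c -p 6379 cluster keyslot test1.txt   # which keyslot the uid hashes to
redis-cli -c -p 6379 get test1.txt               # cached file name, if present
redis-cli -c -p 6379 cluster myid                # id of the node answering
```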
diff --git a/server/src/util.rs b/server/src/util.rs
new file mode 100644
index 0000000..1b57cab
--- /dev/null
+++ b/server/src/util.rs
@@ -0,0 +1,15 @@
+// util.rs
+use std::{
+    collections::hash_map::DefaultHasher,
+    hash::{Hash, Hasher},
+};
+
+pub type FileUid = String;
+pub type KeyslotId = i16;
+
+/// Calculates a consistent hash for the given UID.
+pub fn hash(uid: &FileUid) -> usize {
+    let mut hasher = DefaultHasher::new();
+    uid.hash(&mut hasher);
+    hasher.finish() as usize
+}
diff --git a/server/tests/server-integration.bats b/server/tests/server-integration.bats
new file mode 100644
index 0000000..39ec5c3
--- /dev/null
+++ b/server/tests/server-integration.bats
@@ -0,0 +1,19 @@
+#!/usr/bin/env bats
+
+@test "healthy" {
+    curl -sSL -o tmp localhost:26379/
+    cat tmp
+    [ "$(cat tmp)" = "Healthy" ]
+    curl -sSL -o tmp localhost:26380/
+    cat tmp
+    [ "$(cat tmp)" = "Healthy" ]
+    curl -sSL -o tmp localhost:26381/
+    cat tmp
+    [ "$(cat tmp)" = "Healthy" ]
+}
+
+@test "get_file file correctness" {
+    curl -sSL -o tmp localhost:26379/s3/test1.txt
+    cat tmp
+    [ "$(cat tmp)" = "$(cat $TEST_ROOT/test_s3_files/test1.txt)" ]
+}
\ No newline at end of file
diff --git a/server/tests/setup.sh b/server/tests/setup.sh
new file mode 100644
index 0000000..6dd4b85
--- /dev/null
+++ b/server/tests/setup.sh
@@ -0,0 +1,9 @@
+SERVER_ROOT="${SERVER_ROOT:-"."}"
+redis-server $SERVER_ROOT/redis.conf --port 6379 --cluster-config-file node1.conf &
+redis-server $SERVER_ROOT/redis.conf --port 6380 --cluster-config-file node2.conf &
+redis-server $SERVER_ROOT/redis.conf --port 6381 --cluster-config-file node3.conf &
+sleep 5
+redis-cli --cluster create localhost:6379 localhost:6380 localhost:6381 --cluster-replicas 0 --cluster-yes
+REDIS_PORT=6379 cargo run &
+REDIS_PORT=6380 cargo run &
+REDIS_PORT=6381 cargo run &
\ No newline at end of file
diff --git a/server/tests/test_s3_files/test1.txt b/server/tests/test_s3_files/test1.txt
new file mode 100644
index 0000000..653bd95
--- /dev/null
+++ b/server/tests/test_s3_files/test1.txt
@@ -0,0 +1 @@
+This is a test file 1 from S3.
diff --git a/server/tests/test_s3_files/test10.txt b/server/tests/test_s3_files/test10.txt
new file mode 100644
index 0000000..c08deb2
--- /dev/null
+++ b/server/tests/test_s3_files/test10.txt
@@ -0,0 +1 @@
+This is a test file 10 from S3.
diff --git a/server/tests/test_s3_files/test11.txt b/server/tests/test_s3_files/test11.txt
new file mode 100644
index 0000000..08eba77
--- /dev/null
+++ b/server/tests/test_s3_files/test11.txt
@@ -0,0 +1 @@
+This is a test file 11 from S3.
diff --git a/server/tests/test_s3_files/test12.txt b/server/tests/test_s3_files/test12.txt
new file mode 100644
index 0000000..e2169fe
--- /dev/null
+++ b/server/tests/test_s3_files/test12.txt
@@ -0,0 +1 @@
+This is a test file 12 from S3.
diff --git a/server/tests/test_s3_files/test13.txt b/server/tests/test_s3_files/test13.txt
new file mode 100644
index 0000000..9f69f5e
--- /dev/null
+++ b/server/tests/test_s3_files/test13.txt
@@ -0,0 +1 @@
+This is a test file 13 from S3.
diff --git a/server/tests/test_s3_files/test14.txt b/server/tests/test_s3_files/test14.txt
new file mode 100644
index 0000000..88634a2
--- /dev/null
+++ b/server/tests/test_s3_files/test14.txt
@@ -0,0 +1 @@
+This is a test file 14 from S3.
diff --git a/server/tests/test_s3_files/test15.txt b/server/tests/test_s3_files/test15.txt
new file mode 100644
index 0000000..77cdf17
--- /dev/null
+++ b/server/tests/test_s3_files/test15.txt
@@ -0,0 +1 @@
+This is a test file 15 from S3.
diff --git a/server/tests/test_s3_files/test16.txt b/server/tests/test_s3_files/test16.txt
new file mode 100644
index 0000000..bde4faf
--- /dev/null
+++ b/server/tests/test_s3_files/test16.txt
@@ -0,0 +1 @@
+This is a test file 16 from S3.
diff --git a/server/tests/test_s3_files/test2.txt b/server/tests/test_s3_files/test2.txt
new file mode 100644
index 0000000..e809612
--- /dev/null
+++ b/server/tests/test_s3_files/test2.txt
@@ -0,0 +1 @@
+This is a test file 2 from S3.
diff --git a/server/tests/test_s3_files/test3.txt b/server/tests/test_s3_files/test3.txt
new file mode 100644
index 0000000..363ba96
--- /dev/null
+++ b/server/tests/test_s3_files/test3.txt
@@ -0,0 +1 @@
+This is a test file 3 from S3.
diff --git a/server/tests/test_s3_files/test4.txt b/server/tests/test_s3_files/test4.txt
new file mode 100644
index 0000000..7d149fd
--- /dev/null
+++ b/server/tests/test_s3_files/test4.txt
@@ -0,0 +1 @@
+This is a test file 4 from S3.
diff --git a/server/tests/test_s3_files/test5.txt b/server/tests/test_s3_files/test5.txt
new file mode 100644
index 0000000..b62b4ce
--- /dev/null
+++ b/server/tests/test_s3_files/test5.txt
@@ -0,0 +1 @@
+This is a test file 5 from S3.
diff --git a/server/tests/test_s3_files/test6.txt b/server/tests/test_s3_files/test6.txt
new file mode 100644
index 0000000..2985329
--- /dev/null
+++ b/server/tests/test_s3_files/test6.txt
@@ -0,0 +1 @@
+This is a test file 6 from S3.
diff --git a/server/tests/test_s3_files/test7.txt b/server/tests/test_s3_files/test7.txt
new file mode 100644
index 0000000..a714741
--- /dev/null
+++ b/server/tests/test_s3_files/test7.txt
@@ -0,0 +1 @@
+This is a test file 7 from S3.
diff --git a/server/tests/test_s3_files/test8.txt b/server/tests/test_s3_files/test8.txt
new file mode 100644
index 0000000..b0a119a
--- /dev/null
+++ b/server/tests/test_s3_files/test8.txt
@@ -0,0 +1 @@
+This is a test file 8 from S3.
diff --git a/server/tests/test_s3_files/test9.txt b/server/tests/test_s3_files/test9.txt
new file mode 100644
index 0000000..58225c6
--- /dev/null
+++ b/server/tests/test_s3_files/test9.txt
@@ -0,0 +1 @@
+This is a test file 9 from S3.
diff --git a/src/main.rs b/src/main.rs
deleted file mode 100644
index e7a11a9..0000000
--- a/src/main.rs
+++ /dev/null
@@ -1,3 +0,0 @@
-fn main() {
-    println!("Hello, world!");
-}
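For reference, a local equivalent of the CI job above — a sketch, run from the `server/` directory, mirroring the workflow's mock-S3 container and `tests/setup.sh`:

```sh
docker run -p 6333:80 -v "$(pwd)/tests/test_s3_files:/usr/share/nginx/html" -d nginx
bash tests/setup.sh            # three redis nodes + cluster create + three server nodes
sleep 5                        # give the server nodes time to come up
TEST_ROOT="$(pwd)/tests" bats tests/server-integration.bats
```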