Skip to content

Commit

Permalink
feat!: Implement SQLite db (#28)
Browse files Browse the repository at this point in the history
* refactor Dockerfile using alpine image;
* introduce SQLite, remove redis;
* introduce sqlx, without compile time guarantees on queries;
* replace cached with mini-moka cache. More granular: caches structs directly instead of function calls and their return values;
* remove unused parameters in router (sort, order) to avoid useless caching. Add cache for runtime fields values;
* update README.md;
* chore: order creature list by name by default.
  • Loading branch information
RakuJa authored Nov 16, 2023
1 parent f25e9ba commit 34188ec
Show file tree
Hide file tree
Showing 19 changed files with 350 additions and 479 deletions.
5 changes: 2 additions & 3 deletions Cargo.toml
Original file line number Diff line number Diff line change
Expand Up @@ -14,13 +14,12 @@ validator = {version="0.16.1", features = ["derive"]}
utoipa = { version = "4", features = ["actix_extras"] }
utoipa-swagger-ui = { version = "4", features = ["actix-web"] }

redis = {version = "0.23.0-beta.1", features = ["json"]}
cached = "0.46.0"
sqlx = { version = "0.7.2", features = ["runtime-async-std", "sqlite"] }
mini-moka = "0.10.2"

anyhow = "1.0.75"
serde = { version = "1", features = ["derive"] }
serde_json = "1"
regex = "1.10.2"
strum = {version="0.25", features = ["derive"]}
rand = "0.8.5"
counter = "0.5.7"
Expand Down
29 changes: 23 additions & 6 deletions Dockerfile
Original file line number Diff line number Diff line change
@@ -1,26 +1,43 @@
# Stage 1: Build the Rust project
FROM rust:latest as builder
FROM rust:1.73-alpine as builder

# Set the working directory in the container
WORKDIR /app

# Copy the project files into the container
COPY . .

# Install all the required libraries
# GCC
RUN apk add build-base

RUN apk add musl-dev
RUN cargo install cross

# cross needs docker to work
RUN apk add --update docker openrc
RUN rc-update add docker boot

# Static binary magic
#RUN rustup target add aarch64-unknown-linux-musl
#RUN rustup toolchain install stable-aarch64-unknown-linux-musl

# Build the project with optimizations
RUN cargo build --release
RUN cargo build --target x86_64-unknown-linux-musl --release

# Stage 2: Create a minimal runtime image
FROM debian:bookworm-slim
FROM alpine:latest

# Adding sqlite, cannot do it before
RUN apk add sqlite

# Set the working directory in the container
WORKDIR /app

# Copy the built binary from the previous stage
COPY --from=builder /app/target/release/bybe .
COPY --from=builder /app/target/x86_64-unknown-linux-musl/release/bybe .

# Expose the port that your Actix-Web application will listen on
EXPOSE 25566

# Command to run your application when the container starts
CMD ["./bybe"]
ENTRYPOINT ["./bybe"]
29 changes: 12 additions & 17 deletions README.md
Original file line number Diff line number Diff line change
@@ -1,7 +1,7 @@
![Rust](https://img.shields.io/badge/Rust-664666?style=for-the-badge&logo=rust&logoColor=red)
![Actix-web](https://img.shields.io/badge/actix-web?style=for-the-badge&logoColor=black&labelColor=pink&color=black)
![Redis](https://img.shields.io/badge/redis-%23DD0031.svg?style=for-the-badge&logo=redis&logoColor=white)
![SQLite](https://img.shields.io/badge/sqlite-%2307405e.svg?style=for-the-badge&logo=sqlite&logoColor=white)
[![License: MIT](https://img.shields.io/badge/License-MIT-green.svg)](https://opensource.org/licenses/MIT)


Expand All @@ -23,13 +23,13 @@
Built using:

- [Rust](https://www.rust-lang.org/tools/install)
- [Redis](https://redis.io/download/)
- [SQLite](https://www.sqlite.org/download.html)

## Installation guide - Local

1. Install [Rust](https://www.rust-lang.org/tools/install) on your machine.
2. Install [Redis](https://redis.io/download/) and populate the database.
3. Clone this repository:
2. Populate the SQLite database.
3. Clone this repository:

```
git clone https://github.com/RakuJa/BYBE
Expand All @@ -41,14 +41,14 @@ git clone https://github.com/RakuJa/BYBE
```
cargo build
```

5. Run the backend in development mode:
6. Set DATABASE_URL variable to SQLite db path
7. Run the backend in development mode:

```
cargo run
```

6. To instead deploy the production build, run:
8. To instead deploy the production build, run:

```
cargo build --release
Expand All @@ -60,23 +60,18 @@ cargo run

## Installation guide using Docker

1) Install Docker on your local machine
2) Download redis on your local machine:
```
docker pull redis
```
3) Clone the repository or download the ZIP
1. Install Docker on your local machine
2. Clone the repository or download the ZIP
```
git clone https://github.com/RakuJa/BYBE
```
4) Go to the local BYBE project folder
3. Go to the local BYBE project folder

5) Build docker image of bybe using
4. Build docker image of bybe using
```
docker build -t bybe .
```

6) Run the image
5. Run the image
```
docker run -p 25566:25566 --name bybe-container bybe
```
Expand Down
106 changes: 21 additions & 85 deletions src/db/db_cache.rs
Original file line number Diff line number Diff line change
@@ -1,36 +1,7 @@
use crate::models::creature::Creature;
use cached::proc_macro::cached;
use crate::AppState;

#[derive(Default, Hash, Eq, PartialEq, Clone)]
pub struct SortedVectorsByField {
pub unordered_creatures: Vec<Creature>,

pub order_by_id_ascending: Vec<Creature>,
pub order_by_id_descending: Vec<Creature>,

pub order_by_name_ascending: Vec<Creature>,
pub order_by_name_descending: Vec<Creature>,

pub order_by_hp_ascending: Vec<Creature>,
pub order_by_hp_descending: Vec<Creature>,

pub order_by_level_ascending: Vec<Creature>,
pub order_by_level_descending: Vec<Creature>,

pub order_by_family_ascending: Vec<Creature>,
pub order_by_family_descending: Vec<Creature>,

pub order_by_alignment_ascending: Vec<Creature>,
pub order_by_alignment_descending: Vec<Creature>,

pub order_by_size_ascending: Vec<Creature>,
pub order_by_size_descending: Vec<Creature>,

pub order_by_rarity_ascending: Vec<Creature>,
pub order_by_rarity_descending: Vec<Creature>,
}

#[derive(Default, Hash, Eq, PartialEq, Clone)]
#[derive(Default, Eq, PartialEq, Clone)]
pub struct RuntimeFieldsValues {
pub list_of_ids: Vec<String>,
pub list_of_levels: Vec<String>,
Expand All @@ -42,10 +13,24 @@ pub struct RuntimeFieldsValues {
pub list_of_creature_types: Vec<String>,
}

#[cached(time = 604800, sync_writes = true)]
pub fn from_db_data_to_filter_cache(data: Vec<Creature>) -> RuntimeFieldsValues {
pub fn from_db_data_to_filter_cache(
app_state: &AppState,
data: Vec<Creature>,
) -> RuntimeFieldsValues {
let mut fields_values_cache = RuntimeFieldsValues::default();
// The right structure would be a hashset, but it does not implement hash..
let cache = &app_state.runtime_fields_cache.clone();
if let Some(runtime_fields) = cache.get(&0) {
return RuntimeFieldsValues {
list_of_ids: runtime_fields.list_of_ids.clone(),
list_of_levels: runtime_fields.list_of_levels.clone(),
list_of_families: runtime_fields.list_of_families.clone(),
list_of_traits: runtime_fields.list_of_traits.clone(),
list_of_alignments: runtime_fields.list_of_alignments.clone(),
list_of_sizes: runtime_fields.list_of_sizes.clone(),
list_of_rarities: runtime_fields.list_of_rarities.clone(),
list_of_creature_types: runtime_fields.list_of_creature_types.clone(),
};
}
for curr_creature in data {
let id = curr_creature.id.to_string();
let lvl = curr_creature.level.to_string();
Expand Down Expand Up @@ -95,56 +80,7 @@ pub fn from_db_data_to_filter_cache(data: Vec<Creature>) -> RuntimeFieldsValues
.push(creature_type);
}
}
fields_values_cache
}

#[cached(time = 604800, sync_writes = true)]
pub fn from_db_data_to_sorted_vectors(unordered_creatures: Vec<Creature>) -> SortedVectorsByField {
let mut sorted_cache = SortedVectorsByField::default();

let mut sort_stage = unordered_creatures.clone();

sorted_cache.unordered_creatures = unordered_creatures.clone();

sort_stage.sort_by_key(|cr| cr.id);
sorted_cache.order_by_id_ascending = sort_stage.clone();
sort_stage.reverse();
sorted_cache.order_by_id_descending = sort_stage.clone();

sort_stage.sort_by_key(|cr| cr.name.clone());
sorted_cache.order_by_name_ascending = sort_stage.clone();
sort_stage.reverse();
sorted_cache.order_by_name_descending = sort_stage.clone();

sort_stage.sort_by_key(|cr| cr.hp);
sorted_cache.order_by_hp_ascending = sort_stage.clone();
sort_stage.reverse();
sorted_cache.order_by_hp_descending = sort_stage.clone();
cache.insert(0, fields_values_cache.clone());

sort_stage.sort_by_key(|cr| cr.level);
sorted_cache.order_by_level_ascending = sort_stage.clone();
sort_stage.reverse();
sorted_cache.order_by_level_descending = sort_stage.clone();

sort_stage.sort_by_key(|cr| cr.family.clone());
sorted_cache.order_by_family_ascending = sort_stage.clone();
sort_stage.reverse();
sorted_cache.order_by_family_descending = sort_stage.clone();

sort_stage.sort_by_key(|cr| cr.alignment.clone());
sorted_cache.order_by_alignment_ascending = sort_stage.clone();
sort_stage.reverse();
sorted_cache.order_by_alignment_descending = sort_stage.clone();

sort_stage.sort_by_key(|cr| cr.size.clone());
sorted_cache.order_by_size_ascending = sort_stage.clone();
sort_stage.reverse();
sorted_cache.order_by_size_descending = sort_stage.clone();

sort_stage.sort_by_key(|cr| cr.rarity.clone());
sorted_cache.order_by_rarity_ascending = sort_stage.clone();
sort_stage.reverse();
sorted_cache.order_by_rarity_descending = sort_stage.clone();

sorted_cache
fields_values_cache
}
Loading

0 comments on commit 34188ec

Please sign in to comment.