Soma Nakamura 2025-07-05 11:50:50 +09:00
commit bde8398d46
30 changed files with 3269 additions and 0 deletions

21
.env.example Normal file
@@ -0,0 +1,21 @@
# Database
DATABASE_URL=sqlite://hyperdashi.db
# For production: DATABASE_URL=postgres://user:password@localhost/hyperdashi
# Server
SERVER_HOST=127.0.0.1
SERVER_PORT=8080
# Storage
STORAGE_TYPE=local
LOCAL_STORAGE_PATH=./uploads
# For S3 storage (production)
# STORAGE_TYPE=s3
# AWS_ACCESS_KEY_ID=your_access_key
# AWS_SECRET_ACCESS_KEY=your_secret_key
# AWS_REGION=ap-northeast-1
# S3_BUCKET_NAME=hyperdashi-images
# Logging
RUST_LOG=hyperdashi_server=debug,tower_http=debug,sqlx=warn

29
.envrc Normal file
@@ -0,0 +1,29 @@
# Automatically load the nix environment
use flake
# Load environment variables from .env file if it exists
dotenv_if_exists
# Set development environment variables
export DATABASE_URL="sqlite://hyperdashi.db"
export RUST_LOG="debug"
export RUST_BACKTRACE="1"
# Server configuration
export SERVER_HOST="127.0.0.1"
export SERVER_PORT="8081"
# Storage configuration
export STORAGE_TYPE="local"
export STORAGE_MAX_FILE_SIZE_MB="10"
export LOCAL_STORAGE_PATH="./uploads"
# Rust development settings
export CARGO_TARGET_DIR="target"
export RUSTFLAGS="--deny warnings"
# Create necessary directories
mkdir -p uploads
echo "Development environment loaded!"
echo "Run 'nix run .#dev' to start the development server"

81
.gitignore vendored Normal file
@@ -0,0 +1,81 @@
# Rust
/target/
**/*.rs.bk
Cargo.lock
# Database files
*.db
*.db-shm
*.db-wal
*.sqlite
*.sqlite3
# Environment files
.env
.env.local
.env.production
.env.*.local
# Logs
*.log
npm-debug.log*
yarn-debug.log*
yarn-error.log*
# Runtime data
pids
*.pid
*.seed
*.pid.lock
# Uploads and temporary files
/uploads/
tmp/
temp/
# OS generated files
.DS_Store
.DS_Store?
._*
.Spotlight-V100
.Trashes
ehthumbs.db
Thumbs.db
# IDE files
.vscode/
.idea/
*.swp
*.swo
*~
# Nix
result
result-*
.direnv/
# Node modules (for frontend)
node_modules/
npm-debug.log
yarn-error.log
.pnpm-debug.log
# Build outputs
dist/
build/
*.tgz
*.tar.gz
# Coverage reports
coverage/
*.lcov
tarpaulin-report.html
# Backup files
*_backup_*
*.bak
*.backup
# Test files
test.db
*test*.db

61
CLAUDE.md Normal file
@@ -0,0 +1,61 @@
This is the backend of "hyperdashi", an evolved, general-purpose successor to "dashi", the equipment management system of the Information Media System Bureau (情報メディアシステム局).
dashi is available at https://dashi.sohosai.com.
dashi used a graph database and a search engine, but their operational overhead was not worth it, so the goal is to replace them with an RDB.
The requirements for hyperdashi are as follows.

Item registration: register information about a piece of equipment.

- Item name
- Label ID
  - Attached to the item as a QR code or barcode; the item is referenced by this label ID
  - Alphanumeric, with some characters excluded: I and O are not used because they are easily misread as 1 and 0
- Model number
- Remarks
  - Free text for things like length, age, or scratches
- Purchase year
- Purchase amount
- Useful life (years)
- Whether the item is subject to depreciation
  - Relevant when buying expensive items; items bought in the past are not depreciation targets
- Connection names (the names of the connectors)
  - A variable-length array, apparently
- Cable identification color pattern
  - The colors taped onto the cable, added in order starting from the connector side
  - Watch out for white! White is hard to see
- Storage locations
  - Multiple locations can be selected: for an item stored in container α on rack X in room A, being able to record all three of "room A", "rack X", and "container α" improves searchability
- Whether the item is on loan
- Label type: QR code, barcode, or no label attached
- Created at
  - Assigned automatically
- Updated at
  - Assigned automatically
- Disposed/transferred flag
  - Separate from deletion: deletion is for correcting mistaken input, whereas disposing an item sets this flag
- Image
  - Planned to be a URL. At registration the file itself is uploaded, so this holds either the URL produced by uploading to object storage, or the backend URL when the file falls back to local storage.

Partial updates must also be possible, and an API for them is needed. Registration and editing will be done from an Excel-like UI.

Loan management table:

- ID of the item on loan
- Who it is lent to
  - Student number and name
- Where it is lent to
  - A string; organization names and the like
- Loan date
- Return date
- Remarks

APIs are also needed to register and update loan records, including at return time.

Images are planned to go to S3-compatible object storage, but a local-filesystem fallback selectable via env configuration would be welcome. The same goes for the RDB: production will use PostgreSQL, but falling back to a local SQLite file during development would be welcome.

The server is written in Rust and implemented with Axum and SQLx.
Communication with the client uses a REST API.

67
Cargo.toml Normal file
@@ -0,0 +1,67 @@
[package]
name = "hyperdashi-server"
version = "0.1.0"
edition = "2021"
[dependencies]
# Web framework
axum = { version = "0.7", features = ["multipart"] }
tower = "0.4"
tower-http = { version = "0.5", features = ["cors", "trace", "fs"] }
# Async runtime
tokio = { version = "1", features = ["full"] }
# Database
sqlx = { version = "0.7", features = ["runtime-tokio-rustls", "postgres", "sqlite", "json", "chrono", "rust_decimal", "migrate"] }
# Serialization
serde = { version = "1.0", features = ["derive"] }
serde_json = "1.0"
# Configuration
config = "0.14"
# Error handling
thiserror = "1.0"
anyhow = "1.0"
# Logging
tracing = "0.1"
tracing-subscriber = { version = "0.3", features = ["env-filter"] }
# Date/Time
chrono = { version = "0.4", features = ["serde"] }
# Decimal for money
rust_decimal = { version = "1.35", features = ["serde-float"] }
# HTTP client (for S3)
reqwest = { version = "0.12", features = ["json", "multipart", "rustls-tls"], default-features = false }
# AWS SDK (for S3)
aws-config = "1.1"
aws-sdk-s3 = "1.14"
# File uploads
multer = "3.0"
# Validation
validator = { version = "0.18", features = ["derive"] }
# Regular expressions
regex = "1.10"
lazy_static = "1.4"
once_cell = "1.19"
# Environment variables
dotenvy = "0.15"
# UUID for unique identifiers
uuid = { version = "1.7", features = ["v4", "serde"] }
# Async trait
async-trait = "0.1"
[dev-dependencies]
tokio-test = "0.4"

345
DESIGN.md Normal file
@@ -0,0 +1,345 @@
# HyperDashi Backend Detailed Design
## 1. System Overview
HyperDashi is an evolved, general-purpose successor to "dashi", the equipment management system of the Information Media System Bureau. The previous system used a graph database and a search engine; to simplify operations, it is being migrated to an RDB-based system.
### 1.1 Technology Stack
- **Language**: Rust
- **Web framework**: Axum
- **Database**: PostgreSQL (production) / SQLite (development)
- **Database toolkit**: SQLx
- **Storage**: S3-compatible object storage (production) / local filesystem (development)
- **API style**: REST
## 2. Database Design
### 2.1 Items table (items)
```sql
CREATE TABLE items (
id SERIAL PRIMARY KEY,
name VARCHAR(255) NOT NULL,
label_id VARCHAR(50) UNIQUE NOT NULL,
model_number VARCHAR(255),
remarks TEXT,
purchase_year INTEGER,
purchase_amount DECIMAL(12, 2),
useful_life INTEGER,
is_depreciable BOOLEAN DEFAULT FALSE,
connection_names TEXT[], -- PostgreSQL array type; JSON in SQLite
cable_color_pattern TEXT[], -- PostgreSQL array type; JSON in SQLite
storage_locations TEXT[], -- PostgreSQL array type; JSON in SQLite
is_on_loan BOOLEAN DEFAULT FALSE,
label_type VARCHAR(20) CHECK (label_type IN ('qr', 'barcode', 'none')),
is_disposed BOOLEAN DEFAULT FALSE,
image_url TEXT,
created_at TIMESTAMP WITH TIME ZONE DEFAULT CURRENT_TIMESTAMP,
updated_at TIMESTAMP WITH TIME ZONE DEFAULT CURRENT_TIMESTAMP
);
-- Indexes
CREATE INDEX idx_items_label_id ON items(label_id);
CREATE INDEX idx_items_name ON items(name);
CREATE INDEX idx_items_is_on_loan ON items(is_on_loan);
CREATE INDEX idx_items_is_disposed ON items(is_disposed);
```
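On SQLite the three array columns above are stored as JSON text, and the item service round-trips them with `serde_json`. A minimal sketch of that encoding (values are illustrative):

```rust
// Write path: Vec<String> -> JSON text for the TEXT column.
let locations = vec!["部屋A".to_string(), "ラックX".to_string()];
let as_text = serde_json::to_string(&locations)?; // => ["部屋A","ラックX"]

// Read path: JSON text from the row -> Vec<String>; empty on missing/bad data.
let parsed: Vec<String> = serde_json::from_str(&as_text).unwrap_or_default();
```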
### 2.2 Loans table (loans)
```sql
CREATE TABLE loans (
id SERIAL PRIMARY KEY,
item_id INTEGER NOT NULL REFERENCES items(id),
student_number VARCHAR(20) NOT NULL,
student_name VARCHAR(100) NOT NULL,
organization VARCHAR(255),
loan_date TIMESTAMP WITH TIME ZONE NOT NULL DEFAULT CURRENT_TIMESTAMP,
return_date TIMESTAMP WITH TIME ZONE,
remarks TEXT,
created_at TIMESTAMP WITH TIME ZONE DEFAULT CURRENT_TIMESTAMP,
updated_at TIMESTAMP WITH TIME ZONE DEFAULT CURRENT_TIMESTAMP
);
-- Indexes
CREATE INDEX idx_loans_item_id ON loans(item_id);
CREATE INDEX idx_loans_student_number ON loans(student_number);
CREATE INDEX idx_loans_return_date ON loans(return_date);
```
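Active loans are the rows where `return_date IS NULL`. A partial index could serve that lookup directly; this is a possible refinement (supported by both PostgreSQL and SQLite), not one of the migrations in this commit:

```sql
CREATE INDEX idx_loans_active ON loans(item_id) WHERE return_date IS NULL;
```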
### 2.3 Image management table (images, optional)
```sql
CREATE TABLE images (
id SERIAL PRIMARY KEY,
file_name VARCHAR(255) NOT NULL,
content_type VARCHAR(100) NOT NULL,
storage_type VARCHAR(20) CHECK (storage_type IN ('s3', 'local')),
storage_path TEXT NOT NULL,
size_bytes BIGINT,
created_at TIMESTAMP WITH TIME ZONE DEFAULT CURRENT_TIMESTAMP
);
```
## 3. API Design
### 3.1 Items API
#### 3.1.1 List items
- **Endpoint**: `GET /api/v1/items`
- **Query parameters**:
  - `page` (optional): page number
  - `per_page` (optional): items per page
  - `search` (optional): search keyword
  - `is_on_loan` (optional): filter by on-loan status
  - `is_disposed` (optional): filter by disposed status
- **Response**:
```json
{
"items": [
{
"id": 1,
"name": "HDMIケーブル 3m",
"label_id": "HYP-A001",
"model_number": "HDMI-3M-V2",
"remarks": "端子部分に少し傷あり",
"purchase_year": 2023,
"purchase_amount": 1500.00,
"useful_life": 5,
"is_depreciable": false,
"connection_names": ["HDMI Type-A", "HDMI Type-A"],
"cable_color_pattern": ["赤", "青", "赤"],
"storage_locations": ["部屋A", "ラックX", "コンテナα"],
"is_on_loan": false,
"label_type": "qr",
"is_disposed": false,
"image_url": "https://storage.example.com/images/hdmi-cable.jpg",
"created_at": "2023-04-01T10:00:00Z",
"updated_at": "2023-04-01T10:00:00Z"
}
],
"total": 150,
"page": 1,
"per_page": 20
}
```
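Pagination is 1-based, and the handlers default to `page=1`, `per_page=20`. A sketch of the underlying query; the services actually bind positional `?1`/`?2` parameters and compute the offset in Rust, so the named placeholders here are purely illustrative:

```sql
SELECT * FROM items
ORDER BY created_at DESC
LIMIT :per_page OFFSET (:page - 1) * :per_page;
```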
#### 3.1.2 Get item
- **Endpoint**: `GET /api/v1/items/{id}`
- **Response**: a single item object
#### 3.1.3 Create item
- **Endpoint**: `POST /api/v1/items`
- **Request body**:
```json
{
"name": "HDMIケーブル 3m",
"label_id": "HYP-A001",
"model_number": "HDMI-3M-V2",
"remarks": "端子部分に少し傷あり",
"purchase_year": 2023,
"purchase_amount": 1500.00,
"useful_life": 5,
"is_depreciable": false,
"connection_names": ["HDMI Type-A", "HDMI Type-A"],
"cable_color_pattern": ["赤", "青", "赤"],
"storage_locations": ["部屋A", "ラックX", "コンテナα"],
"label_type": "qr"
}
```
#### 3.1.4 Update item
- **Endpoint**: `PUT /api/v1/items/{id}`
- **Request body**: same as item creation (updates all fields)
#### 3.1.5 Partially update item
- **Endpoint**: `PATCH /api/v1/items/{id}`
- **Request body**: only the fields to update (example below)
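An illustrative PATCH body (the field values are made up for the example):

```json
{
  "remarks": "端子を交換済み",
  "storage_locations": ["部屋B", "ラックY"]
}
```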
#### 3.1.6 Delete item
- **Endpoint**: `DELETE /api/v1/items/{id}`
- **Description**: physical deletion, not a soft delete (for correcting mistaken input)
#### 3.1.7 Dispose/transfer item
- **Endpoint**: `POST /api/v1/items/{id}/dispose`
- **Description**: sets the is_disposed flag
#### 3.1.8 Look up item by label ID
- **Endpoint**: `GET /api/v1/items/by-label/{label_id}`
- **Description**: for lookups when a QR code/barcode is scanned
### 3.2 Loans API
#### 3.2.1 Create loan
- **Endpoint**: `POST /api/v1/loans`
- **Request body**:
```json
{
"item_id": 1,
"student_number": "21001234",
"student_name": "山田太郎",
"organization": "第74回総合祭実行委員会",
"remarks": "イベント用機材"
}
```
#### 3.2.2 Return an item
- **Endpoint**: `POST /api/v1/loans/{id}/return`
- **Request body**:
```json
{
"return_date": "2023-04-10T15:00:00Z",
"remarks": "問題なく返却"
}
```
#### 3.2.3 List loan history
- **Endpoint**: `GET /api/v1/loans`
- **Query parameters**:
  - `item_id` (optional): item ID
  - `student_number` (optional): student number
  - `active_only` (optional): unreturned loans only
#### 3.2.4 Get loan
- **Endpoint**: `GET /api/v1/loans/{id}`
### 3.3 Image Upload API
#### 3.3.1 Upload image
- **Endpoint**: `POST /api/v1/images/upload`
- **Content-Type**: `multipart/form-data`
- **Response**:
```json
{
"url": "https://storage.example.com/images/abc123.jpg"
}
```
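A client-side sketch of the upload using `reqwest` (already a dependency of this crate). The multipart field name `image` matches the handler; the host, port, and `image_bytes: Vec<u8>` are assumptions for the example:

```rust
let part = reqwest::multipart::Part::bytes(image_bytes)
    .file_name("photo.jpg")
    .mime_str("image/jpeg")?;
let form = reqwest::multipart::Form::new().part("image", part);
let resp = reqwest::Client::new()
    .post("http://localhost:8080/api/v1/images/upload")
    .multipart(form)
    .send()
    .await?;
```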
### 3.4 Bulk Operations API
#### 3.4.1 Bulk create items
- **Endpoint**: `POST /api/v1/items/bulk`
- **Request body**: an array of item objects
#### 3.4.2 Bulk update items
- **Endpoint**: `PUT /api/v1/items/bulk`
- **Request body**: an array of item objects to update
## 4. Application Structure
### 4.1 Directory Layout
```
hyperdashi-server/
├── src/
│   ├── main.rs              # Entry point
│   ├── config.rs            # Configuration management
│   ├── db/
│   │   ├── mod.rs           # Database connection management
│   │   └── migrations/      # SQL migrations
│   ├── models/
│   │   ├── mod.rs
│   │   ├── item.rs          # Item model
│   │   └── loan.rs          # Loan model
│   ├── handlers/
│   │   ├── mod.rs
│   │   ├── items.rs         # Item handlers
│   │   ├── loans.rs         # Loan handlers
│   │   └── images.rs        # Image handlers
│   ├── services/
│   │   ├── mod.rs
│   │   ├── item_service.rs  # Item business logic
│   │   ├── loan_service.rs  # Loan business logic
│   │   └── storage.rs       # Storage abstraction
│   ├── utils/
│   │   ├── mod.rs
│   │   ├── validation.rs    # Validation
│   │   └── label.rs         # Label ID generation
│   └── error.rs             # Error type definitions
├── Cargo.toml
├── .env.example
└── README.md
```
### 4.2 Main Components
#### 4.2.1 Configuration (config.rs)
```rust
#[derive(Debug, Deserialize)]
pub struct Config {
pub database_url: String,
pub server_host: String,
pub server_port: u16,
pub storage_type: StorageType,
pub s3_config: Option<S3Config>,
pub local_storage_path: Option<String>,
}
#[derive(Debug, Deserialize)]
pub enum StorageType {
S3,
Local,
}
```
#### 4.2.2 Storage abstraction (storage.rs)
```rust
#[async_trait]
pub trait Storage: Send + Sync {
async fn upload(&self, data: Vec<u8>, filename: &str) -> Result<String>;
async fn delete(&self, url: &str) -> Result<()>;
}
pub struct S3Storage { /* ... */ }
pub struct LocalStorage { /* ... */ }
```
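A minimal sketch of the local fallback implementing the trait above, assuming a `Result` alias such as `anyhow::Result` and that uploads are served back under a public base URL (the struct fields are assumptions, not the final implementation):

```rust
use anyhow::Result;
use async_trait::async_trait;
use tokio::fs;

pub struct LocalStorage {
    base_path: String,       // e.g. "./uploads"
    public_base_url: String, // e.g. "http://localhost:8080/uploads"
}

#[async_trait]
impl Storage for LocalStorage {
    async fn upload(&self, data: Vec<u8>, filename: &str) -> Result<String> {
        fs::create_dir_all(&self.base_path).await?;
        fs::write(format!("{}/{}", self.base_path, filename), data).await?;
        Ok(format!("{}/{}", self.public_base_url, filename))
    }

    async fn delete(&self, url: &str) -> Result<()> {
        // Map the public URL back to a file name under base_path.
        if let Some(name) = url.rsplit('/').next() {
            fs::remove_file(format!("{}/{}", self.base_path, name)).await?;
        }
        Ok(())
    }
}
```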
## 5. Security Considerations
### 5.1 Authentication and Authorization
- No authentication in the initial version (it is an internal system)
- Authentication such as JWT is planned for the future
### 5.2 Input Validation
- Label ID format validation (alphanumeric, excluding I/O; see the sketch below)
- SQL injection protection (via SQLx)
- File upload size limits and MIME type validation
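A possible shape for the label ID check; the exact character set beyond "alphanumeric minus I/O" is an assumption (`regex` and `once_cell` are already dependencies):

```rust
use once_cell::sync::Lazy;
use regex::Regex;

// Digits and uppercase letters without I and O, plus '-' for prefixes like "HYP-A001".
static LABEL_ID_RE: Lazy<Regex> =
    Lazy::new(|| Regex::new(r"^[0-9A-HJ-NP-Z-]+$").unwrap());

pub fn is_valid_label_id(label: &str) -> bool {
    LABEL_ID_RE.is_match(label)
}
```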
### 5.3 Data Protection
- Proper handling of personal information (student numbers, names)
- Encrypted transport via HTTPS
## 6. Performance Optimization
### 6.1 Database
- Appropriate indexes
- Avoiding the N+1 problem
- Connection pooling
### 6.2 Image Processing
- Image resizing and thumbnail generation
- CDN-based delivery optimization (future)
### 6.3 Caching
- Caching of frequently accessed item data (future)
## 7. Operations and Maintenance
### 7.1 Logging
- Structured log output
- Error tracking
### 7.2 Monitoring
- Health check endpoint
- Metrics collection (future)
### 7.3 Backups
- Regular database backups
- Backups of image data
## 8. Planned Extensions
- Authentication and authorization
- Item reservations
- Statistics and reporting
- Mobile app APIs
- Real-time updates via WebSocket

82
flake.lock generated Normal file
@@ -0,0 +1,82 @@
{
"nodes": {
"flake-utils": {
"inputs": {
"systems": "systems"
},
"locked": {
"lastModified": 1731533236,
"narHash": "sha256-l0KFg5HjrsfsO/JpG+r7fRrqm12kzFHyUHqHCVpMMbI=",
"owner": "numtide",
"repo": "flake-utils",
"rev": "11707dc2f618dd54ca8739b309ec4fc024de578b",
"type": "github"
},
"original": {
"owner": "numtide",
"repo": "flake-utils",
"type": "github"
}
},
"nixpkgs": {
"locked": {
"lastModified": 1751271578,
"narHash": "sha256-P/SQmKDu06x8yv7i0s8bvnnuJYkxVGBWLWHaU+tt4YY=",
"owner": "NixOS",
"repo": "nixpkgs",
"rev": "3016b4b15d13f3089db8a41ef937b13a9e33a8df",
"type": "github"
},
"original": {
"owner": "NixOS",
"ref": "nixos-unstable",
"repo": "nixpkgs",
"type": "github"
}
},
"root": {
"inputs": {
"flake-utils": "flake-utils",
"nixpkgs": "nixpkgs",
"rust-overlay": "rust-overlay"
}
},
"rust-overlay": {
"inputs": {
"nixpkgs": [
"nixpkgs"
]
},
"locked": {
"lastModified": 1751596734,
"narHash": "sha256-1tQOwmn3jEUQjH0WDJyklC+hR7Bj+iqx6ChtRX2QiPA=",
"owner": "oxalica",
"repo": "rust-overlay",
"rev": "e28ba067a9368286a8bc88b68dc2ca92181a09f0",
"type": "github"
},
"original": {
"owner": "oxalica",
"repo": "rust-overlay",
"type": "github"
}
},
"systems": {
"locked": {
"lastModified": 1681028828,
"narHash": "sha256-Vy1rq5AaRuLzOxct8nz4T6wlgyUR7zLU309k9mBC768=",
"owner": "nix-systems",
"repo": "default",
"rev": "da67096a3b9bf56a91d16901293e51ba5b49a27e",
"type": "github"
},
"original": {
"owner": "nix-systems",
"repo": "default",
"type": "github"
}
}
},
"root": "root",
"version": 7
}

277
flake.nix Normal file
@@ -0,0 +1,277 @@
{
description = "HyperDashi Backend Server Development Environment";
inputs = {
nixpkgs.url = "github:NixOS/nixpkgs/nixos-unstable";
flake-utils.url = "github:numtide/flake-utils";
rust-overlay = {
url = "github:oxalica/rust-overlay";
inputs.nixpkgs.follows = "nixpkgs";
};
};
outputs = { self, nixpkgs, flake-utils, rust-overlay }:
flake-utils.lib.eachDefaultSystem (system:
let
overlays = [ (import rust-overlay) ];
pkgs = import nixpkgs {
inherit system overlays;
};
# Rust toolchain specification
rustToolchain = pkgs.rust-bin.stable.latest.default.override {
extensions = [ "rust-src" "rustfmt" "clippy" "rust-analyzer" ];
};
# Node.js for frontend development
nodejs = pkgs.nodejs_20;
# Database and development tools
developmentTools = with pkgs; [
# Database tools
sqlite
sqlx-cli
# Development utilities
git
curl
jq
# Text editors and IDE support
vim
nano
# Build tools
pkg-config
openssl
# Optional: GUI tools if available
dbeaver-bin
];
# Runtime dependencies
runtimeDeps = with pkgs; [
# SSL/TLS support
openssl
# SQLite runtime
sqlite
# Network tools for testing
netcat-gnu
];
# Development shell packages
shellPackages = [
rustToolchain
nodejs
pkgs.yarn
pkgs.pnpm
] ++ developmentTools ++ runtimeDeps;
in
{
# Development shell
devShells.default = pkgs.mkShell {
buildInputs = shellPackages;
# Environment variables
shellHook = ''
echo "🦀 HyperDashi Development Environment"
echo "=================================================="
echo "Rust version: $(rustc --version)"
echo "Node.js version: $(node --version)"
echo "SQLite version: $(sqlite3 --version)"
echo "=================================================="
# Set environment variables
export DATABASE_URL="sqlite://hyperdashi.db"
export RUST_LOG="debug"
export RUST_BACKTRACE=1
# Server configuration
export SERVER_HOST="127.0.0.1"
export SERVER_PORT="8081"
# Storage configuration
export STORAGE_TYPE="local"
export STORAGE_MAX_FILE_SIZE_MB="10"
export LOCAL_STORAGE_PATH="./uploads"
# Create uploads directory if it doesn't exist
mkdir -p uploads
echo "Environment variables set:"
echo " DATABASE_URL: $DATABASE_URL"
echo " SERVER_PORT: $SERVER_PORT"
echo " STORAGE_MAX_FILE_SIZE_MB: $STORAGE_MAX_FILE_SIZE_MB"
echo ""
echo "Available commands:"
echo " cargo build - Build the project"
echo " cargo run - Run the development server"
echo " cargo test - Run tests"
echo " sqlx migrate run - Run database migrations"
echo " nix run .#setup-db - Initial database setup"
echo " nix run .#dev - Start development server"
echo " nix run .#test - Run all tests"
echo ""
'';
# Additional environment variables for development
DATABASE_URL = "sqlite://hyperdashi.db";
RUST_LOG = "debug";
RUST_BACKTRACE = "1";
# PKG_CONFIG_PATH for OpenSSL
PKG_CONFIG_PATH = "${pkgs.openssl.dev}/lib/pkgconfig";
# Library paths
LD_LIBRARY_PATH = pkgs.lib.makeLibraryPath [
pkgs.openssl
pkgs.sqlite
];
};
# Package outputs
packages = {
# Backend binary
hyperdashi-server = pkgs.rustPlatform.buildRustPackage {
pname = "hyperdashi-server";
version = "0.1.0";
src = ./.;
cargoLock = {
lockFile = ./Cargo.lock;
};
nativeBuildInputs = with pkgs; [
pkg-config
rustToolchain
];
buildInputs = with pkgs; [
openssl
sqlite
];
# Skip tests during build (can be run separately)
doCheck = false;
meta = with pkgs.lib; {
description = "HyperDashi equipment management system backend";
license = licenses.mit;
maintainers = [ ];
};
};
# Docker image
docker-image = pkgs.dockerTools.buildImage {
name = "hyperdashi-server";
tag = "latest";
contents = [
self.packages.${system}.hyperdashi-server
pkgs.sqlite
pkgs.openssl
];
config = {
Cmd = [ "${self.packages.${system}.hyperdashi-server}/bin/hyperdashi-server" ];
Env = [
"DATABASE_URL=sqlite:///data/hyperdashi.db"
"SERVER_HOST=0.0.0.0"
"SERVER_PORT=8080"
"STORAGE_TYPE=local"
"LOCAL_STORAGE_PATH=/uploads"
];
ExposedPorts = {
"8080/tcp" = {};
};
Volumes = {
"/data" = {};
"/uploads" = {};
};
};
};
default = self.packages.${system}.hyperdashi-server;
};
# Formatter
formatter = pkgs.nixpkgs-fmt;
# Apps for easy running
apps = {
# Run the server
hyperdashi-server = flake-utils.lib.mkApp {
drv = self.packages.${system}.hyperdashi-server;
};
# Development server with auto-reload
dev = flake-utils.lib.mkApp {
drv = pkgs.writeShellScriptBin "hyperdashi-dev" ''
export DATABASE_URL="sqlite://hyperdashi.db"
export RUST_LOG="debug"
echo "Starting HyperDashi development server..."
echo "Server will be available at http://localhost:8081"
# Run migrations first
sqlx migrate run
# Start the server with cargo watch for auto-reload
if command -v cargo-watch >/dev/null 2>&1; then
cargo watch -x run
else
echo "cargo-watch not found, installing..."
cargo install cargo-watch
cargo watch -x run
fi
'';
};
# Database setup
setup-db = flake-utils.lib.mkApp {
drv = pkgs.writeShellScriptBin "setup-db" ''
export DATABASE_URL="sqlite://hyperdashi.db"
echo "Setting up HyperDashi database..."
# Create database file if it doesn't exist
touch hyperdashi.db
# Run migrations
sqlx migrate run
echo "Database setup complete!"
echo "Database file: $(pwd)/hyperdashi.db"
'';
};
# Run tests
test = flake-utils.lib.mkApp {
drv = pkgs.writeShellScriptBin "hyperdashi-test" ''
export DATABASE_URL="sqlite://test.db"
export RUST_LOG="info"
echo "Running HyperDashi tests..."
# Clean up any existing test database
rm -f test.db
# Run tests
cargo test
# Clean up test database
rm -f test.db
echo "Tests completed!"
'';
};
default = self.apps.${system}.hyperdashi-server;
};
}
);
}

@@ -0,0 +1,27 @@
-- Create items table (compatible with both PostgreSQL and SQLite)
CREATE TABLE IF NOT EXISTS items (
id INTEGER PRIMARY KEY,
name TEXT NOT NULL,
label_id TEXT UNIQUE NOT NULL,
model_number TEXT,
remarks TEXT,
purchase_year INTEGER,
purchase_amount REAL,
durability_years INTEGER,
is_depreciation_target BOOLEAN DEFAULT FALSE,
connection_names TEXT, -- JSON array for both DBs
cable_color_pattern TEXT, -- JSON array for both DBs
storage_locations TEXT, -- JSON array for both DBs
is_on_loan BOOLEAN DEFAULT FALSE,
qr_code_type TEXT CHECK (qr_code_type IN ('qr', 'barcode', 'none')),
is_disposed BOOLEAN DEFAULT FALSE,
image_url TEXT,
created_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP,
updated_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP
);
-- Create indexes
CREATE INDEX IF NOT EXISTS idx_items_label_id ON items(label_id);
CREATE INDEX IF NOT EXISTS idx_items_name ON items(name);
CREATE INDEX IF NOT EXISTS idx_items_is_on_loan ON items(is_on_loan);
CREATE INDEX IF NOT EXISTS idx_items_is_disposed ON items(is_disposed);

@@ -0,0 +1,18 @@
-- Create loans table (compatible with both PostgreSQL and SQLite)
CREATE TABLE IF NOT EXISTS loans (
id INTEGER PRIMARY KEY,
item_id INTEGER NOT NULL REFERENCES items(id),
student_number TEXT NOT NULL,
student_name TEXT NOT NULL,
organization TEXT,
loan_date TIMESTAMP DEFAULT CURRENT_TIMESTAMP,
return_date TIMESTAMP,
remarks TEXT,
created_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP,
updated_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP
);
-- Create indexes
CREATE INDEX IF NOT EXISTS idx_loans_item_id ON loans(item_id);
CREATE INDEX IF NOT EXISTS idx_loans_student_number ON loans(student_number);
CREATE INDEX IF NOT EXISTS idx_loans_return_date ON loans(return_date);

@@ -0,0 +1,24 @@
-- Create cable colors table
CREATE TABLE cable_colors (
id INTEGER PRIMARY KEY AUTOINCREMENT,
name VARCHAR(100) NOT NULL UNIQUE,
hex_code VARCHAR(7),
description TEXT,
created_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP,
updated_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP
);
-- Insert common cable colors
INSERT INTO cable_colors (name, hex_code, description) VALUES
('赤', '#FF0000', '赤色'),
('青', '#0000FF', '青色'),
('緑', '#00FF00', '緑色'),
('黄', '#FFFF00', '黄色'),
('黒', '#000000', '黒色'),
('白', '#FFFFFF', '白色'),
('グレー', '#808080', 'グレー色'),
('オレンジ', '#FFA500', 'オレンジ色'),
('紫', '#800080', '紫色'),
('茶', '#A52A2A', '茶色'),
('ピンク', '#FFC0CB', 'ピンク色'),
('シルバー', '#C0C0C0', 'シルバー色');

152
src/config.rs Normal file
@@ -0,0 +1,152 @@
use config::{Config as ConfigBuilder, ConfigError, Environment, File};
use serde::Deserialize;
use std::env;
#[derive(Debug, Deserialize, Clone)]
pub struct Config {
pub database: DatabaseConfig,
pub server: ServerConfig,
pub storage: StorageConfig,
}
#[derive(Debug, Deserialize, Clone)]
pub struct DatabaseConfig {
pub url: String,
}
#[derive(Debug, Deserialize, Clone)]
pub struct ServerConfig {
pub host: String,
pub port: u16,
}
#[derive(Debug, Deserialize, Clone)]
pub struct StorageConfig {
#[serde(rename = "type")]
pub storage_type: StorageType,
pub local: Option<LocalStorageConfig>,
pub s3: Option<S3Config>,
#[serde(default = "default_max_file_size")]
pub max_file_size_mb: u64,
}
fn default_max_file_size() -> u64 {
5 // Default 5MB
}
#[derive(Debug, Deserialize, Clone)]
#[serde(rename_all = "lowercase")]
pub enum StorageType {
Local,
S3,
}
#[derive(Debug, Deserialize, Clone)]
pub struct LocalStorageConfig {
pub path: String,
}
#[derive(Debug, Deserialize, Clone)]
pub struct S3Config {
pub bucket_name: String,
pub region: String,
pub access_key_id: Option<String>,
pub secret_access_key: Option<String>,
}
impl Config {
pub fn new() -> Result<Self, ConfigError> {
let run_mode = env::var("RUN_MODE").unwrap_or_else(|_| "development".into());
let s = ConfigBuilder::builder()
// Start off by merging in the "default" configuration file
.add_source(File::with_name("config/default").required(false))
// Add in the current environment file
// Default to 'development' env
.add_source(File::with_name(&format!("config/{}", run_mode)).required(false))
// Add in a local configuration file
// This file shouldn't be checked in to git
.add_source(File::with_name("config/local").required(false))
// Add in settings from the environment (with a prefix of HYPERDASHI)
// E.g. `HYPERDASHI_DEBUG=1 ./target/app` would set the `debug` key
.add_source(Environment::with_prefix("HYPERDASHI").separator("_"))
// You can override settings from env variables
.add_source(
Environment::default()
.try_parsing(true)
.separator("_")
.list_separator(" ")
)
.build()?;
// Deserialize (and thus freeze) the entire configuration
s.try_deserialize()
}
pub fn from_env() -> Result<Self, ConfigError> {
// Load .env file if it exists
dotenvy::dotenv().ok();
let database_url = env::var("DATABASE_URL")
.unwrap_or_else(|_| "sqlite://hyperdashi.db".to_string());
let server_host = env::var("SERVER_HOST")
.unwrap_or_else(|_| "127.0.0.1".to_string());
let server_port = env::var("SERVER_PORT")
.unwrap_or_else(|_| "8080".to_string())
.parse::<u16>()
.unwrap_or(8080);
let storage_type = env::var("STORAGE_TYPE")
.unwrap_or_else(|_| "local".to_string());
let max_file_size_mb = env::var("STORAGE_MAX_FILE_SIZE_MB")
.ok()
.and_then(|s| s.parse().ok())
.unwrap_or(5);
let storage = match storage_type.to_lowercase().as_str() {
"s3" => {
let bucket_name = env::var("S3_BUCKET_NAME")
.map_err(|_| ConfigError::Message("S3_BUCKET_NAME not set".to_string()))?;
let region = env::var("AWS_REGION")
.map_err(|_| ConfigError::Message("AWS_REGION not set".to_string()))?;
let access_key_id = env::var("AWS_ACCESS_KEY_ID").ok();
let secret_access_key = env::var("AWS_SECRET_ACCESS_KEY").ok();
StorageConfig {
storage_type: StorageType::S3,
local: None,
s3: Some(S3Config {
bucket_name,
region,
access_key_id,
secret_access_key,
}),
max_file_size_mb,
}
}
_ => {
let path = env::var("LOCAL_STORAGE_PATH")
.unwrap_or_else(|_| "./uploads".to_string());
StorageConfig {
storage_type: StorageType::Local,
local: Some(LocalStorageConfig { path }),
s3: None,
max_file_size_mb,
}
}
};
Ok(Config {
database: DatabaseConfig { url: database_url },
server: ServerConfig {
host: server_host,
port: server_port,
},
storage,
})
}
}

85
src/db/mod.rs Normal file
@@ -0,0 +1,85 @@
use sqlx::postgres::{PgPool, PgPoolOptions};
use sqlx::sqlite::{SqliteConnectOptions, SqlitePool, SqlitePoolOptions};
use std::str::FromStr;
use crate::config::Config;
use crate::error::AppResult;
#[derive(Clone)]
pub enum DatabasePool {
Postgres(PgPool),
Sqlite(SqlitePool),
}
impl DatabasePool {
pub async fn new(config: &Config) -> AppResult<Self> {
let database_url = &config.database.url;
if database_url.starts_with("postgres://") || database_url.starts_with("postgresql://") {
let pool = PgPoolOptions::new()
.max_connections(10)
.connect(database_url)
.await?;
Ok(DatabasePool::Postgres(pool))
} else if database_url.starts_with("sqlite://") {
let options = SqliteConnectOptions::from_str(database_url)?
.create_if_missing(true);
let pool = SqlitePoolOptions::new()
.max_connections(10)
.connect_with(options)
.await?;
Ok(DatabasePool::Sqlite(pool))
} else {
Err(crate::error::AppError::ConfigError(
config::ConfigError::Message(
"Invalid database URL. Must start with postgres:// or sqlite://".to_string()
)
))
}
}
pub async fn migrate(&self) -> AppResult<()> {
match self {
DatabasePool::Postgres(pool) => {
sqlx::migrate!("./migrations")
.run(pool)
.await
.map_err(|e| crate::error::AppError::DatabaseError(sqlx::Error::Migrate(Box::new(e))))?;
}
DatabasePool::Sqlite(pool) => {
sqlx::migrate!("./migrations")
.run(pool)
.await
.map_err(|e| crate::error::AppError::DatabaseError(sqlx::Error::Migrate(Box::new(e))))?;
}
}
Ok(())
}
pub fn postgres(&self) -> Option<&PgPool> {
match self {
DatabasePool::Postgres(pool) => Some(pool),
_ => None,
}
}
pub fn sqlite(&self) -> Option<&SqlitePool> {
match self {
DatabasePool::Sqlite(pool) => Some(pool),
_ => None,
}
}
}
// Dispatches the same runtime-checked query to whichever pool is active.
// Note: this uses `sqlx::query_as` (runtime) rather than the compile-time
// `sqlx::query_as!` macro, which cannot be forwarded through another macro.
#[macro_export]
macro_rules! query_as {
    ($ty:ty, $query:expr, $pool:expr) => {
        match $pool {
            $crate::db::DatabasePool::Postgres(pool) => {
                sqlx::query_as::<_, $ty>($query).fetch_all(pool).await
            }
            $crate::db::DatabasePool::Sqlite(pool) => {
                sqlx::query_as::<_, $ty>($query).fetch_all(pool).await
            }
        }
    };
}

101
src/error.rs Normal file
@@ -0,0 +1,101 @@
use axum::{
http::StatusCode,
response::{IntoResponse, Response},
Json,
};
use serde_json::json;
use std::fmt;
#[derive(Debug)]
pub enum AppError {
NotFound(String),
BadRequest(String),
InternalServerError(String),
DatabaseError(sqlx::Error),
ConfigError(config::ConfigError),
IoError(std::io::Error),
ValidationError(String),
StorageError(String),
}
impl fmt::Display for AppError {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
match self {
AppError::NotFound(msg) => write!(f, "Not found: {}", msg),
AppError::BadRequest(msg) => write!(f, "Bad request: {}", msg),
AppError::InternalServerError(msg) => write!(f, "Internal server error: {}", msg),
AppError::DatabaseError(err) => write!(f, "Database error: {}", err),
AppError::ConfigError(err) => write!(f, "Configuration error: {}", err),
AppError::IoError(err) => write!(f, "IO error: {}", err),
AppError::ValidationError(msg) => write!(f, "Validation error: {}", msg),
AppError::StorageError(msg) => write!(f, "Storage error: {}", msg),
}
}
}
impl std::error::Error for AppError {}
impl IntoResponse for AppError {
fn into_response(self) -> Response {
let (status, error_message) = match self {
AppError::NotFound(msg) => (StatusCode::NOT_FOUND, msg),
AppError::BadRequest(msg) => (StatusCode::BAD_REQUEST, msg),
AppError::ValidationError(msg) => (StatusCode::BAD_REQUEST, msg),
AppError::InternalServerError(msg) => (StatusCode::INTERNAL_SERVER_ERROR, msg),
AppError::DatabaseError(ref err) => {
tracing::error!("Database error: {:?}", err);
(
StatusCode::INTERNAL_SERVER_ERROR,
"Database error occurred".to_string(),
)
}
AppError::ConfigError(ref err) => {
tracing::error!("Config error: {:?}", err);
(
StatusCode::INTERNAL_SERVER_ERROR,
"Configuration error occurred".to_string(),
)
}
AppError::IoError(ref err) => {
tracing::error!("IO error: {:?}", err);
(
StatusCode::INTERNAL_SERVER_ERROR,
"IO error occurred".to_string(),
)
}
AppError::StorageError(ref msg) => {
tracing::error!("Storage error: {}", msg);
(
StatusCode::INTERNAL_SERVER_ERROR,
"Storage error occurred".to_string(),
)
}
};
let body = Json(json!({
"error": error_message,
}));
(status, body).into_response()
}
}
impl From<sqlx::Error> for AppError {
fn from(err: sqlx::Error) -> Self {
AppError::DatabaseError(err)
}
}
impl From<config::ConfigError> for AppError {
fn from(err: config::ConfigError) -> Self {
AppError::ConfigError(err)
}
}
impl From<std::io::Error> for AppError {
fn from(err: std::io::Error) -> Self {
AppError::IoError(err)
}
}
pub type AppResult<T> = Result<T, AppError>;

@@ -0,0 +1,78 @@
use axum::{
extract::{Path, Query, State},
http::StatusCode,
Json,
};
use serde::Deserialize;
use std::sync::Arc;
use validator::Validate;
use crate::error::AppResult;
use crate::models::{CableColor, CableColorsListResponse, CreateCableColorRequest, UpdateCableColorRequest};
use crate::services::{CableColorService, ItemService, LoanService, StorageService};
#[derive(Deserialize)]
pub struct CableColorsQuery {
#[serde(default = "default_page")]
pub page: u32,
#[serde(default = "default_per_page")]
pub per_page: u32,
}
fn default_page() -> u32 {
1
}
fn default_per_page() -> u32 {
20
}
pub async fn list_cable_colors(
State((cable_color_service, _item_service, _loan_service, _storage_service)): State<(Arc<CableColorService>, Arc<ItemService>, Arc<LoanService>, Arc<StorageService>)>,
Query(params): Query<CableColorsQuery>,
) -> AppResult<Json<CableColorsListResponse>> {
let response = cable_color_service
.list_cable_colors(params.page, params.per_page)
.await?;
Ok(Json(response))
}
pub async fn get_cable_color(
State((cable_color_service, _item_service, _loan_service, _storage_service)): State<(Arc<CableColorService>, Arc<ItemService>, Arc<LoanService>, Arc<StorageService>)>,
Path(id): Path<i64>,
) -> AppResult<Json<CableColor>> {
let cable_color = cable_color_service.get_cable_color(id).await?;
Ok(Json(cable_color))
}
pub async fn create_cable_color(
State((cable_color_service, _item_service, _loan_service, _storage_service)): State<(Arc<CableColorService>, Arc<ItemService>, Arc<LoanService>, Arc<StorageService>)>,
Json(req): Json<CreateCableColorRequest>,
) -> AppResult<(StatusCode, Json<CableColor>)> {
req.validate()
.map_err(|e| crate::error::AppError::ValidationError(e.to_string()))?;
let cable_color = cable_color_service.create_cable_color(req).await?;
Ok((StatusCode::CREATED, Json(cable_color)))
}
pub async fn update_cable_color(
State((cable_color_service, _item_service, _loan_service, _storage_service)): State<(Arc<CableColorService>, Arc<ItemService>, Arc<LoanService>, Arc<StorageService>)>,
Path(id): Path<i64>,
Json(req): Json<UpdateCableColorRequest>,
) -> AppResult<Json<CableColor>> {
req.validate()
.map_err(|e| crate::error::AppError::ValidationError(e.to_string()))?;
let cable_color = cable_color_service.update_cable_color(id, req).await?;
Ok(Json(cable_color))
}
pub async fn delete_cable_color(
State((cable_color_service, _item_service, _loan_service, _storage_service)): State<(Arc<CableColorService>, Arc<ItemService>, Arc<LoanService>, Arc<StorageService>)>,
Path(id): Path<i64>,
) -> AppResult<StatusCode> {
cable_color_service.delete_cable_color(id).await?;
Ok(StatusCode::NO_CONTENT)
}

87
src/handlers/images.rs Normal file
@@ -0,0 +1,87 @@
use axum::{
extract::{Multipart, State},
http::StatusCode,
Json,
};
use serde::{Deserialize, Serialize};
use std::sync::Arc;
use crate::error::AppResult;
use crate::services::{ItemService, LoanService, StorageService, CableColorService};
#[derive(Debug, Serialize, Deserialize)]
pub struct ImageUploadResponse {
pub url: String,
pub filename: String,
pub size: usize,
}
pub async fn upload_image(
State((_cable_color_service, _item_service, _loan_service, storage_service)): State<(Arc<CableColorService>, Arc<ItemService>, Arc<LoanService>, Arc<StorageService>)>,
mut multipart: Multipart,
) -> AppResult<(StatusCode, Json<ImageUploadResponse>)> {
while let Some(field) = multipart.next_field().await.map_err(|e| {
crate::error::AppError::BadRequest(format!("Failed to read multipart field: {}", e))
})? {
let name = field.name().unwrap_or("").to_string();
if name == "image" {
let filename = field.file_name()
.ok_or_else(|| crate::error::AppError::BadRequest("No filename provided".to_string()))?
.to_string();
let content_type = field.content_type()
.unwrap_or("application/octet-stream")
.to_string();
// Validate that the uploaded file is an image
if !is_image_content_type(&content_type) {
return Err(crate::error::AppError::BadRequest(
"Only image files are allowed (JPEG, PNG, GIF, WebP)".to_string()
));
}
let data = field.bytes().await.map_err(|e| {
crate::error::AppError::BadRequest(format!("Failed to read file data: {}", e))
})?;
// File size limit (5 MB)
const MAX_FILE_SIZE: usize = 5 * 1024 * 1024;
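// NOTE: hardcoded even though StorageConfig exposes max_file_size_mb;
// wiring the configured limit through this handler is still open.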
if data.len() > MAX_FILE_SIZE {
return Err(crate::error::AppError::BadRequest(
"File size exceeds 5MB limit".to_string()
));
}
// Generate a unique filename
let unique_filename = generate_unique_filename(&filename);
// Upload to storage
let url = storage_service.upload(data.to_vec(), &unique_filename, &content_type).await?;
return Ok((StatusCode::CREATED, Json(ImageUploadResponse {
url,
filename: unique_filename,
size: data.len(),
})));
}
}
Err(crate::error::AppError::BadRequest("No image field found in multipart data".to_string()))
}
fn is_image_content_type(content_type: &str) -> bool {
matches!(content_type,
"image/jpeg" | "image/jpg" | "image/png" | "image/gif" | "image/webp"
)
}
fn generate_unique_filename(original_filename: &str) -> String {
let timestamp = chrono::Utc::now().timestamp_millis();
let extension = std::path::Path::new(original_filename)
.extension()
.and_then(|ext| ext.to_str())
.unwrap_or("jpg");
format!("{}_{}.{}", timestamp, uuid::Uuid::new_v4(), extension)
}

130
src/handlers/items.rs Normal file
@@ -0,0 +1,130 @@
use axum::{
extract::{Path, Query, State},
http::StatusCode,
Json,
};
use serde::{Deserialize, Serialize};
use std::sync::Arc;
use validator::Validate;
use crate::error::AppResult;
use crate::models::{CreateItemRequest, Item, ItemsListResponse, UpdateItemRequest};
use crate::services::{ItemService, LoanService, StorageService, CableColorService};
#[derive(Deserialize)]
pub struct ItemsQuery {
#[serde(default = "default_page")]
pub page: u32,
#[serde(default = "default_per_page")]
pub per_page: u32,
pub search: Option<String>,
pub is_on_loan: Option<bool>,
pub is_disposed: Option<bool>,
}
fn default_page() -> u32 {
1
}
fn default_per_page() -> u32 {
20
}
pub async fn list_items(
State((_cable_color_service, item_service, _loan_service, _storage_service)): State<(Arc<CableColorService>, Arc<ItemService>, Arc<LoanService>, Arc<StorageService>)>,
Query(params): Query<ItemsQuery>,
) -> AppResult<Json<ItemsListResponse>> {
let response = item_service
.list_items(
params.page,
params.per_page,
params.search,
params.is_on_loan,
params.is_disposed,
)
.await?;
Ok(Json(response))
}
pub async fn get_item(
State((_cable_color_service, item_service, _loan_service, _storage_service)): State<(Arc<CableColorService>, Arc<ItemService>, Arc<LoanService>, Arc<StorageService>)>,
Path(id): Path<i64>,
) -> AppResult<Json<Item>> {
let item = item_service.get_item(id).await?;
Ok(Json(item))
}
pub async fn get_item_by_label(
State((_cable_color_service, item_service, _loan_service, _storage_service)): State<(Arc<CableColorService>, Arc<ItemService>, Arc<LoanService>, Arc<StorageService>)>,
Path(label_id): Path<String>,
) -> AppResult<Json<Item>> {
let item = item_service.get_item_by_label(&label_id).await?;
Ok(Json(item))
}
pub async fn create_item(
State((_cable_color_service, item_service, _loan_service, _storage_service)): State<(Arc<CableColorService>, Arc<ItemService>, Arc<LoanService>, Arc<StorageService>)>,
Json(req): Json<CreateItemRequest>,
) -> AppResult<(StatusCode, Json<Item>)> {
req.validate()
.map_err(|e| crate::error::AppError::ValidationError(e.to_string()))?;
let item = item_service.create_item(req).await?;
Ok((StatusCode::CREATED, Json(item)))
}
pub async fn update_item(
State((_cable_color_service, item_service, _loan_service, _storage_service)): State<(Arc<CableColorService>, Arc<ItemService>, Arc<LoanService>, Arc<StorageService>)>,
Path(id): Path<i64>,
Json(req): Json<UpdateItemRequest>,
) -> AppResult<Json<Item>> {
req.validate()
.map_err(|e| crate::error::AppError::ValidationError(e.to_string()))?;
let item = item_service.update_item(id, req).await?;
Ok(Json(item))
}
pub async fn delete_item(
State((_cable_color_service, item_service, _loan_service, _storage_service)): State<(Arc<CableColorService>, Arc<ItemService>, Arc<LoanService>, Arc<StorageService>)>,
Path(id): Path<i64>,
) -> AppResult<StatusCode> {
item_service.delete_item(id).await?;
Ok(StatusCode::NO_CONTENT)
}
pub async fn dispose_item(
State((_cable_color_service, item_service, _loan_service, _storage_service)): State<(Arc<CableColorService>, Arc<ItemService>, Arc<LoanService>, Arc<StorageService>)>,
Path(id): Path<i64>,
) -> AppResult<Json<Item>> {
let item = item_service.dispose_item(id).await?;
Ok(Json(item))
}
pub async fn undispose_item(
State((_cable_color_service, item_service, _loan_service, _storage_service)): State<(Arc<CableColorService>, Arc<ItemService>, Arc<LoanService>, Arc<StorageService>)>,
Path(id): Path<i64>,
) -> AppResult<Json<Item>> {
let item = item_service.undispose_item(id).await?;
Ok(Json(item))
}
#[derive(Serialize)]
pub struct SuggestionsResponse {
pub suggestions: Vec<String>,
}
pub async fn get_connection_names_suggestions(
State((_cable_color_service, item_service, _loan_service, _storage_service)): State<(Arc<CableColorService>, Arc<ItemService>, Arc<LoanService>, Arc<StorageService>)>,
) -> AppResult<Json<SuggestionsResponse>> {
let suggestions = item_service.get_connection_names_suggestions().await?;
Ok(Json(SuggestionsResponse { suggestions }))
}
pub async fn get_storage_locations_suggestions(
State((_cable_color_service, item_service, _loan_service, _storage_service)): State<(Arc<CableColorService>, Arc<ItemService>, Arc<LoanService>, Arc<StorageService>)>,
) -> AppResult<Json<SuggestionsResponse>> {
let suggestions = item_service.get_storage_locations_suggestions().await?;
Ok(Json(SuggestionsResponse { suggestions }))
}

79
src/handlers/loans.rs Normal file
@@ -0,0 +1,79 @@
use axum::{
extract::{Path, Query, State},
http::StatusCode,
Json,
};
use serde::Deserialize;
use std::sync::Arc;
use validator::Validate;
use crate::error::AppResult;
use crate::models::{CreateLoanRequest, Loan, LoansListResponse, ReturnLoanRequest};
use crate::services::{ItemService, LoanService, StorageService, CableColorService};
#[derive(Deserialize)]
pub struct LoansQuery {
#[serde(default = "default_page")]
pub page: u32,
#[serde(default = "default_per_page")]
pub per_page: u32,
pub item_id: Option<i64>,
pub student_number: Option<String>,
pub active_only: Option<bool>,
}
fn default_page() -> u32 {
1
}
fn default_per_page() -> u32 {
20
}
pub async fn list_loans(
State((_cable_color_service, _item_service, loan_service, _storage_service)): State<(Arc<CableColorService>, Arc<ItemService>, Arc<LoanService>, Arc<StorageService>)>,
Query(params): Query<LoansQuery>,
) -> AppResult<Json<LoansListResponse>> {
let response = loan_service
.list_loans(
params.page,
params.per_page,
params.item_id,
params.student_number,
params.active_only,
)
.await?;
Ok(Json(response))
}
pub async fn get_loan(
State((_cable_color_service, _item_service, loan_service, _storage_service)): State<(Arc<CableColorService>, Arc<ItemService>, Arc<LoanService>, Arc<StorageService>)>,
Path(id): Path<i64>,
) -> AppResult<Json<Loan>> {
let loan = loan_service.get_loan(id).await?;
Ok(Json(loan))
}
pub async fn create_loan(
State((_cable_color_service, _item_service, loan_service, _storage_service)): State<(Arc<CableColorService>, Arc<ItemService>, Arc<LoanService>, Arc<StorageService>)>,
Json(req): Json<CreateLoanRequest>,
) -> AppResult<(StatusCode, Json<Loan>)> {
req.validate()
.map_err(|e| crate::error::AppError::ValidationError(e.to_string()))?;
let loan = loan_service.create_loan(req).await?;
Ok((StatusCode::CREATED, Json(loan)))
}
pub async fn return_loan(
State((_cable_color_service, _item_service, loan_service, _storage_service)): State<(Arc<CableColorService>, Arc<ItemService>, Arc<LoanService>, Arc<StorageService>)>,
Path(id): Path<i64>,
Json(req): Json<ReturnLoanRequest>,
) -> AppResult<Json<Loan>> {
req.validate()
.map_err(|e| crate::error::AppError::ValidationError(e.to_string()))?;
let loan = loan_service.return_loan(id, req).await?;
Ok(Json(loan))
}

15
src/handlers/mod.rs Normal file
@@ -0,0 +1,15 @@
pub mod items;
pub mod loans;
pub mod images;
pub mod cable_colors;
pub use items::*;
pub use loans::*;
pub use images::*;
pub use cable_colors::*;
use std::sync::Arc;
use crate::services::{ItemService, LoanService, StorageService, CableColorService};
// Shared application state. Must stay in sync with the 4-tuple passed to
// `.with_state(...)` in main.rs and destructured by every handler.
pub type AppState = (Arc<CableColorService>, Arc<ItemService>, Arc<LoanService>, Arc<StorageService>);

111
src/main.rs Normal file
@@ -0,0 +1,111 @@
use axum::{
routing::{delete, get, post, put},
Router,
};
use std::net::SocketAddr;
use std::sync::Arc;
use tower_http::cors::CorsLayer;
use tower_http::services::ServeDir;
use tower_http::trace::TraceLayer;
use tracing::info;
use tracing_subscriber::{layer::SubscriberExt, util::SubscriberInitExt};
mod config;
mod db;
mod error;
mod handlers;
mod models;
mod services;
use crate::config::{Config, StorageType};
use crate::db::DatabasePool;
use crate::services::{ItemService, LoanService, StorageService, CableColorService};
#[tokio::main]
async fn main() -> anyhow::Result<()> {
// Initialize tracing
tracing_subscriber::registry()
.with(
tracing_subscriber::EnvFilter::try_from_default_env()
.unwrap_or_else(|_| "hyperdashi_server=debug,tower_http=debug".into()),
)
.with(tracing_subscriber::fmt::layer())
.init();
info!("Starting HyperDashi server...");
// Load configuration
let config = Config::from_env()?;
info!("Configuration loaded: {:?}", config);
// Initialize database connection
let db_pool = DatabasePool::new(&config).await?;
info!("Database connection established");
// Run migrations
db_pool.migrate().await?;
info!("Database migrations completed");
// Initialize storage
let storage = Arc::new(StorageService::new(&config).await?);
info!("Storage initialized");
// Initialize services
let cable_color_service = Arc::new(CableColorService::new(db_pool.clone()));
let item_service = Arc::new(ItemService::new(db_pool.clone()));
let loan_service = Arc::new(LoanService::new(db_pool.clone()));
// Build application routes
let mut app = Router::new()
.route("/", get(root))
.route("/api/v1/health", get(health_check))
// Item routes
.route("/api/v1/items", get(handlers::list_items).post(handlers::create_item))
.route("/api/v1/items/:id", get(handlers::get_item).put(handlers::update_item).delete(handlers::delete_item))
.route("/api/v1/items/:id/dispose", post(handlers::dispose_item))
.route("/api/v1/items/:id/undispose", post(handlers::undispose_item))
.route("/api/v1/items/by-label/:label_id", get(handlers::get_item_by_label))
.route("/api/v1/items/suggestions/connection_names", get(handlers::get_connection_names_suggestions))
.route("/api/v1/items/suggestions/storage_locations", get(handlers::get_storage_locations_suggestions))
// Cable color routes
.route("/api/v1/cable_colors", get(handlers::list_cable_colors).post(handlers::create_cable_color))
.route("/api/v1/cable_colors/:id", get(handlers::get_cable_color).put(handlers::update_cable_color).delete(handlers::delete_cable_color))
// Loan routes
.route("/api/v1/loans", get(handlers::list_loans).post(handlers::create_loan))
.route("/api/v1/loans/:id", get(handlers::get_loan))
.route("/api/v1/loans/:id/return", post(handlers::return_loan))
// Image routes
.route("/api/v1/images/upload", post(handlers::upload_image))
// Add state - combine services
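// NOTE: every handler destructures this exact 4-tuple in its State extractor,
// so the element order here must match.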
.with_state((cable_color_service, item_service, loan_service, storage))
.layer(CorsLayer::permissive())
.layer(TraceLayer::new_for_http());
// Add static file serving for local storage
if matches!(config.storage.storage_type, StorageType::Local) {
if let Some(local_config) = &config.storage.local {
info!("Enabling static file serving for uploads at {}", local_config.path);
app = app.nest_service("/uploads", ServeDir::new(&local_config.path));
}
}
// Start server
let addr = SocketAddr::from((
config.server.host.parse::<std::net::IpAddr>()?,
config.server.port,
));
info!("Server listening on {}", addr);
let listener = tokio::net::TcpListener::bind(addr).await?;
axum::serve(listener, app).await?;
Ok(())
}
async fn root() -> &'static str {
"HyperDashi Server"
}
async fn health_check() -> &'static str {
"OK"
}

46
src/models/cable_color.rs Normal file
@@ -0,0 +1,46 @@
use chrono::{DateTime, Utc};
use serde::{Deserialize, Serialize};
use validator::Validate;
#[derive(Debug, Serialize, Deserialize)]
pub struct CableColor {
pub id: i64,
pub name: String,
pub hex_code: Option<String>,
pub description: Option<String>,
pub created_at: DateTime<Utc>,
pub updated_at: DateTime<Utc>,
}
#[derive(Debug, Deserialize, Validate)]
pub struct CreateCableColorRequest {
#[validate(length(min = 1, max = 100))]
pub name: String,
#[validate(regex(path = *crate::models::cable_color::HEX_COLOR_REGEX))]
pub hex_code: Option<String>,
pub description: Option<String>,
}
#[derive(Debug, Deserialize, Validate)]
pub struct UpdateCableColorRequest {
#[validate(length(min = 1, max = 100))]
pub name: Option<String>,
#[validate(regex(path = *crate::models::cable_color::HEX_COLOR_REGEX))]
pub hex_code: Option<String>,
pub description: Option<String>,
}
#[derive(Debug, Serialize)]
pub struct CableColorsListResponse {
pub cable_colors: Vec<CableColor>,
pub total: i64,
pub page: u32,
pub per_page: u32,
}
use lazy_static::lazy_static;
use regex::Regex;
lazy_static! {
pub static ref HEX_COLOR_REGEX: Regex = Regex::new(r"^#[0-9A-Fa-f]{6}$").unwrap();
}

107
src/models/item.rs Normal file
@@ -0,0 +1,107 @@
use chrono::{DateTime, Utc};
use serde::{Deserialize, Serialize};
use validator::Validate;
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct Item {
pub id: i64,
pub name: String,
pub label_id: String,
pub model_number: Option<String>,
pub remarks: Option<String>,
pub purchase_year: Option<i64>,
pub purchase_amount: Option<f64>,
pub durability_years: Option<i64>,
pub is_depreciation_target: Option<bool>,
pub connection_names: Option<Vec<String>>,
pub cable_color_pattern: Option<Vec<String>>,
pub storage_locations: Option<Vec<String>>,
pub is_on_loan: Option<bool>,
pub qr_code_type: Option<String>,
pub is_disposed: Option<bool>,
pub image_url: Option<String>,
pub created_at: DateTime<Utc>,
pub updated_at: DateTime<Utc>,
}
#[derive(Debug, Clone, Serialize, Deserialize, Validate)]
pub struct CreateItemRequest {
#[validate(length(min = 1, max = 255))]
pub name: String,
#[validate(length(min = 1, max = 50))]
pub label_id: String,
#[validate(length(max = 255))]
pub model_number: Option<String>,
pub remarks: Option<String>,
#[validate(range(min = 1900, max = 2100))]
pub purchase_year: Option<i64>,
pub purchase_amount: Option<f64>,
#[validate(range(min = 1, max = 100))]
pub durability_years: Option<i64>,
pub is_depreciation_target: Option<bool>,
pub connection_names: Option<Vec<String>>,
pub cable_color_pattern: Option<Vec<String>>,
pub storage_locations: Option<Vec<String>>,
pub qr_code_type: Option<String>,
#[validate(url)]
pub image_url: Option<String>,
}
#[derive(Debug, Clone, Serialize, Deserialize, Validate)]
pub struct UpdateItemRequest {
#[validate(length(min = 1, max = 255))]
pub name: Option<String>,
#[validate(length(min = 1, max = 50))]
pub label_id: Option<String>,
#[validate(length(max = 255))]
pub model_number: Option<String>,
pub remarks: Option<String>,
#[validate(range(min = 1900, max = 2100))]
pub purchase_year: Option<i64>,
pub purchase_amount: Option<f64>,
#[validate(range(min = 1, max = 100))]
pub durability_years: Option<i64>,
pub is_depreciation_target: Option<bool>,
pub connection_names: Option<Vec<String>>,
pub cable_color_pattern: Option<Vec<String>>,
pub storage_locations: Option<Vec<String>>,
pub is_on_loan: Option<bool>,
pub qr_code_type: Option<String>,
pub is_disposed: Option<bool>,
#[validate(url)]
pub image_url: Option<String>,
}
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct ItemsListResponse {
pub items: Vec<Item>,
pub total: i64,
pub page: u32,
pub per_page: u32,
}

72
src/models/loan.rs Normal file
@@ -0,0 +1,72 @@
use chrono::{DateTime, Utc};
use serde::{Deserialize, Serialize};
use validator::Validate;
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct Loan {
pub id: i64,
pub item_id: i64,
pub student_number: String,
pub student_name: String,
pub organization: Option<String>,
pub loan_date: DateTime<Utc>,
pub return_date: Option<DateTime<Utc>>,
pub remarks: Option<String>,
pub created_at: DateTime<Utc>,
pub updated_at: DateTime<Utc>,
}
#[derive(Debug, Clone, Serialize, Deserialize, Validate)]
pub struct CreateLoanRequest {
pub item_id: i64,
#[validate(length(min = 1, max = 20))]
pub student_number: String,
#[validate(length(min = 1, max = 100))]
pub student_name: String,
#[validate(length(max = 255))]
pub organization: Option<String>,
pub remarks: Option<String>,
}
#[derive(Debug, Clone, Serialize, Deserialize, Validate)]
pub struct ReturnLoanRequest {
pub return_date: Option<DateTime<Utc>>,
pub remarks: Option<String>,
}
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct LoanWithItem {
pub id: i64,
pub item_id: i64,
pub item_name: String,
pub item_label_id: String,
pub student_number: String,
pub student_name: String,
pub organization: Option<String>,
pub loan_date: DateTime<Utc>,
pub return_date: Option<DateTime<Utc>>,
pub remarks: Option<String>,
pub created_at: DateTime<Utc>,
pub updated_at: DateTime<Utc>,
}
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct LoansListResponse {
pub loans: Vec<LoanWithItem>,
pub total: i64,
pub page: u32,
pub per_page: u32,
}
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct LoanFilters {
pub item_id: Option<i64>,
pub student_number: Option<String>,
pub active_only: Option<bool>,
pub page: Option<u32>,
pub per_page: Option<u32>,
}

7
src/models/mod.rs Normal file
@@ -0,0 +1,7 @@
pub mod item;
pub mod loan;
pub mod cable_color;
pub use item::*;
pub use loan::*;
pub use cable_color::*;

@@ -0,0 +1,170 @@
use crate::db::DatabasePool;
use crate::error::{AppError, AppResult};
use crate::models::{CableColor, CableColorsListResponse, CreateCableColorRequest, UpdateCableColorRequest};
use sqlx::Row;
pub struct CableColorService {
db: DatabasePool,
}
impl CableColorService {
pub fn new(db: DatabasePool) -> Self {
Self { db }
}
pub async fn create_cable_color(&self, req: CreateCableColorRequest) -> AppResult<CableColor> {
match &self.db {
DatabasePool::Postgres(_pool) => {
Err(AppError::InternalServerError("PostgreSQL support not implemented yet".to_string()))
}
DatabasePool::Sqlite(pool) => {
let result = sqlx::query(
r#"
INSERT INTO cable_colors (name, hex_code, description)
VALUES (?1, ?2, ?3)
"#,
)
.bind(&req.name)
.bind(&req.hex_code)
.bind(&req.description)
.execute(pool)
.await?;
let id = result.last_insert_rowid();
self.get_cable_color(id).await
}
}
}
pub async fn get_cable_color(&self, id: i64) -> AppResult<CableColor> {
match &self.db {
DatabasePool::Postgres(_pool) => {
Err(AppError::InternalServerError("PostgreSQL support not implemented yet".to_string()))
}
DatabasePool::Sqlite(pool) => {
let row = sqlx::query(
r#"
SELECT id, name, hex_code, description, created_at, updated_at
FROM cable_colors
WHERE id = ?1
"#,
)
.bind(id)
.fetch_optional(pool)
.await?
.ok_or_else(|| AppError::NotFound(format!("Cable color with id {} not found", id)))?;
Ok(self.row_to_cable_color(row))
}
}
}
pub async fn list_cable_colors(
&self,
page: u32,
per_page: u32,
) -> AppResult<CableColorsListResponse> {
let offset = ((page - 1) * per_page) as i64;
let limit = per_page as i64;
match &self.db {
DatabasePool::Postgres(_pool) => {
Err(AppError::InternalServerError("PostgreSQL support not implemented yet".to_string()))
}
DatabasePool::Sqlite(pool) => {
let rows = sqlx::query(
r#"
SELECT id, name, hex_code, description, created_at, updated_at
FROM cable_colors
ORDER BY created_at DESC
LIMIT ?1 OFFSET ?2
"#,
)
.bind(limit)
.bind(offset)
.fetch_all(pool)
.await?;
let cable_colors: Vec<CableColor> = rows.into_iter()
.map(|row| self.row_to_cable_color(row))
.collect();
let count_row = sqlx::query("SELECT COUNT(*) as count FROM cable_colors")
.fetch_one(pool)
.await?;
let total: i64 = count_row.get("count");
Ok(CableColorsListResponse {
cable_colors,
total,
page,
per_page,
})
}
}
}
pub async fn update_cable_color(&self, id: i64, req: UpdateCableColorRequest) -> AppResult<CableColor> {
match &self.db {
DatabasePool::Postgres(_pool) => {
Err(AppError::InternalServerError("PostgreSQL support not implemented yet".to_string()))
}
DatabasePool::Sqlite(pool) => {
// Check that the cable color exists first
let _existing_color = self.get_cable_color(id).await?;
let now = chrono::Utc::now();
sqlx::query(
r#"
UPDATE cable_colors SET
name = COALESCE(?2, name),
hex_code = COALESCE(?3, hex_code),
description = COALESCE(?4, description),
updated_at = ?5
WHERE id = ?1
"#,
)
.bind(id)
.bind(&req.name)
.bind(&req.hex_code)
.bind(&req.description)
.bind(now)
.execute(pool)
.await?;
self.get_cable_color(id).await
}
}
}
pub async fn delete_cable_color(&self, id: i64) -> AppResult<()> {
match &self.db {
DatabasePool::Postgres(_pool) => {
Err(AppError::InternalServerError("PostgreSQL support not implemented yet".to_string()))
}
DatabasePool::Sqlite(pool) => {
let result = sqlx::query("DELETE FROM cable_colors WHERE id = ?1")
.bind(id)
.execute(pool)
.await?;
if result.rows_affected() == 0 {
return Err(AppError::NotFound(format!("Cable color with id {} not found", id)));
}
Ok(())
}
}
}
fn row_to_cable_color(&self, row: sqlx::sqlite::SqliteRow) -> CableColor {
CableColor {
id: row.get("id"),
name: row.get("name"),
hex_code: row.get("hex_code"),
description: row.get("description"),
created_at: row.get("created_at"),
updated_at: row.get("updated_at"),
}
}
}

@@ -0,0 +1,496 @@
use crate::db::DatabasePool;
use crate::error::{AppError, AppResult};
use crate::models::{CreateItemRequest, Item, ItemsListResponse, UpdateItemRequest};
use chrono::Utc;
use sqlx::Row;
pub struct ItemService {
db: DatabasePool,
}
impl ItemService {
pub fn new(db: DatabasePool) -> Self {
Self { db }
}
pub async fn create_item(&self, req: CreateItemRequest) -> AppResult<Item> {
match &self.db {
DatabasePool::Postgres(_pool) => {
Err(AppError::InternalServerError("PostgreSQL support not implemented yet".to_string()))
}
DatabasePool::Sqlite(pool) => {
let connection_names = req.connection_names
.map(|v| serde_json::to_string(&v).unwrap_or_default());
let cable_color_pattern = req.cable_color_pattern
.map(|v| serde_json::to_string(&v).unwrap_or_default());
let storage_locations = req.storage_locations
.map(|v| serde_json::to_string(&v).unwrap_or_default());
let is_depreciation_target = req.is_depreciation_target.unwrap_or(false);
let result = sqlx::query!(
r#"
INSERT INTO items (
name, label_id, model_number, remarks, purchase_year,
purchase_amount, durability_years, is_depreciation_target, connection_names,
cable_color_pattern, storage_locations, qr_code_type, image_url
) VALUES (?1, ?2, ?3, ?4, ?5, ?6, ?7, ?8, ?9, ?10, ?11, ?12, ?13)
"#,
req.name,
req.label_id,
req.model_number,
req.remarks,
req.purchase_year,
req.purchase_amount,
req.durability_years,
is_depreciation_target,
connection_names,
cable_color_pattern,
storage_locations,
req.qr_code_type,
req.image_url
)
.execute(pool)
.await?;
let id = result.last_insert_rowid();
self.get_item(id).await
}
}
}
pub async fn get_item(&self, id: i64) -> AppResult<Item> {
match &self.db {
DatabasePool::Postgres(_pool) => {
Err(AppError::InternalServerError("PostgreSQL support not implemented yet".to_string()))
}
DatabasePool::Sqlite(pool) => {
let row = sqlx::query(
r#"
SELECT
id, name, label_id, model_number, remarks, purchase_year,
purchase_amount, durability_years, is_depreciation_target,
connection_names, cable_color_pattern, storage_locations,
is_on_loan, qr_code_type, is_disposed, image_url,
created_at, updated_at
FROM items
WHERE id = ?1
"#,
)
.bind(id)
.fetch_optional(pool)
.await?
.ok_or_else(|| AppError::NotFound(format!("Item with id {} not found", id)))?;
Ok(self.row_to_item(row))
}
}
}
pub async fn get_item_by_label(&self, label_id: &str) -> AppResult<Item> {
match &self.db {
DatabasePool::Postgres(_pool) => {
Err(AppError::InternalServerError("PostgreSQL support not implemented yet".to_string()))
}
DatabasePool::Sqlite(pool) => {
let row = sqlx::query(
r#"
SELECT
id, name, label_id, model_number, remarks, purchase_year,
purchase_amount, durability_years, is_depreciation_target,
connection_names, cable_color_pattern, storage_locations,
is_on_loan, qr_code_type, is_disposed, image_url,
created_at, updated_at
FROM items
WHERE label_id = ?1
"#,
)
.bind(label_id)
.fetch_optional(pool)
.await?
.ok_or_else(|| AppError::NotFound(format!("Item with label_id {} not found", label_id)))?;
Ok(self.row_to_item(row))
}
}
}
pub async fn list_items(
&self,
page: u32,
per_page: u32,
search: Option<String>,
is_on_loan: Option<bool>,
is_disposed: Option<bool>,
) -> AppResult<ItemsListResponse> {
let page = page.max(1); // guard: page is 1-based, so page = 0 would underflow below
let offset = ((page - 1) * per_page) as i64;
let limit = per_page as i64;
match &self.db {
DatabasePool::Postgres(_pool) => {
Err(AppError::InternalServerError("PostgreSQL support not implemented yet".to_string()))
}
DatabasePool::Sqlite(pool) => {
// Build the WHERE conditions dynamically (simple approach)
let mut where_conditions = Vec::new();
// Search filter
if search.is_some() {
where_conditions.push("(name LIKE ? OR label_id LIKE ? OR model_number LIKE ? OR remarks LIKE ?)".to_string());
}
// Loan-status filter
if is_on_loan.is_some() {
where_conditions.push("is_on_loan = ?".to_string());
}
// Disposal-status filter
if is_disposed.is_some() {
where_conditions.push("is_disposed = ?".to_string());
}
let where_clause = if where_conditions.is_empty() {
String::new()
} else {
format!("WHERE {}", where_conditions.join(" AND "))
};
// Keep it simple: branch on whether any filter is present
let (items, total) = if search.is_none() && is_on_loan.is_none() && is_disposed.is_none() {
// No filters
let rows = sqlx::query(
r#"
SELECT
id, name, label_id, model_number, remarks, purchase_year,
purchase_amount, durability_years, is_depreciation_target,
connection_names, cable_color_pattern, storage_locations,
is_on_loan, qr_code_type, is_disposed, image_url,
created_at, updated_at
FROM items
ORDER BY created_at DESC
LIMIT ?1 OFFSET ?2
"#,
)
.bind(limit)
.bind(offset)
.fetch_all(pool)
.await?;
let items: Vec<Item> = rows.into_iter()
.map(|row| self.row_to_item(row))
.collect();
let count_row = sqlx::query("SELECT COUNT(*) as count FROM items")
.fetch_one(pool)
.await?;
let total: i64 = count_row.get("count");
(items, total)
} else {
// Filters present - use the dynamically built query
let query_str = format!(
r#"
SELECT
id, name, label_id, model_number, remarks, purchase_year,
purchase_amount, durability_years, is_depreciation_target,
connection_names, cable_color_pattern, storage_locations,
is_on_loan, qr_code_type, is_disposed, image_url,
created_at, updated_at
FROM items
{}
ORDER BY created_at DESC
LIMIT ? OFFSET ?
"#,
where_clause
);
let count_query_str = format!("SELECT COUNT(*) as count FROM items {}", where_clause);
// Bind the same parameters to both the data query and the count query
let mut query = sqlx::query(&query_str);
let mut count_query = sqlx::query(&count_query_str);
// Search filter (four placeholders, one per searched column)
if let Some(search_term) = &search {
let search_pattern = format!("%{}%", search_term);
query = query.bind(search_pattern.clone()).bind(search_pattern.clone()).bind(search_pattern.clone()).bind(search_pattern.clone());
count_query = count_query.bind(search_pattern.clone()).bind(search_pattern.clone()).bind(search_pattern.clone()).bind(search_pattern);
}
// Loan-status filter
if let Some(loan_status) = is_on_loan {
let loan_value = if loan_status { 1i32 } else { 0i32 };
query = query.bind(loan_value);
count_query = count_query.bind(loan_value);
}
// Disposal-status filter
if let Some(disposed_status) = is_disposed {
let disposed_value = if disposed_status { 1i32 } else { 0i32 };
query = query.bind(disposed_value);
count_query = count_query.bind(disposed_value);
}
// Bind LIMIT/OFFSET last, matching their position in the SQL
query = query.bind(limit).bind(offset);
let rows = query.fetch_all(pool).await?;
let items: Vec<Item> = rows.into_iter()
.map(|row| self.row_to_item(row))
.collect();
let count_row = count_query.fetch_one(pool).await?;
let total: i64 = count_row.get("count");
(items, total)
};
Ok(ItemsListResponse {
items,
total,
page,
per_page,
})
}
}
}
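// Partial update via COALESCE: any field bound as NULL falls through to the
// existing column value, so callers can send only the fields they change.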
pub async fn update_item(&self, id: i64, req: UpdateItemRequest) -> AppResult<Item> {
match &self.db {
DatabasePool::Postgres(_pool) => {
Err(AppError::InternalServerError("PostgreSQL support not implemented yet".to_string()))
}
DatabasePool::Sqlite(pool) => {
// First make sure the item exists
let _existing_item = self.get_item(id).await?;
// Serialize the JSON-array fields
let connection_names_json = req.connection_names
.as_ref()
.map(|names| serde_json::to_string(names))
.transpose()
.map_err(|e| AppError::InternalServerError(format!("Failed to serialize connection_names: {}", e)))?;
let cable_color_pattern_json = req.cable_color_pattern
.as_ref()
.map(|pattern| serde_json::to_string(pattern))
.transpose()
.map_err(|e| AppError::InternalServerError(format!("Failed to serialize cable_color_pattern: {}", e)))?;
let storage_locations_json = req.storage_locations
.as_ref()
.map(|locations| serde_json::to_string(locations))
.transpose()
.map_err(|e| AppError::InternalServerError(format!("Failed to serialize storage_locations: {}", e)))?;
let now = chrono::Utc::now();
sqlx::query!(
r#"
UPDATE items SET
name = COALESCE(?2, name),
label_id = COALESCE(?3, label_id),
model_number = COALESCE(?4, model_number),
remarks = COALESCE(?5, remarks),
purchase_year = COALESCE(?6, purchase_year),
purchase_amount = COALESCE(?7, purchase_amount),
durability_years = COALESCE(?8, durability_years),
is_depreciation_target = COALESCE(?9, is_depreciation_target),
connection_names = COALESCE(?10, connection_names),
cable_color_pattern = COALESCE(?11, cable_color_pattern),
storage_locations = COALESCE(?12, storage_locations),
qr_code_type = COALESCE(?13, qr_code_type),
image_url = COALESCE(?14, image_url),
updated_at = ?15
WHERE id = ?1
"#,
id,
req.name,
req.label_id,
req.model_number,
req.remarks,
req.purchase_year,
req.purchase_amount,
req.durability_years,
req.is_depreciation_target,
connection_names_json,
cable_color_pattern_json,
storage_locations_json,
req.qr_code_type,
req.image_url,
now
)
.execute(pool)
.await?;
// Fetch and return the updated item
self.get_item(id).await
}
}
}
pub async fn delete_item(&self, id: i64) -> AppResult<()> {
match &self.db {
DatabasePool::Postgres(_pool) => {
Err(AppError::InternalServerError("PostgreSQL support not implemented yet".to_string()))
}
DatabasePool::Sqlite(pool) => {
// Make sure the item exists and is not currently on loan
let item = self.get_item(id).await?;
if item.is_on_loan.unwrap_or(false) {
return Err(AppError::BadRequest("Cannot delete item that is currently on loan".to_string()));
}
// Also reject deletion while any loan is still open
let active_loans = sqlx::query!(
"SELECT COUNT(*) as count FROM loans WHERE item_id = ?1 AND return_date IS NULL",
id
)
.fetch_one(pool)
.await?;
if active_loans.count > 0 {
return Err(AppError::BadRequest("Cannot delete item with active loans".to_string()));
}
let result = sqlx::query("DELETE FROM items WHERE id = ?1")
.bind(id)
.execute(pool)
.await?;
if result.rows_affected() == 0 {
return Err(AppError::NotFound(format!("Item with id {} not found", id)));
}
Ok(())
}
}
}
pub async fn dispose_item(&self, id: i64) -> AppResult<Item> {
match &self.db {
DatabasePool::Postgres(_pool) => {
Err(AppError::InternalServerError("PostgreSQL support not implemented yet".to_string()))
}
DatabasePool::Sqlite(pool) => {
let now = Utc::now();
let result = sqlx::query("UPDATE items SET is_disposed = 1, updated_at = ?2 WHERE id = ?1")
.bind(id)
.bind(now)
.execute(pool)
.await?;
if result.rows_affected() == 0 {
return Err(AppError::NotFound(format!("Item with id {} not found", id)));
}
self.get_item(id).await
}
}
}
pub async fn undispose_item(&self, id: i64) -> AppResult<Item> {
match &self.db {
DatabasePool::Postgres(_pool) => {
Err(AppError::InternalServerError("PostgreSQL support not implemented yet".to_string()))
}
DatabasePool::Sqlite(pool) => {
let now = Utc::now();
let result = sqlx::query("UPDATE items SET is_disposed = 0, updated_at = ?2 WHERE id = ?1")
.bind(id)
.bind(now)
.execute(pool)
.await?;
if result.rows_affected() == 0 {
return Err(AppError::NotFound(format!("Item with id {} not found", id)));
}
self.get_item(id).await
}
}
}
pub async fn get_connection_names_suggestions(&self) -> AppResult<Vec<String>> {
match &self.db {
DatabasePool::Postgres(_pool) => {
Err(AppError::InternalServerError("PostgreSQL support not implemented yet".to_string()))
}
DatabasePool::Sqlite(pool) => {
let rows = sqlx::query("SELECT DISTINCT connection_names FROM items WHERE connection_names IS NOT NULL AND connection_names != ''")
.fetch_all(pool)
.await?;
let mut suggestions = Vec::new();
for row in rows {
if let Some(json_str) = row.get::<Option<String>, _>("connection_names") {
if let Ok(names) = serde_json::from_str::<Vec<String>>(&json_str) {
suggestions.extend(names);
}
}
}
// Sort and de-duplicate
suggestions.sort();
suggestions.dedup();
Ok(suggestions)
}
}
}
pub async fn get_storage_locations_suggestions(&self) -> AppResult<Vec<String>> {
match &self.db {
DatabasePool::Postgres(_pool) => {
Err(AppError::InternalServerError("PostgreSQL support not implemented yet".to_string()))
}
DatabasePool::Sqlite(pool) => {
let rows = sqlx::query("SELECT DISTINCT storage_locations FROM items WHERE storage_locations IS NOT NULL AND storage_locations != ''")
.fetch_all(pool)
.await?;
let mut suggestions = Vec::new();
for row in rows {
if let Some(json_str) = row.get::<Option<String>, _>("storage_locations") {
if let Ok(locations) = serde_json::from_str::<Vec<String>>(&json_str) {
suggestions.extend(locations);
}
}
}
// Sort and de-duplicate
suggestions.sort();
suggestions.dedup();
Ok(suggestions)
}
}
}
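// The three array fields are stored as JSON text; decoding is lenient, so a
// malformed value maps to None instead of failing the whole row.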
fn row_to_item(&self, row: sqlx::sqlite::SqliteRow) -> Item {
let connection_names: Option<Vec<String>> = row.get::<Option<String>, _>("connection_names")
.and_then(|s| serde_json::from_str(&s).ok());
let cable_color_pattern: Option<Vec<String>> = row.get::<Option<String>, _>("cable_color_pattern")
.and_then(|s| serde_json::from_str(&s).ok());
let storage_locations: Option<Vec<String>> = row.get::<Option<String>, _>("storage_locations")
.and_then(|s| serde_json::from_str(&s).ok());
Item {
id: row.get("id"),
name: row.get("name"),
label_id: row.get("label_id"),
model_number: row.get("model_number"),
remarks: row.get("remarks"),
purchase_year: row.get("purchase_year"),
purchase_amount: row.get("purchase_amount"),
durability_years: row.get("durability_years"),
is_depreciation_target: row.get("is_depreciation_target"),
connection_names,
cable_color_pattern,
storage_locations,
is_on_loan: row.get("is_on_loan"),
qr_code_type: row.get("qr_code_type"),
is_disposed: row.get("is_disposed"),
image_url: row.get("image_url"),
created_at: row.get("created_at"),
updated_at: row.get("updated_at"),
}
}
}
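// Editorial sketch, not part of the commit: the dynamic-filter pattern above
// uses unnumbered `?` placeholders, so correctness depends on binding values
// in exactly the order the conditions were pushed, with LIMIT/OFFSET last.
// The table is a minimal stand-in for items; only sqlx and tokio are assumed.
#[cfg(test)]
mod dynamic_filter_sketch {
use sqlx::Row;
#[tokio::test]
async fn binds_follow_push_order() -> Result<(), sqlx::Error> {
let pool = sqlx::SqlitePool::connect("sqlite::memory:").await?;
sqlx::query("CREATE TABLE items (name TEXT, is_on_loan INTEGER)")
.execute(&pool)
.await?;
sqlx::query("INSERT INTO items VALUES ('mic', 1), ('cable', 0)")
.execute(&pool)
.await?;
let search: Option<&str> = Some("mic");
let on_loan: Option<bool> = Some(true);
let mut conds = Vec::new();
if search.is_some() { conds.push("name LIKE ?"); }
if on_loan.is_some() { conds.push("is_on_loan = ?"); }
let sql = format!("SELECT name FROM items WHERE {} LIMIT ? OFFSET ?", conds.join(" AND "));
// Bind in push order, then LIMIT/OFFSET - the same discipline as list_items.
let mut q = sqlx::query(&sql);
if let Some(s) = search { q = q.bind(format!("%{}%", s)); }
if let Some(b) = on_loan { q = q.bind(b as i32); }
q = q.bind(10i64).bind(0i64);
let row = q.fetch_one(&pool).await?;
assert_eq!(row.get::<String, _>("name"), "mic");
Ok(())
}
}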

313
src/services/loan_service.rs Normal file
View file

@ -0,0 +1,313 @@
use crate::db::DatabasePool;
use crate::error::{AppError, AppResult};
use crate::models::{CreateLoanRequest, Loan, LoanWithItem, LoansListResponse, ReturnLoanRequest};
use chrono::Utc;
use sqlx::Row;
pub struct LoanService {
db: DatabasePool,
}
impl LoanService {
pub fn new(db: DatabasePool) -> Self {
Self { db }
}
pub async fn create_loan(&self, req: CreateLoanRequest) -> AppResult<Loan> {
match &self.db {
DatabasePool::Postgres(_pool) => {
Err(AppError::InternalServerError("PostgreSQL support not implemented yet".to_string()))
}
DatabasePool::Sqlite(pool) => {
// First check that the item exists and can be loaned out
let item_check = sqlx::query!(
"SELECT id, name, is_on_loan, is_disposed FROM items WHERE id = ?1",
req.item_id
)
.fetch_optional(pool)
.await?;
let item = item_check.ok_or_else(||
AppError::NotFound(format!("Item with id {} not found", req.item_id))
)?;
if item.is_on_loan.unwrap_or(false) {
return Err(AppError::BadRequest("Item is already on loan".to_string()));
}
if item.is_disposed.unwrap_or(false) {
return Err(AppError::BadRequest("Item is disposed and cannot be loaned".to_string()));
}
// Create the loan record
let result = sqlx::query!(
r#"
INSERT INTO loans (
item_id, student_number, student_name, organization, remarks
) VALUES (?1, ?2, ?3, ?4, ?5)
"#,
req.item_id,
req.student_number,
req.student_name,
req.organization,
req.remarks
)
.execute(pool)
.await?;
// Mark the item as on loan
let now = Utc::now();
sqlx::query!(
"UPDATE items SET is_on_loan = 1, updated_at = ?2 WHERE id = ?1",
req.item_id,
now
)
.execute(pool)
.await?;
let loan_id = result.last_insert_rowid();
self.get_loan(loan_id).await
}
}
}
pub async fn get_loan(&self, id: i64) -> AppResult<Loan> {
match &self.db {
DatabasePool::Postgres(_pool) => {
Err(AppError::InternalServerError("PostgreSQL support not implemented yet".to_string()))
}
DatabasePool::Sqlite(pool) => {
let row = sqlx::query(
r#"
SELECT
id, item_id, student_number, student_name, organization,
loan_date, return_date, remarks, created_at, updated_at
FROM loans
WHERE id = ?1
"#,
)
.bind(id)
.fetch_optional(pool)
.await?
.ok_or_else(|| AppError::NotFound(format!("Loan with id {} not found", id)))?;
Ok(self.row_to_loan(row))
}
}
}
pub async fn list_loans(
&self,
page: u32,
per_page: u32,
item_id: Option<i64>,
student_number: Option<String>,
active_only: Option<bool>,
) -> AppResult<LoansListResponse> {
let page = page.max(1); // guard: page is 1-based, so page = 0 would underflow below
let offset = ((page - 1) * per_page) as i64;
let limit = per_page as i64;
match &self.db {
DatabasePool::Postgres(_pool) => {
Err(AppError::InternalServerError("PostgreSQL support not implemented yet".to_string()))
}
DatabasePool::Sqlite(pool) => {
// Apply the optional filters
let (loans, total) = if item_id.is_none() && student_number.is_none() && active_only.is_none() {
// No filters
let rows = sqlx::query(
r#"
SELECT
l.id, l.item_id, l.student_number, l.student_name, l.organization,
l.loan_date, l.return_date, l.remarks, l.created_at, l.updated_at,
i.name as item_name, i.label_id as item_label_id
FROM loans l
INNER JOIN items i ON l.item_id = i.id
ORDER BY l.created_at DESC
LIMIT ?1 OFFSET ?2
"#,
)
.bind(limit)
.bind(offset)
.fetch_all(pool)
.await?;
let loans: Vec<LoanWithItem> = rows.into_iter()
.map(|row| self.row_to_loan_with_item(row))
.collect();
let count_row = sqlx::query("SELECT COUNT(*) as count FROM loans")
.fetch_one(pool)
.await?;
let total: i64 = count_row.get("count");
(loans, total)
} else {
// Filters present - build the query dynamically
let mut where_conditions = Vec::new();
// Item-ID filter
if item_id.is_some() {
where_conditions.push("l.item_id = ?".to_string());
}
// Student-number filter
if student_number.is_some() {
where_conditions.push("l.student_number = ?".to_string());
}
// Active/returned filter (needs no bind parameter)
if let Some(true) = active_only {
where_conditions.push("l.return_date IS NULL".to_string());
} else if let Some(false) = active_only {
where_conditions.push("l.return_date IS NOT NULL".to_string());
}
let where_clause = if where_conditions.is_empty() {
String::new()
} else {
format!("WHERE {}", where_conditions.join(" AND "))
};
let query_str = format!(
r#"
SELECT
l.id, l.item_id, l.student_number, l.student_name, l.organization,
l.loan_date, l.return_date, l.remarks, l.created_at, l.updated_at,
i.name as item_name, i.label_id as item_label_id
FROM loans l
INNER JOIN items i ON l.item_id = i.id
{}
ORDER BY l.created_at DESC
LIMIT ? OFFSET ?
"#,
where_clause
);
let count_query_str = format!(
"SELECT COUNT(*) as count FROM loans l INNER JOIN items i ON l.item_id = i.id {}",
where_clause
);
// Bind parameters in the order the conditions were pushed
let mut query = sqlx::query(&query_str);
let mut count_query = sqlx::query(&count_query_str);
// Item-ID filter
if let Some(id) = item_id {
query = query.bind(id);
count_query = count_query.bind(id);
}
// Student-number filter
if let Some(ref number) = student_number {
query = query.bind(number);
count_query = count_query.bind(number);
}
// Bind LIMIT/OFFSET (active_only is already expressed in the WHERE clause itself)
query = query.bind(limit).bind(offset);
let rows = query.fetch_all(pool).await?;
let loans: Vec<LoanWithItem> = rows.into_iter()
.map(|row| self.row_to_loan_with_item(row))
.collect();
let count_row = count_query.fetch_one(pool).await?;
let total: i64 = count_row.get("count");
(loans, total)
};
Ok(LoansListResponse {
loans,
total,
page,
per_page,
})
}
}
}
pub async fn return_loan(&self, id: i64, req: ReturnLoanRequest) -> AppResult<Loan> {
match &self.db {
DatabasePool::Postgres(_pool) => {
Err(AppError::InternalServerError("PostgreSQL support not implemented yet".to_string()))
}
DatabasePool::Sqlite(pool) => {
// Check that the loan exists and has not been returned yet
let loan_check = sqlx::query!(
"SELECT id, item_id, return_date FROM loans WHERE id = ?1",
id
)
.fetch_optional(pool)
.await?;
let loan = loan_check.ok_or_else(||
AppError::NotFound(format!("Loan with id {} not found", id))
)?;
if loan.return_date.is_some() {
return Err(AppError::BadRequest("Loan has already been returned".to_string()));
}
let return_date = req.return_date.unwrap_or_else(Utc::now);
let now = Utc::now();
// Update the loan record
sqlx::query!(
"UPDATE loans SET return_date = ?2, remarks = ?3, updated_at = ?4 WHERE id = ?1",
id,
return_date,
req.remarks,
now
)
.execute(pool)
.await?;
// Mark the item as returned
sqlx::query!(
"UPDATE items SET is_on_loan = 0, updated_at = ?2 WHERE id = ?1",
loan.item_id,
now
)
.execute(pool)
.await?;
self.get_loan(id).await
}
}
}
fn row_to_loan(&self, row: sqlx::sqlite::SqliteRow) -> Loan {
Loan {
id: row.get("id"),
item_id: row.get("item_id"),
student_number: row.get("student_number"),
student_name: row.get("student_name"),
organization: row.get("organization"),
loan_date: row.get("loan_date"),
return_date: row.get("return_date"),
remarks: row.get("remarks"),
created_at: row.get("created_at"),
updated_at: row.get("updated_at"),
}
}
fn row_to_loan_with_item(&self, row: sqlx::sqlite::SqliteRow) -> LoanWithItem {
LoanWithItem {
id: row.get("id"),
item_id: row.get("item_id"),
item_name: row.get("item_name"),
item_label_id: row.get("item_label_id"),
student_number: row.get("student_number"),
student_name: row.get("student_name"),
organization: row.get("organization"),
loan_date: row.get("loan_date"),
return_date: row.get("return_date"),
remarks: row.get("remarks"),
created_at: row.get("created_at"),
updated_at: row.get("updated_at"),
}
}
}
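// Editorial sketch, not part of the commit: create_loan and return_loan each
// issue two separate writes (loans + items), so a failure between them can
// leave is_on_loan inconsistent. If that matters, the pair can be wrapped in
// a transaction as below; the single-column loans insert is a simplification,
// not the real schema.
async fn _create_loan_atomically(pool: &sqlx::SqlitePool, item_id: i64) -> Result<i64, sqlx::Error> {
let mut tx = pool.begin().await?;
let result = sqlx::query("INSERT INTO loans (item_id) VALUES (?1)")
.bind(item_id)
.execute(&mut *tx)
.await?;
sqlx::query("UPDATE items SET is_on_loan = 1 WHERE id = ?1")
.bind(item_id)
.execute(&mut *tx)
.await?;
tx.commit().await?; // both writes land, or neither does
Ok(result.last_insert_rowid())
}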

9
src/services/mod.rs Normal file
View file

@ -0,0 +1,9 @@
pub mod item_service;
pub mod loan_service;
pub mod cable_color_service;
pub mod storage;
pub use item_service::*;
pub use loan_service::*;
pub use cable_color_service::*;
pub use storage::StorageService;

178
src/services/storage.rs Normal file
View file

@ -0,0 +1,178 @@
use aws_sdk_s3::Client as S3Client;
use std::path::{Path, PathBuf};
use tokio::fs;
use uuid::Uuid;
use crate::config::{Config, StorageType};
use crate::error::{AppError, AppResult};
#[derive(Clone)]
pub enum StorageService {
S3(S3Storage),
Local(LocalStorage),
}
impl StorageService {
pub async fn new(config: &Config) -> AppResult<Self> {
match &config.storage.storage_type {
StorageType::S3 => Ok(StorageService::S3(S3Storage::new(config).await?)),
StorageType::Local => Ok(StorageService::Local(LocalStorage::new(config)?)),
}
}
pub async fn upload(&self, data: Vec<u8>, filename: &str, content_type: &str) -> AppResult<String> {
match self {
StorageService::S3(storage) => storage.upload(data, filename, content_type).await,
StorageService::Local(storage) => storage.upload(data, filename, content_type).await,
}
}
pub async fn delete(&self, url: &str) -> AppResult<()> {
match self {
StorageService::S3(storage) => storage.delete(url).await,
StorageService::Local(storage) => storage.delete(url).await,
}
}
pub fn get_url(&self, key: &str) -> String {
match self {
StorageService::S3(storage) => storage.get_url(key),
StorageService::Local(storage) => storage.get_url(key),
}
}
}
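// Design note (editorial): enum dispatch keeps StorageService cheap to Clone
// and avoids boxing a trait object; each variant below exposes the same
// upload/delete/get_url surface that the match arms above delegate to.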
#[derive(Clone)]
pub struct S3Storage {
client: S3Client,
bucket_name: String,
base_url: String,
}
impl S3Storage {
pub async fn new(config: &Config) -> AppResult<Self> {
let s3_config = config.storage.s3.as_ref()
.ok_or_else(|| AppError::ConfigError(
config::ConfigError::Message("S3 configuration not found".to_string())
))?;
let aws_config = aws_config::defaults(aws_config::BehaviorVersion::latest()).load().await;
let client = S3Client::new(&aws_config);
let base_url = format!("https://{}.s3.{}.amazonaws.com",
s3_config.bucket_name,
s3_config.region
);
Ok(Self {
client,
bucket_name: s3_config.bucket_name.clone(),
base_url,
})
}
pub async fn upload(&self, data: Vec<u8>, filename: &str, content_type: &str) -> AppResult<String> {
let key = format!("images/{}/{}", Uuid::new_v4(), filename);
self.client
.put_object()
.bucket(&self.bucket_name)
.key(&key)
.body(data.into())
.content_type(content_type)
.send()
.await
.map_err(|e| AppError::StorageError(format!("Failed to upload to S3: {}", e)))?;
Ok(self.get_url(&key))
}
pub async fn delete(&self, url: &str) -> AppResult<()> {
// Extract key from URL
let key = url.strip_prefix(&format!("{}/", self.base_url))
.ok_or_else(|| AppError::StorageError("Invalid S3 URL".to_string()))?;
self.client
.delete_object()
.bucket(&self.bucket_name)
.key(key)
.send()
.await
.map_err(|e| AppError::StorageError(format!("Failed to delete from S3: {}", e)))?;
Ok(())
}
pub fn get_url(&self, key: &str) -> String {
format!("{}/{}", self.base_url, key)
}
}
#[derive(Clone)]
pub struct LocalStorage {
base_path: PathBuf,
base_url: String,
}
impl LocalStorage {
pub fn new(config: &Config) -> AppResult<Self> {
let local_config = config.storage.local.as_ref()
.ok_or_else(|| AppError::ConfigError(
config::ConfigError::Message("Local storage configuration not found".to_string())
))?;
let base_path = PathBuf::from(&local_config.path);
let base_url = format!("http://{}:{}/uploads",
config.server.host,
config.server.port
);
Ok(Self {
base_path,
base_url,
})
}
async fn ensure_directory(&self, path: &Path) -> AppResult<()> {
if !path.exists() {
fs::create_dir_all(path).await?;
}
Ok(())
}
pub async fn upload(&self, data: Vec<u8>, filename: &str, _content_type: &str) -> AppResult<String> {
let dir_name = Uuid::new_v4().to_string();
let dir_path = self.base_path.join(&dir_name);
self.ensure_directory(&dir_path).await?;
let file_path = dir_path.join(filename);
fs::write(&file_path, data).await?;
let relative_path = format!("{}/{}", dir_name, filename);
Ok(self.get_url(&relative_path))
}
pub async fn delete(&self, url: &str) -> AppResult<()> {
// Extract relative path from URL
let relative_path = url.strip_prefix(&format!("{}/", self.base_url))
.ok_or_else(|| AppError::StorageError("Invalid local storage URL".to_string()))?;
let file_path = self.base_path.join(relative_path);
if file_path.exists() {
fs::remove_file(&file_path).await?;
// Try to remove empty parent directory
if let Some(parent) = file_path.parent() {
let _ = fs::remove_dir(parent).await;
}
}
Ok(())
}
pub fn get_url(&self, key: &str) -> String {
format!("{}/{}", self.base_url, key)
}
}
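// Editorial sketch, not part of the commit: how a handler might use the
// abstraction. The backend is chosen purely by STORAGE_TYPE in the config, so
// S3 and the local fallback are interchangeable behind the same calls.
// The filename and byte payload here are placeholders.
async fn _store_image(config: &Config, bytes: Vec<u8>) -> AppResult<String> {
let storage = StorageService::new(config).await?;
// Returns either an S3 public URL or http://host:port/uploads/... locally.
storage.upload(bytes, "photo.png", "image/png").await
}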

1
test_item.json Normal file
View file

@ -0,0 +1 @@
{"name":"テストマイク","label_id":"MIC001","model_number":"Sony WM-1000XM4","remarks":"テスト用","purchase_year":2023,"purchase_amount":35000.0,"durability_years":5,"is_depreciation_target":true,"qr_code_type":"qr"}