commit bde8398d46496ae2595d33d09ed6b6ba056de26e
Author: Soma Nakamura
Date: Sat Jul 5 11:50:50 2025 +0900
initial
diff --git a/.env.example b/.env.example
new file mode 100644
index 0000000..c633332
--- /dev/null
+++ b/.env.example
@@ -0,0 +1,21 @@
+# Database
+DATABASE_URL=sqlite://hyperdashi.db
+# For production: DATABASE_URL=postgres://user:password@localhost/hyperdashi
+
+# Server
+SERVER_HOST=127.0.0.1
+SERVER_PORT=8080
+
+# Storage
+STORAGE_TYPE=local
+LOCAL_STORAGE_PATH=./uploads
+
+# For S3 storage (production)
+# STORAGE_TYPE=s3
+# AWS_ACCESS_KEY_ID=your_access_key
+# AWS_SECRET_ACCESS_KEY=your_secret_key
+# AWS_REGION=ap-northeast-1
+# S3_BUCKET_NAME=hyperdashi-images
+
+# Logging
+RUST_LOG=hyperdashi_server=debug,tower_http=debug,sqlx=warn
\ No newline at end of file
diff --git a/.envrc b/.envrc
new file mode 100644
index 0000000..a761330
--- /dev/null
+++ b/.envrc
@@ -0,0 +1,29 @@
+# Automatically load the nix environment
+use flake
+
+# Load environment variables from .env file if it exists
+dotenv_if_exists
+
+# Set development environment variables
+export DATABASE_URL="sqlite://hyperdashi.db"
+export RUST_LOG="debug"
+export RUST_BACKTRACE="1"
+
+# Server configuration
+export SERVER_HOST="127.0.0.1"
+export SERVER_PORT="8081"
+
+# Storage configuration
+export STORAGE_TYPE="local"
+export STORAGE_MAX_FILE_SIZE_MB="10"
+export LOCAL_STORAGE_PATH="./uploads"
+
+# Rust development settings
+export CARGO_TARGET_DIR="target"
+export RUSTFLAGS="--deny warnings"
+
+# Create necessary directories
+mkdir -p uploads
+
+echo "Development environment loaded!"
+echo "Run 'nix run .#dev' to start the development server"
\ No newline at end of file
diff --git a/.gitignore b/.gitignore
new file mode 100644
index 0000000..cb2b048
--- /dev/null
+++ b/.gitignore
@@ -0,0 +1,81 @@
+# Rust
+/target/
+**/*.rs.bk
+Cargo.lock
+
+# Database files
+*.db
+*.db-shm
+*.db-wal
+*.sqlite
+*.sqlite3
+
+# Environment files
+.env
+.env.local
+.env.production
+.env.*.local
+
+# Logs
+*.log
+npm-debug.log*
+yarn-debug.log*
+yarn-error.log*
+
+# Runtime data
+pids
+*.pid
+*.seed
+*.pid.lock
+
+# Uploads and temporary files
+/uploads/
+tmp/
+temp/
+
+# OS generated files
+.DS_Store
+.DS_Store?
+._*
+.Spotlight-V100
+.Trashes
+ehthumbs.db
+Thumbs.db
+
+# IDE files
+.vscode/
+.idea/
+*.swp
+*.swo
+*~
+
+# Nix
+result
+result-*
+.direnv/
+
+# Node modules (for frontend)
+node_modules/
+npm-debug.log
+yarn-error.log
+.pnpm-debug.log
+
+# Build outputs
+dist/
+build/
+*.tgz
+*.tar.gz
+
+# Coverage reports
+coverage/
+*.lcov
+tarpaulin-report.html
+
+# Backup files
+*_backup_*
+*.bak
+*.backup
+
+# Test files
+test.db
+*test*.db
diff --git a/CLAUDE.md b/CLAUDE.md
new file mode 100644
index 0000000..ad1c9cc
--- /dev/null
+++ b/CLAUDE.md
@@ -0,0 +1,61 @@
+This is the backend of "hyperdashi", an evolved and generalized successor to "dashi", the equipment management system of the Information Media System Bureau (情報メディアシステム局).
+
+dashi is available at https://dashi.sohosai.com.
+dashi relied on a graph database and a search engine, but the operational overhead was not worth it, so the goal is to replace them with an RDB.
+
+The requirements for hyperdashi are as follows.
+
+Item registration
+  Register the item information for a piece of equipment.
+  Item name
+  Label ID
+    Attached to the item as a QR code/barcode; the item is looked up by this label ID
+    Alphanumeric (some characters are excluded; I and O are not used because they are easily mistaken for 1 and 0)
+
+  Model number
+  Remarks
+    Notes such as length, age, or scratches
+  Purchase year
+  Purchase amount
+  Useful life (years)
+  Whether the item is subject to depreciation
+    Considered when an expensive item is bought. Items bought in the past are not subject to depreciation
+  Connection names (names of the connectors)
+    Apparently a variable-length array
+  Cable identification color pattern
+    Add the colors taped to the cable in order, starting from the connector side.
+    Watch out for white! White is hard to see
+  Storage locations
+    Multiple locations can be selected: if an item is stored in container α of rack X in room A, recording all three of "Room A", "Rack X", and "Container α" improves searchability.
+  Whether the item is on loan
+  Whether it is labeled with a QR code, a barcode, or not labeled at all
+  Created timestamp
+    Assigned automatically
+  Updated timestamp
+    Also assigned automatically
+  Disposed/transferred
+    Distinct from deletion. Deletion is for fixing mistaken input; when an item is disposed of, this flag is set instead
+  Image
+    Planned to be a URL. At registration the file itself is uploaded, so this will hold either the URL from uploading it to object storage, or the backend URL when falling back to local storage.
+
+Partial updates must also be possible, and an API for them is required
+Registration and editing will be done from an Excel-like UI
+
+Loan management table
+  ID of the loaned item
+  Who it is loaned to
+    Student number and name
+  Where it is loaned to
+    String; expected to hold organization names and the like
+  Loan date/time
+  Return date/time
+  Remarks
+
+An API is also needed to register loans and update them (on return)
+
+Images are planned to be uploaded to S3-compatible object storage, but it would be nice to be able to fall back to local storage via env settings.
+The same goes for the RDB: production will use PostgreSQL, but it would be nice to be able to fall back to a local SQLite database (during development).
+
+The server is written in Rust and implemented with Axum and SQLx.
+Communication with the client uses a REST API.
+
diff --git a/Cargo.toml b/Cargo.toml
new file mode 100644
index 0000000..7d0f65d
--- /dev/null
+++ b/Cargo.toml
@@ -0,0 +1,67 @@
+[package]
+name = "hyperdashi-server"
+version = "0.1.0"
+edition = "2021"
+
+[dependencies]
+# Web framework
+axum = { version = "0.7", features = ["multipart"] }
+tower = "0.4"
+tower-http = { version = "0.5", features = ["cors", "trace", "fs"] }
+
+# Async runtime
+tokio = { version = "1", features = ["full"] }
+
+# Database
+sqlx = { version = "0.7", features = ["runtime-tokio-rustls", "postgres", "sqlite", "json", "chrono", "rust_decimal", "migrate"] }
+
+# Serialization
+serde = { version = "1.0", features = ["derive"] }
+serde_json = "1.0"
+
+# Configuration
+config = "0.14"
+
+# Error handling
+thiserror = "1.0"
+anyhow = "1.0"
+
+# Logging
+tracing = "0.1"
+tracing-subscriber = { version = "0.3", features = ["env-filter"] }
+
+# Date/Time
+chrono = { version = "0.4", features = ["serde"] }
+
+# Decimal for money
+rust_decimal = { version = "1.35", features = ["serde-float"] }
+
+# HTTP client (for S3)
+reqwest = { version = "0.12", features = ["json", "multipart", "rustls-tls"], default-features = false }
+
+# AWS SDK (for S3)
+aws-config = "1.1"
+aws-sdk-s3 = "1.14"
+
+# File uploads
+multer = "3.0"
+
+# Validation
+validator = { version = "0.18", features = ["derive"] }
+
+# Regular expressions
+regex = "1.10"
+lazy_static = "1.4"
+once_cell = "1.19"
+
+# Environment variables
+dotenvy = "0.15"
+
+# UUID for unique identifiers
+uuid = { version = "1.7", features = ["v4", "serde"] }
+
+# Async trait
+async-trait = "0.1"
+
+[dev-dependencies]
+tokio-test = "0.4"
diff --git a/DESIGN.md b/DESIGN.md
new file mode 100644
index 0000000..576a5cc
--- /dev/null
+++ b/DESIGN.md
@@ -0,0 +1,345 @@
+# HyperDashi Backend Detailed Design
+
+## 1. System Overview
+
+HyperDashi is an evolved, general-purpose successor to "dashi", the equipment management system of the Information Media System Bureau. To simplify operations, it migrates from the previous graph-database-and-search-engine architecture to an RDB-based one.
+
+### 1.1 Technology Stack
+- **Language**: Rust
+- **Web framework**: Axum
+- **Database**: PostgreSQL (production) / SQLite (development)
+- **ORM**: SQLx
+- **Storage**: S3-compatible object storage (production) / local filesystem (development)
+- **API style**: REST API
+
+## 2. Database Design
+
+### 2.1 Items Table (items)
+
+```sql
+CREATE TABLE items (
+ id SERIAL PRIMARY KEY,
+ name VARCHAR(255) NOT NULL,
+ label_id VARCHAR(50) UNIQUE NOT NULL,
+ model_number VARCHAR(255),
+ remarks TEXT,
+ purchase_year INTEGER,
+ purchase_amount DECIMAL(12, 2),
+ useful_life INTEGER,
+ is_depreciable BOOLEAN DEFAULT FALSE,
+ connection_names TEXT[], -- PostgreSQL array type; JSON in SQLite
+ cable_color_pattern TEXT[], -- PostgreSQL array type; JSON in SQLite
+ storage_locations TEXT[], -- PostgreSQL array type; JSON in SQLite
+ is_on_loan BOOLEAN DEFAULT FALSE,
+ label_type VARCHAR(20) CHECK (label_type IN ('qr', 'barcode', 'none')),
+ is_disposed BOOLEAN DEFAULT FALSE,
+ image_url TEXT,
+ created_at TIMESTAMP WITH TIME ZONE DEFAULT CURRENT_TIMESTAMP,
+ updated_at TIMESTAMP WITH TIME ZONE DEFAULT CURRENT_TIMESTAMP
+);
+
+-- Indexes
+CREATE INDEX idx_items_label_id ON items(label_id);
+CREATE INDEX idx_items_name ON items(name);
+CREATE INDEX idx_items_is_on_loan ON items(is_on_loan);
+CREATE INDEX idx_items_is_disposed ON items(is_disposed);
+```
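+
+The array-valued columns are the main cross-database concern: PostgreSQL can store them natively, while SQLite gets a JSON-encoded TEXT column (and the actual migrations use JSON TEXT for both databases). A minimal sketch of helpers for that JSON encoding, using serde_json; the helper names are illustrative, not taken from the codebase:
+
+```rust
+// Hypothetical helpers for JSON-encoded array columns.
+fn to_json_column(values: &[String]) -> Result<String, serde_json::Error> {
+    // Vec<String> -> TEXT column holding a JSON array, e.g. ["部屋A","ラックX"]
+    serde_json::to_string(values)
+}
+
+fn from_json_column(raw: Option<&str>) -> Result<Option<Vec<String>>, serde_json::Error> {
+    // TEXT column (possibly NULL) -> Option<Vec<String>>
+    raw.map(serde_json::from_str).transpose()
+}
+```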
+
+### 2.2 Loans Table (loans)
+
+```sql
+CREATE TABLE loans (
+ id SERIAL PRIMARY KEY,
+ item_id INTEGER NOT NULL REFERENCES items(id),
+ student_number VARCHAR(20) NOT NULL,
+ student_name VARCHAR(100) NOT NULL,
+ organization VARCHAR(255),
+ loan_date TIMESTAMP WITH TIME ZONE NOT NULL DEFAULT CURRENT_TIMESTAMP,
+ return_date TIMESTAMP WITH TIME ZONE,
+ remarks TEXT,
+ created_at TIMESTAMP WITH TIME ZONE DEFAULT CURRENT_TIMESTAMP,
+ updated_at TIMESTAMP WITH TIME ZONE DEFAULT CURRENT_TIMESTAMP
+);
+
+-- Indexes
+CREATE INDEX idx_loans_item_id ON loans(item_id);
+CREATE INDEX idx_loans_student_number ON loans(student_number);
+CREATE INDEX idx_loans_return_date ON loans(return_date);
+```
+
+### 2.3 Images Table (images) - Optional
+
+```sql
+CREATE TABLE images (
+ id SERIAL PRIMARY KEY,
+ file_name VARCHAR(255) NOT NULL,
+ content_type VARCHAR(100) NOT NULL,
+ storage_type VARCHAR(20) CHECK (storage_type IN ('s3', 'local')),
+ storage_path TEXT NOT NULL,
+ size_bytes BIGINT,
+ created_at TIMESTAMP WITH TIME ZONE DEFAULT CURRENT_TIMESTAMP
+);
+```
+
+## 3. API Design
+
+### 3.1 Item Management API
+
+#### 3.1.1 List Items
+- **Endpoint**: `GET /api/v1/items`
+- **Query parameters**:
+  - `page` (optional): page number
+  - `per_page` (optional): number of items per page
+  - `search` (optional): search keyword
+  - `is_on_loan` (optional): filter by on-loan status
+  - `is_disposed` (optional): filter by disposed status
+- **Response**:
+```json
+{
+ "items": [
+ {
+ "id": 1,
+ "name": "HDMIケーブル 3m",
+ "label_id": "HYP-A001",
+ "model_number": "HDMI-3M-V2",
+ "remarks": "端子部分に少し傷あり",
+ "purchase_year": 2023,
+ "purchase_amount": 1500.00,
+ "useful_life": 5,
+ "is_depreciable": false,
+ "connection_names": ["HDMI Type-A", "HDMI Type-A"],
+ "cable_color_pattern": ["赤", "青", "赤"],
+ "storage_locations": ["部屋A", "ラックX", "コンテナα"],
+ "is_on_loan": false,
+ "label_type": "qr",
+ "is_disposed": false,
+ "image_url": "https://storage.example.com/images/hdmi-cable.jpg",
+ "created_at": "2023-04-01T10:00:00Z",
+ "updated_at": "2023-04-01T10:00:00Z"
+ }
+ ],
+ "total": 150,
+ "page": 1,
+ "per_page": 20
+}
+```
+
+#### 3.1.2 Get Item Details
+- **Endpoint**: `GET /api/v1/items/{id}`
+- **Response**: a single item object
+
+#### 3.1.3 Create Item
+- **Endpoint**: `POST /api/v1/items`
+- **Request body**:
+```json
+{
+ "name": "HDMIケーブル 3m",
+ "label_id": "HYP-A001",
+ "model_number": "HDMI-3M-V2",
+ "remarks": "端子部分に少し傷あり",
+ "purchase_year": 2023,
+ "purchase_amount": 1500.00,
+ "useful_life": 5,
+ "is_depreciable": false,
+ "connection_names": ["HDMI Type-A", "HDMI Type-A"],
+ "cable_color_pattern": ["赤", "青", "赤"],
+ "storage_locations": ["部屋A", "ラックX", "コンテナα"],
+ "label_type": "qr"
+}
+```
+
+#### 3.1.4 Update Item
+- **Endpoint**: `PUT /api/v1/items/{id}`
+- **Request body**: same as item creation (all fields are updated)
+
+#### 3.1.5 Partially Update Item
+- **Endpoint**: `PATCH /api/v1/items/{id}`
+- **Request body**: only the fields to be updated (see the example below)
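+
+For example, a PATCH that updates only the remarks and storage locations (values reused from the examples above) might look like:
+```json
+{
+  "remarks": "端子部分に少し傷あり",
+  "storage_locations": ["部屋A", "ラックX", "コンテナα"]
+}
+```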
+
+#### 3.1.6 Delete Item
+- **Endpoint**: `DELETE /api/v1/items/{id}`
+- **Description**: physical deletion, not a soft delete (for correcting mistaken input)
+
+#### 3.1.7 Dispose of/Transfer Item
+- **Endpoint**: `POST /api/v1/items/{id}/dispose`
+- **Description**: sets the is_disposed flag
+
+#### 3.1.8 Find Item by Label ID
+- **Endpoint**: `GET /api/v1/items/by-label/{label_id}`
+- **Description**: used for lookups when a QR code/barcode is scanned
+
+### 3.2 Loan Management API
+
+#### 3.2.1 Create Loan
+- **Endpoint**: `POST /api/v1/loans`
+- **Request body**:
+```json
+{
+ "item_id": 1,
+ "student_number": "21001234",
+ "student_name": "山田太郎",
+ "organization": "第74回総合祭実行委員会",
+ "remarks": "イベント用機材"
+}
+```
+
+#### 3.2.2 Process a Return
+- **Endpoint**: `POST /api/v1/loans/{id}/return`
+- **Request body**:
+```json
+{
+ "return_date": "2023-04-10T15:00:00Z",
+ "remarks": "問題なく返却"
+}
+```
+
+#### 3.2.3 List Loan History
+- **Endpoint**: `GET /api/v1/loans`
+- **Query parameters**:
+  - `item_id` (optional): item ID
+  - `student_number` (optional): student number
+  - `active_only` (optional): unreturned loans only
+
+#### 3.2.4 Get Loan Details
+- **Endpoint**: `GET /api/v1/loans/{id}`
+
+### 3.3 Image Upload API
+
+#### 3.3.1 Upload Image
+- **Endpoint**: `POST /api/v1/images/upload`
+- **Content-Type**: `multipart/form-data`
+- **Response**:
+```json
+{
+ "url": "https://storage.example.com/images/abc123.jpg"
+}
+```
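+
+A minimal client-side sketch of this upload using reqwest's multipart support (reqwest with the `multipart` feature is already a dependency; the host, file name, and MIME type below are placeholders, and the `"image"` field name matches what the current handler expects):
+
+```rust
+use reqwest::multipart;
+
+async fn upload_image(data: Vec<u8>) -> Result<String, Box<dyn std::error::Error>> {
+    // The field name must be "image"; the handler ignores other parts.
+    let part = multipart::Part::bytes(data)
+        .file_name("example.jpg") // placeholder
+        .mime_str("image/jpeg")?;
+    let form = multipart::Form::new().part("image", part);
+
+    let resp = reqwest::Client::new()
+        .post("http://localhost:8080/api/v1/images/upload") // placeholder host
+        .multipart(form)
+        .send()
+        .await?
+        .error_for_status()?;
+
+    // The response body is {"url": "..."} as shown above.
+    let body: serde_json::Value = resp.json().await?;
+    Ok(body["url"].as_str().unwrap_or_default().to_string())
+}
+```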
+
+### 3.4 Bulk Operations API
+
+#### 3.4.1 Bulk Create Items
+- **Endpoint**: `POST /api/v1/items/bulk`
+- **Request body**: an array of item objects
+
+#### 3.4.2 Bulk Update Items
+- **Endpoint**: `PUT /api/v1/items/bulk`
+- **Request body**: an array of item objects to update
+
+## 4. Application Structure
+
+### 4.1 Directory Layout
+```
+hyperdashi-server/
+├── src/
+│   ├── main.rs              # entry point
+│   ├── config.rs            # configuration management
+│   ├── db/
+│   │   ├── mod.rs           # database connection management
+│   │   └── migrations/      # SQL migrations
+│   ├── models/
+│   │   ├── mod.rs
+│   │   ├── item.rs          # item model
+│   │   └── loan.rs          # loan model
+│   ├── handlers/
+│   │   ├── mod.rs
+│   │   ├── items.rs         # item handlers
+│   │   ├── loans.rs         # loan handlers
+│   │   └── images.rs        # image handlers
+│   ├── services/
+│   │   ├── mod.rs
+│   │   ├── item_service.rs  # item business logic
+│   │   ├── loan_service.rs  # loan business logic
+│   │   └── storage.rs       # storage abstraction
+│   ├── utils/
+│   │   ├── mod.rs
+│   │   ├── validation.rs    # validation
+│   │   └── label.rs         # label ID generation
+│   └── error.rs             # error type definitions
+├── Cargo.toml
+├── .env.example
+└── README.md
+```
+
+### 4.2 Key Components
+
+#### 4.2.1 Configuration Management (config.rs)
+```rust
+#[derive(Debug, Deserialize)]
+pub struct Config {
+    pub database_url: String,
+    pub server_host: String,
+    pub server_port: u16,
+    pub storage_type: StorageType,
+    pub s3_config: Option<S3Config>,
+    pub local_storage_path: Option<String>,
+}
+
+#[derive(Debug, Deserialize)]
+pub enum StorageType {
+    S3,
+    Local,
+}
+```
+
+#### 4.2.2 Storage Abstraction (storage.rs)
+```rust
+#[async_trait]
+pub trait Storage: Send + Sync {
+    async fn upload(&self, data: Vec<u8>, filename: &str) -> Result<String>;
+    async fn delete(&self, url: &str) -> Result<()>;
+}
+
+pub struct S3Storage { /* ... */ }
+pub struct LocalStorage { /* ... */ }
+```
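+
+A minimal sketch of the local variant, assuming `Result` here is `anyhow::Result` and that the struct keeps a base directory plus the public base URL (both field names are illustrative):
+
+```rust
+pub struct LocalStorage {
+    base_path: std::path::PathBuf, // e.g. "./uploads"
+    base_url: String,              // e.g. "http://localhost:8080/uploads"
+}
+
+#[async_trait]
+impl Storage for LocalStorage {
+    async fn upload(&self, data: Vec<u8>, filename: &str) -> Result<String> {
+        // Write the bytes under the base directory and hand back the public URL.
+        tokio::fs::write(self.base_path.join(filename), data).await?;
+        Ok(format!("{}/{}", self.base_url, filename))
+    }
+
+    async fn delete(&self, url: &str) -> Result<()> {
+        // Map the public URL back to a file under the base directory.
+        let filename = url.rsplit('/').next().unwrap_or_default();
+        tokio::fs::remove_file(self.base_path.join(filename)).await?;
+        Ok(())
+    }
+}
+```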
+
+## 5. Security Considerations
+
+### 5.1 Authentication and Authorization
+- No authentication in the initial version (internal system)
+- Authentication (e.g. JWT) is planned for the future
+
+### 5.2 Input Validation
+- Label ID format validation (alphanumeric, excluding I/O)
+- SQL injection protection (via SQLx)
+- File upload size limits and MIME type validation
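+
+A minimal sketch of the label ID check, assuming uppercase alphanumerics with I and O excluded plus a hyphen separator (as in the `HYP-A001` examples); the exact character set is not pinned down by the requirements:
+
+```rust
+use once_cell::sync::Lazy;
+use regex::Regex;
+
+// Uppercase letters and digits, excluding I and O (easily confused with 1 and 0),
+// with optional hyphen-separated groups. Illustrative, not the final charset.
+static LABEL_ID_RE: Lazy<Regex> =
+    Lazy::new(|| Regex::new(r"^[0-9A-HJ-NP-Z]+(-[0-9A-HJ-NP-Z]+)*$").unwrap());
+
+fn is_valid_label_id(label_id: &str) -> bool {
+    LABEL_ID_RE.is_match(label_id)
+}
+```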
+
+### 5.3 Data Protection
+- Proper handling of personal information (student numbers, names)
+- Encryption in transit via HTTPS
+
+## 6. Performance Optimization
+
+### 6.1 Database
+- Appropriate indexes
+- Avoiding N+1 queries
+- Connection pooling
+
+### 6.2 Image Processing
+- Image resizing and thumbnail generation
+- Delivery optimization via a CDN (future)
+
+### 6.3 Caching
+- Caching of frequently accessed item data (future)
+
+## 7. Operations and Maintenance
+
+### 7.1 Logging
+- Structured log output
+- Error tracking
+
+### 7.2 Monitoring
+- Health check endpoint
+- Metrics collection (future)
+
+### 7.3 Backups
+- Regular database backups
+- Backups of image data
+
+## 8. Planned Extensions
+
+- Authentication and authorization
+- Item reservations
+- Statistics and reporting
+- Mobile app API support
+- Real-time updates via WebSocket
\ No newline at end of file
diff --git a/flake.lock b/flake.lock
new file mode 100644
index 0000000..640bb45
--- /dev/null
+++ b/flake.lock
@@ -0,0 +1,82 @@
+{
+ "nodes": {
+ "flake-utils": {
+ "inputs": {
+ "systems": "systems"
+ },
+ "locked": {
+ "lastModified": 1731533236,
+ "narHash": "sha256-l0KFg5HjrsfsO/JpG+r7fRrqm12kzFHyUHqHCVpMMbI=",
+ "owner": "numtide",
+ "repo": "flake-utils",
+ "rev": "11707dc2f618dd54ca8739b309ec4fc024de578b",
+ "type": "github"
+ },
+ "original": {
+ "owner": "numtide",
+ "repo": "flake-utils",
+ "type": "github"
+ }
+ },
+ "nixpkgs": {
+ "locked": {
+ "lastModified": 1751271578,
+ "narHash": "sha256-P/SQmKDu06x8yv7i0s8bvnnuJYkxVGBWLWHaU+tt4YY=",
+ "owner": "NixOS",
+ "repo": "nixpkgs",
+ "rev": "3016b4b15d13f3089db8a41ef937b13a9e33a8df",
+ "type": "github"
+ },
+ "original": {
+ "owner": "NixOS",
+ "ref": "nixos-unstable",
+ "repo": "nixpkgs",
+ "type": "github"
+ }
+ },
+ "root": {
+ "inputs": {
+ "flake-utils": "flake-utils",
+ "nixpkgs": "nixpkgs",
+ "rust-overlay": "rust-overlay"
+ }
+ },
+ "rust-overlay": {
+ "inputs": {
+ "nixpkgs": [
+ "nixpkgs"
+ ]
+ },
+ "locked": {
+ "lastModified": 1751596734,
+ "narHash": "sha256-1tQOwmn3jEUQjH0WDJyklC+hR7Bj+iqx6ChtRX2QiPA=",
+ "owner": "oxalica",
+ "repo": "rust-overlay",
+ "rev": "e28ba067a9368286a8bc88b68dc2ca92181a09f0",
+ "type": "github"
+ },
+ "original": {
+ "owner": "oxalica",
+ "repo": "rust-overlay",
+ "type": "github"
+ }
+ },
+ "systems": {
+ "locked": {
+ "lastModified": 1681028828,
+ "narHash": "sha256-Vy1rq5AaRuLzOxct8nz4T6wlgyUR7zLU309k9mBC768=",
+ "owner": "nix-systems",
+ "repo": "default",
+ "rev": "da67096a3b9bf56a91d16901293e51ba5b49a27e",
+ "type": "github"
+ },
+ "original": {
+ "owner": "nix-systems",
+ "repo": "default",
+ "type": "github"
+ }
+ }
+ },
+ "root": "root",
+ "version": 7
+}
diff --git a/flake.nix b/flake.nix
new file mode 100644
index 0000000..93ccb64
--- /dev/null
+++ b/flake.nix
@@ -0,0 +1,277 @@
+{
+ description = "HyperDashi Backend Server Development Environment";
+
+ inputs = {
+ nixpkgs.url = "github:NixOS/nixpkgs/nixos-unstable";
+ flake-utils.url = "github:numtide/flake-utils";
+ rust-overlay = {
+ url = "github:oxalica/rust-overlay";
+ inputs.nixpkgs.follows = "nixpkgs";
+ };
+ };
+
+ outputs = { self, nixpkgs, flake-utils, rust-overlay }:
+ flake-utils.lib.eachDefaultSystem (system:
+ let
+ overlays = [ (import rust-overlay) ];
+ pkgs = import nixpkgs {
+ inherit system overlays;
+ };
+
+ # Rust toolchain specification
+ rustToolchain = pkgs.rust-bin.stable.latest.default.override {
+ extensions = [ "rust-src" "rustfmt" "clippy" "rust-analyzer" ];
+ };
+
+ # Node.js for frontend development
+ nodejs = pkgs.nodejs_20;
+
+ # Database and development tools
+ developmentTools = with pkgs; [
+ # Database tools
+ sqlite
+ sqlx-cli
+
+ # Development utilities
+ git
+ curl
+ jq
+
+ # Text editors and IDE support
+ vim
+ nano
+
+ # Build tools
+ pkg-config
+ openssl
+
+ # Optional: GUI tools if available
+ dbeaver-bin
+ ];
+
+ # Runtime dependencies
+ runtimeDeps = with pkgs; [
+ # SSL/TLS support
+ openssl
+
+ # SQLite runtime
+ sqlite
+
+ # Network tools for testing
+ netcat-gnu
+ ];
+
+ # Development shell packages
+ shellPackages = [
+ rustToolchain
+ nodejs
+ pkgs.yarn
+ pkgs.pnpm
+ ] ++ developmentTools ++ runtimeDeps;
+
+ in
+ {
+ # Development shell
+ devShells.default = pkgs.mkShell {
+ buildInputs = shellPackages;
+
+ # Environment variables
+ shellHook = ''
+ echo "🦀 HyperDashi Development Environment"
+ echo "=================================================="
+ echo "Rust version: $(rustc --version)"
+ echo "Node.js version: $(node --version)"
+ echo "SQLite version: $(sqlite3 --version)"
+ echo "=================================================="
+
+ # Set environment variables
+ export DATABASE_URL="sqlite://hyperdashi.db"
+ export RUST_LOG="debug"
+ export RUST_BACKTRACE=1
+
+ # Server configuration
+ export SERVER_HOST="127.0.0.1"
+ export SERVER_PORT="8081"
+
+ # Storage configuration
+ export STORAGE_TYPE="local"
+ export STORAGE_MAX_FILE_SIZE_MB="10"
+ export LOCAL_STORAGE_PATH="./uploads"
+
+ # Create uploads directory if it doesn't exist
+ mkdir -p uploads
+
+ echo "Environment variables set:"
+ echo " DATABASE_URL: $DATABASE_URL"
+ echo " SERVER_PORT: $SERVER_PORT"
+ echo " STORAGE_MAX_FILE_SIZE_MB: $STORAGE_MAX_FILE_SIZE_MB"
+ echo ""
+ echo "Available commands:"
+ echo " cargo build - Build the project"
+ echo " cargo run - Run the development server"
+ echo " cargo test - Run tests"
+ echo " sqlx migrate run - Run database migrations"
+ echo " nix run .#setup-db - Initial database setup"
+ echo " nix run .#dev - Start development server"
+ echo " nix run .#test - Run all tests"
+ echo ""
+ '';
+
+ # Additional environment variables for development
+ DATABASE_URL = "sqlite://hyperdashi.db";
+ RUST_LOG = "debug";
+ RUST_BACKTRACE = "1";
+
+ # PKG_CONFIG_PATH for OpenSSL
+ PKG_CONFIG_PATH = "${pkgs.openssl.dev}/lib/pkgconfig";
+
+ # Library paths
+ LD_LIBRARY_PATH = pkgs.lib.makeLibraryPath [
+ pkgs.openssl
+ pkgs.sqlite
+ ];
+ };
+
+ # Package outputs
+ packages = {
+ # Backend binary
+ hyperdashi-server = pkgs.rustPlatform.buildRustPackage {
+ pname = "hyperdashi-server";
+ version = "0.1.0";
+
+ src = ./.;
+
+ cargoLock = {
+ lockFile = ./Cargo.lock;
+ };
+
+ nativeBuildInputs = with pkgs; [
+ pkg-config
+ rustToolchain
+ ];
+
+ buildInputs = with pkgs; [
+ openssl
+ sqlite
+ ];
+
+ # Skip tests during build (can be run separately)
+ doCheck = false;
+
+ meta = with pkgs.lib; {
+ description = "HyperDashi equipment management system backend";
+ license = licenses.mit;
+ maintainers = [ ];
+ };
+ };
+
+ # Docker image
+ docker-image = pkgs.dockerTools.buildImage {
+ name = "hyperdashi-server";
+ tag = "latest";
+
+ contents = [
+ self.packages.${system}.hyperdashi-server
+ pkgs.sqlite
+ pkgs.openssl
+ ];
+
+ config = {
+ Cmd = [ "${self.packages.${system}.hyperdashi-server}/bin/hyperdashi-server" ];
+ Env = [
+ "DATABASE_URL=sqlite:///data/hyperdashi.db"
+ "SERVER_HOST=0.0.0.0"
+ "SERVER_PORT=8080"
+ "STORAGE_TYPE=local"
+ "LOCAL_STORAGE_PATH=/uploads"
+ ];
+ ExposedPorts = {
+ "8080/tcp" = {};
+ };
+ Volumes = {
+ "/data" = {};
+ "/uploads" = {};
+ };
+ };
+ };
+
+ default = self.packages.${system}.hyperdashi-server;
+ };
+
+ # Formatter
+ formatter = pkgs.nixpkgs-fmt;
+
+ # Apps for easy running
+ apps = {
+ # Run the server
+ hyperdashi-server = flake-utils.lib.mkApp {
+ drv = self.packages.${system}.hyperdashi-server;
+ };
+
+ # Development server with auto-reload
+ dev = flake-utils.lib.mkApp {
+ drv = pkgs.writeShellScriptBin "hyperdashi-dev" ''
+ export DATABASE_URL="sqlite://hyperdashi.db"
+ export RUST_LOG="debug"
+
+ echo "Starting HyperDashi development server..."
+ echo "Server will be available at http://localhost:8081"
+
+ # Run migrations first
+ sqlx migrate run
+
+ # Start the server with cargo watch for auto-reload
+ if command -v cargo-watch >/dev/null 2>&1; then
+ cargo watch -x run
+ else
+ echo "cargo-watch not found, installing..."
+ cargo install cargo-watch
+ cargo watch -x run
+ fi
+ '';
+ };
+
+ # Database setup
+ setup-db = flake-utils.lib.mkApp {
+ drv = pkgs.writeShellScriptBin "setup-db" ''
+ export DATABASE_URL="sqlite://hyperdashi.db"
+
+ echo "Setting up HyperDashi database..."
+
+ # Create database file if it doesn't exist
+ touch hyperdashi.db
+
+ # Run migrations
+ sqlx migrate run
+
+ echo "Database setup complete!"
+ echo "Database file: $(pwd)/hyperdashi.db"
+ '';
+ };
+
+ # Run tests
+ test = flake-utils.lib.mkApp {
+ drv = pkgs.writeShellScriptBin "hyperdashi-test" ''
+ export DATABASE_URL="sqlite://test.db"
+ export RUST_LOG="info"
+
+ echo "Running HyperDashi tests..."
+
+ # Clean up any existing test database
+ rm -f test.db
+
+ # Run tests
+ cargo test
+
+ # Clean up test database
+ rm -f test.db
+
+ echo "Tests completed!"
+ '';
+ };
+
+ default = self.apps.${system}.hyperdashi-server;
+ };
+ }
+ );
+}
\ No newline at end of file
diff --git a/migrations/20240704000001_create_items_table.sql b/migrations/20240704000001_create_items_table.sql
new file mode 100644
index 0000000..dfc4296
--- /dev/null
+++ b/migrations/20240704000001_create_items_table.sql
@@ -0,0 +1,27 @@
+-- Create items table (compatible with both PostgreSQL and SQLite)
+CREATE TABLE IF NOT EXISTS items (
+ id INTEGER PRIMARY KEY,
+ name TEXT NOT NULL,
+ label_id TEXT UNIQUE NOT NULL,
+ model_number TEXT,
+ remarks TEXT,
+ purchase_year INTEGER,
+ purchase_amount REAL,
+ durability_years INTEGER,
+ is_depreciation_target BOOLEAN DEFAULT FALSE,
+ connection_names TEXT, -- JSON array for both DBs
+ cable_color_pattern TEXT, -- JSON array for both DBs
+ storage_locations TEXT, -- JSON array for both DBs
+ is_on_loan BOOLEAN DEFAULT FALSE,
+ qr_code_type TEXT CHECK (qr_code_type IN ('qr', 'barcode', 'none')),
+ is_disposed BOOLEAN DEFAULT FALSE,
+ image_url TEXT,
+ created_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP,
+ updated_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP
+);
+
+-- Create indexes
+CREATE INDEX IF NOT EXISTS idx_items_label_id ON items(label_id);
+CREATE INDEX IF NOT EXISTS idx_items_name ON items(name);
+CREATE INDEX IF NOT EXISTS idx_items_is_on_loan ON items(is_on_loan);
+CREATE INDEX IF NOT EXISTS idx_items_is_disposed ON items(is_disposed);
\ No newline at end of file
diff --git a/migrations/20240704000002_create_loans_table.sql b/migrations/20240704000002_create_loans_table.sql
new file mode 100644
index 0000000..267efbc
--- /dev/null
+++ b/migrations/20240704000002_create_loans_table.sql
@@ -0,0 +1,18 @@
+-- Create loans table (compatible with both PostgreSQL and SQLite)
+CREATE TABLE IF NOT EXISTS loans (
+ id INTEGER PRIMARY KEY,
+ item_id INTEGER NOT NULL REFERENCES items(id),
+ student_number TEXT NOT NULL,
+ student_name TEXT NOT NULL,
+ organization TEXT,
+ loan_date TIMESTAMP DEFAULT CURRENT_TIMESTAMP,
+ return_date TIMESTAMP,
+ remarks TEXT,
+ created_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP,
+ updated_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP
+);
+
+-- Create indexes
+CREATE INDEX IF NOT EXISTS idx_loans_item_id ON loans(item_id);
+CREATE INDEX IF NOT EXISTS idx_loans_student_number ON loans(student_number);
+CREATE INDEX IF NOT EXISTS idx_loans_return_date ON loans(return_date);
\ No newline at end of file
diff --git a/migrations/20250705000001_create_cable_colors_table.sql b/migrations/20250705000001_create_cable_colors_table.sql
new file mode 100644
index 0000000..9e14f8b
--- /dev/null
+++ b/migrations/20250705000001_create_cable_colors_table.sql
@@ -0,0 +1,24 @@
+-- Create cable colors table
+CREATE TABLE cable_colors (
+ id INTEGER PRIMARY KEY AUTOINCREMENT,
+ name VARCHAR(100) NOT NULL UNIQUE,
+ hex_code VARCHAR(7),
+ description TEXT,
+ created_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP,
+ updated_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP
+);
+
+-- Insert common cable colors
+INSERT INTO cable_colors (name, hex_code, description) VALUES
+('赤', '#FF0000', '赤色'),
+('青', '#0000FF', '青色'),
+('緑', '#00FF00', '緑色'),
+('黄', '#FFFF00', '黄色'),
+('黒', '#000000', '黒色'),
+('白', '#FFFFFF', '白色'),
+('グレー', '#808080', 'グレー色'),
+('オレンジ', '#FFA500', 'オレンジ色'),
+('紫', '#800080', '紫色'),
+('茶', '#A52A2A', '茶色'),
+('ピンク', '#FFC0CB', 'ピンク色'),
+('シルバー', '#C0C0C0', 'シルバー色');
\ No newline at end of file
diff --git a/src/config.rs b/src/config.rs
new file mode 100644
index 0000000..03ced37
--- /dev/null
+++ b/src/config.rs
@@ -0,0 +1,152 @@
+use config::{Config as ConfigBuilder, ConfigError, Environment, File};
+use serde::Deserialize;
+use std::env;
+
+#[derive(Debug, Deserialize, Clone)]
+pub struct Config {
+ pub database: DatabaseConfig,
+ pub server: ServerConfig,
+ pub storage: StorageConfig,
+}
+
+#[derive(Debug, Deserialize, Clone)]
+pub struct DatabaseConfig {
+ pub url: String,
+}
+
+#[derive(Debug, Deserialize, Clone)]
+pub struct ServerConfig {
+ pub host: String,
+ pub port: u16,
+}
+
+#[derive(Debug, Deserialize, Clone)]
+pub struct StorageConfig {
+ #[serde(rename = "type")]
+ pub storage_type: StorageType,
+ pub local: Option<LocalStorageConfig>,
+ pub s3: Option<S3Config>,
+ #[serde(default = "default_max_file_size")]
+ pub max_file_size_mb: u64,
+}
+
+fn default_max_file_size() -> u64 {
+ 5 // Default 5MB
+}
+
+#[derive(Debug, Deserialize, Clone)]
+#[serde(rename_all = "lowercase")]
+pub enum StorageType {
+ Local,
+ S3,
+}
+
+#[derive(Debug, Deserialize, Clone)]
+pub struct LocalStorageConfig {
+ pub path: String,
+}
+
+#[derive(Debug, Deserialize, Clone)]
+pub struct S3Config {
+ pub bucket_name: String,
+ pub region: String,
+ pub access_key_id: Option<String>,
+ pub secret_access_key: Option<String>,
+}
+
+impl Config {
+ pub fn new() -> Result<Self, ConfigError> {
+ let run_mode = env::var("RUN_MODE").unwrap_or_else(|_| "development".into());
+
+ let s = ConfigBuilder::builder()
+ // Start off by merging in the "default" configuration file
+ .add_source(File::with_name("config/default").required(false))
+ // Add in the current environment file
+ // Default to 'development' env
+ .add_source(File::with_name(&format!("config/{}", run_mode)).required(false))
+ // Add in a local configuration file
+ // This file shouldn't be checked in to git
+ .add_source(File::with_name("config/local").required(false))
+ // Add in settings from the environment (with a prefix of HYPERDASHI)
+ // E.g. `HYPERDASHI_DEBUG=1 ./target/app` would set the `debug` key
+ .add_source(Environment::with_prefix("HYPERDASHI").separator("_"))
+ // You can override settings from env variables
+ .add_source(
+ Environment::default()
+ .try_parsing(true)
+ .separator("_")
+ .list_separator(" ")
+ )
+ .build()?;
+
+ // Deserialize (and thus freeze) the entire configuration
+ s.try_deserialize()
+ }
+
+ pub fn from_env() -> Result<Self, ConfigError> {
+ // Load .env file if it exists
+ dotenvy::dotenv().ok();
+
+ let database_url = env::var("DATABASE_URL")
+ .unwrap_or_else(|_| "sqlite://hyperdashi.db".to_string());
+
+ let server_host = env::var("SERVER_HOST")
+ .unwrap_or_else(|_| "127.0.0.1".to_string());
+
+ let server_port = env::var("SERVER_PORT")
+ .unwrap_or_else(|_| "8080".to_string())
+ .parse::<u16>()
+ .unwrap_or(8080);
+
+ let storage_type = env::var("STORAGE_TYPE")
+ .unwrap_or_else(|_| "local".to_string());
+
+ let max_file_size_mb = env::var("STORAGE_MAX_FILE_SIZE_MB")
+ .ok()
+ .and_then(|s| s.parse().ok())
+ .unwrap_or(5);
+
+ let storage = match storage_type.to_lowercase().as_str() {
+ "s3" => {
+ let bucket_name = env::var("S3_BUCKET_NAME")
+ .map_err(|_| ConfigError::Message("S3_BUCKET_NAME not set".to_string()))?;
+ let region = env::var("AWS_REGION")
+ .map_err(|_| ConfigError::Message("AWS_REGION not set".to_string()))?;
+ let access_key_id = env::var("AWS_ACCESS_KEY_ID").ok();
+ let secret_access_key = env::var("AWS_SECRET_ACCESS_KEY").ok();
+
+ StorageConfig {
+ storage_type: StorageType::S3,
+ local: None,
+ s3: Some(S3Config {
+ bucket_name,
+ region,
+ access_key_id,
+ secret_access_key,
+ }),
+ max_file_size_mb,
+ }
+ }
+ _ => {
+ let path = env::var("LOCAL_STORAGE_PATH")
+ .unwrap_or_else(|_| "./uploads".to_string());
+
+ StorageConfig {
+ storage_type: StorageType::Local,
+ local: Some(LocalStorageConfig { path }),
+ s3: None,
+ max_file_size_mb,
+ }
+ }
+ };
+
+ Ok(Config {
+ database: DatabaseConfig { url: database_url },
+ server: ServerConfig {
+ host: server_host,
+ port: server_port,
+ },
+ storage,
+ })
+ }
+}
\ No newline at end of file
diff --git a/src/db/mod.rs b/src/db/mod.rs
new file mode 100644
index 0000000..e4bc300
--- /dev/null
+++ b/src/db/mod.rs
@@ -0,0 +1,85 @@
+use sqlx::postgres::{PgPool, PgPoolOptions};
+use sqlx::sqlite::{SqliteConnectOptions, SqlitePool, SqlitePoolOptions};
+use std::str::FromStr;
+
+use crate::config::Config;
+use crate::error::AppResult;
+
+#[derive(Clone)]
+pub enum DatabasePool {
+ Postgres(PgPool),
+ Sqlite(SqlitePool),
+}
+
+impl DatabasePool {
+ pub async fn new(config: &Config) -> AppResult<Self> {
+ let database_url = &config.database.url;
+
+ if database_url.starts_with("postgres://") || database_url.starts_with("postgresql://") {
+ let pool = PgPoolOptions::new()
+ .max_connections(10)
+ .connect(database_url)
+ .await?;
+
+ Ok(DatabasePool::Postgres(pool))
+ } else if database_url.starts_with("sqlite://") {
+ let options = SqliteConnectOptions::from_str(database_url)?
+ .create_if_missing(true);
+
+ let pool = SqlitePoolOptions::new()
+ .max_connections(10)
+ .connect_with(options)
+ .await?;
+
+ Ok(DatabasePool::Sqlite(pool))
+ } else {
+ Err(crate::error::AppError::ConfigError(
+ config::ConfigError::Message(
+ "Invalid database URL. Must start with postgres:// or sqlite://".to_string()
+ )
+ ))
+ }
+ }
+
+ pub async fn migrate(&self) -> AppResult<()> {
+ match self {
+ DatabasePool::Postgres(pool) => {
+ sqlx::migrate!("./migrations")
+ .run(pool)
+ .await
+ .map_err(|e| crate::error::AppError::DatabaseError(sqlx::Error::Migrate(Box::new(e))))?;
+ }
+ DatabasePool::Sqlite(pool) => {
+ sqlx::migrate!("./migrations")
+ .run(pool)
+ .await
+ .map_err(|e| crate::error::AppError::DatabaseError(sqlx::Error::Migrate(Box::new(e))))?;
+ }
+ }
+ Ok(())
+ }
+
+ pub fn postgres(&self) -> Option<&PgPool> {
+ match self {
+ DatabasePool::Postgres(pool) => Some(pool),
+ _ => None,
+ }
+ }
+
+ pub fn sqlite(&self) -> Option<&SqlitePool> {
+ match self {
+ DatabasePool::Sqlite(pool) => Some(pool),
+ _ => None,
+ }
+ }
+}
+
+// Dispatch a runtime `query_as` over whichever backend the pool wraps.
+// `$ty` must implement `FromRow` for both Postgres and SQLite rows.
+#[macro_export]
+macro_rules! query_as {
+ ($ty:ty, $query:expr, $pool:expr) => {
+ match $pool {
+ $crate::db::DatabasePool::Postgres(pool) => sqlx::query_as::<_, $ty>($query).fetch_all(pool).await,
+ $crate::db::DatabasePool::Sqlite(pool) => sqlx::query_as::<_, $ty>($query).fetch_all(pool).await,
+ }
+ };
+}
\ No newline at end of file
diff --git a/src/error.rs b/src/error.rs
new file mode 100644
index 0000000..b68a7aa
--- /dev/null
+++ b/src/error.rs
@@ -0,0 +1,101 @@
+use axum::{
+ http::StatusCode,
+ response::{IntoResponse, Response},
+ Json,
+};
+use serde_json::json;
+use std::fmt;
+
+#[derive(Debug)]
+pub enum AppError {
+ NotFound(String),
+ BadRequest(String),
+ InternalServerError(String),
+ DatabaseError(sqlx::Error),
+ ConfigError(config::ConfigError),
+ IoError(std::io::Error),
+ ValidationError(String),
+ StorageError(String),
+}
+
+impl fmt::Display for AppError {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ match self {
+ AppError::NotFound(msg) => write!(f, "Not found: {}", msg),
+ AppError::BadRequest(msg) => write!(f, "Bad request: {}", msg),
+ AppError::InternalServerError(msg) => write!(f, "Internal server error: {}", msg),
+ AppError::DatabaseError(err) => write!(f, "Database error: {}", err),
+ AppError::ConfigError(err) => write!(f, "Configuration error: {}", err),
+ AppError::IoError(err) => write!(f, "IO error: {}", err),
+ AppError::ValidationError(msg) => write!(f, "Validation error: {}", msg),
+ AppError::StorageError(msg) => write!(f, "Storage error: {}", msg),
+ }
+ }
+}
+
+impl std::error::Error for AppError {}
+
+impl IntoResponse for AppError {
+ fn into_response(self) -> Response {
+ let (status, error_message) = match self {
+ AppError::NotFound(msg) => (StatusCode::NOT_FOUND, msg),
+ AppError::BadRequest(msg) => (StatusCode::BAD_REQUEST, msg),
+ AppError::ValidationError(msg) => (StatusCode::BAD_REQUEST, msg),
+ AppError::InternalServerError(msg) => (StatusCode::INTERNAL_SERVER_ERROR, msg),
+ AppError::DatabaseError(ref err) => {
+ tracing::error!("Database error: {:?}", err);
+ (
+ StatusCode::INTERNAL_SERVER_ERROR,
+ "Database error occurred".to_string(),
+ )
+ }
+ AppError::ConfigError(ref err) => {
+ tracing::error!("Config error: {:?}", err);
+ (
+ StatusCode::INTERNAL_SERVER_ERROR,
+ "Configuration error occurred".to_string(),
+ )
+ }
+ AppError::IoError(ref err) => {
+ tracing::error!("IO error: {:?}", err);
+ (
+ StatusCode::INTERNAL_SERVER_ERROR,
+ "IO error occurred".to_string(),
+ )
+ }
+ AppError::StorageError(ref msg) => {
+ tracing::error!("Storage error: {}", msg);
+ (
+ StatusCode::INTERNAL_SERVER_ERROR,
+ "Storage error occurred".to_string(),
+ )
+ }
+ };
+
+ let body = Json(json!({
+ "error": error_message,
+ }));
+
+ (status, body).into_response()
+ }
+}
+
+impl From<sqlx::Error> for AppError {
+ fn from(err: sqlx::Error) -> Self {
+ AppError::DatabaseError(err)
+ }
+}
+
+impl From<config::ConfigError> for AppError {
+ fn from(err: config::ConfigError) -> Self {
+ AppError::ConfigError(err)
+ }
+}
+
+impl From<std::io::Error> for AppError {
+ fn from(err: std::io::Error) -> Self {
+ AppError::IoError(err)
+ }
+}
+
+pub type AppResult<T> = Result<T, AppError>;
\ No newline at end of file
diff --git a/src/handlers/cable_colors.rs b/src/handlers/cable_colors.rs
new file mode 100644
index 0000000..44cbec6
--- /dev/null
+++ b/src/handlers/cable_colors.rs
@@ -0,0 +1,78 @@
+use axum::{
+ extract::{Path, Query, State},
+ http::StatusCode,
+ Json,
+};
+use serde::Deserialize;
+use std::sync::Arc;
+use validator::Validate;
+
+use crate::error::AppResult;
+use crate::models::{CableColor, CableColorsListResponse, CreateCableColorRequest, UpdateCableColorRequest};
+use crate::services::{CableColorService, ItemService, LoanService, StorageService};
+
+#[derive(Deserialize)]
+pub struct CableColorsQuery {
+ #[serde(default = "default_page")]
+ pub page: u32,
+ #[serde(default = "default_per_page")]
+ pub per_page: u32,
+}
+
+fn default_page() -> u32 {
+ 1
+}
+
+fn default_per_page() -> u32 {
+ 20
+}
+
+pub async fn list_cable_colors(
+ State((cable_color_service, _item_service, _loan_service, _storage_service)): State<(Arc<CableColorService>, Arc<ItemService>, Arc<LoanService>, Arc<StorageService>)>,
+ Query(params): Query<CableColorsQuery>,
+) -> AppResult<Json<CableColorsListResponse>> {
+ let response = cable_color_service
+ .list_cable_colors(params.page, params.per_page)
+ .await?;
+
+ Ok(Json(response))
+}
+
+pub async fn get_cable_color(
+ State((cable_color_service, _item_service, _loan_service, _storage_service)): State<(Arc<CableColorService>, Arc<ItemService>, Arc<LoanService>, Arc<StorageService>)>,
+ Path(id): Path<i64>,
+) -> AppResult<Json<CableColor>> {
+ let cable_color = cable_color_service.get_cable_color(id).await?;
+ Ok(Json(cable_color))
+}
+
+pub async fn create_cable_color(
+ State((cable_color_service, _item_service, _loan_service, _storage_service)): State<(Arc<CableColorService>, Arc<ItemService>, Arc<LoanService>, Arc<StorageService>)>,
+ Json(req): Json<CreateCableColorRequest>,
+) -> AppResult<(StatusCode, Json<CableColor>)> {
+ req.validate()
+ .map_err(|e| crate::error::AppError::ValidationError(e.to_string()))?;
+
+ let cable_color = cable_color_service.create_cable_color(req).await?;
+ Ok((StatusCode::CREATED, Json(cable_color)))
+}
+
+pub async fn update_cable_color(
+ State((cable_color_service, _item_service, _loan_service, _storage_service)): State<(Arc<CableColorService>, Arc<ItemService>, Arc<LoanService>, Arc<StorageService>)>,
+ Path(id): Path<i64>,
+ Json(req): Json<UpdateCableColorRequest>,
+) -> AppResult<Json<CableColor>> {
+ req.validate()
+ .map_err(|e| crate::error::AppError::ValidationError(e.to_string()))?;
+
+ let cable_color = cable_color_service.update_cable_color(id, req).await?;
+ Ok(Json(cable_color))
+}
+
+pub async fn delete_cable_color(
+ State((cable_color_service, _item_service, _loan_service, _storage_service)): State<(Arc<CableColorService>, Arc<ItemService>, Arc<LoanService>, Arc<StorageService>)>,
+ Path(id): Path<i64>,
+) -> AppResult<StatusCode> {
+ cable_color_service.delete_cable_color(id).await?;
+ Ok(StatusCode::NO_CONTENT)
+}
\ No newline at end of file
diff --git a/src/handlers/images.rs b/src/handlers/images.rs
new file mode 100644
index 0000000..1cc93e2
--- /dev/null
+++ b/src/handlers/images.rs
@@ -0,0 +1,87 @@
+use axum::{
+ extract::{Multipart, State},
+ http::StatusCode,
+ Json,
+};
+use serde::{Deserialize, Serialize};
+use std::sync::Arc;
+
+use crate::error::AppResult;
+use crate::services::{ItemService, LoanService, StorageService, CableColorService};
+
+#[derive(Debug, Serialize, Deserialize)]
+pub struct ImageUploadResponse {
+ pub url: String,
+ pub filename: String,
+ pub size: usize,
+}
+
+pub async fn upload_image(
+ State((_cable_color_service, _item_service, _loan_service, storage_service)): State<(Arc<CableColorService>, Arc<ItemService>, Arc<LoanService>, Arc<StorageService>)>,
+ mut multipart: Multipart,
+) -> AppResult<(StatusCode, Json<ImageUploadResponse>)> {
+ while let Some(field) = multipart.next_field().await.map_err(|e| {
+ crate::error::AppError::BadRequest(format!("Failed to read multipart field: {}", e))
+ })? {
+ let name = field.name().unwrap_or("").to_string();
+
+ if name == "image" {
+ let filename = field.file_name()
+ .ok_or_else(|| crate::error::AppError::BadRequest("No filename provided".to_string()))?
+ .to_string();
+
+ let content_type = field.content_type()
+ .unwrap_or("application/octet-stream")
+ .to_string();
+
+ // Validate that the uploaded file is an image
+ if !is_image_content_type(&content_type) {
+ return Err(crate::error::AppError::BadRequest(
+ "Only image files are allowed (JPEG, PNG, GIF, WebP)".to_string()
+ ));
+ }
+
+ let data = field.bytes().await.map_err(|e| {
+ crate::error::AppError::BadRequest(format!("Failed to read file data: {}", e))
+ })?;
+
+ // File size limit (5MB)
+ const MAX_FILE_SIZE: usize = 5 * 1024 * 1024;
+ if data.len() > MAX_FILE_SIZE {
+ return Err(crate::error::AppError::BadRequest(
+ "File size exceeds 5MB limit".to_string()
+ ));
+ }
+
+ // Generate a unique filename
+ let unique_filename = generate_unique_filename(&filename);
+
+ // Upload to storage
+ let url = storage_service.upload(data.to_vec(), &unique_filename, &content_type).await?;
+
+ return Ok((StatusCode::CREATED, Json(ImageUploadResponse {
+ url,
+ filename: unique_filename,
+ size: data.len(),
+ })));
+ }
+ }
+
+ Err(crate::error::AppError::BadRequest("No image field found in multipart data".to_string()))
+}
+
+fn is_image_content_type(content_type: &str) -> bool {
+ matches!(content_type,
+ "image/jpeg" | "image/jpg" | "image/png" | "image/gif" | "image/webp"
+ )
+}
+
+fn generate_unique_filename(original_filename: &str) -> String {
+ let timestamp = chrono::Utc::now().timestamp_millis();
+ let extension = std::path::Path::new(original_filename)
+ .extension()
+ .and_then(|ext| ext.to_str())
+ .unwrap_or("jpg");
+
+ format!("{}_{}.{}", timestamp, uuid::Uuid::new_v4(), extension)
+}
\ No newline at end of file
diff --git a/src/handlers/items.rs b/src/handlers/items.rs
new file mode 100644
index 0000000..b758a05
--- /dev/null
+++ b/src/handlers/items.rs
@@ -0,0 +1,130 @@
+use axum::{
+ extract::{Path, Query, State},
+ http::StatusCode,
+ Json,
+};
+use serde::{Deserialize, Serialize};
+use std::sync::Arc;
+use validator::Validate;
+
+use crate::error::AppResult;
+use crate::models::{CreateItemRequest, Item, ItemsListResponse, UpdateItemRequest};
+use crate::services::{ItemService, LoanService, StorageService, CableColorService};
+
+#[derive(Deserialize)]
+pub struct ItemsQuery {
+ #[serde(default = "default_page")]
+ pub page: u32,
+ #[serde(default = "default_per_page")]
+ pub per_page: u32,
+ pub search: Option<String>,
+ pub is_on_loan: Option<bool>,
+ pub is_disposed: Option<bool>,
+}
+
+fn default_page() -> u32 {
+ 1
+}
+
+fn default_per_page() -> u32 {
+ 20
+}
+
+pub async fn list_items(
+ State((_cable_color_service, item_service, _loan_service, _storage_service)): State<(Arc<CableColorService>, Arc<ItemService>, Arc<LoanService>, Arc<StorageService>)>,
+ Query(params): Query<ItemsQuery>,
+) -> AppResult<Json<ItemsListResponse>> {
+ let response = item_service
+ .list_items(
+ params.page,
+ params.per_page,
+ params.search,
+ params.is_on_loan,
+ params.is_disposed,
+ )
+ .await?;
+
+ Ok(Json(response))
+}
+
+pub async fn get_item(
+ State((_cable_color_service, item_service, _loan_service, _storage_service)): State<(Arc<CableColorService>, Arc<ItemService>, Arc<LoanService>, Arc<StorageService>)>,
+ Path(id): Path<i64>,
+) -> AppResult<Json<Item>> {
+ let item = item_service.get_item(id).await?;
+ Ok(Json(item))
+}
+
+pub async fn get_item_by_label(
+ State((_cable_color_service, item_service, _loan_service, _storage_service)): State<(Arc<CableColorService>, Arc<ItemService>, Arc<LoanService>, Arc<StorageService>)>,
+ Path(label_id): Path<String>,
+) -> AppResult<Json<Item>> {
+ let item = item_service.get_item_by_label(&label_id).await?;
+ Ok(Json(item))
+}
+
+pub async fn create_item(
+ State((_cable_color_service, item_service, _loan_service, _storage_service)): State<(Arc<CableColorService>, Arc<ItemService>, Arc<LoanService>, Arc<StorageService>)>,
+ Json(req): Json<CreateItemRequest>,
+) -> AppResult<(StatusCode, Json<Item>)> {
+ req.validate()
+ .map_err(|e| crate::error::AppError::ValidationError(e.to_string()))?;
+
+ let item = item_service.create_item(req).await?;
+ Ok((StatusCode::CREATED, Json(item)))
+}
+
+pub async fn update_item(
+ State((_cable_color_service, item_service, _loan_service, _storage_service)): State<(Arc<CableColorService>, Arc<ItemService>, Arc<LoanService>, Arc<StorageService>)>,
+ Path(id): Path<i64>,
+ Json(req): Json<UpdateItemRequest>,
+) -> AppResult<Json<Item>> {
+ req.validate()
+ .map_err(|e| crate::error::AppError::ValidationError(e.to_string()))?;
+
+ let item = item_service.update_item(id, req).await?;
+ Ok(Json(item))
+}
+
+pub async fn delete_item(
+ State((_cable_color_service, item_service, _loan_service, _storage_service)): State<(Arc<CableColorService>, Arc<ItemService>, Arc<LoanService>, Arc<StorageService>)>,
+ Path(id): Path<i64>,
+) -> AppResult<StatusCode> {
+ item_service.delete_item(id).await?;
+ Ok(StatusCode::NO_CONTENT)
+}
+
+pub async fn dispose_item(
+ State((_cable_color_service, item_service, _loan_service, _storage_service)): State<(Arc<CableColorService>, Arc<ItemService>, Arc<LoanService>, Arc<StorageService>)>,
+ Path(id): Path<i64>,
+) -> AppResult<Json<Item>> {
+ let item = item_service.dispose_item(id).await?;
+ Ok(Json(item))
+}
+
+pub async fn undispose_item(
+ State((_cable_color_service, item_service, _loan_service, _storage_service)): State<(Arc<CableColorService>, Arc<ItemService>, Arc<LoanService>, Arc<StorageService>)>,
+ Path(id): Path<i64>,
+) -> AppResult<Json<Item>> {
+ let item = item_service.undispose_item(id).await?;
+ Ok(Json(item))
+}
+
+#[derive(Serialize)]
+pub struct SuggestionsResponse {
+ pub suggestions: Vec<String>,
+}
+
+pub async fn get_connection_names_suggestions(
+ State((_cable_color_service, item_service, _loan_service, _storage_service)): State<(Arc<CableColorService>, Arc<ItemService>, Arc<LoanService>, Arc<StorageService>)>,
+) -> AppResult<Json<SuggestionsResponse>> {
+ let suggestions = item_service.get_connection_names_suggestions().await?;
+ Ok(Json(SuggestionsResponse { suggestions }))
+}
+
+pub async fn get_storage_locations_suggestions(
+ State((_cable_color_service, item_service, _loan_service, _storage_service)): State<(Arc<CableColorService>, Arc<ItemService>, Arc<LoanService>, Arc<StorageService>)>,
+) -> AppResult<Json<SuggestionsResponse>> {
+ let suggestions = item_service.get_storage_locations_suggestions().await?;
+ Ok(Json(SuggestionsResponse { suggestions }))
+}
\ No newline at end of file
diff --git a/src/handlers/loans.rs b/src/handlers/loans.rs
new file mode 100644
index 0000000..0780df2
--- /dev/null
+++ b/src/handlers/loans.rs
@@ -0,0 +1,79 @@
+use axum::{
+ extract::{Path, Query, State},
+ http::StatusCode,
+ Json,
+};
+use serde::Deserialize;
+use std::sync::Arc;
+use validator::Validate;
+
+use crate::error::AppResult;
+use crate::models::{CreateLoanRequest, Loan, LoansListResponse, ReturnLoanRequest};
+use crate::services::{ItemService, LoanService, StorageService, CableColorService};
+
+#[derive(Deserialize)]
+pub struct LoansQuery {
+ #[serde(default = "default_page")]
+ pub page: u32,
+ #[serde(default = "default_per_page")]
+ pub per_page: u32,
+ pub item_id: Option<i64>,
+ pub student_number: Option<String>,
+ pub active_only: Option<bool>,
+}
+
+fn default_page() -> u32 {
+ 1
+}
+
+fn default_per_page() -> u32 {
+ 20
+}
+
+pub async fn list_loans(
+ State((_cable_color_service, _item_service, loan_service, _storage_service)): State<(Arc<CableColorService>, Arc<ItemService>, Arc<LoanService>, Arc<StorageService>)>,
+ Query(params): Query<LoansQuery>,
+) -> AppResult<Json<LoansListResponse>> {
+ let response = loan_service
+ .list_loans(
+ params.page,
+ params.per_page,
+ params.item_id,
+ params.student_number,
+ params.active_only,
+ )
+ .await?;
+
+ Ok(Json(response))
+}
+
+pub async fn get_loan(
+ State((_cable_color_service, _item_service, loan_service, _storage_service)): State<(Arc<CableColorService>, Arc<ItemService>, Arc<LoanService>, Arc<StorageService>)>,
+ Path(id): Path<i64>,
+) -> AppResult<Json<Loan>> {
+ let loan = loan_service.get_loan(id).await?;
+ Ok(Json(loan))
+}
+
+pub async fn create_loan(
+ State((_cable_color_service, _item_service, loan_service, _storage_service)): State<(Arc<CableColorService>, Arc<ItemService>, Arc<LoanService>, Arc<StorageService>)>,
+ Json(req): Json<CreateLoanRequest>,
+) -> AppResult<(StatusCode, Json<Loan>)> {
+ req.validate()
+ .map_err(|e| crate::error::AppError::ValidationError(e.to_string()))?;
+
+ let loan = loan_service.create_loan(req).await?;
+ Ok((StatusCode::CREATED, Json(loan)))
+}
+
+pub async fn return_loan(
+ State((_cable_color_service, _item_service, loan_service, _storage_service)): State<(Arc<CableColorService>, Arc<ItemService>, Arc<LoanService>, Arc<StorageService>)>,
+ Path(id): Path<i64>,
+ Json(req): Json<ReturnLoanRequest>,
+) -> AppResult<Json<Loan>> {
+ req.validate()
+ .map_err(|e| crate::error::AppError::ValidationError(e.to_string()))?;
+
+ let loan = loan_service.return_loan(id, req).await?;
+ Ok(Json(loan))
+}
\ No newline at end of file
diff --git a/src/handlers/mod.rs b/src/handlers/mod.rs
new file mode 100644
index 0000000..34cf4aa
--- /dev/null
+++ b/src/handlers/mod.rs
@@ -0,0 +1,15 @@
+pub mod items;
+pub mod loans;
+pub mod images;
+pub mod cable_colors;
+
+pub use items::*;
+pub use loans::*;
+pub use images::*;
+pub use cable_colors::*;
+
+use std::sync::Arc;
+use crate::config::Config;
+use crate::services::{ItemService, LoanService, StorageService, CableColorService};
+
+pub type AppState = (Arc<Config>, Arc<CableColorService>, Arc<ItemService>, Arc<LoanService>, Arc<StorageService>);
\ No newline at end of file
diff --git a/src/main.rs b/src/main.rs
new file mode 100644
index 0000000..a39bebd
--- /dev/null
+++ b/src/main.rs
@@ -0,0 +1,111 @@
+use axum::{
+ routing::{delete, get, post, put},
+ Router,
+};
+use std::net::SocketAddr;
+use std::sync::Arc;
+use tower_http::cors::CorsLayer;
+use tower_http::services::ServeDir;
+use tower_http::trace::TraceLayer;
+use tracing::info;
+use tracing_subscriber::{layer::SubscriberExt, util::SubscriberInitExt};
+
+mod config;
+mod db;
+mod error;
+mod handlers;
+mod models;
+mod services;
+
+use crate::config::{Config, StorageType};
+use crate::db::DatabasePool;
+use crate::services::{ItemService, LoanService, StorageService, CableColorService};
+
+#[tokio::main]
+async fn main() -> anyhow::Result<()> {
+ // Initialize tracing
+ tracing_subscriber::registry()
+ .with(
+ tracing_subscriber::EnvFilter::try_from_default_env()
+ .unwrap_or_else(|_| "hyperdashi_server=debug,tower_http=debug".into()),
+ )
+ .with(tracing_subscriber::fmt::layer())
+ .init();
+
+ info!("Starting HyperDashi server...");
+
+ // Load configuration
+ let config = Config::from_env()?;
+ info!("Configuration loaded: {:?}", config);
+
+ // Initialize database connection
+ let db_pool = DatabasePool::new(&config).await?;
+ info!("Database connection established");
+
+ // Run migrations
+ db_pool.migrate().await?;
+ info!("Database migrations completed");
+
+ // Initialize storage
+ let storage = Arc::new(StorageService::new(&config).await?);
+ info!("Storage initialized");
+
+ // Initialize services
+ let cable_color_service = Arc::new(CableColorService::new(db_pool.clone()));
+ let item_service = Arc::new(ItemService::new(db_pool.clone()));
+ let loan_service = Arc::new(LoanService::new(db_pool.clone()));
+
+ // Build application routes
+ let mut app = Router::new()
+ .route("/", get(root))
+ .route("/api/v1/health", get(health_check))
+ // Item routes
+ .route("/api/v1/items", get(handlers::list_items).post(handlers::create_item))
+ .route("/api/v1/items/:id", get(handlers::get_item).put(handlers::update_item).delete(handlers::delete_item))
+ .route("/api/v1/items/:id/dispose", post(handlers::dispose_item))
+ .route("/api/v1/items/:id/undispose", post(handlers::undispose_item))
+ .route("/api/v1/items/by-label/:label_id", get(handlers::get_item_by_label))
+ .route("/api/v1/items/suggestions/connection_names", get(handlers::get_connection_names_suggestions))
+ .route("/api/v1/items/suggestions/storage_locations", get(handlers::get_storage_locations_suggestions))
+ // Cable color routes
+ .route("/api/v1/cable_colors", get(handlers::list_cable_colors).post(handlers::create_cable_color))
+ .route("/api/v1/cable_colors/:id", get(handlers::get_cable_color).put(handlers::update_cable_color).delete(handlers::delete_cable_color))
+ // Loan routes
+ .route("/api/v1/loans", get(handlers::list_loans).post(handlers::create_loan))
+ .route("/api/v1/loans/:id", get(handlers::get_loan))
+ .route("/api/v1/loans/:id/return", post(handlers::return_loan))
+ // Image routes
+ .route("/api/v1/images/upload", post(handlers::upload_image))
+ // Add state - combine services
+ .with_state((cable_color_service, item_service, loan_service, storage))
+ .layer(CorsLayer::permissive())
+ .layer(TraceLayer::new_for_http());
+
+ // Add static file serving for local storage
+ if matches!(config.storage.storage_type, StorageType::Local) {
+ if let Some(local_config) = &config.storage.local {
+ info!("Enabling static file serving for uploads at {}", local_config.path);
+ app = app.nest_service("/uploads", ServeDir::new(&local_config.path));
+ }
+ }
+
+ // Start server
+ let addr = SocketAddr::from((
+ config.server.host.parse::<std::net::IpAddr>()?,
+ config.server.port,
+ ));
+ info!("Server listening on {}", addr);
+
+ let listener = tokio::net::TcpListener::bind(addr).await?;
+ axum::serve(listener, app).await?;
+
+ Ok(())
+}
+
+async fn root() -> &'static str {
+ "HyperDashi Server"
+}
+
+async fn health_check() -> &'static str {
+ "OK"
+}
\ No newline at end of file
diff --git a/src/models/cable_color.rs b/src/models/cable_color.rs
new file mode 100644
index 0000000..bfd42fa
--- /dev/null
+++ b/src/models/cable_color.rs
@@ -0,0 +1,46 @@
+use chrono::{DateTime, Utc};
+use serde::{Deserialize, Serialize};
+use validator::Validate;
+
+#[derive(Debug, Serialize, Deserialize)]
+pub struct CableColor {
+ pub id: i64,
+ pub name: String,
+ pub hex_code: Option<String>,
+ pub description: Option<String>,
+ pub created_at: DateTime<Utc>,
+ pub updated_at: DateTime<Utc>,
+}
+
+#[derive(Debug, Deserialize, Validate)]
+pub struct CreateCableColorRequest {
+ #[validate(length(min = 1, max = 100))]
+ pub name: String,
+ #[validate(regex(path = *crate::models::cable_color::HEX_COLOR_REGEX))]
+ pub hex_code: Option<String>,
+ pub description: Option<String>,
+}
+
+#[derive(Debug, Deserialize, Validate)]
+pub struct UpdateCableColorRequest {
+ #[validate(length(min = 1, max = 100))]
+ pub name: Option<String>,
+ #[validate(regex(path = *crate::models::cable_color::HEX_COLOR_REGEX))]
+ pub hex_code: Option<String>,
+ pub description: Option<String>,
+}
+
+#[derive(Debug, Serialize)]
+pub struct CableColorsListResponse {
+ pub cable_colors: Vec<CableColor>,
+ pub total: i64,
+ pub page: u32,
+ pub per_page: u32,
+}
+
+use lazy_static::lazy_static;
+use regex::Regex;
+
+lazy_static! {
+ pub static ref HEX_COLOR_REGEX: Regex = Regex::new(r"^#[0-9A-Fa-f]{6}$").unwrap();
+}
\ No newline at end of file
diff --git a/src/models/item.rs b/src/models/item.rs
new file mode 100644
index 0000000..a9bb90c
--- /dev/null
+++ b/src/models/item.rs
@@ -0,0 +1,107 @@
+use chrono::{DateTime, Utc};
+use serde::{Deserialize, Serialize};
+use validator::Validate;
+
+#[derive(Debug, Clone, Serialize, Deserialize)]
+pub struct Item {
+ pub id: i64,
+ pub name: String,
+ pub label_id: String,
+ pub model_number: Option<String>,
+ pub remarks: Option<String>,
+ pub purchase_year: Option<i32>,
+ pub purchase_amount: Option<f64>,
+ pub durability_years: Option<i32>,
+ pub is_depreciation_target: Option<bool>,
+ pub connection_names: Option<Vec<String>>,
+ pub cable_color_pattern: Option<Vec<String>>,
+ pub storage_locations: Option<Vec<String>>,
+ pub is_on_loan: Option<bool>,
+ pub qr_code_type: Option<String>,
+ pub is_disposed: Option<bool>,
+ pub image_url: Option<String>,
+ pub created_at: DateTime<Utc>,
+ pub updated_at: DateTime<Utc>,
+}
+
+#[derive(Debug, Clone, Serialize, Deserialize, Validate)]
+pub struct CreateItemRequest {
+ #[validate(length(min = 1, max = 255))]
+ pub name: String,
+
+ #[validate(length(min = 1, max = 50))]
+ pub label_id: String,
+
+ #[validate(length(max = 255))]
+ pub model_number: Option<String>,
+
+ pub remarks: Option<String>,
+
+ #[validate(range(min = 1900, max = 2100))]
+ pub purchase_year: Option<i32>,
+
+ pub purchase_amount: Option<f64>,
+
+ #[validate(range(min = 1, max = 100))]
+ pub durability_years: Option<i32>,
+
+ pub is_depreciation_target: Option<bool>,
+
+ pub connection_names: Option<Vec<String>>,
+
+ pub cable_color_pattern: Option<Vec<String>>,
+
+ pub storage_locations: Option<Vec<String>>,
+
+ pub qr_code_type: Option<String>,
+
+ #[validate(url)]
+ pub image_url: Option<String>,
+}
+
+#[derive(Debug, Clone, Serialize, Deserialize, Validate)]
+pub struct UpdateItemRequest {
+ #[validate(length(min = 1, max = 255))]
+ pub name: Option<String>,
+
+ #[validate(length(min = 1, max = 50))]
+ pub label_id: Option<String>,
+
+ #[validate(length(max = 255))]
+ pub model_number: Option<String>,
+
+ pub remarks: Option<String>,
+
+ #[validate(range(min = 1900, max = 2100))]
+ pub purchase_year: Option<i32>,
+
+ pub purchase_amount: Option<f64>,
+
+ #[validate(range(min = 1, max = 100))]
+ pub durability_years: Option<i32>,
+
+ pub is_depreciation_target: Option<bool>,
+
+ pub connection_names: Option<Vec<String>>,
+
+ pub cable_color_pattern: Option<Vec<String>>,
+
+ pub storage_locations: Option<Vec<String>>,
+
+ pub is_on_loan: Option<bool>,
+
+ pub qr_code_type: Option<String>,
+
+ pub is_disposed: Option<bool>,
+
+ #[validate(url)]
+ pub image_url: Option<String>,
+}
+
+#[derive(Debug, Clone, Serialize, Deserialize)]
+pub struct ItemsListResponse {
+ pub items: Vec<Item>,
+ pub total: i64,
+ pub page: u32,
+ pub per_page: u32,
+}
\ No newline at end of file
diff --git a/src/models/loan.rs b/src/models/loan.rs
new file mode 100644
index 0000000..222848e
--- /dev/null
+++ b/src/models/loan.rs
@@ -0,0 +1,72 @@
+use chrono::{DateTime, Utc};
+use serde::{Deserialize, Serialize};
+use validator::Validate;
+
+#[derive(Debug, Clone, Serialize, Deserialize)]
+pub struct Loan {
+ pub id: i64,
+ pub item_id: i64,
+ pub student_number: String,
+ pub student_name: String,
+ pub organization: Option<String>,
+ pub loan_date: DateTime<Utc>,
+ pub return_date: Option<DateTime<Utc>>,
+ pub remarks: Option<String>,
+ pub created_at: DateTime<Utc>,
+ pub updated_at: DateTime<Utc>,
+}
+
+#[derive(Debug, Clone, Serialize, Deserialize, Validate)]
+pub struct CreateLoanRequest {
+ pub item_id: i64,
+
+ #[validate(length(min = 1, max = 20))]
+ pub student_number: String,
+
+ #[validate(length(min = 1, max = 100))]
+ pub student_name: String,
+
+ #[validate(length(max = 255))]
+ pub organization: Option<String>,
+
+ pub remarks: Option<String>,
+}
+
+#[derive(Debug, Clone, Serialize, Deserialize, Validate)]
+pub struct ReturnLoanRequest {
+ pub return_date: Option<DateTime<Utc>>,
+ pub remarks: Option<String>,
+}
+
+#[derive(Debug, Clone, Serialize, Deserialize)]
+pub struct LoanWithItem {
+ pub id: i64,
+ pub item_id: i64,
+ pub item_name: String,
+ pub item_label_id: String,
+ pub student_number: String,
+ pub student_name: String,
+ pub organization: Option<String>,
+ pub loan_date: DateTime<Utc>,
+ pub return_date: Option<DateTime<Utc>>,
+ pub remarks: Option<String>,
+ pub created_at: DateTime<Utc>,
+ pub updated_at: DateTime<Utc>,
+}
+
+#[derive(Debug, Clone, Serialize, Deserialize)]
+pub struct LoansListResponse {
+ pub loans: Vec<LoanWithItem>,
+ pub total: i64,
+ pub page: u32,
+ pub per_page: u32,
+}
+
+#[derive(Debug, Clone, Serialize, Deserialize)]
+pub struct LoanFilters {
+ pub item_id: Option<i64>,
+ pub student_number: Option<String>,
+ pub active_only: Option<bool>,
+ pub page: Option<u32>,
+ pub per_page: Option<u32>,
+}
\ No newline at end of file
diff --git a/src/models/mod.rs b/src/models/mod.rs
new file mode 100644
index 0000000..30865ce
--- /dev/null
+++ b/src/models/mod.rs
@@ -0,0 +1,7 @@
+pub mod item;
+pub mod loan;
+pub mod cable_color;
+
+pub use item::*;
+pub use loan::*;
+pub use cable_color::*;
\ No newline at end of file
diff --git a/src/services/cable_color_service.rs b/src/services/cable_color_service.rs
new file mode 100644
index 0000000..ec2ba72
--- /dev/null
+++ b/src/services/cable_color_service.rs
@@ -0,0 +1,170 @@
+use crate::db::DatabasePool;
+use crate::error::{AppError, AppResult};
+use crate::models::{CableColor, CableColorsListResponse, CreateCableColorRequest, UpdateCableColorRequest};
+use sqlx::Row;
+
+pub struct CableColorService {
+ db: DatabasePool,
+}
+
+impl CableColorService {
+ pub fn new(db: DatabasePool) -> Self {
+ Self { db }
+ }
+
+ pub async fn create_cable_color(&self, req: CreateCableColorRequest) -> AppResult {
+ match &self.db {
+ DatabasePool::Postgres(_pool) => {
+ Err(AppError::InternalServerError("PostgreSQL support not implemented yet".to_string()))
+ }
+ DatabasePool::Sqlite(pool) => {
+ let result = sqlx::query(
+ r#"
+ INSERT INTO cable_colors (name, hex_code, description)
+ VALUES (?1, ?2, ?3)
+ "#,
+ )
+ .bind(&req.name)
+ .bind(&req.hex_code)
+ .bind(&req.description)
+ .execute(pool)
+ .await?;
+
+ let id = result.last_insert_rowid();
+ self.get_cable_color(id).await
+ }
+ }
+ }
+
+ pub async fn get_cable_color(&self, id: i64) -> AppResult {
+ match &self.db {
+ DatabasePool::Postgres(_pool) => {
+ Err(AppError::InternalServerError("PostgreSQL support not implemented yet".to_string()))
+ }
+ DatabasePool::Sqlite(pool) => {
+ let row = sqlx::query(
+ r#"
+ SELECT id, name, hex_code, description, created_at, updated_at
+ FROM cable_colors
+ WHERE id = ?1
+ "#,
+ )
+ .bind(id)
+ .fetch_optional(pool)
+ .await?
+ .ok_or_else(|| AppError::NotFound(format!("Cable color with id {} not found", id)))?;
+
+ Ok(self.row_to_cable_color(row))
+ }
+ }
+ }
+
+ pub async fn list_cable_colors(
+ &self,
+ page: u32,
+ per_page: u32,
+ ) -> AppResult<CableColorsListResponse> {
+ let offset = ((page - 1) * per_page) as i64;
+ let limit = per_page as i64;
+
+ match &self.db {
+ DatabasePool::Postgres(_pool) => {
+ Err(AppError::InternalServerError("PostgreSQL support not implemented yet".to_string()))
+ }
+ DatabasePool::Sqlite(pool) => {
+ let rows = sqlx::query(
+ r#"
+ SELECT id, name, hex_code, description, created_at, updated_at
+ FROM cable_colors
+ ORDER BY created_at DESC
+ LIMIT ?1 OFFSET ?2
+ "#,
+ )
+ .bind(limit)
+ .bind(offset)
+ .fetch_all(pool)
+ .await?;
+
+ let cable_colors: Vec<CableColor> = rows.into_iter()
+ .map(|row| self.row_to_cable_color(row))
+ .collect();
+
+ let count_row = sqlx::query("SELECT COUNT(*) as count FROM cable_colors")
+ .fetch_one(pool)
+ .await?;
+ let total: i64 = count_row.get("count");
+
+ Ok(CableColorsListResponse {
+ cable_colors,
+ total,
+ page,
+ per_page,
+ })
+ }
+ }
+ }
+
+ pub async fn update_cable_color(&self, id: i64, req: UpdateCableColorRequest) -> AppResult<CableColor> {
+ match &self.db {
+ DatabasePool::Postgres(_pool) => {
+ Err(AppError::InternalServerError("PostgreSQL support not implemented yet".to_string()))
+ }
+ DatabasePool::Sqlite(pool) => {
+ // First check that the cable color exists
+ let _existing_color = self.get_cable_color(id).await?;
+
+ let now = chrono::Utc::now();
+
+ sqlx::query(
+ r#"
+ UPDATE cable_colors SET
+ name = COALESCE(?2, name),
+ hex_code = COALESCE(?3, hex_code),
+ description = COALESCE(?4, description),
+ updated_at = ?5
+ WHERE id = ?1
+ "#,
+ )
+ .bind(id)
+ .bind(&req.name)
+ .bind(&req.hex_code)
+ .bind(&req.description)
+ .bind(now)
+ .execute(pool)
+ .await?;
+
+ self.get_cable_color(id).await
+ }
+ }
+ }
+
+ pub async fn delete_cable_color(&self, id: i64) -> AppResult<()> {
+ match &self.db {
+ DatabasePool::Postgres(_pool) => {
+ Err(AppError::InternalServerError("PostgreSQL support not implemented yet".to_string()))
+ }
+ DatabasePool::Sqlite(pool) => {
+ let result = sqlx::query("DELETE FROM cable_colors WHERE id = ?1")
+ .bind(id)
+ .execute(pool)
+ .await?;
+
+ if result.rows_affected() == 0 {
+ return Err(AppError::NotFound(format!("Cable color with id {} not found", id)));
+ }
+ Ok(())
+ }
+ }
+ }
+
+ fn row_to_cable_color(&self, row: sqlx::sqlite::SqliteRow) -> CableColor {
+ CableColor {
+ id: row.get("id"),
+ name: row.get("name"),
+ hex_code: row.get("hex_code"),
+ description: row.get("description"),
+ created_at: row.get("created_at"),
+ updated_at: row.get("updated_at"),
+ }
+ }
+}
\ No newline at end of file
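
Note on the COALESCE pattern used in update_cable_color (and again in update_item below): binding a None sends SQL NULL, and COALESCE(NULL, name) evaluates to the stored value, so omitted Option fields are left untouched. The flip side is that this pattern cannot reset a column back to NULL. A minimal sketch of the behavior, assuming a bare sqlx::SqlitePool rather than the DatabasePool wrapper:

    use sqlx::SqlitePool;

    // Only hex_code changes here; name keeps its stored value because the
    // bound None arrives as NULL and COALESCE falls through to the column.
    async fn update_hex_only(pool: &SqlitePool, id: i64) -> Result<(), sqlx::Error> {
        let name: Option<String> = None; // "leave unchanged"
        let hex: Option<String> = Some("#FFFFFF".to_string());
        sqlx::query(
            "UPDATE cable_colors SET name = COALESCE(?2, name), \
             hex_code = COALESCE(?3, hex_code) WHERE id = ?1",
        )
        .bind(id)
        .bind(name)
        .bind(hex)
        .execute(pool)
        .await?;
        Ok(())
    }
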
diff --git a/src/services/item_service.rs b/src/services/item_service.rs
new file mode 100644
index 0000000..fa432ef
--- /dev/null
+++ b/src/services/item_service.rs
@@ -0,0 +1,496 @@
+use crate::db::DatabasePool;
+use crate::error::{AppError, AppResult};
+use crate::models::{CreateItemRequest, Item, ItemsListResponse, UpdateItemRequest};
+use chrono::Utc;
+use sqlx::Row;
+
+pub struct ItemService {
+ db: DatabasePool,
+}
+
+impl ItemService {
+ pub fn new(db: DatabasePool) -> Self {
+ Self { db }
+ }
+
+ pub async fn create_item(&self, req: CreateItemRequest) -> AppResult<Item> {
+ match &self.db {
+ DatabasePool::Postgres(_pool) => {
+ Err(AppError::InternalServerError("PostgreSQL support not implemented yet".to_string()))
+ }
+ DatabasePool::Sqlite(pool) => {
+ let connection_names = req.connection_names
+ .map(|v| serde_json::to_string(&v).unwrap_or_default());
+ let cable_color_pattern = req.cable_color_pattern
+ .map(|v| serde_json::to_string(&v).unwrap_or_default());
+ let storage_locations = req.storage_locations
+ .map(|v| serde_json::to_string(&v).unwrap_or_default());
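+ // Note: unwrap_or_default() stores an empty string when serialization
+ // fails, silently dropping the value; update_item below surfaces the
+ // same failure as an AppError instead.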
+ let is_depreciation_target = req.is_depreciation_target.unwrap_or(false);
+
+ let result = sqlx::query!(
+ r#"
+ INSERT INTO items (
+ name, label_id, model_number, remarks, purchase_year,
+ purchase_amount, durability_years, is_depreciation_target, connection_names,
+ cable_color_pattern, storage_locations, qr_code_type, image_url
+ ) VALUES (?1, ?2, ?3, ?4, ?5, ?6, ?7, ?8, ?9, ?10, ?11, ?12, ?13)
+ "#,
+ req.name,
+ req.label_id,
+ req.model_number,
+ req.remarks,
+ req.purchase_year,
+ req.purchase_amount,
+ req.durability_years,
+ is_depreciation_target,
+ connection_names,
+ cable_color_pattern,
+ storage_locations,
+ req.qr_code_type,
+ req.image_url
+ )
+ .execute(pool)
+ .await?;
+
+ let id = result.last_insert_rowid();
+ self.get_item(id).await
+ }
+ }
+ }
+
+ pub async fn get_item(&self, id: i64) -> AppResult<Item> {
+ match &self.db {
+ DatabasePool::Postgres(_pool) => {
+ Err(AppError::InternalServerError("PostgreSQL support not implemented yet".to_string()))
+ }
+ DatabasePool::Sqlite(pool) => {
+ let row = sqlx::query(
+ r#"
+ SELECT
+ id, name, label_id, model_number, remarks, purchase_year,
+ purchase_amount, durability_years, is_depreciation_target,
+ connection_names, cable_color_pattern, storage_locations,
+ is_on_loan, qr_code_type, is_disposed, image_url,
+ created_at, updated_at
+ FROM items
+ WHERE id = ?1
+ "#,
+ )
+ .bind(id)
+ .fetch_optional(pool)
+ .await?
+ .ok_or_else(|| AppError::NotFound(format!("Item with id {} not found", id)))?;
+
+ Ok(self.row_to_item(row))
+ }
+ }
+ }
+
+ pub async fn get_item_by_label(&self, label_id: &str) -> AppResult<Item> {
+ match &self.db {
+ DatabasePool::Postgres(_pool) => {
+ Err(AppError::InternalServerError("PostgreSQL support not implemented yet".to_string()))
+ }
+ DatabasePool::Sqlite(pool) => {
+ let row = sqlx::query(
+ r#"
+ SELECT
+ id, name, label_id, model_number, remarks, purchase_year,
+ purchase_amount, durability_years, is_depreciation_target,
+ connection_names, cable_color_pattern, storage_locations,
+ is_on_loan, qr_code_type, is_disposed, image_url,
+ created_at, updated_at
+ FROM items
+ WHERE label_id = ?1
+ "#,
+ )
+ .bind(label_id)
+ .fetch_optional(pool)
+ .await?
+ .ok_or_else(|| AppError::NotFound(format!("Item with label_id {} not found", label_id)))?;
+
+ Ok(self.row_to_item(row))
+ }
+ }
+ }
+
+ pub async fn list_items(
+ &self,
+ page: u32,
+ per_page: u32,
+ search: Option<String>,
+ is_on_loan: Option<bool>,
+ is_disposed: Option<bool>,
+ ) -> AppResult<ItemsListResponse> {
+ let offset = ((page - 1) * per_page) as i64;
+ let limit = per_page as i64;
+
+ match &self.db {
+ DatabasePool::Postgres(_pool) => {
+ Err(AppError::InternalServerError("PostgreSQL support not implemented yet".to_string()))
+ }
+ DatabasePool::Sqlite(pool) => {
+ // Build the WHERE clause dynamically (the simple way)
+ let mut where_conditions = Vec::new();
+
+ // Search condition
+ if search.is_some() {
+ where_conditions.push("(name LIKE ? OR label_id LIKE ? OR model_number LIKE ? OR remarks LIKE ?)".to_string());
+ }
+
+ // Loan status filter
+ if is_on_loan.is_some() {
+ where_conditions.push("is_on_loan = ?".to_string());
+ }
+
+ // Disposal status filter
+ if is_disposed.is_some() {
+ where_conditions.push("is_disposed = ?".to_string());
+ }
+
+ let where_clause = if where_conditions.is_empty() {
+ String::new()
+ } else {
+ format!("WHERE {}", where_conditions.join(" AND "))
+ };
+
+ // Simple approach: branch on whether any filter is present
+ let (items, total) = if search.is_none() && is_on_loan.is_none() && is_disposed.is_none() {
+ // No filters
+ let rows = sqlx::query(
+ r#"
+ SELECT
+ id, name, label_id, model_number, remarks, purchase_year,
+ purchase_amount, durability_years, is_depreciation_target,
+ connection_names, cable_color_pattern, storage_locations,
+ is_on_loan, qr_code_type, is_disposed, image_url,
+ created_at, updated_at
+ FROM items
+ ORDER BY created_at DESC
+ LIMIT ?1 OFFSET ?2
+ "#,
+ )
+ .bind(limit)
+ .bind(offset)
+ .fetch_all(pool)
+ .await?;
+
+ let items: Vec<Item> = rows.into_iter()
+ .map(|row| self.row_to_item(row))
+ .collect();
+
+ let count_row = sqlx::query("SELECT COUNT(*) as count FROM items")
+ .fetch_one(pool)
+ .await?;
+ let total: i64 = count_row.get("count");
+
+ (items, total)
+ } else {
+ // Filters present - use the dynamic query
+ let query_str = format!(
+ r#"
+ SELECT
+ id, name, label_id, model_number, remarks, purchase_year,
+ purchase_amount, durability_years, is_depreciation_target,
+ connection_names, cable_color_pattern, storage_locations,
+ is_on_loan, qr_code_type, is_disposed, image_url,
+ created_at, updated_at
+ FROM items
+ {}
+ ORDER BY created_at DESC
+ LIMIT ? OFFSET ?
+ "#,
+ where_clause
+ );
+
+ let count_query_str = format!("SELECT COUNT(*) as count FROM items {}", where_clause);
+
+ // Bind the parameters to both the list query and the count query
+ let mut query = sqlx::query(&query_str);
+ let mut count_query = sqlx::query(&count_query_str);
+
+ // Search condition
+ if let Some(search_term) = &search {
+ let search_pattern = format!("%{}%", search_term);
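+ // Note: `%` and `_` inside search_term act as LIKE wildcards, since
+ // the input is interpolated into the pattern unescaped; binding only
+ // prevents SQL injection, not wildcard injection.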
+ for _ in 0..4 {
+ query = query.bind(search_pattern.clone());
+ count_query = count_query.bind(search_pattern.clone());
+ }
+ }
+
+ // Loan status filter
+ if let Some(loan_status) = is_on_loan {
+ let loan_value = if loan_status { 1i32 } else { 0i32 };
+ query = query.bind(loan_value);
+ count_query = count_query.bind(loan_value);
+ }
+
+ // Disposal status filter
+ if let Some(disposed_status) = is_disposed {
+ let disposed_value = if disposed_status { 1i32 } else { 0i32 };
+ query = query.bind(disposed_value);
+ count_query = count_query.bind(disposed_value);
+ }
+
+ // Bind LIMIT/OFFSET
+ query = query.bind(limit).bind(offset);
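+ // Positional binds are applied in call order, so this sequence (search
+ // patterns, loan flag, disposed flag, LIMIT/OFFSET) must mirror the
+ // order in which conditions were pushed into where_conditions.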
+
+ let rows = query.fetch_all(pool).await?;
+ let items: Vec<Item> = rows.into_iter()
+ .map(|row| self.row_to_item(row))
+ .collect();
+
+ let count_row = count_query.fetch_one(pool).await?;
+ let total: i64 = count_row.get("count");
+
+ (items, total)
+ };
+
+ Ok(ItemsListResponse {
+ items,
+ total,
+ page,
+ per_page,
+ })
+ }
+ }
+ }
+
+ pub async fn update_item(&self, id: i64, req: UpdateItemRequest) -> AppResult<Item> {
+ match &self.db {
+ DatabasePool::Postgres(_pool) => {
+ Err(AppError::InternalServerError("PostgreSQL support not implemented yet".to_string()))
+ }
+ DatabasePool::Sqlite(pool) => {
+ // First check that the item exists
+ let _existing_item = self.get_item(id).await?;
+
+ // Serialize the JSON array fields
+ let connection_names_json = req.connection_names
+ .as_ref()
+ .map(|names| serde_json::to_string(names))
+ .transpose()
+ .map_err(|e| AppError::InternalServerError(format!("Failed to serialize connection_names: {}", e)))?;
+
+ let cable_color_pattern_json = req.cable_color_pattern
+ .as_ref()
+ .map(|pattern| serde_json::to_string(pattern))
+ .transpose()
+ .map_err(|e| AppError::InternalServerError(format!("Failed to serialize cable_color_pattern: {}", e)))?;
+
+ let storage_locations_json = req.storage_locations
+ .as_ref()
+ .map(|locations| serde_json::to_string(locations))
+ .transpose()
+ .map_err(|e| AppError::InternalServerError(format!("Failed to serialize storage_locations: {}", e)))?;
+
+ let now = chrono::Utc::now();
+
+ sqlx::query!(
+ r#"
+ UPDATE items SET
+ name = COALESCE(?2, name),
+ label_id = COALESCE(?3, label_id),
+ model_number = COALESCE(?4, model_number),
+ remarks = COALESCE(?5, remarks),
+ purchase_year = COALESCE(?6, purchase_year),
+ purchase_amount = COALESCE(?7, purchase_amount),
+ durability_years = COALESCE(?8, durability_years),
+ is_depreciation_target = COALESCE(?9, is_depreciation_target),
+ connection_names = COALESCE(?10, connection_names),
+ cable_color_pattern = COALESCE(?11, cable_color_pattern),
+ storage_locations = COALESCE(?12, storage_locations),
+ qr_code_type = COALESCE(?13, qr_code_type),
+ image_url = COALESCE(?14, image_url),
+ updated_at = ?15
+ WHERE id = ?1
+ "#,
+ id,
+ req.name,
+ req.label_id,
+ req.model_number,
+ req.remarks,
+ req.purchase_year,
+ req.purchase_amount,
+ req.durability_years,
+ req.is_depreciation_target,
+ connection_names_json,
+ cable_color_pattern_json,
+ storage_locations_json,
+ req.qr_code_type,
+ req.image_url,
+ now
+ )
+ .execute(pool)
+ .await?;
+
+ // Fetch and return the updated item
+ self.get_item(id).await
+ }
+ }
+ }
+
+ pub async fn delete_item(&self, id: i64) -> AppResult<()> {
+ match &self.db {
+ DatabasePool::Postgres(_pool) => {
+ Err(AppError::InternalServerError("PostgreSQL support not implemented yet".to_string()))
+ }
+ DatabasePool::Sqlite(pool) => {
+ // First check that the item exists and is not on loan
+ let item = self.get_item(id).await?;
+
+ if item.is_on_loan.unwrap_or(false) {
+ return Err(AppError::BadRequest("Cannot delete item that is currently on loan".to_string()));
+ }
+
+ // Check that there are no active loans
+ let active_loans = sqlx::query!(
+ "SELECT COUNT(*) as count FROM loans WHERE item_id = ?1 AND return_date IS NULL",
+ id
+ )
+ .fetch_one(pool)
+ .await?;
+
+ if active_loans.count > 0 {
+ return Err(AppError::BadRequest("Cannot delete item with active loans".to_string()));
+ }
+
+ let result = sqlx::query("DELETE FROM items WHERE id = ?1")
+ .bind(id)
+ .execute(pool)
+ .await?;
+
+ if result.rows_affected() == 0 {
+ return Err(AppError::NotFound(format!("Item with id {} not found", id)));
+ }
+ Ok(())
+ }
+ }
+ }
+
+ pub async fn dispose_item(&self, id: i64) -> AppResult<Item> {
+ match &self.db {
+ DatabasePool::Postgres(_pool) => {
+ Err(AppError::InternalServerError("PostgreSQL support not implemented yet".to_string()))
+ }
+ DatabasePool::Sqlite(pool) => {
+ let now = Utc::now();
+ let result = sqlx::query("UPDATE items SET is_disposed = 1, updated_at = ?2 WHERE id = ?1")
+ .bind(id)
+ .bind(now)
+ .execute(pool)
+ .await?;
+
+ if result.rows_affected() == 0 {
+ return Err(AppError::NotFound(format!("Item with id {} not found", id)));
+ }
+
+ self.get_item(id).await
+ }
+ }
+ }
+
+ pub async fn undispose_item(&self, id: i64) -> AppResult<Item> {
+ match &self.db {
+ DatabasePool::Postgres(_pool) => {
+ Err(AppError::InternalServerError("PostgreSQL support not implemented yet".to_string()))
+ }
+ DatabasePool::Sqlite(pool) => {
+ let now = Utc::now();
+ let result = sqlx::query("UPDATE items SET is_disposed = 0, updated_at = ?2 WHERE id = ?1")
+ .bind(id)
+ .bind(now)
+ .execute(pool)
+ .await?;
+
+ if result.rows_affected() == 0 {
+ return Err(AppError::NotFound(format!("Item with id {} not found", id)));
+ }
+
+ self.get_item(id).await
+ }
+ }
+ }
+
+ pub async fn get_connection_names_suggestions(&self) -> AppResult<Vec<String>> {
+ match &self.db {
+ DatabasePool::Postgres(_pool) => {
+ Err(AppError::InternalServerError("PostgreSQL support not implemented yet".to_string()))
+ }
+ DatabasePool::Sqlite(pool) => {
+ let rows = sqlx::query("SELECT DISTINCT connection_names FROM items WHERE connection_names IS NOT NULL AND connection_names != ''")
+ .fetch_all(pool)
+ .await?;
+
+ let mut suggestions = Vec::new();
+ for row in rows {
+ if let Some(json_str) = row.get::<Option<String>, _>("connection_names") {