Compare commits

...

8 Commits

Author SHA1 Message Date
spinline
4b3e713657 refactor: move DB to shared crate, convert push endpoints to server functions, remove dead REST handlers
All checks were successful
Build MIPS Binary / build (push) Successful in 5m17s
2026-02-10 02:05:04 +03:00
spinline
c2bf6e6fd5 fix: remove unnecessary SSE user auth check - login already guards access
All checks were successful
Build MIPS Binary / build (push) Successful in 5m17s
2026-02-10 01:43:40 +03:00
spinline
94bc7cb91d fix: patch coarsetime with portable-atomic for MIPS AtomicU64 support
All checks were successful
Build MIPS Binary / build (push) Successful in 5m18s
2026-02-10 01:29:06 +03:00
spinline
5e1f4b18c2 refactor: migrate torrent/settings endpoints to Leptos Server Functions and remove third_party/coarsetime
Some checks failed
Build MIPS Binary / build (push) Failing after 4m15s
2026-02-10 00:27:39 +03:00
spinline
3f370389aa fix: SSE auth by capturing store context before async block and redirect to setup when needed 2026-02-10 00:12:08 +03:00
spinline
a4fe8d065c debug: log user value in SSE loop
All checks were successful
Build MIPS Binary / build (push) Successful in 5m21s
2026-02-09 23:55:57 +03:00
spinline
3215b38272 fix: add missing spawn_local import
All checks were successful
Build MIPS Binary / build (push) Successful in 5m20s
2026-02-09 23:48:52 +03:00
spinline
8eb594e804 refactor: move SSE logic to spawn_local with continuous user check
Some checks failed
Build MIPS Binary / build (push) Failing after 1m15s
2026-02-09 22:48:00 +03:00
37 changed files with 933 additions and 1680 deletions

3
Cargo.lock generated
View File

@@ -330,7 +330,6 @@ dependencies = [
"serde",
"serde_json",
"shared",
"sqlx",
"thiserror 2.0.18",
"time",
"tokio",
@@ -3651,12 +3650,14 @@ dependencies = [
name = "shared"
version = "0.1.0"
dependencies = [
"anyhow",
"bytes",
"leptos",
"leptos_axum",
"leptos_router",
"quick-xml",
"serde",
"sqlx",
"thiserror 2.0.18",
"tokio",
"utoipa",

View File

@@ -17,4 +17,5 @@ strip = true
incremental = false
[patch.crates-io]
coarsetime = { path = "third_party/coarsetime" }
coarsetime = { path = "patches/coarsetime" }

View File

@@ -33,7 +33,6 @@ utoipa-swagger-ui = { version = "9.0", features = ["axum"], optional = true }
web-push = { version = "0.10", default-features = false, features = ["hyper-client"], optional = true }
base64 = "0.22"
openssl = { version = "0.10", features = ["vendored"], optional = true }
sqlx = { version = "0.8", features = ["runtime-tokio", "sqlite"] }
bcrypt = "0.17.0"
axum-extra = { version = "0.10", features = ["cookie"] }
rand = "0.8"

View File

@@ -1,13 +1,4 @@
use shared::{
xmlrpc::{self, RpcParam},
AddTorrentRequest, GlobalLimitRequest, SetFilePriorityRequest, SetLabelRequest, TorrentActionRequest,
TorrentFile, TorrentPeer, TorrentTracker,
};
use crate::AppState;
#[cfg(feature = "push-notifications")]
use crate::push;
use axum::{
extract::{Json, Path, State},
http::{header, StatusCode, Uri},
response::IntoResponse,
BoxError,
@@ -37,7 +28,6 @@ pub async fn static_handler(uri: Uri) -> impl IntoResponse {
if path.contains('.') {
return StatusCode::NOT_FOUND.into_response();
}
// Fallback to index.html for SPA routing
match Asset::get("index.html") {
Some(content) => {
let mime = mime_guess::from_path("index.html").first_or_octet_stream();
@@ -49,614 +39,6 @@ pub async fn static_handler(uri: Uri) -> impl IntoResponse {
}
}
// --- TORRENT ACTIONS ---
/// Add a new torrent via magnet link or URL.
///
/// Sends `load.start` to rTorrent and reports a fault either when the RPC
/// call itself fails or when the XML reply contains a `faultCode`.
#[utoipa::path(
    post,
    path = "/api/torrents/add",
    request_body = AddTorrentRequest,
    responses(
        (status = 200, description = "Torrent added successfully"),
        (status = 500, description = "Internal server error or rTorrent fault")
    )
)]
pub async fn add_torrent_handler(
    State(state): State<AppState>,
    Json(payload): Json<AddTorrentRequest>,
) -> StatusCode {
    tracing::info!(
        "Received add_torrent request. URI length: {}",
        payload.uri.len()
    );
    let rpc = xmlrpc::RtorrentClient::new(&state.scgi_socket_path);
    let args = vec![RpcParam::from(""), RpcParam::from(payload.uri.as_str())];

    let response = match rpc.call("load.start", &args).await {
        Ok(xml) => xml,
        Err(e) => {
            tracing::error!("Failed to add torrent: {}", e);
            // Note: Frontend shows its own toast, no SSE notification needed
            return StatusCode::INTERNAL_SERVER_ERROR;
        }
    };

    tracing::debug!("rTorrent response to load.start: {}", response);
    // rTorrent signals errors inside an otherwise-successful HTTP reply.
    if response.contains("faultCode") {
        tracing::error!("rTorrent returned fault: {}", response);
        return StatusCode::INTERNAL_SERVER_ERROR;
    }
    // Note: Frontend shows its own toast, no SSE notification needed
    StatusCode::OK
}
/// Perform an action on a torrent (start, stop, delete).
///
/// `delete_with_data` is routed through [`delete_torrent_with_data`], which
/// validates filesystem paths before touching anything on disk.
#[utoipa::path(
    post,
    path = "/api/torrents/action",
    request_body = TorrentActionRequest,
    responses(
        (status = 200, description = "Action executed successfully"),
        (status = 400, description = "Invalid action or request"),
        (status = 403, description = "Forbidden: Security risk detected"),
        (status = 500, description = "Internal server error")
    )
)]
pub async fn handle_torrent_action(
    State(state): State<AppState>,
    Json(payload): Json<TorrentActionRequest>,
) -> impl IntoResponse {
    tracing::info!(
        "Received action: {} for hash: {}",
        payload.action,
        payload.hash
    );
    let rpc = xmlrpc::RtorrentClient::new(&state.scgi_socket_path);

    // Deleting data needs path validation; it has its own dedicated flow.
    if payload.action == "delete_with_data" {
        return match delete_torrent_with_data(&rpc, &payload.hash).await {
            // Note: Frontend shows its own toast
            Ok(msg) => (StatusCode::OK, msg).into_response(),
            Err((status, msg)) => (status, msg).into_response(),
        };
    }

    // Map the public action name onto the rTorrent RPC method.
    let method = match payload.action.as_str() {
        "start" => "d.start",
        "stop" => "d.stop",
        "delete" => "d.erase",
        _ => return (StatusCode::BAD_REQUEST, "Invalid action").into_response(),
    };

    let args = vec![RpcParam::from(payload.hash.as_str())];
    if let Err(e) = rpc.call(method, &args).await {
        tracing::error!("RPC error: {}", e);
        return (
            StatusCode::INTERNAL_SERVER_ERROR,
            "Failed to execute action",
        )
            .into_response();
    }
    // Note: Frontend shows its own toast, no SSE notification needed
    (StatusCode::OK, "Action executed").into_response()
}
/// Helper function to handle secure deletion of a torrent together with its
/// on-disk data.
///
/// Flow: resolve the torrent's base path and the configured download root,
/// canonicalize both, refuse any target outside (or equal to) the root, erase
/// the torrent entry in rTorrent, and only then remove the files/directory.
///
/// Returns a short status message on success, or `(status, message)` on
/// failure. If the data path no longer exists, only the torrent entry is
/// removed and a distinct message is returned.
async fn delete_torrent_with_data(
    client: &xmlrpc::RtorrentClient,
    hash: &str,
) -> Result<&'static str, (StatusCode, String)> {
    let params_hash = vec![RpcParam::from(hash)];
    // 1. Ask rTorrent where this torrent's data lives (d.base_path).
    let path_xml = client
        .call("d.base_path", &params_hash)
        .await
        .map_err(|e| {
            (
                StatusCode::INTERNAL_SERVER_ERROR,
                format!("Failed to call rTorrent: {}", e),
            )
        })?;
    let path = xmlrpc::parse_string_response(&path_xml).map_err(|e| {
        (
            StatusCode::INTERNAL_SERVER_ERROR,
            format!("Failed to parse path: {}", e),
        )
    })?;
    // 1.5 Fetch the default download directory; it acts as the sandbox root
    // outside of which deletion is never allowed.
    let root_xml = client.call("directory.default", &[]).await.map_err(|e| {
        (
            StatusCode::INTERNAL_SERVER_ERROR,
            format!("Failed to get valid download root: {}", e),
        )
    })?;
    let root_path_str = xmlrpc::parse_string_response(&root_xml).map_err(|e| {
        (
            StatusCode::INTERNAL_SERVER_ERROR,
            format!("Failed to parse root path: {}", e),
        )
    })?;
    // Canonicalize both paths so `..` traversal and symlink tricks cannot
    // escape the sandbox: the containment check below compares real paths.
    let root_path = tokio::fs::canonicalize(std::path::Path::new(&root_path_str))
        .await
        .map_err(|e| {
            (
                StatusCode::INTERNAL_SERVER_ERROR,
                format!("Invalid download root configuration (on server): {}", e),
            )
        })?;
    // canonicalize() fails on missing paths, so check existence first.
    let target_path_raw = std::path::Path::new(&path);
    if !tokio::fs::try_exists(target_path_raw)
        .await
        .unwrap_or(false)
    {
        tracing::warn!(
            "Data path not found: {:?}. Removing torrent only.",
            target_path_raw
        );
        // Data is already gone — just drop the torrent entry from rTorrent.
        client.call("d.erase", &params_hash).await.map_err(|e| {
            (
                StatusCode::INTERNAL_SERVER_ERROR,
                format!("Failed to erase torrent: {}", e),
            )
        })?;
        return Ok("Torrent removed (Data not found)");
    }
    let target_path = tokio::fs::canonicalize(target_path_raw)
        .await
        .map_err(|e| {
            (
                StatusCode::INTERNAL_SERVER_ERROR,
                format!("Invalid data path: {}", e),
            )
        })?;
    tracing::info!(
        "Delete request: Target='{:?}', Root='{:?}'",
        target_path,
        root_path
    );
    // SECURITY CHECK: the (canonicalized) target must live inside the root.
    if !target_path.starts_with(&root_path) {
        tracing::error!(
            "Security Risk: Attempted to delete path outside download directory: {:?}",
            target_path
        );
        return Err((
            StatusCode::FORBIDDEN,
            "Security Error: Cannot delete files outside default download directory".to_string(),
        ));
    }
    // SECURITY CHECK: never delete the download root itself.
    if target_path == root_path {
        return Err((
            StatusCode::BAD_REQUEST,
            "Security Error: Cannot delete the download root directory itself".to_string(),
        ));
    }
    // 2. Erase the torrent entry first so rTorrent releases the files before
    // we remove them from disk.
    client.call("d.erase", &params_hash).await.map_err(|e| {
        tracing::warn!("Failed to erase torrent entry: {}", e);
        (
            StatusCode::INTERNAL_SERVER_ERROR,
            format!("Failed to erase torrent: {}", e),
        )
    })?;
    // 3. Remove the data: whole directory for multi-file torrents, single
    // file otherwise.
    let delete_result = if target_path.is_dir() {
        tokio::fs::remove_dir_all(&target_path).await
    } else {
        tokio::fs::remove_file(&target_path).await
    };
    match delete_result {
        Ok(_) => Ok("Torrent and data deleted"),
        Err(e) => {
            tracing::error!("Failed to delete data at {:?}: {}", target_path, e);
            Err((
                StatusCode::INTERNAL_SERVER_ERROR,
                format!("Failed to delete data: {}", e),
            ))
        }
    }
}
// --- NEW HANDLERS ---
/// Get rTorrent version.
///
/// Returns the parsed `system.client_version` string; if the XML reply cannot
/// be parsed, the raw reply is returned as-is.
#[utoipa::path(
    get,
    path = "/api/system/version",
    responses(
        (status = 200, description = "rTorrent version", body = String),
        (status = 500, description = "Internal server error")
    )
)]
pub async fn get_version_handler(State(state): State<AppState>) -> impl IntoResponse {
    let rpc = xmlrpc::RtorrentClient::new(&state.scgi_socket_path);
    match rpc.call("system.client_version", &[]).await {
        Err(e) => {
            tracing::error!("Failed to get version: {}", e);
            (StatusCode::INTERNAL_SERVER_ERROR, "Failed to get version").into_response()
        }
        Ok(xml) => {
            // Fall back to the raw XML payload when parsing fails.
            let version = xmlrpc::parse_string_response(&xml).unwrap_or(xml);
            (StatusCode::OK, version).into_response()
        }
    }
}
/// Get files for a torrent.
///
/// Runs an `f.multicall` and maps each returned row (path, size, completed
/// chunks, priority) into a [`TorrentFile`]; unparsable cells default to 0
/// or an empty string.
#[utoipa::path(
    get,
    path = "/api/torrents/{hash}/files",
    responses(
        (status = 200, description = "Files list", body = Vec<TorrentFile>),
        (status = 500, description = "Internal server error")
    ),
    params(
        ("hash" = String, Path, description = "Torrent Hash")
    )
)]
pub async fn get_files_handler(
    State(state): State<AppState>,
    Path(hash): Path<String>,
) -> impl IntoResponse {
    let rpc = xmlrpc::RtorrentClient::new(&state.scgi_socket_path);
    // Hash, an empty selector argument, then one query per result column.
    let query = vec![
        RpcParam::from(hash.as_str()),
        RpcParam::from(""),
        RpcParam::from("f.path="),
        RpcParam::from("f.size_bytes="),
        RpcParam::from("f.completed_chunks="),
        RpcParam::from("f.priority="),
    ];

    let xml = match rpc.call("f.multicall", &query).await {
        Ok(xml) => xml,
        Err(e) => {
            return (
                StatusCode::INTERNAL_SERVER_ERROR,
                format!("RPC error: {}", e),
            )
                .into_response()
        }
    };
    let rows = match xmlrpc::parse_multicall_response(&xml) {
        Ok(rows) => rows,
        Err(e) => {
            return (
                StatusCode::INTERNAL_SERVER_ERROR,
                format!("Parse error: {}", e),
            )
                .into_response()
        }
    };

    // Column order matches the query order above.
    let files: Vec<TorrentFile> = rows
        .into_iter()
        .enumerate()
        .map(|(idx, row)| TorrentFile {
            index: idx as u32,
            path: row.get(0).cloned().unwrap_or_default(),
            size: row.get(1).and_then(|s| s.parse().ok()).unwrap_or(0),
            completed_chunks: row.get(2).and_then(|s| s.parse().ok()).unwrap_or(0),
            priority: row.get(3).and_then(|s| s.parse().ok()).unwrap_or(0),
        })
        .collect();
    (StatusCode::OK, Json(files)).into_response()
}
/// Get peers for a torrent.
///
/// Runs a `p.multicall` and maps each row (address, client, down/up rate,
/// completed percent) into a [`TorrentPeer`]; unparsable cells fall back to
/// zero/empty defaults.
#[utoipa::path(
    get,
    path = "/api/torrents/{hash}/peers",
    responses(
        (status = 200, description = "Peers list", body = Vec<TorrentPeer>),
        (status = 500, description = "Internal server error")
    ),
    params(
        ("hash" = String, Path, description = "Torrent Hash")
    )
)]
pub async fn get_peers_handler(
    State(state): State<AppState>,
    Path(hash): Path<String>,
) -> impl IntoResponse {
    let rpc = xmlrpc::RtorrentClient::new(&state.scgi_socket_path);
    // Hash, an empty selector argument, then one query per result column.
    let query = vec![
        RpcParam::from(hash.as_str()),
        RpcParam::from(""),
        RpcParam::from("p.address="),
        RpcParam::from("p.client_version="),
        RpcParam::from("p.down_rate="),
        RpcParam::from("p.up_rate="),
        RpcParam::from("p.completed_percent="),
    ];

    let xml = match rpc.call("p.multicall", &query).await {
        Ok(xml) => xml,
        Err(e) => {
            return (
                StatusCode::INTERNAL_SERVER_ERROR,
                format!("RPC error: {}", e),
            )
                .into_response()
        }
    };
    let rows = match xmlrpc::parse_multicall_response(&xml) {
        Ok(rows) => rows,
        Err(e) => {
            return (
                StatusCode::INTERNAL_SERVER_ERROR,
                format!("Parse error: {}", e),
            )
                .into_response()
        }
    };

    // Column order matches the query order above.
    let peers: Vec<TorrentPeer> = rows
        .into_iter()
        .map(|row| TorrentPeer {
            ip: row.get(0).cloned().unwrap_or_default(),
            client: row.get(1).cloned().unwrap_or_default(),
            down_rate: row.get(2).and_then(|s| s.parse().ok()).unwrap_or(0),
            up_rate: row.get(3).and_then(|s| s.parse().ok()).unwrap_or(0),
            progress: row.get(4).and_then(|s| s.parse().ok()).unwrap_or(0.0),
        })
        .collect();
    (StatusCode::OK, Json(peers)).into_response()
}
/// Get trackers for a torrent.
///
/// Runs a `t.multicall` and maps each row into a [`TorrentTracker`]. The
/// `status` field is currently a placeholder ("Unknown") because live tracker
/// state is not derived here yet.
#[utoipa::path(
    get,
    path = "/api/torrents/{hash}/trackers",
    responses(
        (status = 200, description = "Trackers list", body = Vec<TorrentTracker>),
        (status = 500, description = "Internal server error")
    ),
    params(
        ("hash" = String, Path, description = "Torrent Hash")
    )
)]
pub async fn get_trackers_handler(
    State(state): State<AppState>,
    Path(hash): Path<String>,
) -> impl IntoResponse {
    let client = xmlrpc::RtorrentClient::new(&state.scgi_socket_path);
    // Hash, an empty selector argument, then one query per result column.
    // (t.activity_date_last was previously fetched but its value was never
    // read, so it is no longer requested.)
    let params = vec![
        RpcParam::from(hash.as_str()),
        RpcParam::from(""),
        RpcParam::from("t.url="),
        RpcParam::from("t.message="),
    ];
    match client.call("t.multicall", &params).await {
        Ok(xml) => match xmlrpc::parse_multicall_response(&xml) {
            Ok(rows) => {
                let trackers: Vec<TorrentTracker> = rows
                    .into_iter()
                    .map(|row| TorrentTracker {
                        url: row.get(0).cloned().unwrap_or_default(),
                        // Placeholder until real status derivation exists.
                        status: "Unknown".to_string(),
                        message: row.get(1).cloned().unwrap_or_default(),
                    })
                    .collect();
                (StatusCode::OK, Json(trackers)).into_response()
            }
            Err(e) => (
                StatusCode::INTERNAL_SERVER_ERROR,
                format!("Parse error: {}", e),
            )
                .into_response(),
        },
        Err(e) => (
            StatusCode::INTERNAL_SERVER_ERROR,
            format!("RPC error: {}", e),
        )
            .into_response(),
    }
}
/// Set file priority.
///
/// Targets a single file inside a torrent via the `"<hash>:f<index>"` form
/// and sends the priority as an integer. Per the request contract, priority
/// values are 0 (off), 1 (normal), 2 (high) — NOTE(review): confirm these
/// match rTorrent's `f.set_priority` semantics.
#[utoipa::path(
    post,
    path = "/api/torrents/files/priority",
    request_body = SetFilePriorityRequest,
    responses(
        (status = 200, description = "Priority updated"),
        (status = 500, description = "Internal server error")
    )
)]
pub async fn set_file_priority_handler(
    State(state): State<AppState>,
    Json(payload): Json<SetFilePriorityRequest>,
) -> impl IntoResponse {
    let client = xmlrpc::RtorrentClient::new(&state.scgi_socket_path);
    // Build the per-file target id "<hash>:f<index>" and send the priority
    // as an i64 RPC integer.
    let target = format!("{}:f{}", payload.hash, payload.file_index);
    let params = vec![
        RpcParam::from(target.as_str()),
        RpcParam::from(payload.priority as i64),
    ];
    match client.call("f.set_priority", &params).await {
        Ok(_) => {
            // Best-effort follow-up: ask rTorrent to re-evaluate chunk
            // priorities for the whole download; its failure is ignored.
            let _ = client
                .call(
                    "d.update_priorities",
                    &[RpcParam::from(payload.hash.as_str())],
                )
                .await;
            (StatusCode::OK, "Priority updated").into_response()
        }
        Err(e) => (
            StatusCode::INTERNAL_SERVER_ERROR,
            format!("RPC error: {}", e),
        )
            .into_response(),
    }
}
/// Set torrent label.
///
/// Stores the label via rTorrent's `d.custom1.set`.
#[utoipa::path(
    post,
    path = "/api/torrents/label",
    request_body = SetLabelRequest,
    responses(
        (status = 200, description = "Label updated"),
        (status = 500, description = "Internal server error")
    )
)]
pub async fn set_label_handler(
    State(state): State<AppState>,
    Json(payload): Json<SetLabelRequest>,
) -> impl IntoResponse {
    let rpc = xmlrpc::RtorrentClient::new(&state.scgi_socket_path);
    let args = vec![
        RpcParam::from(payload.hash.as_str()),
        RpcParam::from(payload.label),
    ];
    if let Err(e) = rpc.call("d.custom1.set", &args).await {
        return (
            StatusCode::INTERNAL_SERVER_ERROR,
            format!("RPC error: {}", e),
        )
            .into_response();
    }
    (StatusCode::OK, "Label updated").into_response()
}
/// Get global speed limits
#[utoipa::path(
get,
path = "/api/settings/global-limits",
responses(
(status = 200, description = "Current limits", body = GlobalLimitRequest),
(status = 500, description = "Internal server error")
)
)]
pub async fn get_global_limit_handler(State(state): State<AppState>) -> impl IntoResponse {
let client = xmlrpc::RtorrentClient::new(&state.scgi_socket_path);
// throttle.global_down.max_rate, throttle.global_up.max_rate
let down_fut = client.call("throttle.global_down.max_rate", &[]);
let up_fut = client.call("throttle.global_up.max_rate", &[]);
let down = match down_fut.await {
Ok(xml) => xmlrpc::parse_i64_response(&xml).unwrap_or(0),
Err(_) => -1,
};
let up = match up_fut.await {
Ok(xml) => xmlrpc::parse_i64_response(&xml).unwrap_or(0),
Err(_) => -1,
};
let resp = GlobalLimitRequest {
max_download_rate: Some(down),
max_upload_rate: Some(up),
};
(StatusCode::OK, Json(resp)).into_response()
}
/// Set global speed limits
#[utoipa::path(
post,
path = "/api/settings/global-limits",
request_body = GlobalLimitRequest,
responses(
(status = 200, description = "Limits updated"),
(status = 500, description = "Internal server error")
)
)]
pub async fn set_global_limit_handler(
State(state): State<AppState>,
Json(payload): Json<GlobalLimitRequest>,
) -> impl IntoResponse {
let client = xmlrpc::RtorrentClient::new(&state.scgi_socket_path);
// Use throttle.global_*.max_rate.set_kb which is more reliable than .set (which is buggy)
// The .set_kb method expects KB/s, so we convert bytes to KB
if let Some(down) = payload.max_download_rate {
// Convert bytes/s to KB/s (divide by 1024)
let down_kb = down / 1024;
tracing::info!(
"Setting download limit: {} bytes/s = {} KB/s",
down,
down_kb
);
// Use set_kb with empty string as first param (throttle name), then value
if let Err(e) = client
.call(
"throttle.global_down.max_rate.set_kb",
&[RpcParam::from(""), RpcParam::Int(down_kb)],
)
.await
{
tracing::error!("Failed to set download limit: {}", e);
return (
StatusCode::INTERNAL_SERVER_ERROR,
format!("Failed to set down limit: {}", e),
)
.into_response();
}
}
if let Some(up) = payload.max_upload_rate {
// Convert bytes/s to KB/s
let up_kb = up / 1024;
tracing::info!("Setting upload limit: {} bytes/s = {} KB/s", up, up_kb);
if let Err(e) = client
.call(
"throttle.global_up.max_rate.set_kb",
&[RpcParam::from(""), RpcParam::Int(up_kb)],
)
.await
{
tracing::error!("Failed to set upload limit: {}", e);
return (
StatusCode::INTERNAL_SERVER_ERROR,
format!("Failed to set up limit: {}", e),
)
.into_response();
}
}
(StatusCode::OK, "Limits updated").into_response()
}
pub async fn handle_timeout_error(err: BoxError) -> (StatusCode, &'static str) {
if err.is::<tower::timeout::error::Elapsed>() {
(StatusCode::REQUEST_TIMEOUT, "Request timed out")
@@ -667,44 +49,3 @@ pub async fn handle_timeout_error(err: BoxError) -> (StatusCode, &'static str) {
)
}
}
// --- PUSH NOTIFICATION HANDLERS ---
#[cfg(feature = "push-notifications")]
/// Get VAPID public key for push subscription.
///
/// NOTE(review): the OpenAPI annotation declares `body = String`, but the
/// handler actually returns a JSON object `{ "publicKey": ... }` — confirm
/// which shape clients expect and align the annotation accordingly.
#[utoipa::path(
    get,
    path = "/api/push/public-key",
    responses(
        (status = 200, description = "VAPID public key", body = String)
    )
)]
pub async fn get_push_public_key_handler(
    State(state): State<AppState>,
) -> impl IntoResponse {
    // Public half of the server's VAPID key pair, held by the push store.
    let public_key = state.push_store.get_public_key();
    (StatusCode::OK, Json(serde_json::json!({ "publicKey": public_key }))).into_response()
}
#[cfg(feature = "push-notifications")]
/// Subscribe to push notifications.
///
/// Persists the browser-provided subscription in the push store and always
/// reports success once stored.
#[utoipa::path(
    post,
    path = "/api/push/subscribe",
    request_body = push::PushSubscription,
    responses(
        (status = 200, description = "Subscription saved"),
        (status = 400, description = "Invalid subscription data")
    )
)]
pub async fn subscribe_push_handler(
    State(state): State<AppState>,
    Json(sub): Json<push::PushSubscription>,
) -> impl IntoResponse {
    tracing::info!("Received push subscription: {:?}", sub);
    state.push_store.add_subscription(sub).await;
    (StatusCode::OK, "Subscription saved").into_response()
}

View File

@@ -1,4 +1,3 @@
mod db;
mod diff;
mod handlers;
#[cfg(feature = "push-notifications")]
@@ -42,7 +41,7 @@ pub struct AppState {
pub tx: Arc<watch::Sender<Vec<Torrent>>>,
pub event_bus: broadcast::Sender<AppEvent>,
pub scgi_socket_path: String,
pub db: db::Db,
pub db: shared::db::Db,
#[cfg(feature = "push-notifications")]
pub push_store: push::PushSubscriptionStore,
pub notify_poll: Arc<tokio::sync::Notify>,
@@ -103,69 +102,9 @@ struct Args {
}
#[cfg(feature = "swagger")]
#[cfg(feature = "push-notifications")]
#[derive(OpenApi)]
#[openapi(
paths(
handlers::add_torrent_handler,
handlers::handle_torrent_action,
handlers::get_version_handler,
handlers::get_files_handler,
handlers::get_peers_handler,
handlers::get_trackers_handler,
handlers::set_file_priority_handler,
handlers::set_label_handler,
handlers::get_global_limit_handler,
handlers::set_global_limit_handler,
handlers::get_push_public_key_handler,
handlers::subscribe_push_handler,
handlers::auth::login_handler,
handlers::auth::logout_handler,
handlers::auth::check_auth_handler,
handlers::setup::setup_handler,
handlers::setup::get_setup_status_handler
),
components(
schemas(
shared::AddTorrentRequest,
shared::TorrentActionRequest,
shared::Torrent,
shared::TorrentStatus,
shared::TorrentFile,
shared::TorrentPeer,
shared::TorrentTracker,
shared::SetFilePriorityRequest,
shared::SetLabelRequest,
shared::GlobalLimitRequest,
push::PushSubscription,
push::PushKeys,
handlers::auth::LoginRequest,
handlers::setup::SetupRequest,
handlers::setup::SetupStatusResponse,
handlers::auth::UserResponse
)
),
tags(
(name = "vibetorrent", description = "VibeTorrent API")
)
)]
struct ApiDoc;
#[cfg(feature = "swagger")]
#[cfg(not(feature = "push-notifications"))]
#[derive(OpenApi)]
#[openapi(
paths(
handlers::add_torrent_handler,
handlers::handle_torrent_action,
handlers::get_version_handler,
handlers::get_files_handler,
handlers::get_peers_handler,
handlers::get_trackers_handler,
handlers::set_file_priority_handler,
handlers::set_label_handler,
handlers::get_global_limit_handler,
handlers::set_global_limit_handler,
handlers::auth::login_handler,
handlers::auth::logout_handler,
handlers::auth::check_auth_handler,
@@ -226,7 +165,7 @@ async fn main() {
}
}
let db: db::Db = match db::Db::new(&args.db_url).await {
let db: shared::db::Db = match shared::db::Db::new(&args.db_url).await {
Ok(db) => db,
Err(e) => {
tracing::error!("Failed to connect to database: {}", e);
@@ -336,7 +275,7 @@ async fn main() {
};
#[cfg(not(feature = "push-notifications"))]
let push_store = ();
let _push_store = ();
let notify_poll = Arc::new(tokio::sync::Notify::new());
@@ -488,7 +427,9 @@ async fn main() {
#[cfg(feature = "swagger")]
let app = app.merge(SwaggerUi::new("/swagger-ui").url("/api-docs/openapi.json", ApiDoc::openapi()));
// Setup & Auth Routes
// Setup & Auth Routes (cookie-based, stay as REST)
let scgi_path_for_ctx = args.socket.clone();
let db_for_ctx = db.clone();
let app = app
.route("/api/setup/status", get(handlers::setup::get_setup_status_handler))
.route("/api/setup", post(handlers::setup::setup_handler))
@@ -500,42 +441,27 @@ async fn main() {
)
.route("/api/auth/logout", post(handlers::auth::logout_handler))
.route("/api/auth/check", get(handlers::auth::check_auth_handler))
// App Routes
.route("/api/events", get(sse::sse_handler))
.route("/api/torrents/add", post(handlers::add_torrent_handler))
.route(
"/api/torrents/action",
post(handlers::handle_torrent_action),
)
.route("/api/system/version", get(handlers::get_version_handler))
.route(
"/api/torrents/{hash}/files",
get(handlers::get_files_handler),
)
.route(
"/api/torrents/{hash}/peers",
get(handlers::get_peers_handler),
)
.route(
"/api/torrents/{hash}/trackers",
get(handlers::get_trackers_handler),
)
.route(
"/api/torrents/files/priority",
post(handlers::set_file_priority_handler),
)
.route("/api/torrents/label", post(handlers::set_label_handler))
.route(
"/api/settings/global-limits",
get(handlers::get_global_limit_handler).post(handlers::set_global_limit_handler),
)
.route("/api/server_fns/{*fn_name}", post(leptos_axum::handle_server_fns))
.fallback(handlers::static_handler); // Serve static files for everything else
#[cfg(feature = "push-notifications")]
let app = app
.route("/api/push/public-key", get(handlers::get_push_public_key_handler))
.route("/api/push/subscribe", post(handlers::subscribe_push_handler));
.route("/api/server_fns/{*fn_name}", post({
let scgi_path = scgi_path_for_ctx.clone();
let db = db_for_ctx.clone();
move |req: Request<Body>| {
let scgi_path = scgi_path.clone();
let db = db.clone();
leptos_axum::handle_server_fns_with_context(
move || {
leptos::context::provide_context(shared::ServerContext {
scgi_socket_path: scgi_path.clone(),
});
leptos::context::provide_context(shared::DbContext {
db: db.clone(),
});
},
req,
)
}
}))
.fallback(handlers::static_handler);
let app = app
.layer(middleware::from_fn_with_state(app_state.clone(), auth_middleware))

View File

@@ -7,7 +7,7 @@ use web_push::{
};
use futures::StreamExt;
use crate::db::Db;
use shared::db::Db;
#[derive(Debug, Clone, Serialize, Deserialize, ToSchema)]
pub struct PushSubscription {

File diff suppressed because it is too large Load Diff

View File

@@ -1,5 +1,4 @@
use gloo_net::http::Request;
use shared::{AddTorrentRequest, TorrentActionRequest};
use thiserror::Error;
#[derive(Debug, Error)]
@@ -14,6 +13,8 @@ pub enum ApiError {
Unauthorized,
#[error("Too many requests")]
RateLimited,
#[error("Server function error: {0}")]
ServerFn(String),
}
fn base_url() -> String {
@@ -130,37 +131,32 @@ pub mod settings {
use shared::GlobalLimitRequest;
pub async fn set_global_limits(req: &GlobalLimitRequest) -> Result<(), ApiError> {
Request::post(&format!("{}/settings/global-limits", base_url()))
.json(req)
.map_err(|_| ApiError::Network)?
.send()
.await
.map_err(|_| ApiError::Network)?;
Ok(())
shared::server_fns::settings::set_global_limits(
req.max_download_rate,
req.max_upload_rate,
)
.await
.map_err(|e| ApiError::ServerFn(e.to_string()))
}
}
pub mod push {
use super::*;
use crate::store::PushSubscriptionData;
pub async fn get_public_key() -> Result<String, ApiError> {
let resp = Request::get(&format!("{}/push/public-key", base_url()))
.send()
shared::server_fns::push::get_public_key()
.await
.map_err(|_| ApiError::Network)?;
let key = resp.text().await.map_err(|_| ApiError::Network)?;
Ok(key)
.map_err(|e| ApiError::ServerFn(e.to_string()))
}
pub async fn subscribe(req: &PushSubscriptionData) -> Result<(), ApiError> {
Request::post(&format!("{}/push/subscribe", base_url()))
.json(req)
.map_err(|_| ApiError::Network)?
.send()
.await
.map_err(|_| ApiError::Network)?;
Ok(())
pub async fn subscribe(endpoint: &str, p256dh: &str, auth: &str) -> Result<(), ApiError> {
shared::server_fns::push::subscribe_push(
endpoint.to_string(),
p256dh.to_string(),
auth.to_string(),
)
.await
.map_err(|e| ApiError::ServerFn(e.to_string()))
}
}
@@ -168,30 +164,16 @@ pub mod torrent {
use super::*;
pub async fn add(uri: &str) -> Result<(), ApiError> {
let req = AddTorrentRequest {
uri: uri.to_string(),
};
Request::post(&format!("{}/torrents/add", base_url()))
.json(&req)
.map_err(|_| ApiError::Network)?
.send()
shared::server_fns::torrent::add_torrent(uri.to_string())
.await
.map_err(|_| ApiError::Network)?;
Ok(())
.map_err(|e| ApiError::ServerFn(e.to_string()))
}
pub async fn action(hash: &str, action: &str) -> Result<(), ApiError> {
let req = TorrentActionRequest {
hash: hash.to_string(),
action: action.to_string(),
};
Request::post(&format!("{}/torrents/action", base_url()))
.json(&req)
.map_err(|_| ApiError::Network)?
.send()
shared::server_fns::torrent::torrent_action(hash.to_string(), action.to_string())
.await
.map_err(|_| ApiError::Network)?;
Ok(())
.map(|_| ())
.map_err(|e| ApiError::ServerFn(e.to_string()))
}
pub async fn delete(hash: &str) -> Result<(), ApiError> {
@@ -211,33 +193,18 @@ pub mod torrent {
}
pub async fn set_label(hash: &str, label: &str) -> Result<(), ApiError> {
use shared::SetLabelRequest;
let req = SetLabelRequest {
hash: hash.to_string(),
label: label.to_string(),
};
Request::post(&format!("{}/torrents/set_label", base_url()))
.json(&req)
.map_err(|_| ApiError::Network)?
.send()
shared::server_fns::torrent::set_label(hash.to_string(), label.to_string())
.await
.map_err(|_| ApiError::Network)?;
Ok(())
.map_err(|e| ApiError::ServerFn(e.to_string()))
}
pub async fn set_priority(hash: &str, file_index: u32, priority: u8) -> Result<(), ApiError> {
use shared::SetFilePriorityRequest;
let req = SetFilePriorityRequest {
hash: hash.to_string(),
shared::server_fns::torrent::set_file_priority(
hash.to_string(),
file_index,
priority,
};
Request::post(&format!("{}/torrents/set_priority", base_url()))
.json(&req)
.map_err(|_| ApiError::Network)?
.send()
.await
.map_err(|_| ApiError::Network)?;
Ok(())
)
.await
.map_err(|e| ApiError::ServerFn(e.to_string()))
}
}

View File

@@ -12,9 +12,11 @@ use leptos_router::hooks::use_navigate;
#[component]
pub fn App() -> impl IntoView {
crate::store::provide_torrent_store();
let store = use_context::<crate::store::TorrentStore>();
let is_loading = signal(true);
let is_authenticated = signal(false);
let needs_setup = signal(false);
Effect::new(move |_| {
spawn_local(async move {
@@ -26,6 +28,7 @@ pub fn App() -> impl IntoView {
Ok(status) => {
if !status.completed {
log::info!("Setup not completed");
needs_setup.1.set(true);
is_loading.1.set(false);
return;
}
@@ -40,8 +43,8 @@ pub fn App() -> impl IntoView {
log::info!("Authenticated!");
if let Ok(user_info) = api::auth::get_user().await {
if let Some(store) = use_context::<crate::store::TorrentStore>() {
store.user.set(Some(user_info.username));
if let Some(s) = store {
s.user.set(Some(user_info.username));
}
}
@@ -77,9 +80,13 @@ pub fn App() -> impl IntoView {
<Routes fallback=|| view! { <div class="p-4">"404 Not Found"</div> }>
<Route path=leptos_router::path!("/login") view=move || {
let authenticated = is_authenticated.0.get();
let setup_needed = needs_setup.0.get();
Effect::new(move |_| {
if authenticated {
if setup_needed {
let navigate = use_navigate();
navigate("/setup", Default::default());
} else if authenticated {
log::info!("Already authenticated, redirecting to home");
let navigate = use_navigate();
navigate("/", Default::default());
@@ -101,7 +108,11 @@ pub fn App() -> impl IntoView {
<Route path=leptos_router::path!("/") view=move || {
Effect::new(move |_| {
if !is_loading.0.get() && !is_authenticated.0.get() {
if !is_loading.0.get() && needs_setup.0.get() {
log::info!("Setup not completed, redirecting to setup");
let navigate = use_navigate();
navigate("/setup", Default::default());
} else if !is_loading.0.get() && !is_authenticated.0.get() {
log::info!("Not authenticated, redirecting to login");
let navigate = use_navigate();
navigate("/login", Default::default());

View File

@@ -1,6 +1,7 @@
use futures::StreamExt;
use gloo_net::eventsource::futures::EventSource;
use leptos::prelude::*;
use leptos::task::spawn_local;
use shared::{AppEvent, GlobalStats, NotificationLevel, SystemNotification, Torrent};
use std::collections::HashMap;
use serde::{Serialize, Deserialize};
@@ -11,10 +12,6 @@ pub struct NotificationItem {
pub notification: SystemNotification,
}
// ============================================================================
// Toast Helper Functions
// ============================================================================
pub fn show_toast_with_signal(
notifications: RwSignal<Vec<NotificationItem>>,
level: NotificationLevel,
@@ -29,7 +26,6 @@ pub fn show_toast_with_signal(
notifications.update(|list| list.push(item));
// Auto-remove after 5 seconds
leptos::prelude::set_timeout(
move || {
notifications.update(|list| list.retain(|i| i.id != id));
@@ -47,10 +43,6 @@ pub fn show_toast(level: NotificationLevel, message: impl Into<String>) {
pub fn toast_success(message: impl Into<String>) { show_toast(NotificationLevel::Success, message); }
pub fn toast_error(message: impl Into<String>) { show_toast(NotificationLevel::Error, message); }
// ============================================================================
// Action Message Mapping
// ============================================================================
pub fn get_action_messages(action: &str) -> (&'static str, &'static str) {
match action {
"start" => ("Torrent başlatıldı", "Torrent başlatılamadı"),
@@ -63,22 +55,6 @@ pub fn get_action_messages(action: &str) -> (&'static str, &'static str) {
}
}
#[derive(Serialize, Deserialize, Debug, Clone)]
pub struct PushSubscriptionData {
pub endpoint: String,
pub keys: PushKeys,
}
#[derive(Serialize, Deserialize, Debug, Clone)]
pub struct PushKeys {
pub p256dh: String,
pub auth: String,
}
// ============================================================================
// Store Definition
// ============================================================================
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
pub enum FilterStatus {
All, Downloading, Seeding, Completed, Paused, Inactive, Active, Error,
@@ -107,111 +83,97 @@ pub fn provide_torrent_store() {
let store = TorrentStore { torrents, filter, search_query, global_stats, notifications, user };
provide_context(store);
let notifications_for_effect = notifications;
let global_stats_for_effect = global_stats;
let torrents_for_effect = torrents;
let notifications_for_sse = notifications;
let global_stats_for_sse = global_stats;
let torrents_for_sse = torrents;
let show_browser_notification = show_browser_notification.clone();
Effect::new(move |_| {
let user_val = user.get();
log::debug!("SSE Effect: user = {:?}", user_val);
if user_val.is_none() {
log::debug!("SSE Effect: user is None, skipping connection");
return;
}
spawn_local(async move {
let mut backoff_ms: u32 = 1000;
let mut was_connected = false;
let mut disconnect_notified = false;
let notifications = notifications_for_effect;
let global_stats = global_stats_for_effect;
let torrents = torrents_for_effect;
let show_browser_notification = show_browser_notification.clone();
loop {
log::info!("SSE: Starting connection (user logged in)");
leptos::task::spawn_local(async move {
let mut backoff_ms: u32 = 1000;
let mut was_connected = false;
let mut disconnect_notified = false;
loop {
log::debug!("SSE: Creating EventSource...");
let es_result = EventSource::new("/api/events");
match es_result {
Ok(mut es) => {
log::debug!("SSE: EventSource created, subscribing to message channel...");
if let Ok(mut stream) = es.subscribe("message") {
log::debug!("SSE: Subscribed to message channel");
let mut got_first_message = false;
while let Some(Ok((_, msg))) = stream.next().await {
log::debug!("SSE: Received message: {:?}", msg.data());
if !got_first_message {
got_first_message = true;
backoff_ms = 1000;
if was_connected && disconnect_notified {
show_toast_with_signal(notifications, NotificationLevel::Success, "Sunucu bağlantısı yeniden kuruldu");
disconnect_notified = false;
}
was_connected = true;
log::debug!("SSE: Creating EventSource...");
let es_result = EventSource::new("/api/events");
match es_result {
Ok(mut es) => {
log::debug!("SSE: EventSource created, subscribing...");
if let Ok(mut stream) = es.subscribe("message") {
log::debug!("SSE: Subscribed to message channel");
let mut got_first_message = false;
while let Some(Ok((_, msg))) = stream.next().await {
log::debug!("SSE: Received message");
if !got_first_message {
got_first_message = true;
backoff_ms = 1000;
if was_connected && disconnect_notified {
show_toast_with_signal(notifications_for_sse, NotificationLevel::Success, "Sunucu bağlantısı yeniden kuruldu");
disconnect_notified = false;
}
was_connected = true;
}
if let Some(data_str) = msg.data().as_string() {
log::debug!("SSE: Parsing JSON: {}", data_str);
if let Ok(event) = serde_json::from_str::<AppEvent>(&data_str) {
match event {
AppEvent::FullList { torrents: list, .. } => {
log::info!("SSE: Received FullList with {} torrents", list.len());
torrents.update(|map| {
let new_hashes: std::collections::HashSet<String> = list.iter().map(|t| t.hash.clone()).collect();
map.retain(|hash, _| new_hashes.contains(hash));
for new_torrent in list {
map.insert(new_torrent.hash.clone(), new_torrent);
}
});
log::debug!("SSE: torrents map now has {} entries", torrents.with(|m| m.len()));
}
AppEvent::Update(update) => {
torrents.update(|map| {
if let Some(t) = map.get_mut(&update.hash) {
if let Some(v) = update.name { t.name = v; }
if let Some(v) = update.size { t.size = v; }
if let Some(v) = update.down_rate { t.down_rate = v; }
if let Some(v) = update.up_rate { t.up_rate = v; }
if let Some(v) = update.percent_complete { t.percent_complete = v; }
if let Some(v) = update.completed { t.completed = v; }
if let Some(v) = update.eta { t.eta = v; }
if let Some(v) = update.status { t.status = v; }
if let Some(v) = update.error_message { t.error_message = v; }
if let Some(v) = update.label { t.label = Some(v); }
}
});
}
AppEvent::Stats(stats) => { global_stats.set(stats); }
AppEvent::Notification(n) => {
show_toast_with_signal(notifications, n.level.clone(), n.message.clone());
if n.message.contains("tamamlandı") || n.level == shared::NotificationLevel::Error {
show_browser_notification("VibeTorrent", &n.message);
if let Some(data_str) = msg.data().as_string() {
log::debug!("SSE: Parsing JSON: {}", data_str);
if let Ok(event) = serde_json::from_str::<AppEvent>(&data_str) {
match event {
AppEvent::FullList { torrents: list, .. } => {
log::info!("SSE: Received FullList with {} torrents", list.len());
torrents_for_sse.update(|map| {
let new_hashes: std::collections::HashSet<String> = list.iter().map(|t| t.hash.clone()).collect();
map.retain(|hash, _| new_hashes.contains(hash));
for new_torrent in list {
map.insert(new_torrent.hash.clone(), new_torrent);
}
});
log::debug!("SSE: torrents map now has {} entries", torrents_for_sse.with(|m| m.len()));
}
AppEvent::Update(update) => {
torrents_for_sse.update(|map| {
if let Some(t) = map.get_mut(&update.hash) {
if let Some(v) = update.name { t.name = v; }
if let Some(v) = update.size { t.size = v; }
if let Some(v) = update.down_rate { t.down_rate = v; }
if let Some(v) = update.up_rate { t.up_rate = v; }
if let Some(v) = update.percent_complete { t.percent_complete = v; }
if let Some(v) = update.completed { t.completed = v; }
if let Some(v) = update.eta { t.eta = v; }
if let Some(v) = update.status { t.status = v; }
if let Some(v) = update.error_message { t.error_message = v; }
if let Some(v) = update.label { t.label = Some(v); }
}
});
}
AppEvent::Stats(stats) => { global_stats_for_sse.set(stats); }
AppEvent::Notification(n) => {
show_toast_with_signal(notifications_for_sse, n.level.clone(), n.message.clone());
if n.message.contains("tamamlandı") || n.level == shared::NotificationLevel::Error {
show_browser_notification("VibeTorrent", &n.message);
}
}
}
}
}
if was_connected && !disconnect_notified {
show_toast_with_signal(notifications, NotificationLevel::Warning, "Sunucu bağlantısı kesildi, yeniden bağlanılıyor...");
disconnect_notified = true;
}
}
}
Err(_) => {
if was_connected && !disconnect_notified {
show_toast_with_signal(notifications, NotificationLevel::Warning, "Sunucu bağlantısı kurulamıyor...");
show_toast_with_signal(notifications_for_sse, NotificationLevel::Warning, "Sunucu bağlantısı kesildi, yeniden bağlanılıyor...");
disconnect_notified = true;
}
}
}
gloo_timers::future::TimeoutFuture::new(backoff_ms).await;
backoff_ms = std::cmp::min(backoff_ms * 2, 30000);
Err(_) => {
if was_connected && !disconnect_notified {
show_toast_with_signal(notifications_for_sse, NotificationLevel::Warning, "Sunucu bağlantısı kurulamıyor...");
disconnect_notified = true;
}
}
}
});
log::debug!("SSE: Reconnecting in {}ms...", backoff_ms);
gloo_timers::future::TimeoutFuture::new(backoff_ms).await;
backoff_ms = std::cmp::min(backoff_ms * 2, 30000);
}
});
}

View File

@@ -0,0 +1,30 @@
[package]
edition = "2018"
name = "coarsetime"
version = "0.1.37"
description = "Time and duration crate optimized for speed (patched for MIPS)"
license = "BSD-2-Clause"
[features]
wasi-abi2 = ["dep:wasi-abi2"]
[lib]
name = "coarsetime"
path = "src/lib.rs"
[dependencies]
portable-atomic = { version = "1", default-features = false, features = ["fallback"] }
[target.'cfg(all(any(target_arch = "wasm32", target_arch = "wasm64"), target_os = "unknown"))'.dependencies.wasm-bindgen]
version = "0.2"
[target.'cfg(any(target_os = "wasix", target_os = "wasi"))'.dependencies.wasix]
version = "0.13"
[target.'cfg(not(any(target_os = "wasix", target_os = "wasi")))'.dependencies.libc]
version = "0.2"
[target.'cfg(target_os = "wasi")'.dependencies.wasi-abi2]
version = "0.14.7"
optional = true
package = "wasi"

View File

@@ -4,9 +4,6 @@
)))]
use std::time;
#[cfg(target_has_atomic = "64")]
use std::sync::atomic::{AtomicU64, Ordering};
#[cfg(not(target_has_atomic = "64"))]
use portable_atomic::{AtomicU64, Ordering};
use super::Duration;

View File

@@ -3,9 +3,6 @@ use std::mem::MaybeUninit;
use std::ops::*;
#[allow(unused_imports)]
use std::ptr::*;
#[cfg(target_has_atomic = "64")]
use std::sync::atomic::{AtomicU64, Ordering};
#[cfg(not(target_has_atomic = "64"))]
use portable_atomic::{AtomicU64, Ordering};
use super::duration::*;

View File

@@ -11,6 +11,8 @@ ssr = [
"dep:thiserror",
"dep:quick-xml",
"dep:leptos_axum",
"dep:sqlx",
"dep:anyhow",
"leptos/ssr",
"leptos_router/ssr",
]
@@ -29,4 +31,8 @@ leptos_axum = { version = "0.8.7", optional = true }
tokio = { version = "1", features = ["full"], optional = true }
bytes = { version = "1", optional = true }
thiserror = { version = "2", optional = true }
quick-xml = { version = "0.31", features = ["serde", "serialize"], optional = true }
quick-xml = { version = "0.31", features = ["serde", "serialize"], optional = true }
# Database
sqlx = { version = "0.8", features = ["runtime-tokio", "sqlite"], optional = true }
anyhow = { version = "1.0", optional = true }

View File

@@ -7,8 +7,22 @@ pub mod scgi;
#[cfg(feature = "ssr")]
pub mod xmlrpc;
#[cfg(feature = "ssr")]
pub mod db;
pub mod server_fns;
/// Context handed to server functions that talk to rTorrent:
/// holds the location of its SCGI (XML-RPC) socket.
#[derive(Clone, Debug)]
pub struct ServerContext {
    // Filesystem path of the rTorrent SCGI unix socket.
    pub scgi_socket_path: String,
}
/// Server-only context carrying the shared database handle
/// (used e.g. by the push-subscription server functions).
#[cfg(feature = "ssr")]
#[derive(Clone)]
pub struct DbContext {
    // Database handle defined in the `db` module.
    pub db: db::Db,
}
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq, ToSchema)]
pub struct Torrent {
pub hash: String,

View File

@@ -1,26 +1,3 @@
use leptos::*;
use leptos::prelude::*;
#[cfg(feature = "ssr")]
use crate::xmlrpc::{self, RtorrentClient};
#[server(GetVersion, "/api/server_fns")]
pub async fn get_version() -> Result<String, ServerFnError> {
let socket_path = std::env::var("RTORRENT_SOCKET").unwrap_or_else(|_| "/tmp/rtorrent.sock".to_string());
#[cfg(feature = "ssr")]
{
let client = RtorrentClient::new(&socket_path);
match client.call("system.client_version", &[]).await {
Ok(xml) => {
let version = xmlrpc::parse_string_response(&xml).unwrap_or(xml);
Ok(version)
},
Err(e) => Err(ServerFnError::ServerError(e.to_string())),
}
}
#[cfg(not(feature = "ssr"))]
{
unreachable!()
}
}
pub mod torrent;
pub mod settings;
pub mod push;

View File

@@ -0,0 +1,22 @@
use leptos::prelude::*;
/// Returns the VAPID public key a browser needs to create a Web Push
/// subscription.
///
/// # Errors
/// Fails when the `VAPID_PUBLIC_KEY` environment variable is not set.
#[server(GetPushPublicKey, "/api/server_fns")]
pub async fn get_public_key() -> Result<String, ServerFnError> {
    std::env::var("VAPID_PUBLIC_KEY")
        .map_err(|_| ServerFnError::new("VAPID_PUBLIC_KEY not configured"))
}
/// Persists a browser's Web Push subscription (endpoint plus the `p256dh`
/// and `auth` encryption keys) via the database handle from `DbContext`.
#[server(SubscribePush, "/api/server_fns")]
pub async fn subscribe_push(
    endpoint: String,
    p256dh: String,
    auth: String,
) -> Result<(), ServerFnError> {
    let ctx = expect_context::<crate::DbContext>();
    ctx.db
        .save_push_subscription(&endpoint, &p256dh, &auth)
        .await
        .map_err(|e| ServerFnError::new(format!("Failed to save subscription: {}", e)))?;
    Ok(())
}

View File

@@ -0,0 +1,58 @@
use leptos::prelude::*;
use crate::GlobalLimitRequest;
/// Reads rTorrent's global download/upload rate limits.
///
/// Each value is taken from the corresponding `throttle.global_*.max_rate`
/// call: -1 when the RPC call itself fails, 0 when the response cannot be
/// parsed as an integer.
#[server(GetGlobalLimits, "/api/server_fns")]
pub async fn get_global_limits() -> Result<GlobalLimitRequest, ServerFnError> {
    use crate::xmlrpc::{self, RtorrentClient};

    let ctx = expect_context::<crate::ServerContext>();
    let client = RtorrentClient::new(&ctx.scgi_socket_path);

    let max_download_rate = client
        .call("throttle.global_down.max_rate", &[])
        .await
        .map(|xml| xmlrpc::parse_i64_response(&xml).unwrap_or(0))
        .unwrap_or(-1);
    let max_upload_rate = client
        .call("throttle.global_up.max_rate", &[])
        .await
        .map(|xml| xmlrpc::parse_i64_response(&xml).unwrap_or(0))
        .unwrap_or(-1);

    Ok(GlobalLimitRequest {
        max_download_rate: Some(max_download_rate),
        max_upload_rate: Some(max_upload_rate),
    })
}
/// Applies new global throttle limits to rTorrent.
///
/// Rates arrive in bytes/s and are divided by 1024 (truncating) before being
/// handed to the `*.max_rate.set_kb` methods — which, per their name,
/// presumably take KB/s (TODO confirm against the rTorrent XML-RPC docs).
/// `None` leaves that direction's limit unchanged.
#[server(SetGlobalLimits, "/api/server_fns")]
pub async fn set_global_limits(
    max_download_rate: Option<i64>,
    max_upload_rate: Option<i64>,
) -> Result<(), ServerFnError> {
    use crate::xmlrpc::{RpcParam, RtorrentClient};

    let ctx = expect_context::<crate::ServerContext>();
    let client = RtorrentClient::new(&ctx.scgi_socket_path);

    if let Some(down) = max_download_rate {
        let args = [RpcParam::from(""), RpcParam::Int(down / 1024)];
        client
            .call("throttle.global_down.max_rate.set_kb", &args)
            .await
            .map_err(|e| ServerFnError::new(format!("Failed to set down limit: {}", e)))?;
    }
    if let Some(up) = max_upload_rate {
        let args = [RpcParam::from(""), RpcParam::Int(up / 1024)];
        client
            .call("throttle.global_up.max_rate.set_kb", &args)
            .await
            .map_err(|e| ServerFnError::new(format!("Failed to set up limit: {}", e)))?;
    }
    Ok(())
}

View File

@@ -0,0 +1,274 @@
use leptos::prelude::*;
use crate::{TorrentFile, TorrentPeer, TorrentTracker};
/// Loads and immediately starts a torrent from `uri` via `load.start`.
///
/// rTorrent can report failure inside the XML body instead of failing the
/// call, so the response is also scanned for a `faultCode` marker.
#[server(AddTorrent, "/api/server_fns")]
pub async fn add_torrent(uri: String) -> Result<(), ServerFnError> {
    use crate::xmlrpc::{RpcParam, RtorrentClient};

    let ctx = expect_context::<crate::ServerContext>();
    let client = RtorrentClient::new(&ctx.scgi_socket_path);

    let response = client
        .call("load.start", &[RpcParam::from(""), RpcParam::from(uri.as_str())])
        .await
        .map_err(|e| ServerFnError::new(format!("Failed to add torrent: {}", e)))?;

    if response.contains("faultCode") {
        Err(ServerFnError::new("rTorrent returned fault"))
    } else {
        Ok(())
    }
}
#[server(TorrentAction, "/api/server_fns")]
pub async fn torrent_action(hash: String, action: String) -> Result<String, ServerFnError> {
use crate::xmlrpc::{RpcParam, RtorrentClient};
let ctx = expect_context::<crate::ServerContext>();
let client = RtorrentClient::new(&ctx.scgi_socket_path);
if action == "delete_with_data" {
return delete_torrent_with_data_inner(&client, &hash).await;
}
let method = match action.as_str() {
"start" => "d.start",
"stop" => "d.stop",
"delete" => "d.erase",
_ => return Err(ServerFnError::new("Invalid action")),
};
let params = vec![RpcParam::from(hash.as_str())];
match client.call(method, &params).await {
Ok(_) => Ok("Action executed".to_string()),
Err(e) => Err(ServerFnError::new(format!("RPC error: {}", e))),
}
}
/// Erases a torrent from rTorrent and deletes its downloaded data from disk.
///
/// Safety checks: both the download root (`directory.default`) and the
/// torrent's base path are canonicalized, and deletion is refused unless the
/// target lies strictly inside the root — this blocks symlink/`..` escapes
/// and deleting the root itself. If the data path no longer exists, only the
/// torrent entry is erased.
#[cfg(feature = "ssr")]
async fn delete_torrent_with_data_inner(
    client: &crate::xmlrpc::RtorrentClient,
    hash: &str,
) -> Result<String, ServerFnError> {
    use crate::xmlrpc::{parse_string_response, RpcParam};
    let params_hash = vec![RpcParam::from(hash)];
    // Where rTorrent stored this torrent's data.
    let path_xml = client
        .call("d.base_path", &params_hash)
        .await
        .map_err(|e| ServerFnError::new(format!("Failed to call rTorrent: {}", e)))?;
    let path = parse_string_response(&path_xml)
        .map_err(|e| ServerFnError::new(format!("Failed to parse path: {}", e)))?;
    // The configured download root; everything we delete must stay under it.
    let root_xml = client
        .call("directory.default", &[])
        .await
        .map_err(|e| ServerFnError::new(format!("Failed to get download root: {}", e)))?;
    let root_path_str = parse_string_response(&root_xml)
        .map_err(|e| ServerFnError::new(format!("Failed to parse root path: {}", e)))?;
    // Canonicalize so the containment check below compares real paths,
    // not symlinks or relative components.
    let root_path = tokio::fs::canonicalize(std::path::Path::new(&root_path_str))
        .await
        .map_err(|e| ServerFnError::new(format!("Invalid download root: {}", e)))?;
    let target_path_raw = std::path::Path::new(&path);
    // Data already gone: just drop the torrent entry and report that.
    if !tokio::fs::try_exists(target_path_raw).await.unwrap_or(false) {
        client
            .call("d.erase", &params_hash)
            .await
            .map_err(|e| ServerFnError::new(format!("Failed to erase torrent: {}", e)))?;
        return Ok("Torrent removed (Data not found)".to_string());
    }
    let target_path = tokio::fs::canonicalize(target_path_raw)
        .await
        .map_err(|e| ServerFnError::new(format!("Invalid data path: {}", e)))?;
    // Refuse anything outside the download root (path-escape protection).
    if !target_path.starts_with(&root_path) {
        return Err(ServerFnError::new(
            "Security Error: Cannot delete files outside download directory",
        ));
    }
    // Refuse deleting the root itself (single-file torrents stored at root
    // would otherwise canonicalize to it in pathological configs).
    if target_path == root_path {
        return Err(ServerFnError::new(
            "Security Error: Cannot delete the download root directory",
        ));
    }
    // Erase from rTorrent first so it stops touching the files we remove.
    client
        .call("d.erase", &params_hash)
        .await
        .map_err(|e| ServerFnError::new(format!("Failed to erase torrent: {}", e)))?;
    let delete_result = if target_path.is_dir() {
        tokio::fs::remove_dir_all(&target_path).await
    } else {
        tokio::fs::remove_file(&target_path).await
    };
    match delete_result {
        Ok(_) => Ok("Torrent and data deleted".to_string()),
        Err(e) => Err(ServerFnError::new(format!("Failed to delete data: {}", e))),
    }
}
/// Lists a torrent's files via `f.multicall`: path, byte size, completed
/// chunk count, and download priority per file. Unparsable fields fall back
/// to empty/zero defaults.
#[server(GetFiles, "/api/server_fns")]
pub async fn get_files(hash: String) -> Result<Vec<TorrentFile>, ServerFnError> {
    use crate::xmlrpc::{parse_multicall_response, RpcParam, RtorrentClient};

    let ctx = expect_context::<crate::ServerContext>();
    let client = RtorrentClient::new(&ctx.scgi_socket_path);

    let query = [
        RpcParam::from(hash.as_str()),
        RpcParam::from(""),
        RpcParam::from("f.path="),
        RpcParam::from("f.size_bytes="),
        RpcParam::from("f.completed_chunks="),
        RpcParam::from("f.priority="),
    ];
    let xml = client
        .call("f.multicall", &query)
        .await
        .map_err(|e| ServerFnError::new(format!("RPC error: {}", e)))?;
    let rows = parse_multicall_response(&xml)
        .map_err(|e| ServerFnError::new(format!("Parse error: {}", e)))?;

    let mut files = Vec::with_capacity(rows.len());
    for (idx, row) in rows.into_iter().enumerate() {
        files.push(TorrentFile {
            // Position in the multicall result doubles as the file index.
            index: idx as u32,
            path: row.get(0).cloned().unwrap_or_default(),
            size: row.get(1).and_then(|s| s.parse().ok()).unwrap_or(0),
            completed_chunks: row.get(2).and_then(|s| s.parse().ok()).unwrap_or(0),
            priority: row.get(3).and_then(|s| s.parse().ok()).unwrap_or(0),
        });
    }
    Ok(files)
}
/// Lists the peers of a torrent via `p.multicall`: address, client version,
/// transfer rates, and completion percentage. Unparsable fields fall back to
/// empty/zero defaults.
#[server(GetPeers, "/api/server_fns")]
pub async fn get_peers(hash: String) -> Result<Vec<TorrentPeer>, ServerFnError> {
    use crate::xmlrpc::{parse_multicall_response, RpcParam, RtorrentClient};

    let ctx = expect_context::<crate::ServerContext>();
    let client = RtorrentClient::new(&ctx.scgi_socket_path);

    let query = [
        RpcParam::from(hash.as_str()),
        RpcParam::from(""),
        RpcParam::from("p.address="),
        RpcParam::from("p.client_version="),
        RpcParam::from("p.down_rate="),
        RpcParam::from("p.up_rate="),
        RpcParam::from("p.completed_percent="),
    ];
    let xml = client
        .call("p.multicall", &query)
        .await
        .map_err(|e| ServerFnError::new(format!("RPC error: {}", e)))?;
    let rows = parse_multicall_response(&xml)
        .map_err(|e| ServerFnError::new(format!("Parse error: {}", e)))?;

    let mut peers = Vec::with_capacity(rows.len());
    for row in rows {
        peers.push(TorrentPeer {
            ip: row.get(0).cloned().unwrap_or_default(),
            client: row.get(1).cloned().unwrap_or_default(),
            down_rate: row.get(2).and_then(|s| s.parse().ok()).unwrap_or(0),
            up_rate: row.get(3).and_then(|s| s.parse().ok()).unwrap_or(0),
            progress: row.get(4).and_then(|s| s.parse().ok()).unwrap_or(0.0),
        });
    }
    Ok(peers)
}
/// Lists a torrent's trackers via `t.multicall`. No status field is queried,
/// so `status` is always reported as "Unknown"; the last-activity column is
/// fetched but currently unused.
#[server(GetTrackers, "/api/server_fns")]
pub async fn get_trackers(hash: String) -> Result<Vec<TorrentTracker>, ServerFnError> {
    use crate::xmlrpc::{parse_multicall_response, RpcParam, RtorrentClient};

    let ctx = expect_context::<crate::ServerContext>();
    let client = RtorrentClient::new(&ctx.scgi_socket_path);

    // Columns per tracker: URL, last activity timestamp (unused), message.
    let query = [
        RpcParam::from(hash.as_str()),
        RpcParam::from(""),
        RpcParam::from("t.url="),
        RpcParam::from("t.activity_date_last="),
        RpcParam::from("t.message="),
    ];
    let xml = client
        .call("t.multicall", &query)
        .await
        .map_err(|e| ServerFnError::new(format!("RPC error: {}", e)))?;
    let rows = parse_multicall_response(&xml)
        .map_err(|e| ServerFnError::new(format!("Parse error: {}", e)))?;

    let mut trackers = Vec::with_capacity(rows.len());
    for row in rows {
        trackers.push(TorrentTracker {
            url: row.get(0).cloned().unwrap_or_default(),
            status: "Unknown".to_string(),
            message: row.get(2).cloned().unwrap_or_default(),
        });
    }
    Ok(trackers)
}
/// Sets the download priority of a single file inside a torrent.
///
/// rTorrent addresses individual files as `"<hash>:f<index>"`. After the
/// priority change, `d.update_priorities` is invoked best-effort — its
/// failure is deliberately ignored.
#[server(SetFilePriority, "/api/server_fns")]
pub async fn set_file_priority(
    hash: String,
    file_index: u32,
    priority: u8,
) -> Result<(), ServerFnError> {
    use crate::xmlrpc::{RpcParam, RtorrentClient};

    let ctx = expect_context::<crate::ServerContext>();
    let client = RtorrentClient::new(&ctx.scgi_socket_path);

    let file_target = format!("{}:f{}", hash, file_index);
    client
        .call(
            "f.set_priority",
            &[
                RpcParam::from(file_target.as_str()),
                RpcParam::from(priority as i64),
            ],
        )
        .await
        .map_err(|e| ServerFnError::new(format!("RPC error: {}", e)))?;

    // Best-effort refresh; errors here are intentionally dropped.
    let _ = client
        .call("d.update_priorities", &[RpcParam::from(hash.as_str())])
        .await;
    Ok(())
}
/// Stores `label` in the torrent's `custom1` slot via `d.custom1.set`.
#[server(SetLabel, "/api/server_fns")]
pub async fn set_label(hash: String, label: String) -> Result<(), ServerFnError> {
    use crate::xmlrpc::{RpcParam, RtorrentClient};

    let ctx = expect_context::<crate::ServerContext>();
    let client = RtorrentClient::new(&ctx.scgi_socket_path);

    let args = [RpcParam::from(hash.as_str()), RpcParam::from(label)];
    client
        .call("d.custom1.set", &args)
        .await
        .map_err(|e| ServerFnError::new(format!("RPC error: {}", e)))?;
    Ok(())
}
/// Returns rTorrent's client version string (`system.client_version`),
/// falling back to the raw XML response when it cannot be parsed.
#[server(GetVersion, "/api/server_fns")]
pub async fn get_version() -> Result<String, ServerFnError> {
    use crate::xmlrpc::{parse_string_response, RtorrentClient};

    let ctx = expect_context::<crate::ServerContext>();
    let client = RtorrentClient::new(&ctx.scgi_socket_path);

    let xml = client
        .call("system.client_version", &[])
        .await
        .map_err(|e| ServerFnError::new(format!("Failed to get version: {}", e)))?;
    Ok(parse_string_response(&xml).unwrap_or(xml))
}

View File

@@ -1 +0,0 @@
{"v":1}

View File

@@ -1,6 +0,0 @@
{
"git": {
"sha1": "831c97016aa3d8f7851999aa1deea8407e7cbd42"
},
"path_in_vcs": ""
}

View File

@@ -1,8 +0,0 @@
version: 2
updates:
- package-ecosystem: cargo
directory: "/"
schedule:
interval: daily
time: "04:00"
open-pull-requests-limit: 10

View File

@@ -1,17 +0,0 @@
name: Close inactive issues
on:
schedule:
- cron: "30 1 * * *"
jobs:
close-issues:
runs-on: ubuntu-latest
permissions:
issues: write
pull-requests: write
steps:
- uses: actions/stale@v9
with:
stale-issue-message: "This issue is stale because it has been open for 30 days with no activity."
close-issue-message: "This issue was closed because it has been inactive for 14 days since being marked as stale."
repo-token: ${{ secrets.GITHUB_TOKEN }}

View File

@@ -1,4 +0,0 @@
target
Cargo.lock
.vscode
zig-cache

View File

@@ -1,82 +0,0 @@
# THIS FILE IS AUTOMATICALLY GENERATED BY CARGO
#
# When uploading crates to the registry Cargo will automatically
# "normalize" Cargo.toml files for maximal compatibility
# with all versions of Cargo and also rewrite `path` dependencies
# to registry (e.g., crates.io) dependencies.
#
# If you are reading this file be aware that the original Cargo.toml
# will likely look very different (and much more reasonable).
# See Cargo.toml.orig for the original contents.
[package]
edition = "2018"
name = "coarsetime"
version = "0.1.37"
authors = ["Frank Denis <github@pureftpd.org>"]
build = false
autolib = false
autobins = false
autoexamples = false
autotests = false
autobenches = false
description = "Time and duration crate optimized for speed"
homepage = "https://github.com/jedisct1/rust-coarsetime"
readme = "README.md"
keywords = [
"time",
"date",
"duration",
]
categories = [
"concurrency",
"date-and-time",
"os",
]
license = "BSD-2-Clause"
repository = "https://github.com/jedisct1/rust-coarsetime"
[features]
wasi-abi2 = ["dep:wasi-abi2"]
[lib]
name = "coarsetime"
path = "src/lib.rs"
[[bench]]
name = "benchmark"
path = "benches/benchmark.rs"
harness = false
[dev-dependencies.benchmark-simple]
version = "0.1.10"
[dependencies.portable-atomic]
version = "1.6"
[target.'cfg(all(any(target_arch = "wasm32", target_arch = "wasm64"), target_os = "unknown"))'.dependencies.wasm-bindgen]
version = "0.2"
[target.'cfg(any(target_os = "wasix", target_os = "wasi"))'.dependencies.wasix]
version = "0.13"
[target.'cfg(not(any(target_os = "wasix", target_os = "wasi")))'.dependencies.libc]
version = "0.2"
[target.'cfg(target_os = "wasi")'.dependencies.wasi-abi2]
version = "0.14.7"
optional = true
package = "wasi"
[profile.bench]
codegen-units = 1
[profile.dev]
overflow-checks = true
[profile.release]
opt-level = 3
lto = true
codegen-units = 1
panic = "abort"
incremental = false

View File

@@ -1,47 +0,0 @@
[package]
name = "coarsetime"
version = "0.1.37"
description = "Time and duration crate optimized for speed"
authors = ["Frank Denis <github@pureftpd.org>"]
keywords = ["time", "date", "duration"]
readme = "README.md"
license = "BSD-2-Clause"
homepage = "https://github.com/jedisct1/rust-coarsetime"
repository = "https://github.com/jedisct1/rust-coarsetime"
categories = ["concurrency", "date-and-time", "os"]
edition = "2018"
[features]
wasi-abi2 = ["dep:wasi-abi2"]
[target.'cfg(not(any(target_os = "wasix", target_os = "wasi")))'.dependencies]
libc = "0.2"
[target.'cfg(target_os = "wasi")'.dependencies]
wasi-abi2 = { package = "wasi", version = "0.14.7", optional = true }
[target.'cfg(any(target_os = "wasix", target_os = "wasi"))'.dependencies]
wasix = "0.13"
[target.'cfg(all(any(target_arch = "wasm32", target_arch = "wasm64"), target_os = "unknown"))'.dependencies]
wasm-bindgen = "0.2"
[dev-dependencies]
benchmark-simple = "0.1.10"
[profile.bench]
codegen-units = 1
[[bench]]
name = "benchmark"
harness = false
[profile.release]
lto = true
panic = "abort"
opt-level = 3
codegen-units = 1
incremental = false
[profile.dev]
overflow-checks=true

View File

@@ -1,25 +0,0 @@
BSD 2-Clause License
Copyright (c) 2016-2026, Frank Denis
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright notice, this
list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

View File

@@ -1,90 +0,0 @@
[![Documentation](https://docs.rs/coarsetime/badge.svg)](https://docs.rs/coarsetime)
[![Windows build status](https://ci.appveyor.com/api/projects/status/xlbhk9850dvl5ylh?svg=true)](https://ci.appveyor.com/project/jedisct1/rust-coarsetime)
# coarsetime
A Rust crate to make time measurements, that focuses on speed, API stability and portability.
This crate is a partial replacement for the `Time` and `Duration` structures
from the standard library, with the following differences:
* Speed is privileged over accuracy. In particular, `CLOCK_MONOTONIC_COARSE` is
used to retrieve the clock value on Linux systems, and transformations avoid
operations that can be slow on non-Intel systems.
* The number of system calls can be kept to a minimum. The "most recent
timestamp" is always kept in memory. It can be read with just a load operation,
and can be updated only as frequently as necessary.
* The API is stable, and the same for all platforms. Unlike the standard library, it doesn't silently compile functions that do nothing but panic at runtime on some platforms.
# Installation
`coarsetime` is available on [crates.io](https://crates.io/crates/coarsetime)
and works on Rust stable, beta, and nightly.
Windows and Unix-like systems are supported.
Available feature:
* `wasi-abi2`: when targeting WASI, use the second preview of the ABI. Default is to use the regular WASI-core ABI.
# Documentation
[API documentation](https://docs.rs/coarsetime)
# Example
```rust
extern crate coarsetime;
use coarsetime::{Clock, Duration, Instant, Updater};
// Get the current instant. This may require a system call, but it may also
// be faster than the stdlib equivalent.
let now = Instant::now();
// Get the latest known instant. This operation is super fast.
// In this case, the value will be identical to `now`, because we haven't
// updated the latest known instant yet.
let ts1 = Instant::recent();
// Update the latest known instant. This may require a system call.
// Note that a call to `Instant::now()` also updates the stored instant.
Instant::update();
// Now, we may get a different instant. This call is also super fast.
let ts2 = Instant::recent();
// Compute the time elapsed between ts2 and ts1.
let elapsed_ts2_ts1 = ts2.duration_since(ts1);
// Operations such as `+` and `-` between `Instant` and `Duration` are also
// available.
let elapsed_ts2_ts1 = ts2 - ts1;
// Returns the time elapsed since ts1.
// This retrieves the actual current time, and may require a system call.
let elapsed_since_ts1 = ts1.elapsed();
// Returns the approximate time elapsed since ts1.
// This uses the latest known instant, and is super fast.
let elapsed_since_recent = ts1.elapsed_since_recent();
// Instant::update() should be called periodically, for example using an
// event loop. Alternatively, the crate provides an easy way to spawn a
// background task that will periodically update the latest known instant.
// Here, the update will happen every 250ms.
let updater = Updater::new(250).start().unwrap();
// From now on, Instant::recent() will always return an approximation of the
// current instant.
let ts3 = Instant::recent();
// Stop the task.
updater.stop().unwrap();
// Returns the elapsed time since the UNIX epoch
let unix_timestamp = Clock::now_since_epoch();
// Returns an approximation of the elapsed time since the UNIX epoch, based on
// the latest time update
let unix_timestamp_approx = Clock::recent_since_epoch();
```

View File

@@ -1,61 +0,0 @@
use benchmark_simple::*;
use coarsetime::*;
use std::time;
/// Runs every timing micro-benchmark with one shared option set.
fn main() {
    let options = Options {
        iterations: 250_000,
        warmup_iterations: 25_000,
        min_samples: 5,
        max_samples: 10,
        max_rsd: 1.0,
        ..Default::default()
    };
    let options = &options;
    bench_coarsetime_now(options);
    bench_coarsetime_recent(options);
    bench_coarsetime_elapsed(options);
    bench_coarsetime_elapsed_since_recent(options);
    bench_stdlib_now(options);
    bench_stdlib_elapsed(options);
}
/// Benchmarks `Instant::now()` (may issue a syscall per call).
fn bench_coarsetime_now(options: &Options) {
    let bench = Bench::new();
    Instant::update();
    let result = bench.run(options, Instant::now);
    println!("coarsetime_now(): {}", result.throughput(1));
}
/// Benchmarks `Instant::recent()`, which only reads the cached timestamp.
fn bench_coarsetime_recent(options: &Options) {
    let bench = Bench::new();
    Instant::update();
    let result = bench.run(options, Instant::recent);
    println!("coarsetime_recent(): {}", result.throughput(1));
}
/// Benchmarks `elapsed()` against a fixed coarsetime start instant.
fn bench_coarsetime_elapsed(options: &Options) {
    let bench = Bench::new();
    let start = Instant::now();
    let result = bench.run(options, || start.elapsed());
    println!("coarsetime_elapsed(): {}", result.throughput(1));
}
/// Benchmarks `elapsed_since_recent()`, which uses the cached timestamp.
fn bench_coarsetime_elapsed_since_recent(options: &Options) {
    let bench = Bench::new();
    let start = Instant::now();
    let result = bench.run(options, || start.elapsed_since_recent());
    println!("coarsetime_since_recent(): {}", result.throughput(1));
}
/// Baseline: benchmarks `std::time::Instant::now()`.
fn bench_stdlib_now(options: &Options) {
    let bench = Bench::new();
    let result = bench.run(options, time::Instant::now);
    println!("stdlib_now(): {}", result.throughput(1));
}
/// Baseline: benchmarks `elapsed()` on a `std::time::Instant`.
fn bench_stdlib_elapsed(options: &Options) {
    let bench = Bench::new();
    let start = time::Instant::now();
    let result = bench.run(options, || start.elapsed());
    println!("stdlib_elapsed(): {}", result.throughput(1));
}

View File

@@ -1,55 +0,0 @@
use std::thread::sleep;
use std::time;
#[cfg(not(any(target_arch = "wasm32", target_arch = "wasm64")))]
use super::Updater;
use super::{Clock, Duration, Instant};
// Smoke test for Instant/Clock: checks that elapsed time after a real sleep
// is plausible and that the cached "recent" values only advance when
// explicitly updated.
#[test]
fn tests() {
    let ts = Instant::now();
    let d = Duration::from_secs(2);
    sleep(time::Duration::new(3, 0));
    let elapsed = ts.elapsed().as_secs();
    println!("Elapsed: {elapsed} secs");
    // Slept ~3s: at least 2s must have elapsed; the loose upper bound guards
    // against grossly wrong clock readings on slow machines.
    assert!(elapsed >= 2);
    assert!(elapsed < 100);
    assert!(ts.elapsed_since_recent() > d);
    let ts = Instant::now();
    sleep(time::Duration::new(1, 0));
    // `recent()` is cached: it must not advance without an update
    // (`Instant::now()` above refreshed it).
    assert_eq!(Instant::recent(), ts);
    Instant::update();
    assert!(Instant::recent() > ts);
    let clock_now = Clock::recent_since_epoch();
    sleep(time::Duration::new(1, 0));
    // Same caching contract for the epoch-based clock.
    assert_eq!(Clock::recent_since_epoch(), clock_now);
    assert!(Clock::now_since_epoch() > clock_now);
    // The background Updater is unavailable on wasm targets.
    #[cfg(not(any(target_arch = "wasm32", target_arch = "wasm64")))]
    tests_updater();
}
// Verifies the background Updater: while running, cached values advance on
// their own; after `stop()`, they freeze again. Not built on wasm (no threads).
#[cfg(not(any(target_arch = "wasm32", target_arch = "wasm64")))]
#[test]
fn tests_updater() {
    // Refresh the cached instant every 250 ms in a background task.
    let updater = Updater::new(250)
        .start()
        .expect("Unable to start a background updater");
    let ts = Instant::recent();
    let clock_recent = Clock::recent_since_epoch();
    sleep(time::Duration::new(2, 0));
    // With the updater running, cached values must have advanced.
    assert!(Clock::recent_since_epoch() > clock_recent);
    assert!(Instant::recent() != ts);
    updater.stop().unwrap();
    // After stopping, the cached epoch clock must stay frozen.
    let clock_recent = Clock::recent_since_epoch();
    sleep(time::Duration::new(1, 0));
    assert_eq!(Clock::recent_since_epoch(), clock_recent);
}
/// Round-trips a day count through `Duration::from_days`/`as_days`.
#[test]
fn tests_duration() {
    let thousand_days = Duration::from_days(1000);
    assert_eq!(thousand_days.as_days(), 1000);
}