refactor: migrate torrent/settings endpoints to Leptos Server Functions and remove third_party/coarsetime
Some checks failed
Build MIPS Binary / build (push) Failing after 4m15s

This commit is contained in:
spinline
2026-02-10 00:27:39 +03:00
parent 3f370389aa
commit 5e1f4b18c2
26 changed files with 382 additions and 1990 deletions

3
Cargo.lock generated
View File

@@ -559,9 +559,10 @@ checksum = "c3e64b0cc0439b12df2fa678eae89a1c56a529fd067a9115f7827f1fffd22b32"
[[package]] [[package]]
name = "coarsetime" name = "coarsetime"
version = "0.1.37" version = "0.1.37"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "e58eb270476aa4fc7843849f8a35063e8743b4dbcdf6dd0f8ea0886980c204c2"
dependencies = [ dependencies = [
"libc", "libc",
"portable-atomic",
"wasix", "wasix",
"wasm-bindgen", "wasm-bindgen",
] ]

View File

@@ -16,5 +16,3 @@ strip = true
# Artık (incremental) build'i kapat ki optimizasyon tam olsun # Artık (incremental) build'i kapat ki optimizasyon tam olsun
incremental = false incremental = false
[patch.crates-io]
coarsetime = { path = "third_party/coarsetime" }

View File

@@ -1,13 +1,4 @@
use shared::{
xmlrpc::{self, RpcParam},
AddTorrentRequest, GlobalLimitRequest, SetFilePriorityRequest, SetLabelRequest, TorrentActionRequest,
TorrentFile, TorrentPeer, TorrentTracker,
};
use crate::AppState;
#[cfg(feature = "push-notifications")]
use crate::push;
use axum::{ use axum::{
extract::{Json, Path, State},
http::{header, StatusCode, Uri}, http::{header, StatusCode, Uri},
response::IntoResponse, response::IntoResponse,
BoxError, BoxError,
@@ -37,7 +28,6 @@ pub async fn static_handler(uri: Uri) -> impl IntoResponse {
if path.contains('.') { if path.contains('.') {
return StatusCode::NOT_FOUND.into_response(); return StatusCode::NOT_FOUND.into_response();
} }
// Fallback to index.html for SPA routing
match Asset::get("index.html") { match Asset::get("index.html") {
Some(content) => { Some(content) => {
let mime = mime_guess::from_path("index.html").first_or_octet_stream(); let mime = mime_guess::from_path("index.html").first_or_octet_stream();
@@ -49,614 +39,6 @@ pub async fn static_handler(uri: Uri) -> impl IntoResponse {
} }
} }
// --- TORRENT ACTIONS ---
/// Add a new torrent via magnet link or URL
#[utoipa::path(
    post,
    path = "/api/torrents/add",
    request_body = AddTorrentRequest,
    responses(
        (status = 200, description = "Torrent added successfully"),
        (status = 500, description = "Internal server error or rTorrent fault")
    )
)]
pub async fn add_torrent_handler(
    State(state): State<AppState>,
    Json(payload): Json<AddTorrentRequest>,
) -> StatusCode {
    tracing::info!(
        "Received add_torrent request. URI length: {}",
        payload.uri.len()
    );
    let client = xmlrpc::RtorrentClient::new(&state.scgi_socket_path);
    // `load.start` takes an empty target string followed by the magnet/URL.
    let args = [RpcParam::from(""), RpcParam::from(payload.uri.as_str())];
    match client.call("load.start", &args).await {
        Ok(response) => {
            tracing::debug!("rTorrent response to load.start: {}", response);
            // rTorrent reports errors in-band as an XML-RPC fault payload.
            if response.contains("faultCode") {
                tracing::error!("rTorrent returned fault: {}", response);
                StatusCode::INTERNAL_SERVER_ERROR
            } else {
                // Note: Frontend shows its own toast, no SSE notification needed
                StatusCode::OK
            }
        }
        Err(e) => {
            tracing::error!("Failed to add torrent: {}", e);
            // Note: Frontend shows its own toast, no SSE notification needed
            StatusCode::INTERNAL_SERVER_ERROR
        }
    }
}
/// Perform an action on a torrent (start, stop, delete)
#[utoipa::path(
    post,
    path = "/api/torrents/action",
    request_body = TorrentActionRequest,
    responses(
        (status = 200, description = "Action executed successfully"),
        (status = 400, description = "Invalid action or request"),
        (status = 403, description = "Forbidden: Security risk detected"),
        (status = 500, description = "Internal server error")
    )
)]
pub async fn handle_torrent_action(
    State(state): State<AppState>,
    Json(payload): Json<TorrentActionRequest>,
) -> impl IntoResponse {
    tracing::info!(
        "Received action: {} for hash: {}",
        payload.action,
        payload.hash
    );
    let client = xmlrpc::RtorrentClient::new(&state.scgi_socket_path);

    // "delete_with_data" goes through the sandboxed filesystem-deletion helper.
    if payload.action == "delete_with_data" {
        return match delete_torrent_with_data(&client, &payload.hash).await {
            // Note: Frontend shows its own toast
            Ok(msg) => (StatusCode::OK, msg).into_response(),
            Err((status, msg)) => (status, msg).into_response(),
        };
    }

    // Map the remaining simple actions onto their rTorrent RPC methods.
    let method = match payload.action.as_str() {
        "start" => "d.start",
        "stop" => "d.stop",
        "delete" => "d.erase",
        _ => return (StatusCode::BAD_REQUEST, "Invalid action").into_response(),
    };
    let args = [RpcParam::from(payload.hash.as_str())];
    if let Err(e) = client.call(method, &args).await {
        tracing::error!("RPC error: {}", e);
        return (
            StatusCode::INTERNAL_SERVER_ERROR,
            "Failed to execute action",
        )
            .into_response();
    }
    // Note: Frontend shows its own toast, no SSE notification needed
    (StatusCode::OK, "Action executed").into_response()
}
/// Helper function to handle secure deletion of torrent data.
///
/// Steps:
/// 1. Ask rTorrent for the torrent's base path and the default download
///    directory (used as the sandbox root).
/// 2. Canonicalize both paths to defeat `..` traversal and symlink tricks.
/// 3. Refuse to delete anything outside the root, or the root itself.
/// 4. Erase the torrent entry first, then remove the data from disk.
///
/// Returns a static status message on success, or `(status, message)` for
/// the HTTP response on failure.
async fn delete_torrent_with_data(
    client: &xmlrpc::RtorrentClient,
    hash: &str,
) -> Result<&'static str, (StatusCode, String)> {
    let params_hash = vec![RpcParam::from(hash)];
    // 1. Get Base Path of the torrent's data as reported by rTorrent
    let path_xml = client
        .call("d.base_path", &params_hash)
        .await
        .map_err(|e| {
            (
                StatusCode::INTERNAL_SERVER_ERROR,
                format!("Failed to call rTorrent: {}", e),
            )
        })?;
    let path = xmlrpc::parse_string_response(&path_xml).map_err(|e| {
        (
            StatusCode::INTERNAL_SERVER_ERROR,
            format!("Failed to parse path: {}", e),
        )
    })?;
    // 1.5 Get Default Download Directory (Sandbox Root) — deletions are only
    // permitted beneath this directory.
    let root_xml = client.call("directory.default", &[]).await.map_err(|e| {
        (
            StatusCode::INTERNAL_SERVER_ERROR,
            format!("Failed to get valid download root: {}", e),
        )
    })?;
    let root_path_str = xmlrpc::parse_string_response(&root_xml).map_err(|e| {
        (
            StatusCode::INTERNAL_SERVER_ERROR,
            format!("Failed to parse root path: {}", e),
        )
    })?;
    // Resolve Paths (Canonicalize) to prevent .. traversal and symlink attacks.
    // Canonicalization fails if the root does not exist — that is a server
    // misconfiguration, surfaced as 500.
    let root_path = tokio::fs::canonicalize(std::path::Path::new(&root_path_str))
        .await
        .map_err(|e| {
            (
                StatusCode::INTERNAL_SERVER_ERROR,
                format!("Invalid download root configuration (on server): {}", e),
            )
        })?;
    // Check if target path exists before trying to resolve it —
    // canonicalize() would error on a missing path.
    let target_path_raw = std::path::Path::new(&path);
    if !tokio::fs::try_exists(target_path_raw)
        .await
        .unwrap_or(false)
    {
        tracing::warn!(
            "Data path not found: {:?}. Removing torrent only.",
            target_path_raw
        );
        // If the data doesn't exist, we just remove the torrent entry
        client.call("d.erase", &params_hash).await.map_err(|e| {
            (
                StatusCode::INTERNAL_SERVER_ERROR,
                format!("Failed to erase torrent: {}", e),
            )
        })?;
        return Ok("Torrent removed (Data not found)");
    }
    let target_path = tokio::fs::canonicalize(target_path_raw)
        .await
        .map_err(|e| {
            (
                StatusCode::INTERNAL_SERVER_ERROR,
                format!("Invalid data path: {}", e),
            )
        })?;
    tracing::info!(
        "Delete request: Target='{:?}', Root='{:?}'",
        target_path,
        root_path
    );
    // SECURITY CHECK: Ensure the (canonical) target is inside root_path
    if !target_path.starts_with(&root_path) {
        tracing::error!(
            "Security Risk: Attempted to delete path outside download directory: {:?}",
            target_path
        );
        return Err((
            StatusCode::FORBIDDEN,
            "Security Error: Cannot delete files outside default download directory".to_string(),
        ));
    }
    // SECURITY CHECK: Ensure we are not deleting the root itself
    if target_path == root_path {
        return Err((
            StatusCode::BAD_REQUEST,
            "Security Error: Cannot delete the download root directory itself".to_string(),
        ));
    }
    // 2. Erase Torrent first, so rTorrent releases the files before removal
    client.call("d.erase", &params_hash).await.map_err(|e| {
        tracing::warn!("Failed to erase torrent entry: {}", e);
        (
            StatusCode::INTERNAL_SERVER_ERROR,
            format!("Failed to erase torrent: {}", e),
        )
    })?;
    // 3. Delete Files via Native FS (directory tree or single file)
    let delete_result = if target_path.is_dir() {
        tokio::fs::remove_dir_all(&target_path).await
    } else {
        tokio::fs::remove_file(&target_path).await
    };
    match delete_result {
        Ok(_) => Ok("Torrent and data deleted"),
        Err(e) => {
            // Torrent entry is already gone at this point; only the data
            // removal failed.
            tracing::error!("Failed to delete data at {:?}: {}", target_path, e);
            Err((
                StatusCode::INTERNAL_SERVER_ERROR,
                format!("Failed to delete data: {}", e),
            ))
        }
    }
}
// --- NEW HANDLERS ---
/// Get rTorrent version
#[utoipa::path(
    get,
    path = "/api/system/version",
    responses(
        (status = 200, description = "rTorrent version", body = String),
        (status = 500, description = "Internal server error")
    )
)]
pub async fn get_version_handler(State(state): State<AppState>) -> impl IntoResponse {
    let client = xmlrpc::RtorrentClient::new(&state.scgi_socket_path);
    let result = client.call("system.client_version", &[]).await;
    match result {
        Err(e) => {
            tracing::error!("Failed to get version: {}", e);
            (StatusCode::INTERNAL_SERVER_ERROR, "Failed to get version").into_response()
        }
        Ok(xml) => {
            // Fall back to the raw XML payload if it cannot be parsed.
            let version = xmlrpc::parse_string_response(&xml).unwrap_or(xml);
            (StatusCode::OK, version).into_response()
        }
    }
}
/// Get files for a torrent
#[utoipa::path(
    get,
    path = "/api/torrents/{hash}/files",
    responses(
        (status = 200, description = "Files list", body = Vec<TorrentFile>),
        (status = 500, description = "Internal server error")
    ),
    params(
        ("hash" = String, Path, description = "Torrent Hash")
    )
)]
pub async fn get_files_handler(
    State(state): State<AppState>,
    Path(hash): Path<String>,
) -> impl IntoResponse {
    let client = xmlrpc::RtorrentClient::new(&state.scgi_socket_path);
    // f.multicall: hash, "" and one accessor per column; the accessor order
    // determines the row indices used when building TorrentFile below.
    let args = vec![
        RpcParam::from(hash.as_str()),
        RpcParam::from(""),
        RpcParam::from("f.path="),
        RpcParam::from("f.size_bytes="),
        RpcParam::from("f.completed_chunks="),
        RpcParam::from("f.priority="),
    ];
    let xml = match client.call("f.multicall", &args).await {
        Ok(xml) => xml,
        Err(e) => {
            return (
                StatusCode::INTERNAL_SERVER_ERROR,
                format!("RPC error: {}", e),
            )
                .into_response()
        }
    };
    match xmlrpc::parse_multicall_response(&xml) {
        Ok(rows) => {
            // Unparsable or missing cells degrade to defaults instead of
            // failing the whole listing.
            let mut files = Vec::with_capacity(rows.len());
            for (idx, row) in rows.into_iter().enumerate() {
                files.push(TorrentFile {
                    index: idx as u32,
                    path: row.get(0).cloned().unwrap_or_default(),
                    size: row.get(1).and_then(|s| s.parse().ok()).unwrap_or(0),
                    completed_chunks: row.get(2).and_then(|s| s.parse().ok()).unwrap_or(0),
                    priority: row.get(3).and_then(|s| s.parse().ok()).unwrap_or(0),
                });
            }
            (StatusCode::OK, Json(files)).into_response()
        }
        Err(e) => (
            StatusCode::INTERNAL_SERVER_ERROR,
            format!("Parse error: {}", e),
        )
            .into_response(),
    }
}
/// Get peers for a torrent
#[utoipa::path(
    get,
    path = "/api/torrents/{hash}/peers",
    responses(
        (status = 200, description = "Peers list", body = Vec<TorrentPeer>),
        (status = 500, description = "Internal server error")
    ),
    params(
        ("hash" = String, Path, description = "Torrent Hash")
    )
)]
pub async fn get_peers_handler(
    State(state): State<AppState>,
    Path(hash): Path<String>,
) -> impl IntoResponse {
    let client = xmlrpc::RtorrentClient::new(&state.scgi_socket_path);
    // p.multicall: hash, "" (matches the sibling multicall handlers — presumably
    // "no filter"; TODO confirm), then one accessor per column. The accessor
    // order fixes the row indices used below.
    let params = vec![
        RpcParam::from(hash.as_str()),
        RpcParam::from(""),
        RpcParam::from("p.address="),
        RpcParam::from("p.client_version="),
        RpcParam::from("p.down_rate="),
        RpcParam::from("p.up_rate="),
        RpcParam::from("p.completed_percent="),
    ];
    match client.call("p.multicall", &params).await {
        Ok(xml) => match xmlrpc::parse_multicall_response(&xml) {
            Ok(rows) => {
                // Missing or unparsable cells degrade to defaults rather than
                // failing the whole listing.
                let peers: Vec<TorrentPeer> = rows
                    .into_iter()
                    .map(|row| TorrentPeer {
                        ip: row.get(0).cloned().unwrap_or_default(),
                        client: row.get(1).cloned().unwrap_or_default(),
                        down_rate: row.get(2).and_then(|s| s.parse().ok()).unwrap_or(0),
                        up_rate: row.get(3).and_then(|s| s.parse().ok()).unwrap_or(0),
                        progress: row.get(4).and_then(|s| s.parse().ok()).unwrap_or(0.0),
                    })
                    .collect();
                (StatusCode::OK, Json(peers)).into_response()
            }
            Err(e) => (
                StatusCode::INTERNAL_SERVER_ERROR,
                format!("Parse error: {}", e),
            )
                .into_response(),
        },
        Err(e) => (
            StatusCode::INTERNAL_SERVER_ERROR,
            format!("RPC error: {}", e),
        )
            .into_response(),
    }
}
/// Get trackers for a torrent
#[utoipa::path(
    get,
    path = "/api/torrents/{hash}/trackers",
    responses(
        (status = 200, description = "Trackers list", body = Vec<TorrentTracker>),
        (status = 500, description = "Internal server error")
    ),
    params(
        ("hash" = String, Path, description = "Torrent Hash")
    )
)]
pub async fn get_trackers_handler(
    State(state): State<AppState>,
    Path(hash): Path<String>,
) -> impl IntoResponse {
    let client = xmlrpc::RtorrentClient::new(&state.scgi_socket_path);
    // t.multicall: hash, "", then one accessor per column; the accessor order
    // fixes the row indices used below. t.activity_date_last (row index 1) is
    // fetched but currently unused — see the `status` field note.
    let params = vec![
        RpcParam::from(hash.as_str()),
        RpcParam::from(""),
        RpcParam::from("t.url="),
        RpcParam::from("t.activity_date_last="),
        RpcParam::from("t.message="),
    ];
    match client.call("t.multicall", &params).await {
        Ok(xml) => {
            match xmlrpc::parse_multicall_response(&xml) {
                Ok(rows) => {
                    let trackers: Vec<TorrentTracker> = rows
                        .into_iter()
                        .map(|row| {
                            TorrentTracker {
                                url: row.get(0).cloned().unwrap_or_default(),
                                // TODO(review): status is hard-coded; it could
                                // presumably be derived from
                                // t.activity_date_last (row 1) — confirm.
                                status: "Unknown".to_string(),
                                message: row.get(2).cloned().unwrap_or_default(),
                            }
                        })
                        .collect();
                    (StatusCode::OK, Json(trackers)).into_response()
                }
                Err(e) => (
                    StatusCode::INTERNAL_SERVER_ERROR,
                    format!("Parse error: {}", e),
                )
                    .into_response(),
            }
        }
        Err(e) => (
            StatusCode::INTERNAL_SERVER_ERROR,
            format!("RPC error: {}", e),
        )
            .into_response(),
    }
}
/// Set file priority
///
/// Priority values: 0 (off), 1 (normal), 2 (high).
#[utoipa::path(
    post,
    path = "/api/torrents/files/priority",
    request_body = SetFilePriorityRequest,
    responses(
        (status = 200, description = "Priority updated"),
        (status = 500, description = "Internal server error")
    )
)]
pub async fn set_file_priority_handler(
    State(state): State<AppState>,
    Json(payload): Json<SetFilePriorityRequest>,
) -> impl IntoResponse {
    let client = xmlrpc::RtorrentClient::new(&state.scgi_socket_path);
    // Target syntax "HASH:fINDEX" addresses a single file within the torrent;
    // the priority itself is sent as an integer parameter.
    let target = format!("{}:f{}", payload.hash, payload.file_index);
    let params = vec![
        RpcParam::from(target.as_str()),
        RpcParam::from(payload.priority as i64),
    ];
    match client.call("f.set_priority", &params).await {
        Ok(_) => {
            // Follow-up call presumably makes rTorrent re-apply the changed
            // priorities (TODO confirm); its failure is deliberately ignored
            // since the priority value itself has already been set.
            let _ = client
                .call(
                    "d.update_priorities",
                    &[RpcParam::from(payload.hash.as_str())],
                )
                .await;
            (StatusCode::OK, "Priority updated").into_response()
        }
        Err(e) => (
            StatusCode::INTERNAL_SERVER_ERROR,
            format!("RPC error: {}", e),
        )
            .into_response(),
    }
}
/// Set torrent label
#[utoipa::path(
    post,
    path = "/api/torrents/label",
    request_body = SetLabelRequest,
    responses(
        (status = 200, description = "Label updated"),
        (status = 500, description = "Internal server error")
    )
)]
pub async fn set_label_handler(
    State(state): State<AppState>,
    Json(payload): Json<SetLabelRequest>,
) -> impl IntoResponse {
    let client = xmlrpc::RtorrentClient::new(&state.scgi_socket_path);
    // The label is stored in rTorrent's d.custom1 field.
    let args = vec![
        RpcParam::from(payload.hash.as_str()),
        RpcParam::from(payload.label),
    ];
    if let Err(e) = client.call("d.custom1.set", &args).await {
        return (
            StatusCode::INTERNAL_SERVER_ERROR,
            format!("RPC error: {}", e),
        )
            .into_response();
    }
    (StatusCode::OK, "Label updated").into_response()
}
/// Get global speed limits
#[utoipa::path(
    get,
    path = "/api/settings/global-limits",
    responses(
        (status = 200, description = "Current limits", body = GlobalLimitRequest),
        (status = 500, description = "Internal server error")
    )
)]
pub async fn get_global_limit_handler(State(state): State<AppState>) -> impl IntoResponse {
    let client = xmlrpc::RtorrentClient::new(&state.scgi_socket_path);

    // Query a single throttle rate. -1 marks "RPC call failed"; an
    // unparsable payload falls back to 0.
    async fn rate(client: &xmlrpc::RtorrentClient, method: &str) -> i64 {
        match client.call(method, &[]).await {
            Ok(xml) => xmlrpc::parse_i64_response(&xml).unwrap_or(0),
            Err(_) => -1,
        }
    }

    let down = rate(&client, "throttle.global_down.max_rate").await;
    let up = rate(&client, "throttle.global_up.max_rate").await;

    let resp = GlobalLimitRequest {
        max_download_rate: Some(down),
        max_upload_rate: Some(up),
    };
    (StatusCode::OK, Json(resp)).into_response()
}
/// Set global speed limits
#[utoipa::path(
    post,
    path = "/api/settings/global-limits",
    request_body = GlobalLimitRequest,
    responses(
        (status = 200, description = "Limits updated"),
        (status = 500, description = "Internal server error")
    )
)]
pub async fn set_global_limit_handler(
    State(state): State<AppState>,
    Json(payload): Json<GlobalLimitRequest>,
) -> impl IntoResponse {
    let client = xmlrpc::RtorrentClient::new(&state.scgi_socket_path);
    // Use throttle.global_*.max_rate.set_kb which is more reliable than .set
    // (which is buggy). set_kb expects KB/s, so bytes/s are divided by 1024.
    // The first RPC param is the throttle name; "" addresses the global throttle.
    if let Some(bytes_per_sec) = payload.max_download_rate {
        let kb = bytes_per_sec / 1024;
        tracing::info!(
            "Setting download limit: {} bytes/s = {} KB/s",
            bytes_per_sec,
            kb
        );
        let call = client.call(
            "throttle.global_down.max_rate.set_kb",
            &[RpcParam::from(""), RpcParam::Int(kb)],
        );
        if let Err(e) = call.await {
            tracing::error!("Failed to set download limit: {}", e);
            return (
                StatusCode::INTERNAL_SERVER_ERROR,
                format!("Failed to set down limit: {}", e),
            )
                .into_response();
        }
    }
    if let Some(bytes_per_sec) = payload.max_upload_rate {
        let kb = bytes_per_sec / 1024;
        tracing::info!("Setting upload limit: {} bytes/s = {} KB/s", bytes_per_sec, kb);
        let call = client.call(
            "throttle.global_up.max_rate.set_kb",
            &[RpcParam::from(""), RpcParam::Int(kb)],
        );
        if let Err(e) = call.await {
            tracing::error!("Failed to set upload limit: {}", e);
            return (
                StatusCode::INTERNAL_SERVER_ERROR,
                format!("Failed to set up limit: {}", e),
            )
                .into_response();
        }
    }
    (StatusCode::OK, "Limits updated").into_response()
}
pub async fn handle_timeout_error(err: BoxError) -> (StatusCode, &'static str) { pub async fn handle_timeout_error(err: BoxError) -> (StatusCode, &'static str) {
if err.is::<tower::timeout::error::Elapsed>() { if err.is::<tower::timeout::error::Elapsed>() {
(StatusCode::REQUEST_TIMEOUT, "Request timed out") (StatusCode::REQUEST_TIMEOUT, "Request timed out")
@@ -668,43 +50,20 @@ pub async fn handle_timeout_error(err: BoxError) -> (StatusCode, &'static str) {
} }
} }
// --- PUSH NOTIFICATION HANDLERS ---
#[cfg(feature = "push-notifications")] #[cfg(feature = "push-notifications")]
/// Get VAPID public key for push subscription
#[utoipa::path(
get,
path = "/api/push/public-key",
responses(
(status = 200, description = "VAPID public key", body = String)
)
)]
pub async fn get_push_public_key_handler( pub async fn get_push_public_key_handler(
State(state): State<AppState>, axum::extract::State(state): axum::extract::State<crate::AppState>,
) -> impl IntoResponse { ) -> impl IntoResponse {
let public_key = state.push_store.get_public_key(); let public_key = state.push_store.get_public_key();
(StatusCode::OK, Json(serde_json::json!({ "publicKey": public_key }))).into_response() (StatusCode::OK, axum::extract::Json(serde_json::json!({ "publicKey": public_key }))).into_response()
} }
#[cfg(feature = "push-notifications")] #[cfg(feature = "push-notifications")]
/// Subscribe to push notifications
#[utoipa::path(
post,
path = "/api/push/subscribe",
request_body = push::PushSubscription,
responses(
(status = 200, description = "Subscription saved"),
(status = 400, description = "Invalid subscription data")
)
)]
pub async fn subscribe_push_handler( pub async fn subscribe_push_handler(
State(state): State<AppState>, axum::extract::State(state): axum::extract::State<crate::AppState>,
Json(subscription): Json<push::PushSubscription>, axum::extract::Json(subscription): axum::extract::Json<crate::push::PushSubscription>,
) -> impl IntoResponse { ) -> impl IntoResponse {
tracing::info!("Received push subscription: {:?}", subscription); tracing::info!("Received push subscription: {:?}", subscription);
state.push_store.add_subscription(subscription).await; state.push_store.add_subscription(subscription).await;
(StatusCode::OK, "Subscription saved").into_response() (StatusCode::OK, "Subscription saved").into_response()
} }

View File

@@ -107,16 +107,6 @@ struct Args {
#[derive(OpenApi)] #[derive(OpenApi)]
#[openapi( #[openapi(
paths( paths(
handlers::add_torrent_handler,
handlers::handle_torrent_action,
handlers::get_version_handler,
handlers::get_files_handler,
handlers::get_peers_handler,
handlers::get_trackers_handler,
handlers::set_file_priority_handler,
handlers::set_label_handler,
handlers::get_global_limit_handler,
handlers::set_global_limit_handler,
handlers::get_push_public_key_handler, handlers::get_push_public_key_handler,
handlers::subscribe_push_handler, handlers::subscribe_push_handler,
handlers::auth::login_handler, handlers::auth::login_handler,
@@ -156,16 +146,6 @@ struct ApiDoc;
#[derive(OpenApi)] #[derive(OpenApi)]
#[openapi( #[openapi(
paths( paths(
handlers::add_torrent_handler,
handlers::handle_torrent_action,
handlers::get_version_handler,
handlers::get_files_handler,
handlers::get_peers_handler,
handlers::get_trackers_handler,
handlers::set_file_priority_handler,
handlers::set_label_handler,
handlers::get_global_limit_handler,
handlers::set_global_limit_handler,
handlers::auth::login_handler, handlers::auth::login_handler,
handlers::auth::logout_handler, handlers::auth::logout_handler,
handlers::auth::check_auth_handler, handlers::auth::check_auth_handler,
@@ -336,7 +316,7 @@ async fn main() {
}; };
#[cfg(not(feature = "push-notifications"))] #[cfg(not(feature = "push-notifications"))]
let push_store = (); let _push_store = ();
let notify_poll = Arc::new(tokio::sync::Notify::new()); let notify_poll = Arc::new(tokio::sync::Notify::new());
@@ -488,7 +468,8 @@ async fn main() {
#[cfg(feature = "swagger")] #[cfg(feature = "swagger")]
let app = app.merge(SwaggerUi::new("/swagger-ui").url("/api-docs/openapi.json", ApiDoc::openapi())); let app = app.merge(SwaggerUi::new("/swagger-ui").url("/api-docs/openapi.json", ApiDoc::openapi()));
// Setup & Auth Routes // Setup & Auth Routes (cookie-based, stay as REST)
let scgi_path_for_ctx = args.socket.clone();
let app = app let app = app
.route("/api/setup/status", get(handlers::setup::get_setup_status_handler)) .route("/api/setup/status", get(handlers::setup::get_setup_status_handler))
.route("/api/setup", post(handlers::setup::setup_handler)) .route("/api/setup", post(handlers::setup::setup_handler))
@@ -500,37 +481,21 @@ async fn main() {
) )
.route("/api/auth/logout", post(handlers::auth::logout_handler)) .route("/api/auth/logout", post(handlers::auth::logout_handler))
.route("/api/auth/check", get(handlers::auth::check_auth_handler)) .route("/api/auth/check", get(handlers::auth::check_auth_handler))
// App Routes
.route("/api/events", get(sse::sse_handler)) .route("/api/events", get(sse::sse_handler))
.route("/api/torrents/add", post(handlers::add_torrent_handler)) .route("/api/server_fns/{*fn_name}", post({
.route( let scgi_path = scgi_path_for_ctx.clone();
"/api/torrents/action", move |req: Request<Body>| {
post(handlers::handle_torrent_action), leptos_axum::handle_server_fns_with_context(
move || {
leptos::context::provide_context(shared::ServerContext {
scgi_socket_path: scgi_path.clone(),
});
},
req,
) )
.route("/api/system/version", get(handlers::get_version_handler)) }
.route( }))
"/api/torrents/{hash}/files", .fallback(handlers::static_handler);
get(handlers::get_files_handler),
)
.route(
"/api/torrents/{hash}/peers",
get(handlers::get_peers_handler),
)
.route(
"/api/torrents/{hash}/trackers",
get(handlers::get_trackers_handler),
)
.route(
"/api/torrents/files/priority",
post(handlers::set_file_priority_handler),
)
.route("/api/torrents/label", post(handlers::set_label_handler))
.route(
"/api/settings/global-limits",
get(handlers::get_global_limit_handler).post(handlers::set_global_limit_handler),
)
.route("/api/server_fns/{*fn_name}", post(leptos_axum::handle_server_fns))
.fallback(handlers::static_handler); // Serve static files for everything else
#[cfg(feature = "push-notifications")] #[cfg(feature = "push-notifications")]
let app = app let app = app

View File

@@ -1,5 +1,4 @@
use gloo_net::http::Request; use gloo_net::http::Request;
use shared::{AddTorrentRequest, TorrentActionRequest};
use thiserror::Error; use thiserror::Error;
#[derive(Debug, Error)] #[derive(Debug, Error)]
@@ -14,6 +13,8 @@ pub enum ApiError {
Unauthorized, Unauthorized,
#[error("Too many requests")] #[error("Too many requests")]
RateLimited, RateLimited,
#[error("Server function error: {0}")]
ServerFn(String),
} }
fn base_url() -> String { fn base_url() -> String {
@@ -130,13 +131,12 @@ pub mod settings {
use shared::GlobalLimitRequest; use shared::GlobalLimitRequest;
pub async fn set_global_limits(req: &GlobalLimitRequest) -> Result<(), ApiError> { pub async fn set_global_limits(req: &GlobalLimitRequest) -> Result<(), ApiError> {
Request::post(&format!("{}/settings/global-limits", base_url())) shared::server_fns::settings::set_global_limits(
.json(req) req.max_download_rate,
.map_err(|_| ApiError::Network)? req.max_upload_rate,
.send() )
.await .await
.map_err(|_| ApiError::Network)?; .map_err(|e| ApiError::ServerFn(e.to_string()))
Ok(())
} }
} }
@@ -168,30 +168,16 @@ pub mod torrent {
use super::*; use super::*;
pub async fn add(uri: &str) -> Result<(), ApiError> { pub async fn add(uri: &str) -> Result<(), ApiError> {
let req = AddTorrentRequest { shared::server_fns::torrent::add_torrent(uri.to_string())
uri: uri.to_string(),
};
Request::post(&format!("{}/torrents/add", base_url()))
.json(&req)
.map_err(|_| ApiError::Network)?
.send()
.await .await
.map_err(|_| ApiError::Network)?; .map_err(|e| ApiError::ServerFn(e.to_string()))
Ok(())
} }
pub async fn action(hash: &str, action: &str) -> Result<(), ApiError> { pub async fn action(hash: &str, action: &str) -> Result<(), ApiError> {
let req = TorrentActionRequest { shared::server_fns::torrent::torrent_action(hash.to_string(), action.to_string())
hash: hash.to_string(),
action: action.to_string(),
};
Request::post(&format!("{}/torrents/action", base_url()))
.json(&req)
.map_err(|_| ApiError::Network)?
.send()
.await .await
.map_err(|_| ApiError::Network)?; .map(|_| ())
Ok(()) .map_err(|e| ApiError::ServerFn(e.to_string()))
} }
pub async fn delete(hash: &str) -> Result<(), ApiError> { pub async fn delete(hash: &str) -> Result<(), ApiError> {
@@ -211,33 +197,18 @@ pub mod torrent {
} }
pub async fn set_label(hash: &str, label: &str) -> Result<(), ApiError> { pub async fn set_label(hash: &str, label: &str) -> Result<(), ApiError> {
use shared::SetLabelRequest; shared::server_fns::torrent::set_label(hash.to_string(), label.to_string())
let req = SetLabelRequest {
hash: hash.to_string(),
label: label.to_string(),
};
Request::post(&format!("{}/torrents/set_label", base_url()))
.json(&req)
.map_err(|_| ApiError::Network)?
.send()
.await .await
.map_err(|_| ApiError::Network)?; .map_err(|e| ApiError::ServerFn(e.to_string()))
Ok(())
} }
pub async fn set_priority(hash: &str, file_index: u32, priority: u8) -> Result<(), ApiError> { pub async fn set_priority(hash: &str, file_index: u32, priority: u8) -> Result<(), ApiError> {
use shared::SetFilePriorityRequest; shared::server_fns::torrent::set_file_priority(
let req = SetFilePriorityRequest { hash.to_string(),
hash: hash.to_string(),
file_index, file_index,
priority, priority,
}; )
Request::post(&format!("{}/torrents/set_priority", base_url()))
.json(&req)
.map_err(|_| ApiError::Network)?
.send()
.await .await
.map_err(|_| ApiError::Network)?; .map_err(|e| ApiError::ServerFn(e.to_string()))
Ok(())
} }
} }

View File

@@ -9,6 +9,11 @@ pub mod xmlrpc;
pub mod server_fns; pub mod server_fns;
#[derive(Clone, Debug)]
pub struct ServerContext {
pub scgi_socket_path: String,
}
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq, ToSchema)] #[derive(Debug, Serialize, Deserialize, Clone, PartialEq, ToSchema)]
pub struct Torrent { pub struct Torrent {
pub hash: String, pub hash: String,

View File

@@ -1,26 +1,2 @@
use leptos::*; pub mod torrent;
use leptos::prelude::*; pub mod settings;
#[cfg(feature = "ssr")]
use crate::xmlrpc::{self, RtorrentClient};
/// Fetch the rTorrent client version string via `system.client_version`.
///
/// Fix: the `RTORRENT_SOCKET` lookup previously sat outside the
/// `#[cfg(feature = "ssr")]` block, so it was evaluated (and the binding
/// warned as unused) on non-ssr builds; it is now server-only.
#[server(GetVersion, "/api/server_fns")]
pub async fn get_version() -> Result<String, ServerFnError> {
    #[cfg(feature = "ssr")]
    {
        // Resolve the SCGI socket only on the server side.
        let socket_path = std::env::var("RTORRENT_SOCKET")
            .unwrap_or_else(|_| "/tmp/rtorrent.sock".to_string());
        let client = RtorrentClient::new(&socket_path);
        match client.call("system.client_version", &[]).await {
            // Fall back to the raw XML if the payload cannot be parsed.
            Ok(xml) => Ok(xmlrpc::parse_string_response(&xml).unwrap_or(xml)),
            Err(e) => Err(ServerFnError::ServerError(e.to_string())),
        }
    }
    #[cfg(not(feature = "ssr"))]
    {
        // Client builds call the server function over HTTP; this body never runs.
        unreachable!()
    }
}

View File

@@ -0,0 +1,58 @@
use leptos::prelude::*;
use crate::GlobalLimitRequest;
/// Read the current global download/upload throttle rates from rTorrent.
/// -1 signals an RPC failure; an unparsable response defaults to 0.
#[server(GetGlobalLimits, "/api/server_fns")]
pub async fn get_global_limits() -> Result<GlobalLimitRequest, ServerFnError> {
    use crate::xmlrpc::{self, RtorrentClient};
    let ctx = expect_context::<crate::ServerContext>();
    let client = RtorrentClient::new(&ctx.scgi_socket_path);
    let down = client
        .call("throttle.global_down.max_rate", &[])
        .await
        .map_or(-1, |xml| xmlrpc::parse_i64_response(&xml).unwrap_or(0));
    let up = client
        .call("throttle.global_up.max_rate", &[])
        .await
        .map_or(-1, |xml| xmlrpc::parse_i64_response(&xml).unwrap_or(0));
    Ok(GlobalLimitRequest {
        max_download_rate: Some(down),
        max_upload_rate: Some(up),
    })
}
/// Update the global throttle rates. `None` leaves a direction unchanged.
/// set_kb expects KB/s; the first RPC param ("") addresses the global throttle.
#[server(SetGlobalLimits, "/api/server_fns")]
pub async fn set_global_limits(
    max_download_rate: Option<i64>,
    max_upload_rate: Option<i64>,
) -> Result<(), ServerFnError> {
    use crate::xmlrpc::{RpcParam, RtorrentClient};
    let ctx = expect_context::<crate::ServerContext>();
    let client = RtorrentClient::new(&ctx.scgi_socket_path);
    if let Some(bytes) = max_download_rate {
        let args = [RpcParam::from(""), RpcParam::Int(bytes / 1024)];
        client
            .call("throttle.global_down.max_rate.set_kb", &args)
            .await
            .map_err(|e| ServerFnError::new(format!("Failed to set down limit: {}", e)))?;
    }
    if let Some(bytes) = max_upload_rate {
        let args = [RpcParam::from(""), RpcParam::Int(bytes / 1024)];
        client
            .call("throttle.global_up.max_rate.set_kb", &args)
            .await
            .map_err(|e| ServerFnError::new(format!("Failed to set up limit: {}", e)))?;
    }
    Ok(())
}

View File

@@ -0,0 +1,274 @@
use leptos::prelude::*;
use crate::{TorrentFile, TorrentPeer, TorrentTracker};
/// Load and start a torrent from a magnet link or URL via `load.start`.
#[server(AddTorrent, "/api/server_fns")]
pub async fn add_torrent(uri: String) -> Result<(), ServerFnError> {
    use crate::xmlrpc::{RpcParam, RtorrentClient};
    let ctx = expect_context::<crate::ServerContext>();
    let client = RtorrentClient::new(&ctx.scgi_socket_path);
    let args = [RpcParam::from(""), RpcParam::from(uri.as_str())];
    let response = client
        .call("load.start", &args)
        .await
        .map_err(|e| ServerFnError::new(format!("Failed to add torrent: {}", e)))?;
    // rTorrent reports errors in-band as an XML-RPC fault payload.
    if response.contains("faultCode") {
        return Err(ServerFnError::new("rTorrent returned fault"));
    }
    Ok(())
}
#[server(TorrentAction, "/api/server_fns")]
pub async fn torrent_action(hash: String, action: String) -> Result<String, ServerFnError> {
use crate::xmlrpc::{RpcParam, RtorrentClient};
let ctx = expect_context::<crate::ServerContext>();
let client = RtorrentClient::new(&ctx.scgi_socket_path);
if action == "delete_with_data" {
return delete_torrent_with_data_inner(&client, &hash).await;
}
let method = match action.as_str() {
"start" => "d.start",
"stop" => "d.stop",
"delete" => "d.erase",
_ => return Err(ServerFnError::new("Invalid action")),
};
let params = vec![RpcParam::from(hash.as_str())];
match client.call(method, &params).await {
Ok(_) => Ok("Action executed".to_string()),
Err(e) => Err(ServerFnError::new(format!("RPC error: {}", e))),
}
}
/// Server-side helper: erase a torrent and delete its data from disk.
///
/// Safety steps:
/// 1. Resolve the torrent's base path and rTorrent's default download
///    directory, canonicalizing both (defeats `..` traversal and symlinks).
/// 2. Refuse to delete anything outside the download root, or the root itself.
/// 3. Erase the torrent entry first, then remove the file or directory tree.
#[cfg(feature = "ssr")]
async fn delete_torrent_with_data_inner(
    client: &crate::xmlrpc::RtorrentClient,
    hash: &str,
) -> Result<String, ServerFnError> {
    use crate::xmlrpc::{parse_string_response, RpcParam};
    let params_hash = vec![RpcParam::from(hash)];
    // Data location as reported by rTorrent
    let path_xml = client
        .call("d.base_path", &params_hash)
        .await
        .map_err(|e| ServerFnError::new(format!("Failed to call rTorrent: {}", e)))?;
    let path = parse_string_response(&path_xml)
        .map_err(|e| ServerFnError::new(format!("Failed to parse path: {}", e)))?;
    // Default download directory acts as the sandbox root for deletions
    let root_xml = client
        .call("directory.default", &[])
        .await
        .map_err(|e| ServerFnError::new(format!("Failed to get download root: {}", e)))?;
    let root_path_str = parse_string_response(&root_xml)
        .map_err(|e| ServerFnError::new(format!("Failed to parse root path: {}", e)))?;
    let root_path = tokio::fs::canonicalize(std::path::Path::new(&root_path_str))
        .await
        .map_err(|e| ServerFnError::new(format!("Invalid download root: {}", e)))?;
    // If the data is already gone, just remove the torrent entry —
    // canonicalize() below would fail on a missing path.
    let target_path_raw = std::path::Path::new(&path);
    if !tokio::fs::try_exists(target_path_raw).await.unwrap_or(false) {
        client
            .call("d.erase", &params_hash)
            .await
            .map_err(|e| ServerFnError::new(format!("Failed to erase torrent: {}", e)))?;
        return Ok("Torrent removed (Data not found)".to_string());
    }
    let target_path = tokio::fs::canonicalize(target_path_raw)
        .await
        .map_err(|e| ServerFnError::new(format!("Invalid data path: {}", e)))?;
    // SECURITY CHECK: canonical target must live under the download root
    if !target_path.starts_with(&root_path) {
        return Err(ServerFnError::new(
            "Security Error: Cannot delete files outside download directory",
        ));
    }
    // SECURITY CHECK: never delete the root directory itself
    if target_path == root_path {
        return Err(ServerFnError::new(
            "Security Error: Cannot delete the download root directory",
        ));
    }
    // Erase the torrent entry first, then remove the data
    client
        .call("d.erase", &params_hash)
        .await
        .map_err(|e| ServerFnError::new(format!("Failed to erase torrent: {}", e)))?;
    let delete_result = if target_path.is_dir() {
        tokio::fs::remove_dir_all(&target_path).await
    } else {
        tokio::fs::remove_file(&target_path).await
    };
    match delete_result {
        Ok(_) => Ok("Torrent and data deleted".to_string()),
        // Torrent entry is already erased at this point; only data removal failed
        Err(e) => Err(ServerFnError::new(format!("Failed to delete data: {}", e))),
    }
}
/// Server function: list the files inside a torrent via `f.multicall`.
#[server(GetFiles, "/api/server_fns")]
pub async fn get_files(hash: String) -> Result<Vec<TorrentFile>, ServerFnError> {
    use crate::xmlrpc::{parse_multicall_response, RpcParam, RtorrentClient};
    let ctx = expect_context::<crate::ServerContext>();
    let client = RtorrentClient::new(&ctx.scgi_socket_path);
    // Target hash, empty view, then one accessor per column of each row.
    let args = vec![
        RpcParam::from(hash.as_str()),
        RpcParam::from(""),
        RpcParam::from("f.path="),
        RpcParam::from("f.size_bytes="),
        RpcParam::from("f.completed_chunks="),
        RpcParam::from("f.priority="),
    ];
    let xml = client
        .call("f.multicall", &args)
        .await
        .map_err(|e| ServerFnError::new(format!("RPC error: {}", e)))?;
    let rows = parse_multicall_response(&xml)
        .map_err(|e| ServerFnError::new(format!("Parse error: {}", e)))?;
    let mut files = Vec::with_capacity(rows.len());
    for (position, fields) in rows.into_iter().enumerate() {
        // Missing or unparsable columns degrade to defaults instead of failing.
        files.push(TorrentFile {
            index: position as u32,
            path: fields.get(0).cloned().unwrap_or_default(),
            size: fields.get(1).and_then(|s| s.parse().ok()).unwrap_or(0),
            completed_chunks: fields.get(2).and_then(|s| s.parse().ok()).unwrap_or(0),
            priority: fields.get(3).and_then(|s| s.parse().ok()).unwrap_or(0),
        });
    }
    Ok(files)
}
/// Server function: list the currently connected peers of a torrent
/// via `p.multicall`.
#[server(GetPeers, "/api/server_fns")]
pub async fn get_peers(hash: String) -> Result<Vec<TorrentPeer>, ServerFnError> {
    use crate::xmlrpc::{parse_multicall_response, RpcParam, RtorrentClient};
    let ctx = expect_context::<crate::ServerContext>();
    let client = RtorrentClient::new(&ctx.scgi_socket_path);
    // Target hash, empty view, then one accessor per result column.
    let args = vec![
        RpcParam::from(hash.as_str()),
        RpcParam::from(""),
        RpcParam::from("p.address="),
        RpcParam::from("p.client_version="),
        RpcParam::from("p.down_rate="),
        RpcParam::from("p.up_rate="),
        RpcParam::from("p.completed_percent="),
    ];
    let xml = client
        .call("p.multicall", &args)
        .await
        .map_err(|e| ServerFnError::new(format!("RPC error: {}", e)))?;
    let rows = parse_multicall_response(&xml)
        .map_err(|e| ServerFnError::new(format!("Parse error: {}", e)))?;
    let peers = rows
        .into_iter()
        .map(|fields| TorrentPeer {
            // Missing/unparsable columns degrade to defaults rather than erroring.
            ip: fields.get(0).cloned().unwrap_or_default(),
            client: fields.get(1).cloned().unwrap_or_default(),
            down_rate: fields.get(2).and_then(|s| s.parse().ok()).unwrap_or(0),
            up_rate: fields.get(3).and_then(|s| s.parse().ok()).unwrap_or(0),
            progress: fields.get(4).and_then(|s| s.parse().ok()).unwrap_or(0.0),
        })
        .collect();
    Ok(peers)
}
/// Server function: list a torrent's trackers via `t.multicall`.
///
/// NOTE(review): `t.activity_date_last=` (column 1) is fetched but not used,
/// and `status` is always reported as "Unknown" — confirm whether the last
/// activity should be surfaced.
#[server(GetTrackers, "/api/server_fns")]
pub async fn get_trackers(hash: String) -> Result<Vec<TorrentTracker>, ServerFnError> {
    use crate::xmlrpc::{parse_multicall_response, RpcParam, RtorrentClient};
    let ctx = expect_context::<crate::ServerContext>();
    let client = RtorrentClient::new(&ctx.scgi_socket_path);
    let args = vec![
        RpcParam::from(hash.as_str()),
        RpcParam::from(""),
        RpcParam::from("t.url="),
        RpcParam::from("t.activity_date_last="),
        RpcParam::from("t.message="),
    ];
    let xml = client
        .call("t.multicall", &args)
        .await
        .map_err(|e| ServerFnError::new(format!("RPC error: {}", e)))?;
    let rows = parse_multicall_response(&xml)
        .map_err(|e| ServerFnError::new(format!("Parse error: {}", e)))?;
    let trackers = rows
        .into_iter()
        .map(|fields| TorrentTracker {
            url: fields.get(0).cloned().unwrap_or_default(),
            status: "Unknown".to_string(),
            message: fields.get(2).cloned().unwrap_or_default(),
        })
        .collect();
    Ok(trackers)
}
/// Server function: change the download priority of one file inside a
/// torrent. rTorrent addresses a file as `"<hash>:f<index>"`.
#[server(SetFilePriority, "/api/server_fns")]
pub async fn set_file_priority(
    hash: String,
    file_index: u32,
    priority: u8,
) -> Result<(), ServerFnError> {
    use crate::xmlrpc::{RpcParam, RtorrentClient};
    let ctx = expect_context::<crate::ServerContext>();
    let client = RtorrentClient::new(&ctx.scgi_socket_path);
    let file_target = format!("{}:f{}", hash, file_index);
    let args = [
        RpcParam::from(file_target.as_str()),
        RpcParam::from(priority as i64),
    ];
    client
        .call("f.set_priority", &args)
        .await
        .map_err(|e| ServerFnError::new(format!("RPC error: {}", e)))?;
    // Best effort: ask rTorrent to apply the new priorities right away;
    // a failure here is ignored since the priority itself was already set.
    let _ = client
        .call("d.update_priorities", &[RpcParam::from(hash.as_str())])
        .await;
    Ok(())
}
#[server(SetLabel, "/api/server_fns")]
pub async fn set_label(hash: String, label: String) -> Result<(), ServerFnError> {
use crate::xmlrpc::{RpcParam, RtorrentClient};
let ctx = expect_context::<crate::ServerContext>();
let client = RtorrentClient::new(&ctx.scgi_socket_path);
let params = vec![RpcParam::from(hash.as_str()), RpcParam::from(label)];
client
.call("d.custom1.set", &params)
.await
.map_err(|e| ServerFnError::new(format!("RPC error: {}", e)))?;
Ok(())
}
/// Server function: report the rTorrent client version string.
#[server(GetVersion, "/api/server_fns")]
pub async fn get_version() -> Result<String, ServerFnError> {
    use crate::xmlrpc::{parse_string_response, RtorrentClient};
    let ctx = expect_context::<crate::ServerContext>();
    let client = RtorrentClient::new(&ctx.scgi_socket_path);
    let xml = client
        .call("system.client_version", &[])
        .await
        .map_err(|e| ServerFnError::new(format!("Failed to get version: {}", e)))?;
    // If the payload cannot be parsed as a string, fall back to the raw XML.
    Ok(parse_string_response(&xml).unwrap_or(xml))
}

View File

@@ -1 +0,0 @@
{"v":1}

View File

@@ -1,6 +0,0 @@
{
"git": {
"sha1": "831c97016aa3d8f7851999aa1deea8407e7cbd42"
},
"path_in_vcs": ""
}

View File

@@ -1,8 +0,0 @@
version: 2
updates:
- package-ecosystem: cargo
directory: "/"
schedule:
interval: daily
time: "04:00"
open-pull-requests-limit: 10

View File

@@ -1,17 +0,0 @@
name: Close inactive issues
on:
schedule:
- cron: "30 1 * * *"
jobs:
close-issues:
runs-on: ubuntu-latest
permissions:
issues: write
pull-requests: write
steps:
- uses: actions/stale@v9
with:
stale-issue-message: "This issue is stale because it has been open for 30 days with no activity."
close-issue-message: "This issue was closed because it has been inactive for 14 days since being marked as stale."
repo-token: ${{ secrets.GITHUB_TOKEN }}

View File

@@ -1,4 +0,0 @@
target
Cargo.lock
.vscode
zig-cache

View File

@@ -1,82 +0,0 @@
# THIS FILE IS AUTOMATICALLY GENERATED BY CARGO
#
# When uploading crates to the registry Cargo will automatically
# "normalize" Cargo.toml files for maximal compatibility
# with all versions of Cargo and also rewrite `path` dependencies
# to registry (e.g., crates.io) dependencies.
#
# If you are reading this file be aware that the original Cargo.toml
# will likely look very different (and much more reasonable).
# See Cargo.toml.orig for the original contents.
[package]
edition = "2018"
name = "coarsetime"
version = "0.1.37"
authors = ["Frank Denis <github@pureftpd.org>"]
build = false
autolib = false
autobins = false
autoexamples = false
autotests = false
autobenches = false
description = "Time and duration crate optimized for speed"
homepage = "https://github.com/jedisct1/rust-coarsetime"
readme = "README.md"
keywords = [
"time",
"date",
"duration",
]
categories = [
"concurrency",
"date-and-time",
"os",
]
license = "BSD-2-Clause"
repository = "https://github.com/jedisct1/rust-coarsetime"
[features]
wasi-abi2 = ["dep:wasi-abi2"]
[lib]
name = "coarsetime"
path = "src/lib.rs"
[[bench]]
name = "benchmark"
path = "benches/benchmark.rs"
harness = false
[dev-dependencies.benchmark-simple]
version = "0.1.10"
[dependencies.portable-atomic]
version = "1.6"
[target.'cfg(all(any(target_arch = "wasm32", target_arch = "wasm64"), target_os = "unknown"))'.dependencies.wasm-bindgen]
version = "0.2"
[target.'cfg(any(target_os = "wasix", target_os = "wasi"))'.dependencies.wasix]
version = "0.13"
[target.'cfg(not(any(target_os = "wasix", target_os = "wasi")))'.dependencies.libc]
version = "0.2"
[target.'cfg(target_os = "wasi")'.dependencies.wasi-abi2]
version = "0.14.7"
optional = true
package = "wasi"
[profile.bench]
codegen-units = 1
[profile.dev]
overflow-checks = true
[profile.release]
opt-level = 3
lto = true
codegen-units = 1
panic = "abort"
incremental = false

View File

@@ -1,47 +0,0 @@
[package]
name = "coarsetime"
version = "0.1.37"
description = "Time and duration crate optimized for speed"
authors = ["Frank Denis <github@pureftpd.org>"]
keywords = ["time", "date", "duration"]
readme = "README.md"
license = "BSD-2-Clause"
homepage = "https://github.com/jedisct1/rust-coarsetime"
repository = "https://github.com/jedisct1/rust-coarsetime"
categories = ["concurrency", "date-and-time", "os"]
edition = "2018"
[features]
wasi-abi2 = ["dep:wasi-abi2"]
[target.'cfg(not(any(target_os = "wasix", target_os = "wasi")))'.dependencies]
libc = "0.2"
[target.'cfg(target_os = "wasi")'.dependencies]
wasi-abi2 = { package = "wasi", version = "0.14.7", optional = true }
[target.'cfg(any(target_os = "wasix", target_os = "wasi"))'.dependencies]
wasix = "0.13"
[target.'cfg(all(any(target_arch = "wasm32", target_arch = "wasm64"), target_os = "unknown"))'.dependencies]
wasm-bindgen = "0.2"
[dev-dependencies]
benchmark-simple = "0.1.10"
[profile.bench]
codegen-units = 1
[[bench]]
name = "benchmark"
harness = false
[profile.release]
lto = true
panic = "abort"
opt-level = 3
codegen-units = 1
incremental = false
[profile.dev]
overflow-checks=true

View File

@@ -1,25 +0,0 @@
BSD 2-Clause License
Copyright (c) 2016-2026, Frank Denis
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright notice, this
list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

View File

@@ -1,90 +0,0 @@
[![Documentation](https://docs.rs/coarsetime/badge.svg)](https://docs.rs/coarsetime)
[![Windows build status](https://ci.appveyor.com/api/projects/status/xlbhk9850dvl5ylh?svg=true)](https://ci.appveyor.com/project/jedisct1/rust-coarsetime)
# coarsetime
A Rust crate for making time measurements, with a focus on speed, API stability and portability.
This crate is a partial replacement for the `Time` and `Duration` structures
from the standard library, with the following differences:
* Speed is privileged over accuracy. In particular, `CLOCK_MONOTONIC_COARSE` is
used to retrieve the clock value on Linux systems, and transformations avoid
operations that can be slow on non-Intel systems.
* The number of system calls can be kept to a minimum. The "most recent
timestamp" is always kept in memory. It can be read with just a load operation,
and can be updated only as frequently as necessary.
* The API is stable, and the same for all platforms. Unlike the standard library, it doesn't silently compile functions that do nothing but panic at runtime on some platforms.
# Installation
`coarsetime` is available on [crates.io](https://crates.io/crates/coarsetime)
and works on Rust stable, beta, and nightly.
Windows and Unix-like systems are supported.
Available feature:
* `wasi-abi2`: when targeting WASI, use the second preview of the ABI. Default is to use the regular WASI-core ABI.
# Documentation
[API documentation](https://docs.rs/coarsetime)
# Example
```rust
extern crate coarsetime;
use coarsetime::{Clock, Duration, Instant, Updater};
// Get the current instant. This may require a system call, but it may also
// be faster than the stdlib equivalent.
let now = Instant::now();
// Get the latest known instant. This operation is super fast.
// In this case, the value will be identical to `now`, because we haven't
// updated the latest known instant yet.
let ts1 = Instant::recent();
// Update the latest known instant. This may require a system call.
// Note that a call to `Instant::now()` also updates the stored instant.
Instant::update();
// Now, we may get a different instant. This call is also super fast.
let ts2 = Instant::recent();
// Compute the time elapsed between ts2 and ts1.
let elapsed_ts2_ts1 = ts2.duration_since(ts1);
// Operations such as `+` and `-` between `Instant` and `Duration` are also
// available.
let elapsed_ts2_ts1 = ts2 - ts1;
// Returns the time elapsed since ts1.
// This retrieves the actual current time, and may require a system call.
let elapsed_since_ts1 = ts1.elapsed();
// Returns the approximate time elapsed since ts1.
// This uses the latest known instant, and is super fast.
let elapsed_since_recent = ts1.elapsed_since_recent();
// Instant::update() should be called periodically, for example using an
// event loop. Alternatively, the crate provides an easy way to spawn a
// background task that will periodically update the latest known instant.
// Here, the update will happen every 250ms.
let updater = Updater::new(250).start().unwrap();
// From now on, Instant::recent() will always return an approximation of the
// current instant.
let ts3 = Instant::recent();
// Stop the task.
updater.stop().unwrap();
// Returns the elapsed time since the UNIX epoch
let unix_timestamp = Clock::now_since_epoch();
// Returns an approximation of the elapsed time since the UNIX epoch, based on
// the latest time update
let unix_timestamp_approx = Clock::recent_since_epoch();
```

View File

@@ -1,61 +0,0 @@
use benchmark_simple::*;
use coarsetime::*;
use std::time;
// Entry point: runs each coarsetime benchmark, then the stdlib equivalents
// for comparison. Output is one throughput line per benchmark.
fn main() {
    let options = &Options {
        iterations: 250_000,
        warmup_iterations: 25_000,
        min_samples: 5,
        max_samples: 10,
        max_rsd: 1.0,
        ..Default::default()
    };
    bench_coarsetime_now(options);
    bench_coarsetime_recent(options);
    bench_coarsetime_elapsed(options);
    bench_coarsetime_elapsed_since_recent(options);
    bench_stdlib_now(options);
    bench_stdlib_elapsed(options);
}
// Cost of a fresh clock read (also updates the cached instant).
fn bench_coarsetime_now(options: &Options) {
    let b = Bench::new();
    Instant::update();
    let res = b.run(options, Instant::now);
    println!("coarsetime_now(): {}", res.throughput(1));
}
// Cost of reading the cached instant (no system call expected).
fn bench_coarsetime_recent(options: &Options) {
    let b = Bench::new();
    Instant::update();
    let res = b.run(options, Instant::recent);
    println!("coarsetime_recent(): {}", res.throughput(1));
}
// Cost of measuring elapsed time against a fresh clock read.
fn bench_coarsetime_elapsed(options: &Options) {
    let b = Bench::new();
    let ts = Instant::now();
    let res = b.run(options, || ts.elapsed());
    println!("coarsetime_elapsed(): {}", res.throughput(1));
}
// Cost of measuring elapsed time against the cached instant.
fn bench_coarsetime_elapsed_since_recent(options: &Options) {
    let b = Bench::new();
    let ts = Instant::now();
    let res = b.run(options, || ts.elapsed_since_recent());
    println!("coarsetime_since_recent(): {}", res.throughput(1));
}
// Baseline: std::time::Instant::now().
fn bench_stdlib_now(options: &Options) {
    let b = Bench::new();
    let res = b.run(options, time::Instant::now);
    println!("stdlib_now(): {}", res.throughput(1));
}
// Baseline: std::time::Instant::elapsed().
fn bench_stdlib_elapsed(options: &Options) {
    let b = Bench::new();
    let ts = time::Instant::now();
    let res = b.run(options, || ts.elapsed());
    println!("stdlib_elapsed(): {}", res.throughput(1));
}

View File

@@ -1,93 +0,0 @@
#[cfg(not(all(
any(target_arch = "wasm32", target_arch = "wasm64"),
target_os = "unknown"
)))]
use std::time;
#[cfg(target_has_atomic = "64")]
use std::sync::atomic::{AtomicU64, Ordering};
#[cfg(not(target_has_atomic = "64"))]
use portable_atomic::{AtomicU64, Ordering};
use super::Duration;
static RECENT: AtomicU64 = AtomicU64::new(0);
#[cfg(all(
any(target_arch = "wasm32", target_arch = "wasm64"),
target_os = "unknown"
))]
mod js_imports {
use wasm_bindgen::prelude::*;
#[wasm_bindgen]
extern "C" {
pub type Date;
#[wasm_bindgen(static_method_of = Date)]
pub fn now() -> f64;
}
}
/// System time
#[derive(Debug)]
pub struct Clock;
/// Alias for `Duration`.
pub type UnixTimeStamp = Duration;
impl Clock {
    /// Returns the elapsed time since the UNIX epoch
    // Performs a fresh clock read via `unix_ts()`.
    #[inline]
    pub fn now_since_epoch() -> UnixTimeStamp {
        Duration::from_u64(unix_ts())
    }
    /// Returns the elapsed time since the UNIX epoch, based on the latest
    /// explicit time update
    // Only a relaxed atomic load of the cached tick value — no system call.
    #[inline]
    pub fn recent_since_epoch() -> UnixTimeStamp {
        Duration::from_u64(RECENT.load(Ordering::Relaxed))
    }
    /// Updates the cached system time.
    ///
    /// This function should be called frequently, for example in an event loop
    /// or using an `Updater` task.
    #[inline]
    pub fn update() {
        let now = unix_ts();
        RECENT.store(now, Ordering::Relaxed)
    }
    /// Sets the cached system time to the specified timestamp.
    /// This function is intended for testing purposes only.
    /// It should not be used in production code.
    // Overwrites the shared cache; subsequent `recent_since_epoch()` calls
    // return this value until the next `update()`.
    pub fn set_recent_since_epoch(recent: UnixTimeStamp) {
        RECENT.store(recent.as_u64(), Ordering::Relaxed)
    }
}
#[cfg(all(
    any(target_arch = "wasm32", target_arch = "wasm64"),
    target_os = "unknown"
))]
// Browser-wasm variant: derives the timestamp from JavaScript `Date.now()`
// (milliseconds), rounded to whole seconds.
#[inline]
fn unix_ts() -> u64 {
    let unix_ts_now_sys = (js_imports::Date::now() / 1000.0).round() as u64;
    let unix_ts_now = Duration::from_secs(unix_ts_now_sys);
    unix_ts_now.as_u64()
}
#[cfg(not(all(
    any(target_arch = "wasm32", target_arch = "wasm64"),
    target_os = "unknown"
)))]
// Native variant: uses `SystemTime`; panics if the system clock reads
// before the UNIX epoch (see the `expect`).
#[inline]
fn unix_ts() -> u64 {
    let unix_ts_now_sys = time::SystemTime::now()
        .duration_since(time::UNIX_EPOCH)
        .expect("The system clock is not properly set");
    let unix_ts_now = Duration::from(unix_ts_now_sys);
    unix_ts_now.as_u64()
}

View File

@@ -1,270 +0,0 @@
use std::convert::From;
use std::ops::*;
use std::time;
use super::helpers::*;
/// A duration type to represent an approximate span of time
// Internally a fixed-point "tick" value: whole seconds in the high 32 bits,
// the fractional part in the low 32 bits (see `as_secs` / `subsec_nanos`).
#[derive(Copy, Clone, Debug, Hash, Ord, Eq, PartialOrd, PartialEq, Default)]
pub struct Duration(u64);
impl Duration {
    /// Creates a new `Duration` from the specified number of seconds and
    /// additional nanosecond precision
    #[inline]
    pub const fn new(sec: u64, nanos: u32) -> Duration {
        Duration(_timespec_to_u64(sec, nanos))
    }
    /// Creates a new Duration from the specified number of days
    // Note: `days * 86400` itself is unchecked; only the tick conversion saturates.
    #[inline]
    pub const fn from_days(days: u64) -> Duration {
        Duration(_sec_to_u64(days * 86400))
    }
    /// Creates a new Duration from the specified number of hours
    #[inline]
    pub const fn from_hours(hours: u64) -> Duration {
        Duration(_sec_to_u64(hours * 3600))
    }
    /// Creates a new Duration from the specified number of minutes
    #[inline]
    pub const fn from_mins(mins: u64) -> Duration {
        Duration(_sec_to_u64(mins * 60))
    }
    /// Creates a new Duration from the specified number of seconds
    #[inline]
    pub const fn from_secs(secs: u64) -> Duration {
        Duration(_sec_to_u64(secs))
    }
    /// Creates a new Duration from the specified number of milliseconds
    #[inline]
    pub const fn from_millis(millis: u64) -> Duration {
        Duration(_millis_to_u64(millis))
    }
    /// Returns the number of days represented by this duration
    #[inline]
    pub const fn as_days(&self) -> u64 {
        self.as_secs() / 86400
    }
    /// Returns the number of hours represented by this duration
    #[inline]
    pub const fn as_hours(&self) -> u64 {
        self.as_secs() / 3600
    }
    /// Returns the number of minutes represented by this duration
    #[inline]
    pub const fn as_mins(&self) -> u64 {
        self.as_secs() / 60
    }
    /// Returns the number of whole seconds represented by this duration
    // Whole seconds live in the high 32 bits of the tick value.
    #[inline]
    pub const fn as_secs(&self) -> u64 {
        self.0 >> 32
    }
    /// Returns the number of whole milliseconds represented by this duration
    // Equivalent to (ticks * 1_000) >> 32, with the factors reduced to 125 / 2^29.
    #[inline]
    pub const fn as_millis(&self) -> u64 {
        ((self.0 as u128 * 125) >> 29) as u64
    }
    /// Returns the number of whole microseconds represented by this duration
    #[inline]
    pub const fn as_micros(&self) -> u64 {
        ((self.0 as u128 * 125_000) >> 29) as u64
    }
    /// Returns the number of whole nanoseconds represented by this duration
    #[inline]
    pub const fn as_nanos(&self) -> u64 {
        ((self.0 as u128 * 125_000_000) >> 29) as u64
    }
    /// Returns the nanosecond precision represented by this duration
    // Scales only the low (fractional) 32 bits, discarding whole seconds.
    #[inline]
    pub const fn subsec_nanos(&self) -> u32 {
        ((self.0 as u32 as u64 * 125_000_000) >> 29) as u32
    }
    /// Return this duration as a number of "ticks".
    ///
    /// Note that length of a 'tick' is not guaranteed to represent
    /// the same amount of time across different platforms, or from
    /// one version of `coarsetime` to another.
    #[inline]
    pub const fn as_ticks(&self) -> u64 {
        self.as_u64()
    }
    /// Creates a new Duration from the specified number of "ticks".
    ///
    /// Note that length of a 'tick' is not guaranteed to represent
    /// the same amount of time across different platforms, or from
    /// one version of `coarsetime` to another.
    #[inline]
    pub const fn from_ticks(ticks: u64) -> Duration {
        Self::from_u64(ticks)
    }
    #[doc(hidden)]
    #[inline]
    pub const fn as_u64(&self) -> u64 {
        self.0
    }
    #[doc(hidden)]
    #[inline]
    pub const fn from_u64(ts: u64) -> Duration {
        Duration(ts)
    }
    /// Returns the duration as a floating point number, representing the number
    /// of seconds
    // 2^32 ticks correspond to exactly one second.
    #[inline]
    pub fn as_f64(&self) -> f64 {
        (self.0 as f64) / ((1u64 << 32) as f64)
    }
    /// Returns the absolute difference between two `Duration`s
    #[inline]
    pub const fn abs_diff(&self, other: Duration) -> Duration {
        Duration(self.0.abs_diff(other.0))
    }
    /// Add two durations, saturating on overflow
    #[inline]
    pub const fn saturating_add(self, rhs: Duration) -> Duration {
        Duration(self.0.saturating_add(rhs.0))
    }
    /// Add two durations, returning `None` on overflow
    #[inline]
    pub fn checked_add(self, rhs: Duration) -> Option<Duration> {
        self.0.checked_add(rhs.0).map(Duration)
    }
    /// Subtract two durations, saturating on underflow/overflow
    #[inline]
    pub const fn saturating_sub(self, rhs: Duration) -> Duration {
        Duration(self.0.saturating_sub(rhs.0))
    }
    /// Subtract two durations, returning `None` on underflow/overflow
    #[inline]
    pub fn checked_sub(self, rhs: Duration) -> Option<Duration> {
        self.0.checked_sub(rhs.0).map(Duration)
    }
    /// Multiply a duration by a scalar, saturating on overflow
    #[inline]
    pub const fn saturating_mul(self, rhs: u32) -> Duration {
        Duration(self.0.saturating_mul(rhs as u64))
    }
    /// Multiply a duration by a scalar, returning `None` on overflow
    #[inline]
    pub fn checked_mul(self, rhs: u32) -> Option<Duration> {
        self.0.checked_mul(rhs as u64).map(Duration)
    }
    /// Divide a duration by a scalar, returning `None` for division by zero
    #[inline]
    pub fn checked_div(self, rhs: u32) -> Option<Duration> {
        self.0.checked_div(rhs as u64).map(Duration)
    }
}
#[doc(hidden)]
// Raw tick-value conversion; hidden because ticks are an internal detail.
impl From<u64> for Duration {
    #[doc(hidden)]
    #[inline]
    fn from(ts: u64) -> Duration {
        Duration::from_u64(ts)
    }
}
// The operator impls below use plain (unchecked) arithmetic; callers who
// need overflow handling should use the saturating_*/checked_* methods.
impl Add for Duration {
    type Output = Duration;
    #[inline]
    fn add(self, rhs: Duration) -> Duration {
        Duration(self.0 + rhs.0)
    }
}
impl AddAssign for Duration {
    #[inline]
    fn add_assign(&mut self, rhs: Duration) {
        *self = *self + rhs;
    }
}
impl Sub for Duration {
    type Output = Duration;
    #[inline]
    fn sub(self, rhs: Duration) -> Duration {
        Duration(self.0 - rhs.0)
    }
}
impl SubAssign for Duration {
    #[inline]
    fn sub_assign(&mut self, rhs: Duration) {
        *self = *self - rhs;
    }
}
impl Mul<u32> for Duration {
    type Output = Duration;
    #[inline]
    fn mul(self, rhs: u32) -> Duration {
        Duration(self.0 * rhs as u64)
    }
}
impl MulAssign<u32> for Duration {
    #[inline]
    fn mul_assign(&mut self, rhs: u32) {
        *self = *self * rhs;
    }
}
impl Div<u32> for Duration {
    type Output = Duration;
    #[inline]
    fn div(self, rhs: u32) -> Duration {
        Duration(self.0 / rhs as u64)
    }
}
impl DivAssign<u32> for Duration {
    #[inline]
    fn div_assign(&mut self, rhs: u32) {
        *self = *self / rhs;
    }
}
// Lossless-ish interop with the standard library's Duration, going through
// whole seconds plus the sub-second nanosecond component.
impl From<Duration> for time::Duration {
    #[inline]
    fn from(duration: Duration) -> time::Duration {
        time::Duration::new(duration.as_secs(), duration.subsec_nanos())
    }
}
impl From<time::Duration> for Duration {
    #[inline]
    fn from(duration_sys: time::Duration) -> Duration {
        Duration::new(duration_sys.as_secs(), duration_sys.subsec_nanos())
    }
}

View File

@@ -1,26 +0,0 @@
// Fixed-point Q32.32 conversions: the high 32 bits of the result hold whole
// seconds, the low 32 bits hold the fractional part ("ticks").

/// Whole seconds -> ticks (saturating on overflow).
#[inline]
pub const fn _sec_to_u64(secs: u64) -> u64 {
    secs.saturating_mul(1 << 32)
}

/// Milliseconds -> ticks. The sub-second remainder is scaled by `<< 22`,
/// a fast approximation of `* 2^32 / 1_000`.
#[inline]
pub const fn _millis_to_u64(total_millis: u64) -> u64 {
    let whole_secs = total_millis / 1_000;
    whole_secs.saturating_mul(1 << 32) | ((total_millis - whole_secs * 1_000) << 22)
}

/// Nanoseconds -> ticks, split into seconds plus sub-second remainder.
#[inline]
pub const fn _nsecs_to_u64(total_nsecs: u64) -> u64 {
    let whole_secs = total_nsecs / 1_000_000_000;
    _timespec_to_u64(whole_secs, (total_nsecs - whole_secs * 1_000_000_000) as u32)
}

/// (seconds, nanoseconds) -> ticks. `9_223_372_037 ≈ 2^63 / 10^9`, so
/// `(nsec * C) >> 31` approximates `nsec * 2^32 / 10^9` without a division.
#[inline]
pub const fn _timespec_to_u64(sec: u64, nsec: u32) -> u64 {
    sec.saturating_mul(1 << 32) | ((nsec as u64 * 9_223_372_037) >> 31)
}

/// (seconds, microseconds) -> ticks. `9_223_372_036_855 ≈ 2^63 / 10^6`.
#[inline]
pub const fn _timeval_to_u64(sec: u64, usec: u32) -> u64 {
    sec.saturating_mul(1 << 32) | ((usec as u64 * 9_223_372_036_855) >> 31)
}

View File

@@ -1,335 +0,0 @@
#[allow(unused_imports)]
use std::mem::MaybeUninit;
use std::ops::*;
#[allow(unused_imports)]
use std::ptr::*;
#[cfg(target_has_atomic = "64")]
use std::sync::atomic::{AtomicU64, Ordering};
#[cfg(not(target_has_atomic = "64"))]
use portable_atomic::{AtomicU64, Ordering};
use super::duration::*;
#[allow(unused_imports)]
use super::helpers::*;
/// A measurement of a *monotonically* increasing clock.
/// Opaque and useful only with `Duration`.
///
/// Resulting durations are actual durations; they do not get affected by
/// clock adjustments, leap seconds, or similar.
/// In order to get a measurement of the *wall clock*, use `Date` instead.
#[derive(Copy, Clone, Debug, Hash, Ord, Eq, PartialOrd, PartialEq)]
pub struct Instant(u64);
static RECENT: AtomicU64 = AtomicU64::new(0);
#[cfg(windows)]
extern "system" {
pub fn GetTickCount64() -> libc::c_ulonglong;
}
#[cfg(any(target_os = "macos", target_os = "freebsd"))]
#[allow(non_camel_case_types)]
type clockid_t = libc::c_int;
#[cfg(target_os = "macos")]
const CLOCK_MONOTONIC_RAW_APPROX: clockid_t = 5;
#[cfg(target_os = "macos")]
extern "system" {
fn clock_gettime_nsec_np(clk_id: clockid_t) -> u64;
}
#[cfg(target_os = "freebsd")]
const CLOCK_MONOTONIC_FAST: clockid_t = 12;
#[cfg(all(
any(target_arch = "wasm32", target_arch = "wasm64"),
target_os = "unknown"
))]
mod js_imports {
use wasm_bindgen::prelude::*;
#[wasm_bindgen]
extern "C" {
#[allow(non_camel_case_types)]
pub type performance;
#[wasm_bindgen(static_method_of = performance)]
pub fn now() -> f64;
}
}
impl Instant {
/// Returns an instant corresponding to "now"
///
/// This function also updates the stored instant.
pub fn now() -> Instant {
let now = Self::_now();
Self::_update(now);
Instant(now)
}
/// Returns an instant corresponding to "now" without updating the cached value.
/// After this, `recent()` will still return the old instant.
///
/// `now()` is generally preferred over this function.
pub fn now_without_cache_update() -> Instant {
let now = Self::_now();
Instant(now)
}
/// Returns an instant corresponding to the latest update
pub fn recent() -> Instant {
match Self::_recent() {
0 => Instant::now(),
recent => Instant(recent),
}
}
/// Update the stored instant
///
/// This function should be called frequently, for example in an event loop
/// or using an `Updater` task.
pub fn update() {
let now = Self::_now();
Self::_update(now);
}
/// Returns the amount of time elapsed from another instant to this one
#[inline]
pub fn duration_since(&self, earlier: Instant) -> Duration {
*self - earlier
}
/// Returns the amount of time elapsed between the this instant was created
/// and the latest update
#[inline]
pub fn elapsed_since_recent(&self) -> Duration {
Self::recent() - *self
}
/// Returns the amount of time elapsed since this instant was created
///
/// This function also updates the stored instant.
#[inline]
pub fn elapsed(&self) -> Duration {
Self::now() - *self
}
/// Return a representation of this instant as a number of "ticks".
///
/// Note that length of a 'tick' is not guaranteed to represent
/// the same amount of time across different platforms, or from
/// one version of `coarsetime` to another.
///
/// Note also that the instant represented by "0" ticks is
/// unspecified. It is not guaranteed to be the same time across
/// different platforms, or from one version of `coarsetime` to
/// another.
///
/// This API is mainly intended for applications that need to
/// store the value of an `Instant` in an
/// [`AtomicU64`](std::sync::atomic::AtomicU64).
#[inline]
pub const fn as_ticks(&self) -> u64 {
self.as_u64()
}
/// Create an `Instant` from a number of "ticks".
///
/// Note that length of a 'tick' is not guaranteed to represent
/// the same amount of time across different platforms, or from
/// one version of `coarsetime` to another.
///
/// Note also that the instant represented by "0" ticks is
/// unspecified. It is not guaranteed to be the same time across
/// different platforms, or from one version of `coarsetime` to
/// another.
#[inline]
pub const fn from_ticks(ticks: u64) -> Instant {
Self::from_u64(ticks)
}
#[doc(hidden)]
#[inline]
pub const fn as_u64(&self) -> u64 {
self.0
}
#[doc(hidden)]
#[inline]
pub const fn from_u64(ts: u64) -> Instant {
Instant(ts)
}
/// Calculate an `Instant` that is a `Duration` later, saturating on overflow
#[inline]
pub const fn saturating_add(self, rhs: Duration) -> Instant {
Instant(self.0.saturating_add(rhs.as_u64()))
}
/// Calculate an `Instant` that is a `Duration` later, returning `None` on overflow
#[inline]
pub fn checked_add(self, rhs: Duration) -> Option<Instant> {
self.0.checked_add(rhs.as_u64()).map(Instant)
}
/// Calculate an `Instant` that is a `Duration` earlier, saturating on underflow
#[inline]
pub const fn saturating_sub(self, rhs: Duration) -> Instant {
Instant(self.0.saturating_sub(rhs.as_u64()))
}
/// Calculate an `Instant` that is a `Duration` earlier, returning `None` on underflow
#[inline]
pub fn checked_sub(self, rhs: Duration) -> Option<Instant> {
self.0.checked_sub(rhs.as_u64()).map(Instant)
}
#[cfg(any(target_os = "linux", target_os = "android"))]
fn _now() -> u64 {
let mut tp = MaybeUninit::<libc::timespec>::uninit();
let tp = unsafe {
libc::clock_gettime(libc::CLOCK_MONOTONIC_COARSE, tp.as_mut_ptr());
tp.assume_init()
};
_timespec_to_u64(tp.tv_sec as u64, tp.tv_nsec as u32)
}
#[cfg(target_os = "macos")]
fn _now() -> u64 {
let nsec = unsafe { clock_gettime_nsec_np(CLOCK_MONOTONIC_RAW_APPROX) };
_nsecs_to_u64(nsec)
}
#[cfg(any(target_os = "freebsd", target_os = "dragonfly"))]
fn _now() -> u64 {
let mut tp = MaybeUninit::<libc::timespec>::uninit();
let tp = unsafe {
libc::clock_gettime(libc::CLOCK_MONOTONIC_FAST, tp.as_mut_ptr());
tp.assume_init()
};
_timespec_to_u64(tp.tv_sec as u64, tp.tv_nsec as u32)
}
// Fallback for other Unix systems: gettimeofday. NOTE(review): this is a
// wall-clock source, not monotonic, so these platforms can observe time
// going backwards across clock adjustments — confirm acceptable.
#[cfg(all(
    unix,
    not(any(
        target_os = "macos",
        target_os = "linux",
        target_os = "android",
        target_os = "freebsd",
        target_os = "dragonfly"
    ))
))]
fn _now() -> u64 {
    let mut tv = MaybeUninit::<libc::timeval>::uninit();
    // SAFETY: gettimeofday initializes `tv`; a null timezone pointer is valid.
    let tv = unsafe {
        libc::gettimeofday(tv.as_mut_ptr(), null_mut());
        tv.assume_init()
    };
    _timeval_to_u64(tv.tv_sec as u64, tv.tv_usec as u32)
}
// Raw clock read on Windows: GetTickCount64 returns milliseconds since boot.
#[cfg(windows)]
fn _now() -> u64 {
    let tc = unsafe { GetTickCount64() } as u64;
    _millis_to_u64(tc)
}
// Raw clock read on WASI (preview1 ABI). Prefers the monotonic clock and
// falls back to the realtime clock; panics if neither is available.
// The 1_000_000 argument is the requested precision in nanoseconds.
#[cfg(all(target_os = "wasi", not(feature = "wasi-abi2")))]
fn _now() -> u64 {
    use wasix::{clock_time_get, CLOCKID_MONOTONIC, CLOCKID_REALTIME};
    let nsec = unsafe { clock_time_get(CLOCKID_MONOTONIC, 1_000_000) }
        .or_else(|_| unsafe { clock_time_get(CLOCKID_REALTIME, 1_000_000) })
        .expect("Clock not available");
    _nsecs_to_u64(nsec)
}
// Raw clock read on WASI with the component-model (ABI 2) monotonic clock,
// which reports nanoseconds.
#[cfg(all(target_os = "wasi", feature = "wasi-abi2"))]
fn _now() -> u64 {
    let nsec = wasi_abi2::clocks::monotonic_clock::now();
    _nsecs_to_u64(nsec)
}
// Raw clock read on bare wasm (browser-like hosts): performance.now()
// returns milliseconds; truncated to whole milliseconds by the cast.
#[cfg(all(
    any(target_arch = "wasm32", target_arch = "wasm64"),
    target_os = "unknown"
))]
fn _now() -> u64 {
    _millis_to_u64(js_imports::performance::now() as u64)
}
// Raw clock read inside Fortanix SGX enclaves: only SystemTime is available.
// NOTE(review): this is wall-clock time (can jump backwards) and `unwrap`
// panics if the system clock reads before the Unix epoch.
#[cfg(all(target_arch = "x86_64", target_env = "sgx", target_vendor = "fortanix"))]
fn _now() -> u64 {
    let timestamp = std::time::SystemTime::now()
        .duration_since(std::time::UNIX_EPOCH)
        .unwrap();
    timestamp.as_secs() * 1_000_000_000 + (timestamp.subsec_nanos() as u64)
}
// Publishes `now` as the process-wide cached "most recent" timestamp.
// Relaxed ordering is used: only the value itself matters, not ordering
// with respect to other memory operations.
#[inline]
fn _update(now: u64) {
    RECENT.store(now, Ordering::Relaxed)
}
// Returns the cached timestamp, seeding it from the real clock on first
// use (a stored value of 0 means the cache was never written).
#[inline]
fn _recent() -> u64 {
    match RECENT.load(Ordering::Relaxed) {
        0 => {
            // First read: populate the cache, then go back through the
            // normal load path so every caller behaves identically.
            Self::_update(Self::_now());
            Self::_recent()
        }
        recent => recent,
    }
}
}
impl Default for Instant {
fn default() -> Instant {
Self::now()
}
}
impl Sub<Instant> for Instant {
    type Output = Duration;

    /// Elapsed ticks between two instants; saturates to zero rather than
    /// underflowing when `other` is later than `self`.
    #[inline]
    fn sub(self, other: Instant) -> Duration {
        let ticks = self.0.saturating_sub(other.0);
        Duration::from_u64(ticks)
    }
}
impl Sub<Duration> for Instant {
    type Output = Instant;

    /// Moves the instant back by `rhs`. Plain subtraction: this panics on
    /// underflow in debug builds (standard integer overflow semantics).
    #[inline]
    fn sub(self, rhs: Duration) -> Instant {
        let ticks = self.0 - rhs.as_u64();
        Instant(ticks)
    }
}
impl SubAssign<Duration> for Instant {
    /// In-place equivalent of `self - rhs`.
    #[inline]
    fn sub_assign(&mut self, rhs: Duration) {
        self.0 -= rhs.as_u64();
    }
}
impl Add<Duration> for Instant {
    type Output = Instant;

    /// Moves the instant forward by `rhs`. Plain addition: this panics on
    /// overflow in debug builds (standard integer overflow semantics).
    #[inline]
    fn add(self, rhs: Duration) -> Instant {
        let ticks = self.0 + rhs.as_u64();
        Instant(ticks)
    }
}
impl AddAssign<Duration> for Instant {
    /// In-place equivalent of `self + rhs`.
    #[inline]
    fn add_assign(&mut self, rhs: Duration) {
        self.0 += rhs.as_u64();
    }
}

View File

@@ -1,37 +0,0 @@
//! A crate to make time measurements that focuses on speed.
//!
//! This crate is a partial replacement for the `Time` and `Duration` structures
//! from the standard library, with the following differences:
//!
//! * Speed is privileged over accuracy. In particular, `CLOCK_MONOTONIC_COARSE`
//! is used to retrieve the clock value on Linux systems, and transformations avoid
//! operations that can be slow on non-Intel systems.
//! * The number of system calls can be kept to a minimum. The "most recent
//! timestamp" is always kept in memory.
//! It can be read with just a load operation, and can be
//! updated only as frequently as necessary.
//!
//! # Installation
//!
//! `coarsetime` is available on [crates.io](https://crates.io/crates/coarsetime) and works on
//! Rust stable, beta, and nightly.
//!
//! Windows and Unix-like systems are supported.
#![allow(clippy::trivially_copy_pass_by_ref)]
mod clock;
mod duration;
mod helpers;
mod instant;
#[cfg(not(any(target_arch = "wasm32", target_arch = "wasm64")))]
mod updater;
#[cfg(test)]
mod tests;
pub use self::clock::*;
pub use self::duration::*;
pub use self::instant::*;
#[cfg(not(any(target_arch = "wasm32", target_arch = "wasm64")))]
pub use self::updater::*;

View File

@@ -1,55 +0,0 @@
use std::thread::sleep;
use std::time;
#[cfg(not(any(target_arch = "wasm32", target_arch = "wasm64")))]
use super::Updater;
use super::{Clock, Duration, Instant};
#[test]
fn tests() {
    // End-to-end smoke test for Instant/Clock. Uses real sleeps, so it takes
    // several seconds and asserts loose bounds rather than exact values.
    let ts = Instant::now();
    let d = Duration::from_secs(2);
    sleep(time::Duration::new(3, 0));
    let elapsed = ts.elapsed().as_secs();
    println!("Elapsed: {elapsed} secs");
    // Slept 3 s, so at least 2 s must have elapsed even on a coarse clock;
    // the < 100 bound guards against gross conversion errors.
    assert!(elapsed >= 2);
    assert!(elapsed < 100);
    assert!(ts.elapsed_since_recent() > d);
    // `recent()` is a cached value: without an explicit update it must not
    // move. NOTE(review): this assumes nothing else calls Instant::update()
    // concurrently — tests_updater is also registered as #[test] and runs in
    // parallel by default, which can make this assertion flaky.
    let ts = Instant::now();
    sleep(time::Duration::new(1, 0));
    assert_eq!(Instant::recent(), ts);
    Instant::update();
    assert!(Instant::recent() > ts);
    // Clock mirrors the same cached-vs-live split for wall-clock time.
    let clock_now = Clock::recent_since_epoch();
    sleep(time::Duration::new(1, 0));
    assert_eq!(Clock::recent_since_epoch(), clock_now);
    assert!(Clock::now_since_epoch() > clock_now);
    #[cfg(not(any(target_arch = "wasm32", target_arch = "wasm64")))]
    tests_updater();
}
#[cfg(not(any(target_arch = "wasm32", target_arch = "wasm64")))]
#[test]
fn tests_updater() {
let updater = Updater::new(250)
.start()
.expect("Unable to start a background updater");
let ts = Instant::recent();
let clock_recent = Clock::recent_since_epoch();
sleep(time::Duration::new(2, 0));
assert!(Clock::recent_since_epoch() > clock_recent);
assert!(Instant::recent() != ts);
updater.stop().unwrap();
let clock_recent = Clock::recent_since_epoch();
sleep(time::Duration::new(1, 0));
assert_eq!(Clock::recent_since_epoch(), clock_recent);
}
#[test]
fn tests_duration() {
    // Round-trip a day count through the Duration conversion helpers.
    let one_thousand_days = Duration::from_days(1000);
    assert_eq!(one_thousand_days.as_days(), 1000);
}

View File

@@ -1,58 +0,0 @@
use std::io;
use std::sync::atomic::{AtomicBool, Ordering};
use std::sync::Arc;
use std::thread;
use std::time;
use super::clock::*;
use super::instant::*;
/// A service to periodically call `Instant::update()`
#[derive(Debug)]
pub struct Updater {
    // How long the background thread sleeps between updates.
    period: time::Duration,
    // Shared flag the background thread polls to decide when to exit.
    running: Arc<AtomicBool>,
    // Join handle of the background thread; `None` until `start()` succeeds.
    th: Option<thread::JoinHandle<()>>,
}
impl Updater {
    /// Spawns a background task to call `Instant::update()` periodically
    ///
    /// The `running` flag is set *before* the thread is spawned so the loop
    /// condition is already true on its first check, and `Instant`/`Clock`
    /// are updated once immediately so callers see fresh values without
    /// waiting a full period.
    pub fn start(mut self) -> Result<Self, io::Error> {
        let period = self.period;
        let running = self.running.clone();
        running.store(true, Ordering::Relaxed);
        let th: thread::JoinHandle<()> = thread::Builder::new()
            .name("coarsetime".to_string())
            .spawn(move || {
                // Sleep first: the immediate update below covers time zero.
                while running.load(Ordering::Relaxed) {
                    thread::sleep(period);
                    Instant::update();
                    Clock::update();
                }
            })?;
        self.th = Some(th);
        Instant::update();
        Clock::update();
        Ok(self)
    }
    /// Stops the periodic updates
    ///
    /// Blocks until the background thread exits — up to one full period,
    /// since the flag is only re-checked after each sleep.
    ///
    /// # Panics
    ///
    /// Panics if the updater was never started (`th` is `None`).
    pub fn stop(mut self) -> Result<(), io::Error> {
        self.running.store(false, Ordering::Relaxed);
        self.th
            .take()
            .expect("updater is not running")
            .join()
            .map_err(|_| io::Error::other("failed to properly stop the updater"))
    }
    /// Creates a new `Updater` with the specified update period, in
    /// milliseconds.
    pub fn new(period_millis: u64) -> Updater {
        Updater {
            period: time::Duration::from_millis(period_millis),
            running: Arc::new(AtomicBool::new(false)),
            th: None,
        }
    }
}