2 changes: 2 additions & 0 deletions README.md
@@ -14,6 +14,8 @@ Torrust Tracker is a lightweight but incredibly powerful and feature-rich BitTor
 * [X] Peer authentication using time-bound keys
 * [X] newTrackon check supported for both HTTP and UDP, with IPv4 and IPv6 handled properly
 * [X] SQLite3 persistent loading and saving of the torrent hashes and completed count
+* [X] MySQL support added as a database engine option
+* [X] Periodic saving added, with a configurable interval
 
 ### Implemented BEPs
 * [BEP 3](https://www.bittorrent.org/beps/bep_0003.html): The BitTorrent Protocol
18 changes: 16 additions & 2 deletions src/mysql_database.rs
@@ -80,12 +80,26 @@ impl Database for MysqlDatabase {
 
         let mut db_transaction = conn.start_transaction(TxOpts::default()).map_err(|_| database::Error::DatabaseError)?;
 
+        let mut insert_vector = vec![];
+
         for (info_hash, torrent_entry) in torrents {
             let (_seeders, completed, _leechers) = torrent_entry.get_stats();
-            if db_transaction.exec_drop("INSERT INTO torrents (info_hash, completed) VALUES (UNHEX(?), ?) ON DUPLICATE KEY UPDATE completed = completed", (info_hash.to_string(), completed.to_string())).is_err() {
-                return Err(Error::InvalidQuery);
-            }
-            debug!("INSERT INTO torrents (info_hash, completed) VALUES (UNHEX('{}'), {}) ON DUPLICATE KEY UPDATE completed = completed", info_hash.to_string(), completed.to_string());
+            insert_vector.push(format!("(UNHEX('{}'), {})", info_hash.to_string(), completed.to_string()));
+            if insert_vector.len() == 1000 {
+                let query = format!("INSERT INTO torrents (info_hash, completed) VALUES {} ON DUPLICATE KEY UPDATE completed = VALUES(completed)", insert_vector.join(","));
+                if db_transaction.query_drop(query).is_err() {
+                    return Err(Error::InvalidQuery);
+                }
+                insert_vector.clear();
+            }
         }
 
+        if insert_vector.len() != 0 {
+            let query = format!("INSERT INTO torrents (info_hash, completed) VALUES {} ON DUPLICATE KEY UPDATE completed = VALUES(completed)", insert_vector.join(","));
+            if db_transaction.query_drop(query).is_err() {
+                return Err(Error::InvalidQuery);
+            }
+            insert_vector.clear();
+        }
+
         if db_transaction.commit().is_err() {
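The hunk above replaces one statement per torrent with batched multi-row INSERTs of up to 1000 rows, and it also fixes the upsert: the old `ON DUPLICATE KEY UPDATE completed = completed` was a no-op on key conflict, while `VALUES(completed)` applies the incoming count. Because the values are interpolated into the SQL string rather than bound as parameters, this relies on `info_hash` being a hex string and `completed` being numeric. A self-contained sketch of the batching scheme follows; it is not the PR's code, and names like `BATCH_SIZE` and `build_batch_query` are illustrative:

// Sketch of the batching pattern introduced in mysql_database.rs.
const BATCH_SIZE: usize = 1000;

fn build_batch_query(rows: &[(String, u32)]) -> String {
    let values: Vec<String> = rows
        .iter()
        // UNHEX() converts the hex info_hash back to raw bytes server-side.
        .map(|(info_hash, completed)| format!("(UNHEX('{}'), {})", info_hash, completed))
        .collect();
    format!(
        "INSERT INTO torrents (info_hash, completed) VALUES {} \
         ON DUPLICATE KEY UPDATE completed = VALUES(completed)",
        values.join(",")
    )
}

fn main() {
    // Fake workload: 2500 torrents with dummy 40-hex-char info hashes.
    let rows: Vec<(String, u32)> = (0..2500)
        .map(|i| (format!("{:040x}", i), i as u32))
        .collect();
    // chunks() yields full batches of 1000 plus one final partial batch,
    // mirroring the PR's in-loop flush and its trailing `len() != 0` flush.
    for chunk in rows.chunks(BATCH_SIZE) {
        let query = build_batch_query(chunk);
        println!("flushing {} rows ({} bytes of SQL)", chunk.len(), query.len());
    }
}

Using `chunks()` folds the PR's two flush sites (inside the loop at 1000 rows, and after the loop for the remainder) into a single code path.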
3 changes: 2 additions & 1 deletion src/sqlite_database.rs
@@ -82,7 +82,8 @@ impl Database for SqliteDatabase {
 
         for (info_hash, torrent_entry) in torrents {
             let (_seeders, completed, _leechers) = torrent_entry.get_stats();
-            let _ = db_transaction.execute("INSERT OR REPLACE INTO torrents (info_hash, completed) VALUES (?, ?)", &[info_hash.to_string(), completed.to_string()]);
+            let _ = db_transaction.execute("INSERT OR IGNORE INTO torrents (info_hash, completed) VALUES (?, ?)", &[info_hash.to_string(), completed.to_string()]);
+            let _ = db_transaction.execute("UPDATE torrents SET completed = ? WHERE info_hash = ?", &[completed.to_string(), info_hash.to_string()]);
         }
 
         let _ = db_transaction.commit();
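The SQLite change swaps a single `INSERT OR REPLACE` for an `INSERT OR IGNORE` followed by an `UPDATE`. The distinction matters because `INSERT OR REPLACE` deletes the conflicting row and inserts a fresh one, which resets the rowid, clears any columns not listed in the statement, and fires delete triggers; the two-step form updates the existing row in place. A minimal sketch of the same two-step upsert, written against the rusqlite API (the project's own transaction wrapper is assumed to behave similarly):

// Two-step upsert: create the row if absent, then write the latest count.
use rusqlite::{params, Connection, Result};

fn upsert_completed(conn: &Connection, info_hash: &str, completed: u32) -> Result<()> {
    // Step 1: only inserts when no row with this info_hash exists yet.
    conn.execute(
        "INSERT OR IGNORE INTO torrents (info_hash, completed) VALUES (?1, ?2)",
        params![info_hash, completed],
    )?;
    // Step 2: unconditionally writes the incoming completed count.
    conn.execute(
        "UPDATE torrents SET completed = ?1 WHERE info_hash = ?2",
        params![completed, info_hash],
    )?;
    Ok(())
}

fn main() -> Result<()> {
    let conn = Connection::open_in_memory()?;
    conn.execute(
        "CREATE TABLE torrents (info_hash TEXT PRIMARY KEY, completed INTEGER)",
        [],
    )?;
    upsert_completed(&conn, "abc123", 1)?;
    upsert_completed(&conn, "abc123", 2)?; // updates in place, row is never deleted
    Ok(())
}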
24 changes: 16 additions & 8 deletions src/tracker.rs
@@ -3,7 +3,7 @@ use std::collections::BTreeMap;
 use std::net::SocketAddr;
 use std::sync::Arc;
 
-use log::info;
+use log::{debug, info};
 use serde::{Deserialize, Serialize};
 use serde;
 use tokio::sync::{RwLock, RwLockReadGuard};
@@ -128,6 +128,7 @@ impl TorrentTracker {
         let torrents = self.database.load_persistent_torrent_data().await?;
 
         for torrent in torrents {
+            debug!("{:#?}", torrent);
             let _ = self.add_torrent(torrent.0, 0, torrent.1, 0).await;
         }

@@ -307,41 +308,48 @@ impl TorrentTracker {
         let mut updates = self.updates.write().await;
         let mut updates_cloned: std::collections::HashMap<InfoHash, u32> = std::collections::HashMap::new();
         // let mut torrent_hashes: Vec<InfoHash> = Vec::new();
+        info!("Copying updates to updates_cloned...");
         for (k, completed) in updates.iter() {
-            updates_cloned.insert(*k, *completed);
+            updates_cloned.insert(k.clone(), completed.clone());
         }
         updates.clear();
         drop(updates);
 
-        let mut shadows = self.shadow.write().await;
+        info!("Copying updates_cloned into the shadow to overwrite...");
         for (k, completed) in updates_cloned.iter() {
+            let mut shadows = self.shadow.write().await;
             if shadows.contains_key(k) {
                 shadows.remove(k);
             }
-            shadows.insert(*k, *completed);
+            shadows.insert(k.clone(), completed.clone());
+            drop(shadows);
         }
+        drop(updates_cloned);
 
         // We updated the shadow data from the updates data, let's handle the shadow data as expected.
+        info!("Handle shadow_copy to be updated into SQL...");
         let mut shadow_copy: BTreeMap<InfoHash, TorrentEntry> = BTreeMap::new();
+        let shadows = self.shadow.read().await;
         for (infohash, completed) in shadows.iter() {
-            shadow_copy.insert(*infohash, TorrentEntry {
+            shadow_copy.insert(infohash.clone(), TorrentEntry {
                 peers: Default::default(),
-                completed: *completed,
+                completed: completed.clone(),
                 seeders: 0,
             });
         }
 
         // Drop the lock
         drop(shadows);
 
         // We will now save the data from the shadow into the database.
         // This should not put any strain on the server itself, other than the hard disk/SSD.
+        info!("Start saving shadow data into SQL...");
         let result = self.database.save_persistent_torrent_data(&shadow_copy).await;
         if result.is_ok() {
+            info!("Done saving data to SQL and succeeded, emptying shadow...");
             let mut shadow = self.shadow.write().await;
             shadow.clear();
             drop(shadow);
+        } else {
+            info!("Done saving data to SQL and failed, not emptying shadow...");
         }
     }
 }
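The tracker hunk implements a two-stage buffer: per-announce completed counts accumulate in `updates`, get merged into a `shadow` map, and the shadow is cleared only after the SQL save succeeds, so a failed save leaves the data in place to be retried on the next pass. A condensed sketch of that flush pattern, using `std::sync::RwLock` and `String` keys as stand-ins for the tracker's tokio locks and `InfoHash` type:

// Sketch of the updates -> shadow -> database flush cycle.
use std::collections::{BTreeMap, HashMap};
use std::sync::RwLock;

struct Tracker {
    updates: RwLock<HashMap<String, u32>>, // per-announce completed counts
    shadow: RwLock<HashMap<String, u32>>,  // pending rows awaiting a save
}

impl Tracker {
    fn periodic_save(&self) {
        // 1. Drain pending updates, holding the write lock as briefly as possible.
        let drained: HashMap<String, u32> = {
            let mut updates = self.updates.write().unwrap();
            updates.drain().collect()
        };

        // 2. Merge into the shadow map; insert overwrites any stale entry,
        //    so the remove-then-insert dance in the PR is not strictly needed.
        {
            let mut shadow = self.shadow.write().unwrap();
            for (info_hash, completed) in drained {
                shadow.insert(info_hash, completed);
            }
        }

        // 3. Snapshot and persist; clear the shadow only on success so a
        //    failed save retries the same rows on the next pass.
        let snapshot: BTreeMap<String, u32> = self
            .shadow
            .read()
            .unwrap()
            .iter()
            .map(|(k, v)| (k.clone(), *v))
            .collect();
        if save_to_database(&snapshot).is_ok() {
            self.shadow.write().unwrap().clear();
        }
    }
}

fn save_to_database(_rows: &BTreeMap<String, u32>) -> Result<(), ()> {
    Ok(()) // stand-in for the real SQL save
}

fn main() {
    let tracker = Tracker {
        updates: RwLock::new(HashMap::from([("abc".to_string(), 5)])),
        shadow: RwLock::new(HashMap::new()),
    };
    tracker.periodic_save();
}

One design note: the PR acquires and drops the shadow write guard on every loop iteration, which keeps each critical section short at the cost of repeated locking; the sketch merges under a single guard instead, which is the more common trade-off when the merge loop itself is cheap.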