diff --git a/README.md b/README.md index 929585c11..bb4649271 100644 --- a/README.md +++ b/README.md @@ -14,6 +14,8 @@ Torrust Tracker is a lightweight but incredibly powerful and feature-rich BitTor * [X] Peer authentication using time-bound keys * [X] newTrackon check supported for both HTTP, UDP, where IPv4 and IPv6 is properly handled * [X] SQLite3 Persistent loading and saving of the torrent hashes and completed count +* [X] MySQL support added as an engine option +* [X] Periodic saving added; the interval can be configured ### Implemented BEPs * [BEP 3](https://www.bittorrent.org/beps/bep_0003.html): The BitTorrent Protocol diff --git a/src/mysql_database.rs b/src/mysql_database.rs index 7ecae214a..15e2de633 100644 --- a/src/mysql_database.rs +++ b/src/mysql_database.rs @@ -80,12 +80,26 @@ impl Database for MysqlDatabase { let mut db_transaction = conn.start_transaction(TxOpts::default()).map_err(|_| database::Error::DatabaseError)?; + let mut insert_vector= vec![]; + for (info_hash, torrent_entry) in torrents { let (_seeders, completed, _leechers) = torrent_entry.get_stats(); - if db_transaction.exec_drop("INSERT INTO torrents (info_hash, completed) VALUES (UNHEX(?), ?) 
ON DUPLICATE KEY UPDATE completed = completed", (info_hash.to_string(), completed.to_string())).is_err() { + insert_vector.push(format!("(UNHEX('{}'), {})", info_hash.to_string(), completed.to_string())); + if insert_vector.len() == 1000 { + let query = format!("INSERT INTO torrents (info_hash, completed) VALUES {} ON DUPLICATE KEY UPDATE completed = VALUES(completed)", insert_vector.join(",")); + if db_transaction.query_drop(query).is_err() { + return Err(Error::InvalidQuery); + } + insert_vector.clear(); + } + } + + if insert_vector.len() != 0 { + let query = format!("INSERT INTO torrents (info_hash, completed) VALUES {} ON DUPLICATE KEY UPDATE completed = VALUES(completed)", insert_vector.join(",")); + if db_transaction.query_drop(query).is_err() { return Err(Error::InvalidQuery); } - debug!("INSERT INTO torrents (info_hash, completed) VALUES (UNHEX('{}'), {}) ON DUPLICATE KEY UPDATE completed = completed", info_hash.to_string(), completed.to_string()); + insert_vector.clear(); } if db_transaction.commit().is_err() { diff --git a/src/sqlite_database.rs b/src/sqlite_database.rs index fa519ffd0..82bb9d4fc 100644 --- a/src/sqlite_database.rs +++ b/src/sqlite_database.rs @@ -82,7 +82,8 @@ impl Database for SqliteDatabase { for (info_hash, torrent_entry) in torrents { let (_seeders, completed, _leechers) = torrent_entry.get_stats(); - let _ = db_transaction.execute("INSERT OR REPLACE INTO torrents (info_hash, completed) VALUES (?, ?)", &[info_hash.to_string(), completed.to_string()]); + let _ = db_transaction.execute("INSERT OR IGNORE INTO torrents (info_hash, completed) VALUES (?, ?)", &[info_hash.to_string(), completed.to_string()]); + let _ = db_transaction.execute("UPDATE torrents SET completed = ? 
WHERE info_hash = ?", &[completed.to_string(), info_hash.to_string()]); } let _ = db_transaction.commit(); diff --git a/src/tracker.rs b/src/tracker.rs index 0e42f69e1..c0c25bc41 100644 --- a/src/tracker.rs +++ b/src/tracker.rs @@ -3,7 +3,7 @@ use std::collections::BTreeMap; use std::net::SocketAddr; use std::sync::Arc; -use log::info; +use log::{debug, info}; use serde::{Deserialize, Serialize}; use serde; use tokio::sync::{RwLock, RwLockReadGuard}; @@ -128,6 +128,7 @@ impl TorrentTracker { let torrents = self.database.load_persistent_torrent_data().await?; for torrent in torrents { + debug!("{:#?}", torrent); let _ = self.add_torrent(torrent.0, 0, torrent.1, 0).await; } @@ -307,41 +308,48 @@ impl TorrentTracker { let mut updates = self.updates.write().await; let mut updates_cloned: std::collections::HashMap = std::collections::HashMap::new(); // let mut torrent_hashes: Vec = Vec::new(); + info!("Copying updates to updates_cloned..."); for (k, completed) in updates.iter() { - updates_cloned.insert(*k, *completed); + updates_cloned.insert(k.clone(), completed.clone()); } updates.clear(); drop(updates); - let mut shadows = self.shadow.write().await; + info!("Copying updates_cloned into the shadow to overwrite..."); for (k, completed) in updates_cloned.iter() { + let mut shadows = self.shadow.write().await; if shadows.contains_key(k) { shadows.remove(k); } - shadows.insert(*k, *completed); + shadows.insert(k.clone(), completed.clone()); + drop(shadows); } drop(updates_cloned); // We updated the shadow data from the updates data, let's handle shadow data as expected. 
+ info!("Handle shadow_copy to be updated into SQL..."); let mut shadow_copy: BTreeMap = BTreeMap::new(); + let shadows = self.shadow.read().await; for (infohash, completed) in shadows.iter() { - shadow_copy.insert(*infohash, TorrentEntry { + shadow_copy.insert(infohash.clone(), TorrentEntry { peers: Default::default(), - completed: *completed, + completed: completed.clone(), seeders: 0, }); } - - // Drop the lock drop(shadows); // We will now save the data from the shadow into the database. // This should not put any strain on the server itself, other then the harddisk/ssd. + info!("Start saving shadow data into SQL..."); let result = self.database.save_persistent_torrent_data(&shadow_copy).await; if result.is_ok() { + info!("Done saving data to SQL and succeeded, emptying shadow..."); let mut shadow = self.shadow.write().await; shadow.clear(); drop(shadow); + } else { + info!("Done saving data to SQL and failed, not emptying shadow..."); } } }