use crate::server::Events;
use crate::utils;
use log::{error, trace};
use serde::{Deserialize, Serialize};
use std::borrow::Cow;
use std::collections::BTreeMap;
use tokio::io::AsyncBufReadExt;
use tokio::stream::StreamExt;
use tokio::sync::RwLock;

const TWO_HOURS: std::time::Duration = std::time::Duration::from_secs(3600 * 2);

#[derive(Deserialize, Clone, PartialEq)]
pub enum TrackerMode {
    /// In static mode, torrents are tracked only if they were added ahead of time.
    #[serde(rename = "static")]
    StaticMode,

    /// In dynamic mode, torrents are tracked even if they were not added ahead of time.
    #[serde(rename = "dynamic")]
    DynamicMode,

    /// In private mode, the tracker will only serve authenticated peers.
    #[serde(rename = "private")]
    PrivateMode,
}
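
// With the serde renames above, a deserialized configuration selects a mode by
// its lowercase name, e.g. `mode = "dynamic"` in a TOML file (the exact key
// name and table depend on the caller's configuration layout).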

#[derive(Clone, Serialize)]
pub struct TorrentPeer {
    ip: std::net::SocketAddr,
    uploaded: u64,
    downloaded: u64,
    left: u64,
    event: Events,
    #[serde(serialize_with = "ser_instant")]
    updated: std::time::Instant,
}
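
// `std::time::Instant` has no stable serialized form, so `ser_instant` below
// writes a peer out as the number of milliseconds since its last update, i.e.
// the peer's age at the moment of serialization.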
fn ser_instant<S: serde::Serializer>(inst: &std::time::Instant, ser: S) -> Result<S::Ok, S::Error> {
    ser.serialize_u64(inst.elapsed().as_millis() as u64)
}

#[derive(Ord, PartialEq, Eq, Clone)]
pub struct InfoHash {
    info_hash: [u8; 20],
}

impl std::fmt::Display for InfoHash {
    fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result {
        let mut chars = [0u8; 40];
        binascii::bin2hex(&self.info_hash, &mut chars).expect("failed to hexlify");
        write!(f, "{}", std::str::from_utf8(&chars).unwrap())
    }
}
impl std::str::FromStr for InfoHash {
    type Err = binascii::ConvertError;

    fn from_str(s: &str) -> Result<Self, Self::Err> {
        let mut i = Self { info_hash: [0u8; 20] };
        if s.len() != 40 {
            return Err(binascii::ConvertError::InvalidInputLength);
        }
        binascii::hex2bin(s.as_bytes(), &mut i.info_hash)?;
        Ok(i)
    }
}

impl std::cmp::PartialOrd<InfoHash> for InfoHash {
    fn partial_cmp(&self, other: &InfoHash) -> Option<std::cmp::Ordering> {
        self.info_hash.partial_cmp(&other.info_hash)
    }
}

impl std::convert::From<&[u8]> for InfoHash {
    fn from(data: &[u8]) -> InfoHash {
        assert_eq!(data.len(), 20);
        let mut ret = InfoHash { info_hash: [0u8; 20] };
        ret.info_hash.clone_from_slice(data);
        ret
    }
}

impl std::convert::From<[u8; 20]> for InfoHash {
    fn from(info_hash: [u8; 20]) -> InfoHash {
        InfoHash { info_hash }
    }
}

impl serde::ser::Serialize for InfoHash {
    fn serialize<S: serde::ser::Serializer>(&self, serializer: S) -> Result<S::Ok, S::Error> {
        let mut buffer = [0u8; 40];
        let bytes_out = binascii::bin2hex(&self.info_hash, &mut buffer).ok().unwrap();
        let str_out = std::str::from_utf8(bytes_out).unwrap();

        serializer.serialize_str(str_out)
    }
}

struct InfoHashVisitor;

impl<'v> serde::de::Visitor<'v> for InfoHashVisitor {
    type Value = InfoHash;

    fn expecting(&self, formatter: &mut std::fmt::Formatter) -> std::fmt::Result {
        write!(formatter, "a 40 character long hash")
    }

    fn visit_str<E: serde::de::Error>(self, v: &str) -> Result<Self::Value, E> {
        if v.len() != 40 {
            return Err(serde::de::Error::invalid_value(
                serde::de::Unexpected::Str(v),
                &"expected a 40 character long string",
            ));
        }

        let mut res = InfoHash { info_hash: [0u8; 20] };

        if let Err(_) = binascii::hex2bin(v.as_bytes(), &mut res.info_hash) {
            return Err(serde::de::Error::invalid_value(
                serde::de::Unexpected::Str(v),
                &"expected a hexadecimal string",
            ));
        } else {
            return Ok(res);
        }
    }
}
impl<'de> serde::de::Deserialize<'de> for InfoHash {
    fn deserialize<D: serde::de::Deserializer<'de>>(des: D) -> Result<Self, D::Error> {
        des.deserialize_str(InfoHashVisitor)
    }
}

#[repr(transparent)]
#[derive(Copy, Clone, PartialOrd, Ord, Eq, PartialEq)]
pub struct PeerId([u8; 20]);

impl PeerId {
    pub fn from_array(v: &[u8; 20]) -> &PeerId {
        unsafe {
            // This is safe since PeerId is repr(transparent) and its contents are identical to [u8; 20].
            core::mem::transmute(v)
        }
    }
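
    // Azureus-style peer IDs look like b"-XX1234-...", where the two bytes
    // after the dash identify the client implementation; the match below maps
    // that code to a human-readable name. Mainline BitTorrent IDs start with 'M'.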
    pub fn get_client_name(&self) -> Option<&'static str> {
        if self.0[0] == b'M' {
            return Some("BitTorrent");
        }
        if self.0[0] == b'-' {
            let name = match &self.0[1..3] {
                b"AG" => "Ares",
                b"A~" => "Ares",
                b"AR" => "Arctic",
                b"AV" => "Avicora",
                b"AX" => "BitPump",
                b"AZ" => "Azureus",
                b"BB" => "BitBuddy",
                b"BC" => "BitComet",
                b"BF" => "Bitflu",
                b"BG" => "BTG (uses Rasterbar libtorrent)",
                b"BR" => "BitRocket",
                b"BS" => "BTSlave",
                b"BX" => "~Bittorrent X",
                b"CD" => "Enhanced CTorrent",
                b"CT" => "CTorrent",
                b"DE" => "DelugeTorrent",
                b"DP" => "Propagate Data Client",
                b"EB" => "EBit",
                b"ES" => "electric sheep",
                b"FT" => "FoxTorrent",
                b"FW" => "FrostWire",
                b"FX" => "Freebox BitTorrent",
                b"GS" => "GSTorrent",
                b"HL" => "Halite",
                b"HN" => "Hydranode",
                b"KG" => "KGet",
                b"KT" => "KTorrent",
                b"LH" => "LH-ABC",
                b"LP" => "Lphant",
                b"LT" => "libtorrent",
                b"lt" => "libTorrent",
                b"LW" => "LimeWire",
                b"MO" => "MonoTorrent",
                b"MP" => "MooPolice",
                b"MR" => "Miro",
                b"MT" => "MoonlightTorrent",
                b"NX" => "Net Transport",
                b"PD" => "Pando",
                b"qB" => "qBittorrent",
                b"QD" => "QQDownload",
                b"QT" => "Qt 4 Torrent example",
                b"RT" => "Retriever",
                b"S~" => "Shareaza alpha/beta",
                b"SB" => "~Swiftbit",
                b"SS" => "SwarmScope",
                b"ST" => "SymTorrent",
                b"st" => "sharktorrent",
                b"SZ" => "Shareaza",
                b"TN" => "TorrentDotNET",
                b"TR" => "Transmission",
                b"TS" => "Torrentstorm",
                b"TT" => "TuoTu",
                b"UL" => "uLeecher!",
                b"UT" => "µTorrent",
                b"UW" => "µTorrent Web",
                b"VG" => "Vagaa",
                b"WD" => "WebTorrent Desktop",
                b"WT" => "BitLet",
                b"WW" => "WebTorrent",
                b"WY" => "FireTorrent",
                b"XL" => "Xunlei",
                b"XT" => "XanTorrent",
                b"XX" => "Xtorrent",
                b"ZT" => "ZipTorrent",
                _ => return None,
            };
            Some(name)
        } else {
            None
        }
    }
}

impl Serialize for PeerId {
    fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
    where S: serde::Serializer {
        let mut tmp = [0u8; 40];
        binascii::bin2hex(&self.0, &mut tmp).unwrap();
        let id = std::str::from_utf8(&tmp).ok();

        #[derive(Serialize)]
        struct PeerIdInfo<'a> {
            id: Option<&'a str>,
            client: Option<&'a str>,
        }

        let obj = PeerIdInfo { id, client: self.get_client_name() };
        obj.serialize(serializer)
    }
}

#[derive(Serialize, Deserialize, Clone)]
pub struct TorrentEntry {
    is_flagged: bool,

    #[serde(skip)]
    peers: std::collections::BTreeMap<PeerId, TorrentPeer>,

    completed: u32,

    #[serde(skip)]
    seeders: u32,
}
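
// Note that `peers` and `seeders` are `#[serde(skip)]`: an entry loaded from a
// saved database starts with an empty swarm, and only `is_flagged` and
// `completed` survive a save/load cycle.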
impl TorrentEntry {
    pub fn new() -> TorrentEntry {
        TorrentEntry {
            is_flagged: false,
            peers: std::collections::BTreeMap::new(),
            completed: 0,
            seeders: 0,
        }
    }

    pub fn is_flagged(&self) -> bool {
        self.is_flagged
    }

    pub fn update_peer(
        &mut self, peer_id: &PeerId, remote_address: &std::net::SocketAddr, uploaded: u64, downloaded: u64, left: u64,
        event: Events,
    ) {
        let is_seeder = left == 0 && uploaded > 0;
        let mut was_seeder = false;
        let mut is_completed = left == 0 && (event as u32) == (Events::Complete as u32);
        if let Some(prev) = self.peers.insert(*peer_id, TorrentPeer {
            updated: std::time::Instant::now(),
            left,
            downloaded,
            uploaded,
            ip: *remote_address,
            event,
        }) {
            was_seeder = prev.left == 0 && prev.uploaded > 0;

            if is_completed && (prev.event as u32) == (Events::Complete as u32) {
                // don't increment the completed count again; a completion should only be counted once per peer.
                is_completed = false;
            }
        }

        if is_seeder && !was_seeder {
            self.seeders += 1;
        } else if was_seeder && !is_seeder {
            self.seeders -= 1;
        }

        if is_completed {
            self.completed += 1;
        }
    }
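
    // Returns up to 74 peers matching the requester's address family, skipping
    // the requester itself. (The 74 cap is presumably a packet-size
    // consideration: at 6 bytes per IPv4 peer, the response stays within a
    // single small UDP datagram; the exact rationale is not documented here.)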
    pub fn get_peers(&self, remote_addr: &std::net::SocketAddr) -> Vec<std::net::SocketAddr> {
        let mut list = Vec::new();
        for (_, peer) in self
            .peers
            .iter()
            .filter(|e| e.1.ip.is_ipv4() == remote_addr.is_ipv4())
            .take(74)
        {
            if peer.ip == *remote_addr {
                continue;
            }

            list.push(peer.ip);
        }
        list
    }

    pub fn get_peers_iter(&self) -> impl Iterator<Item = (&PeerId, &TorrentPeer)> {
        self.peers.iter()
    }

    pub fn get_stats(&self) -> (u32, u32, u32) {
        let leechers = (self.peers.len() as u32) - self.seeders;
        (self.seeders, self.completed, leechers)
    }
}

struct TorrentDatabase {
    torrent_peers: tokio::sync::RwLock<std::collections::BTreeMap<InfoHash, TorrentEntry>>,
}

impl Default for TorrentDatabase {
    fn default() -> Self {
        TorrentDatabase {
            torrent_peers: tokio::sync::RwLock::new(std::collections::BTreeMap::new()),
        }
    }
}

pub struct TorrentTracker {
    mode: TrackerMode,
    database: TorrentDatabase,
}

#[derive(Serialize, Deserialize)]
struct DatabaseRow<'a> {
    info_hash: InfoHash,
    entry: Cow<'a, TorrentEntry>,
}
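
// The saved database is a bzip2-compressed stream of newline-delimited JSON,
// one `DatabaseRow` per line. `Cow` lets `save_database` serialize borrowed
// entries without cloning, while `load_database` deserializes owned ones.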
pub enum TorrentStats {
    TorrentFlagged,
    TorrentNotRegistered,
    Stats { seeders: u32, leechers: u32, complete: u32 },
}

impl TorrentTracker {
    pub fn new(mode: TrackerMode) -> TorrentTracker {
        TorrentTracker {
            mode,
            database: TorrentDatabase {
                torrent_peers: RwLock::new(std::collections::BTreeMap::new()),
            },
        }
    }

    pub async fn load_database<R: tokio::io::AsyncRead + Unpin>(
        mode: TrackerMode, reader: &mut R,
    ) -> Result<TorrentTracker, std::io::Error> {
        // The decoder is futures-based, so the tokio reader is adapted to the
        // futures traits and back before line-by-line reading.
        let reader = tokio::io::BufReader::new(reader);
        let reader = utils::TokioFuturesCompat::from(reader);
        let reader = async_compression::futures::bufread::BzDecoder::new(reader);
        let reader = utils::TokioFuturesCompat::from(reader);
        let reader = tokio::io::BufReader::new(reader);

        let res = TorrentTracker::new(mode);
        let mut db = res.database.torrent_peers.write().await;

        let mut records = reader
            .lines()
            .filter_map(|res: Result<String, _>| {
                if let Err(ref err) = res {
                    error!("failed to read lines! {}", err);
                }
                res.ok()
            })
            .map(|line: String| serde_json::from_str::<DatabaseRow>(&line))
            .filter_map(|jsr| {
                if let Err(ref err) = jsr {
                    error!("failed to parse json: {}", err);
                }
                jsr.ok()
            });

        while let Some(row) = records.next().await {
            let DatabaseRow { info_hash, entry } = row;
            db.insert(info_hash, entry.into_owned());
        }
        trace!("loaded {} entries from database", db.len());

        drop(db);

        Ok(res)
    }

    /// Adding torrents is not relevant to dynamic trackers.
    pub async fn add_torrent(&self, info_hash: &InfoHash) -> Result<(), ()> {
        let mut write_lock = self.database.torrent_peers.write().await;
        match write_lock.entry(info_hash.clone()) {
            std::collections::btree_map::Entry::Vacant(ve) => {
                ve.insert(TorrentEntry::new());
                Ok(())
            }
            std::collections::btree_map::Entry::Occupied(_entry) => Err(()),
        }
    }

    /// If the torrent is flagged, it will not be removed unless `force` is set to true.
    pub async fn remove_torrent(&self, info_hash: &InfoHash, force: bool) -> Result<(), ()> {
        use std::collections::btree_map::Entry;
        let mut entry_lock = self.database.torrent_peers.write().await;
        let torrent_entry = entry_lock.entry(info_hash.clone());
        match torrent_entry {
            Entry::Vacant(_) => {
                // no entry, nothing to do...
                Err(())
            }
            Entry::Occupied(entry) => {
                if force || !entry.get().is_flagged() {
                    entry.remove();
                    Ok(())
                } else {
                    Err(())
                }
            }
        }
    }

    /// Flagged torrents will result in a tracking error. This is to allow enforcement against piracy.
    pub async fn set_torrent_flag(&self, info_hash: &InfoHash, is_flagged: bool) -> bool {
        if let Some(entry) = self.database.torrent_peers.write().await.get_mut(info_hash) {
            if is_flagged && !entry.is_flagged {
                // empty the peer list.
                entry.peers.clear();
            }
            entry.is_flagged = is_flagged;
            true
        } else {
            false
        }
    }

    pub async fn get_torrent_peers(
        &self, info_hash: &InfoHash, remote_addr: &std::net::SocketAddr,
    ) -> Option<Vec<std::net::SocketAddr>> {
        let read_lock = self.database.torrent_peers.read().await;
        read_lock.get(info_hash).map(|entry| entry.get_peers(remote_addr))
    }

    pub async fn update_torrent_and_get_stats(
        &self, info_hash: &InfoHash, peer_id: &PeerId, remote_address: &std::net::SocketAddr, uploaded: u64,
        downloaded: u64, left: u64, event: Events,
    ) -> TorrentStats {
        use std::collections::btree_map::Entry;
        let mut torrent_peers = self.database.torrent_peers.write().await;
        let torrent_entry = match torrent_peers.entry(info_hash.clone()) {
            Entry::Vacant(vacant) => {
                // only dynamic mode registers previously unseen torrents on announce.
                match self.mode {
                    TrackerMode::DynamicMode => vacant.insert(TorrentEntry::new()),
                    _ => {
                        return TorrentStats::TorrentNotRegistered;
                    }
                }
            }
            Entry::Occupied(entry) => {
                if entry.get().is_flagged() {
                    return TorrentStats::TorrentFlagged;
                }
                entry.into_mut()
            }
        };

        torrent_entry.update_peer(peer_id, remote_address, uploaded, downloaded, left, event);

        let (seeders, complete, leechers) = torrent_entry.get_stats();

        TorrentStats::Stats {
            seeders,
            leechers,
            complete,
        }
    }

    pub(crate) async fn get_database<'a>(&'a self) -> tokio::sync::RwLockReadGuard<'a, BTreeMap<InfoHash, TorrentEntry>> {
        self.database.torrent_peers.read().await
    }

    pub async fn save_database<W: tokio::io::AsyncWrite + Unpin>(&self, mut w: W) -> Result<(), std::io::Error> {
        use futures::io::AsyncWriteExt;

        let mut writer = async_compression::futures::write::BzEncoder::new(utils::TokioFuturesCompat::from(&mut w));

        let db_lock = self.database.torrent_peers.read().await;

        let db: &BTreeMap<InfoHash, TorrentEntry> = &*db_lock;
        let mut tmp = Vec::with_capacity(4096);

        for row in db {
            let entry = DatabaseRow {
                info_hash: row.0.clone(),
                entry: Cow::Borrowed(row.1),
            };
            tmp.clear();
            if let Err(err) = serde_json::to_writer(&mut tmp, &entry) {
                error!("failed to serialize: {}", err);
                continue;
            }
            tmp.push(b'\n');
            writer.write_all(&tmp).await?;
        }

        writer.close().await?;
        Ok(())
    }

    async fn cleanup(&self) {
        let mut lock = self.database.torrent_peers.write().await;
        let db: &mut BTreeMap<InfoHash, TorrentEntry> = &mut *lock;
        let mut torrents_to_remove = Vec::new();

        for (k, v) in db.iter_mut() {
            // remove timed-out peers...
            {
                let mut peers_to_remove = Vec::new();
                let torrent_peers = &mut v.peers;

                for (peer_id, state) in torrent_peers.iter() {
                    if state.updated.elapsed() > TWO_HOURS {
                        // more than two hours have passed since the last update...
                        peers_to_remove.push(*peer_id);
                    }
                }

                for peer_id in peers_to_remove.iter() {
                    torrent_peers.remove(peer_id);
                }
            }

            if self.mode == TrackerMode::DynamicMode {
                // remove peer-less torrents...
                if v.peers.is_empty() && !v.is_flagged() {
                    torrents_to_remove.push(k.clone());
                }
            }
        }

        for info_hash in torrents_to_remove {
            db.remove(&info_hash);
        }
    }

    pub async fn periodic_task(&self, db_path: &str) {
        // clean up the db
        self.cleanup().await;

        // save to a journal file first, then rename it over the previous db.
        let mut journal_path = std::path::PathBuf::from(db_path);

        let mut filename = String::from(journal_path.file_name().unwrap().to_str().unwrap());
        filename.push_str("-journal");

        journal_path.set_file_name(filename.as_str());
        let jp_str = journal_path.as_path().to_str().unwrap();

        // scope to make sure the journal file is dropped/closed before the rename.
        {
            let mut file = match tokio::fs::File::create(jp_str).await {
                Err(err) => {
                    error!("failed to open file '{}': {}", jp_str, err);
                    return;
                }
                Ok(v) => v,
            };
            trace!("writing database to {}", jp_str);
            if let Err(err) = self.save_database(&mut file).await {
                error!("failed saving database. {}", err);
                return;
            }
        }

        // overwrite the previous db
        trace!("renaming '{}' to '{}'", jp_str, db_path);
        if let Err(err) = tokio::fs::rename(jp_str, db_path).await {
            error!("failed to move db backup. {}", err);
        }
    }
}

#[cfg(test)]
mod tests {
    use super::*;

    fn is_sync<T: Sync>() {}
    fn is_send<T: Send>() {}

    #[test]
    fn tracker_send() {
        is_send::<TorrentTracker>();
    }

    #[test]
    fn tracker_sync() {
        is_sync::<TorrentTracker>();
    }
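
    // `add_torrent` registers an info-hash once and rejects a duplicate
    // registration with `Err(())`.
    #[tokio::test]
    async fn test_add_torrent_twice() {
        let tracker = TorrentTracker::new(TrackerMode::StaticMode);
        let info_hash: InfoHash = [0u8; 20].into();
        tracker.add_torrent(&info_hash).await.expect("first add failed");
        assert!(tracker.add_torrent(&info_hash).await.is_err(), "duplicate add should fail");
    }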

    #[tokio::test]
    async fn test_save_db() {
        let tracker = TorrentTracker::new(TrackerMode::DynamicMode);
        tracker
            .add_torrent(&[0u8, 1, 2, 3, 4, 5, 6, 7, 8, 9, 1, 2, 3, 4, 5, 6, 7, 8, 9, 0].into())
            .await
            .expect("failed to add torrent");

        let mut out = Vec::new();
        let mut cursor = std::io::Cursor::new(&mut out);

        tracker.save_database(&mut cursor).await.expect("db save failed");
        assert!(cursor.position() > 0);
    }
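
    // `get_client_name` decodes an Azureus-style prefix; the id bytes here are
    // a made-up Transmission-style peer id, not a real one.
    #[test]
    fn test_peer_id_client_name() {
        let peer_id = PeerId::from_array(b"-TR2940-0123456789ab");
        assert_eq!(peer_id.get_client_name(), Some("Transmission"));
    }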

    #[test]
    fn test_infohash_de() {
        let ih: InfoHash = [0u8, 1, 2, 3, 4, 5, 6, 7, 8, 9, 1, 2, 3, 4, 5, 6, 7, 8, 9, 1].into();

        let serialized_ih = serde_json::to_string(&ih).unwrap();

        let de_ih: InfoHash = serde_json::from_str(serialized_ih.as_str()).unwrap();

        assert!(de_ih == ih);
    }
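
    // A peer announcing with `left == 0`, non-zero upload and
    // `Events::Complete` should count as one seeder and one completion, with
    // no leechers. The only `Events` variant this sketch relies on is
    // `Complete`, which the module above already uses.
    #[test]
    fn test_torrent_entry_stats() {
        let mut entry = TorrentEntry::new();
        let addr: std::net::SocketAddr = "127.0.0.1:6881".parse().unwrap();
        entry.update_peer(PeerId::from_array(b"-TR2940-0123456789ab"), &addr, 1024, 4096, 0, Events::Complete);
        let (seeders, completed, leechers) = entry.get_stats();
        assert_eq!((seeders, completed, leechers), (1, 1, 0));
    }

    // Display and FromStr on InfoHash should round-trip through the
    // 40-character hex form.
    #[test]
    fn test_infohash_roundtrip() {
        use std::str::FromStr;

        let ih: InfoHash = [0u8, 1, 2, 3, 4, 5, 6, 7, 8, 9, 1, 2, 3, 4, 5, 6, 7, 8, 9, 1].into();
        let hex = format!("{}", ih);
        assert_eq!(hex.len(), 40);
        let parsed = InfoHash::from_str(&hex).expect("failed to parse hex infohash");
        assert!(parsed == ih);
    }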
}