change sled to sqlite and remove lic

rustdesk 2022-05-12 20:00:33 +08:00
parent d36d6da445
commit b3f39598a7
33 changed files with 4125 additions and 2646 deletions

.gitattributes (vendored, new file, 1 line changed)

@ -0,0 +1 @@
* text=auto

.gitignore (vendored, 8 lines changed)

@ -1,6 +1,2 @@
/target
**/*.rs.bk
version.rs
sled.db
hbbs.sh
hbbs.conf
target
id*

Cargo.lock (generated, 2448 lines changed): diff suppressed because it is too large.

Cargo.toml

@ -1,9 +1,10 @@
[package]
name = "hbbs"
version = "1.1.4"
authors = ["open-trade <info@opentradesolutions.com>"]
edition = "2018"
build= "build.rs"
version = "1.1.5"
authors = ["open-trade <info@rustdesk.com>"]
edition = "2021"
build = "build.rs"
default-run = "hbbs"
[[bin]]
name = "hbbr"
@ -17,22 +18,28 @@ serde_derive = "1.0"
serde = "1.0"
serde_json = "1.0"
lazy_static = "1.4"
clap = "2.33"
rust-ini = "0.16"
minreq = { version = "2.3.1", features = ["punycode"] }
clap = "2"
rust-ini = "0.18"
minreq = { version = "2.4", features = ["punycode"] }
machine-uid = "0.2"
mac_address = "1.1"
whoami = "0.9"
whoami = "1.2"
base64 = "0.13"
cryptoxide = "0.3"
[build-dependencies]
hbb_common = { path = "libs/hbb_common" }
[workspace]
members = ["libs/hbb_common"]
[dependencies.rocksdb]
default-features = false
features = ["lz4"]
version = "0.15"
axum = { version = "0.5", features = ["headers"] }
sqlx = { git = "https://github.com/open-trade/sqlx", features = [ "runtime-tokio-rustls", "sqlite", "macros", "chrono", "json" ] }
deadpool = "0.8"
async-trait = "0.1"
async-speed-limit = { git = "https://github.com/open-trade/async-speed-limit" }
uuid = { version = "0.8", features = ["v4"] }
bcrypt = "0.12"
chrono = "0.4"
jsonwebtoken = "8"
headers = "0.3"
once_cell = "1.8"
sodiumoxide = "0.2"
tokio-tungstenite = "0.17"
tungstenite = "0.17"
regex = "1.4"
tower-http = { version = "0.2", features = ["fs", "trace", "cors"] }
http = "0.2"
flexi_logger = { version = "0.22", features = ["async", "use_chrono_for_offset"] }

Dockerfile (deleted)

@ -1,4 +0,0 @@
FROM ubuntu:20.04
COPY target/release/hbbs /usr/bin/hbbs
COPY target/release/hbbr /usr/bin/hbbr
WORKDIR /root

README.md (new file, 23 lines)

@ -0,0 +1,23 @@
# RustDesk Server Program
<<<<<<< HEAD
Self-host your own RustDesk server, it is free and open source.
```
cargo build --release
```
Two executables will be generated in target/release.
- hbbs - RustDesk ID/Rendezvous server
- hbbr - RustDesk relay server
[**Manual**](https://rustdesk.com/docs/en/self-host/)
=======
**Could you please also help us finish the "RustDesk Self-Hosting Server Survey" below, so that we can make it better?** https://forms.gle/9sDqAC5JBuB4b52S6
If you are looking for an open source implementation, please go to [rustdesk-server-demo](https://github.com/rustdesk/rustdesk-server-demo).
[Download](https://github.com/rustdesk/rustdesk-server/releases)
[Doc](https://rustdesk.com/docs/en/self-host)
>>>>>>> 5043b7ce7f5a2230661381cb9c63f84c60414035

db_v2.sqlite3 (new binary file, not shown)

@ -1 +0,0 @@
Subproject commit ec453e5e65bdb3d082cca30416880e4ee0f3665a

Synology package INFO file (deleted)

@ -1,18 +0,0 @@
package="RustDesk Server"
version="1.1.3"
description="RustDesk is a remote desktop software allowing your own rendezvous/relay server. It attempts to make direct connect via TCP hole punch first, and then forward via relay server if direct connection fails. 4 ports are used. NAT test port: 21115(tcp), ID/rendezvous port: 21116(tcp/udp), relay port: 21117(tcp), Email: (), Key: ()"
displayname="RustDesk Rendezvous/Relay Server"
maintainer="CarrieZ Studio"
maintainer_url="https://rustdesk.com/zh/"
distributor="RustDesk & 裙下孤魂"
support_url="https://rustdesk.com/contact/"
arch="apollolake avoton braswell broadwell broadwellnk bromolow cedarview denverton dockerx64 geminilake grantley kvmx64 purley v1000 x86 x86_64"
os_min_ver="6.1"
reloadui="yes"
startable="yes"
thirdparty="yes"
install_reboot="no"
install_dep_packages=""
install_conflict_packages=""
extractsize=""
checkport="no"

Two binary image files deleted (7.3 KiB and 31 KiB); not shown.

Synology install wizard UI file (deleted)

@ -1,54 +0,0 @@
[{
"step_title": "Install Settings",
"items": [{
"type": "textfield",
"desc": "Listening Ports",
"subitems": [{
"key": "hbbs_port",
"desc": "RustDesk ID Server Port",
"defaultValue": "21116",
"validator": {
"allowBlank": false,
"regex": {
"expr": "/^([0-9]{1,4}|[1-5][0-9]{4}|6[0-4][0-9]{3}|65[0-4][0-9]{2}|655[0-2][0-9]|6553[0-5])$/",
"errorText": "Digit number only"
}
}
}, {
"key": "hbbr_port",
"desc": "RustDesk Relay Server Port",
"defaultValue": "21117",
"validator": {
"allowBlank": false,
"regex": {
"expr": "/^([0-9]{1,4}|[1-5][0-9]{4}|6[0-4][0-9]{3}|65[0-4][0-9]{2}|655[0-2][0-9]|6553[0-5])$/",
"errorText": "Digit number only"
}
}
}]
},{
"type": "textfield",
"desc": "Registered email, check http://rustdesk.com/server for more information",
"subitems": [{
"key": "email",
"desc": "Email",
"validator": {
"allowBlank": false,
"regex": {
"expr": "/^\\w+([-+.']\\w+)*@\\w+([-.]\\w+)*\\.\\w+([-.]\\w+)*$/",
"errorText": "Invalid email format"
}
}
}]
},{
"type": "textfield",
"desc": "Only allow the client with the same key",
"subitems": [{
"key": "key",
"desc": "Key",
"validator": {
"allowBlank": true
}
}]
}]
}]
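The two port fields above are validated with a regex that spells out the 0–65535 range digit by digit. As a side note (a sketch in Rust, not part of this package), an essentially equivalent check is a plain u16 parse:
```rust
// Sketch only: a u16 parse covers the same 0..=65535 range that the
// wizard regex above enumerates explicitly.
fn is_valid_port(s: &str) -> bool {
    s.parse::<u16>().is_ok()
}

fn main() {
    assert!(is_valid_port("21116"));
    assert!(!is_valid_port("65536"));
}
```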

Synology firewall configuration RustDesk_Server.sc (deleted)

@ -1,23 +0,0 @@
[RustDesk_Server_HBBR]
title="RustDesk Server (HBBR_TCP)"
desc="RustDesk Server"
port_forward="yes"
dst.ports="21117/tcp"
[RustDesk_Server_HBBS_TCP]
title="RustDesk Server (HBBS_TCP)"
desc="RustDesk Server"
port_forward="yes"
dst.ports="21116/tcp"
[RustDesk_Server_HBBS_UDP]
title="RustDesk Server (HBBS_UDP)"
desc="RustDesk Server"
port_forward="yes"
dst.ports="21116/udp"
[RustDesk_Server_NAT_TCP]
title="RustDesk Server (NAT_TCP)"
desc="RustDesk Server"
port_forward="yes"
dst.ports="21115/tcp"

Synology installer script (deleted)

@ -1,167 +0,0 @@
#!/bin/sh
PACKAGE_NAME="$SYNOPKG_PKGNAME"
PACKAGE_BASE="/var/packages/${PACKAGE_NAME}/target"
PACKAGE_SSS="/var/packages/${PACKAGE_NAME}/scripts/start-stop-status"
SERVICETOOL="/usr/syno/bin/servicetool"
GETKEYVALUE="/usr/syno/bin/synogetkeyvalue"
SETKEYVALUE="/usr/syno/bin/synosetkeyvalue"
FWFILENAME="RustDesk_Server.sc"
[ "${hbbr_port}" == "" ] && hbbr_port="21117"
[ "${hbbs_port}" == "" ] && hbbs_port="21116"
[ "${key}" == "" ] && key=""
[ "${email}" == "" ] && email=""
nat_port=`expr ${hbbs_port} - 1`
preinst() {
exit 0
}
postinst() {
if [ "${SYNOPKG_PKG_STATUS}" == "INSTALL" ]; then
# Import data from another RustDesk server
import_db="false"
import_all="false"
if [ "${rds_old_import_all}" == "true" ]; then
rds_old_import_db="true"
import_all="true"
elif [ "${rds_import_all}" == "true" ]; then
rds_import_db="true"
import_all="true"
fi
if [ "${rds_old_import_db}" == "true" ]; then
import_db="true"
PACKAGE_IMPORT_DIR="/var/packages/RustDesk_Server"
elif [ "${rds_import_db}" == "true" ]; then
import_db="true"
PACKAGE_IMPORT_DIR="/var/packages/RustDesk Server"
fi
if [ "${import_db}" == "true" ]; then
[ -x "${PACKAGE_IMPORT_DIR}/scripts/start-stop-status" ] \
&& SYNOPKG_PKGNAME="RustDesk Server" "${PACKAGE_IMPORT_DIR}/scripts/start-stop-status" stop 2>&1
[ -f "${PACKAGE_IMPORT_DIR}/enabled" ] && rm -f "${PACKAGE_IMPORT_DIR}/enabled"
[ -d "${PACKAGE_IMPORT_DIR}/target/hbbs.db" ] && cp -prf "${PACKAGE_IMPORT_DIR}/target/hbbs.db" "${PACKAGE_BASE}"
fi
if [ "${import_all}" == "true" ]; then
[ -d "${PACKAGE_IMPORT_DIR}/target/logs" ] && cp -prf "${PACKAGE_IMPORT_DIR}/target/logs" "${PACKAGE_BASE}"
fi
# Add application configuration
sed -i "s/relay port: 21117/relay port: ${hbbr_port}/" "/var/packages/${PACKAGE_NAME}/INFO"
sed -i "s/ID\/rendezvous port: 21116/ID\/rendezvous port: ${hbbs_port}/" "/var/packages/${PACKAGE_NAME}/INFO"
sed -i "s/NAT test port: 21115/NAT test port: ${nat_port}/" "/var/packages/${PACKAGE_NAME}/INFO"
sed -i "s/Key: ()/Key: (${key})/" "/var/packages/${PACKAGE_NAME}/INFO"
sed -i "s/Email: ()/Email: (${email})/" "/var/packages/${PACKAGE_NAME}/INFO"
sed -i "s/21117/${hbbr_port}/" "/var/packages/${PACKAGE_NAME}/scripts/${FWFILENAME}"
sed -i "s/21116/${hbbs_port}/" "/var/packages/${PACKAGE_NAME}/scripts/${FWFILENAME}"
sed -i "s/21115/${nat_port}/" "/var/packages/${PACKAGE_NAME}/scripts/${FWFILENAME}"
sed -i "s/port=[^ ]*/port=${hbbr_port}/g" "${PACKAGE_BASE}/config/hbbr.conf"
sed -i "s/port=[^ ]*/port=${hbbs_port}/g" "${PACKAGE_BASE}/config/hbbs.conf"
sed -i "s/key=[^ ]*/key=${key}/g" "${PACKAGE_BASE}/config/hbbs.conf"
sed -i "s/email=[^ ]*/email=${email}/g" "${PACKAGE_BASE}/config/hbbs.conf"
# Add firewall configuration
cat "/var/packages/${PACKAGE_NAME}/scripts/${FWFILENAME}" >"/tmp/${FWFILENAME}"
${SERVICETOOL} --install-configure-file --package "/tmp/${FWFILENAME}" >/dev/null
rm -f "/tmp/${FWFILENAME}"
# Set file permissions
chmod -R 755 "${PACKAGE_BASE}"/*
chmod -R 755 "/var/packages/${PACKAGE_NAME}/scripts"/*
chmod -R 755 "/var/packages/${PACKAGE_NAME}/WIZARD_UIFILES"/*
chmod 644 "/var/packages/${PACKAGE_NAME}/INFO"
fi
exit 0
}
preuninst() {
# Stop the package
"${PACKAGE_SSS}" stop
# Remove firewall configuration
if [ "${SYNOPKG_PKG_STATUS}" == "UNINSTALL" ]; then
${SERVICETOOL} --remove-configure-file --package "${FWFILENAME}" >/dev/null
fi
exit 0
}
postuninst() {
# Remove unnecessary directories...
if [ -d "/usr/syno/etc/packages/${PACKAGE_NAME}" ]; then
rm -rf "/usr/syno/etc/packages/${PACKAGE_NAME}"
fi
exit 0
}
preupgrade() {
# Stop the package
"${PACKAGE_SSS}" stop
# Not working yet...
# # Retrieve old settings...
# hbbr_port=`${GETKEYVALUE} "${PACKAGE_BASE}/config/hbbr.conf" port`
# hbbs_port=`${GETKEYVALUE} "${PACKAGE_BASE}/config/hbbs.conf" port`
# sed -i "s/21117/${hbbr_port}/" "/var/packages/${PACKAGE_NAME}/WIZARD_UIFILES/upgrade_uifile"
# sed -i "s/21116/${hbbs_port}/" "/var/packages/${PACKAGE_NAME}/WIZARD_UIFILES/upgrade_uifile"
## Not working yet...
# Back up data files...
if [ -d "${SYNOPKG_PKGDEST}" ]; then
DIRS4BACKUP="data logs hbbs.db config"
for DIR in $DIRS4BACKUP; do
if [ -d "${SYNOPKG_PKGDEST}/${DIR}" ]; then
mkdir -p "${SYNOPKG_PKGDEST}/../${PACKAGE_NAME}_upgrade/${DIR}"
mv "${SYNOPKG_PKGDEST}/${DIR}"/* "${SYNOPKG_PKGDEST}/../${PACKAGE_NAME}_upgrade/${DIR}"
rmdir "${SYNOPKG_PKGDEST}/${DIR}"
elif [ -f "${SYNOPKG_PKGDEST}/${DIR}" ]; then
mv "${SYNOPKG_PKGDEST}/${DIR}" "${SYNOPKG_PKGDEST}/../${PACKAGE_NAME}_upgrade"
fi
done
fi
exit 0
}
postupgrade() {
# Restore data files...
if [ -d "${SYNOPKG_PKGDEST}/../${PACKAGE_NAME}_upgrade" ]; then
for DIR in `ls "${SYNOPKG_PKGDEST}/../${PACKAGE_NAME}_upgrade"`
do
if [ -d "${SYNOPKG_PKGDEST}/../${PACKAGE_NAME}_upgrade/${DIR}" ]; then
[ ! -d "${SYNOPKG_PKGDEST}/${DIR}" ] && mkdir "${SYNOPKG_PKGDEST}/${DIR}"
mv "${SYNOPKG_PKGDEST}/../${PACKAGE_NAME}_upgrade/${DIR}"/* "${SYNOPKG_PKGDEST}/${DIR}"
rmdir "${SYNOPKG_PKGDEST}/../${PACKAGE_NAME}_upgrade/${DIR}"
elif [ -f "${SYNOPKG_PKGDEST}/../${PACKAGE_NAME}_upgrade/${DIR}" ]; then
mv "${SYNOPKG_PKGDEST}/../${PACKAGE_NAME}_upgrade/${DIR}" "${SYNOPKG_PKGDEST}"
fi
done
rmdir "${SYNOPKG_PKGDEST}/../${PACKAGE_NAME}_upgrade"
fi
# Restore settings...
hbbr_port=`${GETKEYVALUE} "${PACKAGE_BASE}/config/hbbr.conf" port` >>/tmp/wakko.txt
hbbs_port=`${GETKEYVALUE} "${PACKAGE_BASE}/config/hbbs.conf" port` >>/tmp/wakko.txt
nat_port=`expr ${hbbs_port} - 1`
key=`${GETKEYVALUE} "${PACKAGE_BASE}/config/hbbs.conf" key` >>/tmp/wakko.txt
email=`${GETKEYVALUE} "${PACKAGE_BASE}/config/hbbs.conf" email` >>/tmp/wakko.txt
sed -i "s/relay port: 21117/relay port: ${hbbr_port}/" "/var/packages/${PACKAGE_NAME}/INFO" >>/tmp/wakko.txt
sed -i "s/ID\/rendezvous port: 21116/ID\/rendezvous port: ${hbbs_port}/" "/var/packages/${PACKAGE_NAME}/INFO" >>/tmp/wakko.txt
sed -i "s/NAT test port: 21115/NAT test port: ${nat_port}/" "/var/packages/${PACKAGE_NAME}/INFO" >>/tmp/wakko.txt
sed -i "s/Key: ()/Key: (${key})/" "/var/packages/${PACKAGE_NAME}/INFO"
sed -i "s/Email: ()/Email: (${email})/" "/var/packages/${PACKAGE_NAME}/INFO"
sed -i "s/21117/${hbbr_port}/" "/var/packages/${PACKAGE_NAME}/scripts/${FWFILENAME}" >>/tmp/wakko.txt
sed -i "s/21116/${hbbs_port}/" "/var/packages/${PACKAGE_NAME}/scripts/${FWFILENAME}" >>/tmp/wakko.txt
sed -i "s/21115/${nat_port}/" "/var/packages/${PACKAGE_NAME}/scripts/${FWFILENAME}" >>/tmp/wakko.txt
# Set file permissions
chmod -R 755 "/var/packages/${PACKAGE_NAME}/scripts"/*
chmod -R 755 "/var/packages/${PACKAGE_NAME}/WIZARD_UIFILES"/*
chmod 644 "/var/packages/${PACKAGE_NAME}/INFO"
exit 0
}

Six identical Synology script wrappers (deleted, 3 lines each):
@ -1,3 +0,0 @@
#!/bin/sh
. "`dirname \"$0\"`/installer"
`basename "$0"` >$SYNOPKG_TEMP_LOGFILE

Synology start-stop-status script (deleted)

@ -1,158 +0,0 @@
#!/bin/sh
sError="ERROR: "
[ ! -z "$SYNOPKG_PKGNAME" ] && sError="<br />${sError}"
TIMEOUT=120
PACKAGE_NAME="RustDesk Server"
PACKAGE_BASE="/var/packages/${PACKAGE_NAME}/target"
HBBR_BIN="${PACKAGE_BASE}/bin/hbbr"
HBBR_PORT=`synogetkeyvalue "${PACKAGE_BASE}/config/hbbr.conf" port`
HBBR_LOG="/var/log/hbbr.log"
HBBS_BIN="${PACKAGE_BASE}/bin/hbbs"
HBBS_PORT=`synogetkeyvalue "${PACKAGE_BASE}/config/hbbs.conf" port`
KEY=`synogetkeyvalue "${PACKAGE_BASE}/config/hbbs.conf" key`
EMAIL=`synogetkeyvalue "${PACKAGE_BASE}/config/hbbs.conf" email`
HBBS_LOG="/var/log/hbbs.log"
PACKAGE_ENABLED="/var/packages/${PACKAGE_NAME}/enabled"
PS_CMD="/bin/ps -w"
DSM_MAJORVERSION=`synogetkeyvalue /etc.defaults/VERSION majorversion`
if [[ $DSM_MAJORVERSION -gt 5 ]]; then
PS_CMD="$PS_CMD -x"
fi
CheckIfDaemonAlive() {
local PID="$1"
PROCESS_ALIVE="0"
[ -z "$PID" ] && return 1
kill -0 "$PID"
[ "0" == "$?" ] && PROCESS_ALIVE="1"
}
running_hbbr() {
local PID=$(${PS_CMD} | sed -e 's/^[ \t]*//' | grep -v grep | grep hbbr | grep "${PACKAGE_NAME}" | head -n1 | cut -f1 -d' ')
CheckIfDaemonAlive $PID
[ "0" == "$PROCESS_ALIVE" ] && return 1
return 0
}
running_hbbs() {
local PID=$(${PS_CMD} | sed -e 's/^[ \t]*//' | grep -v grep | grep hbbs | grep "${PACKAGE_NAME}" | head -n1 | cut -f1 -d' ')
CheckIfDaemonAlive $PID
[ "0" == "$PROCESS_ALIVE" ] && return 1
return 0
}
start() {
[ "$SYNOPKG_TEMP_LOGFILE" == "" ] && SYNOPKG_TEMP_LOGFILE="/var/log/rustdeskserver.start.log"
LANG=C cd "$PACKAGE_BASE" && (nohup "$HBBR_BIN" -p $HBBR_PORT -k "$KEY" -m "$EMAIL" > "$HBBR_LOG" 2>&1 &) && (nohup "$HBBS_BIN" -p $HBBS_PORT -k "$KEY" -m "$EMAIL" > "$HBBS_LOG" 2>&1 &)
i=0
while true; do
if ! running_hbbr || ! running_hbbs ; then
# echo "WAIT: ${i}s of ${TIMEOUT}s"
sleep 5s
i=$((i+5))
else
break
fi
[ $i -ge $TIMEOUT ] && break
done
# Check hbbr process status
if ! running_hbbr ; then
echo -e "${sError}hbbr process not running" | tee -a $SYNOPKG_TEMP_LOGFILE
stop
return 1
fi
# Check hbbs process status
if ! running_hbbs ; then
echo -e "${sError}hbbs process not running" | tee -a $SYNOPKG_TEMP_LOGFILE
stop
return 1
fi
return 0
}
stop() {
[ "$SYNOPKG_TEMP_LOGFILE" == "" ] && SYNOPKG_TEMP_LOGFILE="/var/log/rustdeskserver.stop.log"
# Check hbbr process status
if running_hbbr ; then
local PID=$(${PS_CMD} | sed -e 's/^[ \t]*//' | grep -v grep | grep hbbr | grep "${PACKAGE_NAME}" | head -n1 | cut -f1 -d' ')
[ -z "$PID" ] && return 0
kill -15 $PID
sleep 5s
# Check hbbr process status
if running_hbbr ; then
kill -9 $PID
sleep 5s
if running_hbbr ; then
echo "${sError}Failed to kill hbbr process(pid=$PID)!" | tee -a $SYNOPKG_TEMP_LOGFILE
return 1
fi
fi
fi
# Check hbbs process status
if running_hbbs ; then
local PID=$(${PS_CMD} | sed -e 's/^[ \t]*//' | grep -v grep | grep hbbs | grep "${PACKAGE_NAME}" | head -n1 | cut -f1 -d' ')
[ -z "$PID" ] && return 0
kill -15 $PID
sleep 5s
# Check hbbs process status
if running_hbbs ; then
kill -9 $PID
sleep 5s
if running_hbbs ; then
echo "${sError}无法关闭hbbs进程 (pid=$PID)!" | tee -a $SYNOPKG_TEMP_LOGFILE
return 1
fi
fi
fi
return 0
}
case $1 in
start)
# Start the server
start
exit $?
;;
stop)
# Stop the server
stop
exit $?
;;
status)
# Check whether the package is enabled
if [ ! -f "${PACKAGE_ENABLED}" ]; then
echo "${sError}package not started" | tee -a $SYNOPKG_TEMP_LOGFILE
exit 0
fi
# Check hbbr process status
if ! running_hbbr ; then
echo "${sError}hbbr process killed" | tee -a $SYNOPKG_TEMP_LOGFILE
exit 1
fi
# Check hbbs process status
if ! running_hbbs ; then
echo "${sError}hbbs process killed" | tee -a $SYNOPKG_TEMP_LOGFILE
exit 1
fi
exit 0
;;
log)
echo "$PACKAGE_BASE/logs/server.log"
exit 0
;;
esac

src/common.rs (new file, 128 lines)

@ -0,0 +1,128 @@
use clap::App;
use hbb_common::{anyhow::Context, log, ResultType};
use ini::Ini;
use sodiumoxide::crypto::sign;
use std::{
collections::HashMap,
io::prelude::*,
io::Read,
net::{IpAddr, SocketAddr},
time::{Instant, SystemTime},
};
pub(crate) fn get_expired_time() -> Instant {
let now = Instant::now();
now.checked_sub(std::time::Duration::from_secs(3600))
.unwrap_or(now)
}
pub(crate) fn test_if_valid_server(host: &str, name: &str) -> ResultType<SocketAddr> {
use std::net::ToSocketAddrs;
let res = if host.contains(":") {
host.to_socket_addrs()?.next().context("")
} else {
format!("{}:{}", host, 0)
.to_socket_addrs()?
.next()
.context("")
};
if res.is_err() {
log::error!("Invalid {} {}: {:?}", name, host, res);
}
res
}
pub(crate) fn get_servers(s: &str, tag: &str) -> Vec<String> {
let servers: Vec<String> = s
.split(",")
.filter(|x| !x.is_empty() && test_if_valid_server(x, tag).is_ok())
.map(|x| x.to_owned())
.collect();
log::info!("{}={:?}", tag, servers);
servers
}
#[inline]
fn arg_name(name: &str) -> String {
name.to_uppercase().replace("_", "-")
}
pub fn init_args(args: &str, name: &str, about: &str) {
let matches = App::new(name)
.version(crate::version::VERSION)
.author("Purslane Ltd. <info@rustdesk.com>")
.about(about)
.args_from_usage(&args)
.get_matches();
if let Ok(v) = Ini::load_from_file(".env") {
if let Some(section) = v.section(None::<String>) {
section
.iter()
.for_each(|(k, v)| std::env::set_var(arg_name(k), v));
}
}
if let Some(config) = matches.value_of("config") {
if let Ok(v) = Ini::load_from_file(config) {
if let Some(section) = v.section(None::<String>) {
section
.iter()
.for_each(|(k, v)| std::env::set_var(arg_name(k), v));
}
}
}
for (k, v) in matches.args {
if let Some(v) = v.vals.get(0) {
std::env::set_var(arg_name(k), v.to_string_lossy().to_string());
}
}
}
#[inline]
pub fn get_arg(name: &str) -> String {
get_arg_or(name, "".to_owned())
}
#[inline]
pub fn get_arg_or(name: &str, default: String) -> String {
std::env::var(arg_name(name)).unwrap_or(default)
}
#[inline]
pub fn now() -> u64 {
SystemTime::now()
.duration_since(SystemTime::UNIX_EPOCH)
.map(|x| x.as_secs())
.unwrap_or_default()
}
pub fn gen_sk() -> (String, Option<sign::SecretKey>) {
let sk_file = "id_ed25519";
if let Ok(mut file) = std::fs::File::open(sk_file) {
let mut contents = String::new();
if file.read_to_string(&mut contents).is_ok() {
let sk = base64::decode(&contents).unwrap_or_default();
if sk.len() == sign::SECRETKEYBYTES {
let mut tmp = [0u8; sign::SECRETKEYBYTES];
tmp[..].copy_from_slice(&sk);
let pk = base64::encode(&tmp[sign::SECRETKEYBYTES / 2..]);
log::info!("Private key comes from {}", sk_file);
return (pk, Some(sign::SecretKey(tmp)));
}
}
} else {
let (pk, sk) = sign::gen_keypair();
let pub_file = format!("{}.pub", sk_file);
if let Ok(mut f) = std::fs::File::create(&pub_file) {
f.write_all(base64::encode(pk).as_bytes()).ok();
if let Ok(mut f) = std::fs::File::create(sk_file) {
let s = base64::encode(&sk);
if f.write_all(s.as_bytes()).is_ok() {
log::info!("Private/public key written to {}/{}", sk_file, pub_file);
log::debug!("Public key: {:?}", pk);
return (base64::encode(pk), Some(sk));
}
}
}
}
("".to_owned(), None)
}
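init_args above merges three sources into process environment variables: an optional .env file, an optional --config INI file, and the parsed command-line flags, with keys normalized by arg_name (uppercased, underscores replaced by dashes). The get_arg/get_arg_or helpers then simply read the environment. A standalone sketch of that lookup, copying the two helpers for illustration (the "port" flag and its value here are only an example):
```rust
// Sketch only: mirrors arg_name() and get_arg_or() from src/common.rs above.
fn arg_name(name: &str) -> String {
    name.to_uppercase().replace("_", "-")
}

fn get_arg_or(name: &str, default: String) -> String {
    std::env::var(arg_name(name)).unwrap_or(default)
}

fn main() {
    // After init_args() has flattened .env / config file / CLI flags into env vars:
    std::env::set_var("PORT", "21117");
    assert_eq!(get_arg_or("port", "21116".to_owned()), "21117");
}
```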

src/database.rs (new file, 231 lines)

@ -0,0 +1,231 @@
use async_trait::async_trait;
use hbb_common::{log, ResultType};
use serde_json::value::Value;
use sqlx::{
sqlite::SqliteConnectOptions, ConnectOptions, Connection, Error as SqlxError, SqliteConnection,
};
use std::{ops::DerefMut, str::FromStr};
//use sqlx::postgres::PgPoolOptions;
//use sqlx::mysql::MySqlPoolOptions;
pub(crate) type DB = sqlx::Sqlite;
pub(crate) type MapValue = serde_json::map::Map<String, Value>;
pub(crate) type MapStr = std::collections::HashMap<String, String>;
type Pool = deadpool::managed::Pool<DbPool>;
pub struct DbPool {
url: String,
}
#[async_trait]
impl deadpool::managed::Manager for DbPool {
type Type = SqliteConnection;
type Error = SqlxError;
async fn create(&self) -> Result<SqliteConnection, SqlxError> {
let mut opt = SqliteConnectOptions::from_str(&self.url).unwrap();
opt.log_statements(log::LevelFilter::Debug);
SqliteConnection::connect_with(&opt).await
}
async fn recycle(
&self,
obj: &mut SqliteConnection,
) -> deadpool::managed::RecycleResult<SqlxError> {
Ok(obj.ping().await?)
}
}
#[derive(Clone)]
pub struct Database {
pool: Pool,
}
#[derive(Default)]
pub struct Peer {
pub guid: Vec<u8>,
pub id: String,
pub uuid: Vec<u8>,
pub pk: Vec<u8>,
pub user: Option<Vec<u8>>,
pub info: String,
pub status: Option<i64>,
}
impl Database {
pub async fn new(url: &str) -> ResultType<Database> {
if !std::path::Path::new(url).exists() {
std::fs::File::create(url).ok();
}
let n: usize = std::env::var("MAX_CONNECTIONS")
.unwrap_or("1".to_owned())
.parse()
.unwrap_or(1);
log::info!("MAX_CONNECTIONS={}", n);
let pool = Pool::new(
DbPool {
url: url.to_owned(),
},
n,
);
let _ = pool.get().await?; // test
let db = Database { pool };
db.create_tables().await?;
Ok(db)
}
async fn create_tables(&self) -> ResultType<()> {
sqlx::query!(
"
create table if not exists peer (
guid blob primary key not null,
id varchar(100) not null,
uuid blob not null,
pk blob not null,
created_at datetime not null default(current_timestamp),
user blob,
status tinyint,
note varchar(300),
info text not null
) without rowid;
create unique index if not exists index_peer_id on peer (id);
create index if not exists index_peer_user on peer (user);
create index if not exists index_peer_created_at on peer (created_at);
create index if not exists index_peer_status on peer (status);
"
)
.execute(self.pool.get().await?.deref_mut())
.await?;
Ok(())
}
pub async fn get_peer(&self, id: &str) -> ResultType<Option<Peer>> {
Ok(sqlx::query_as!(
Peer,
"select guid, id, uuid, pk, user, status, info from peer where id = ?",
id
)
.fetch_optional(self.pool.get().await?.deref_mut())
.await?)
}
pub async fn get_peer_id(&self, guid: &[u8]) -> ResultType<Option<String>> {
Ok(sqlx::query!("select id from peer where guid = ?", guid)
.fetch_optional(self.pool.get().await?.deref_mut())
.await?
.map(|x| x.id))
}
#[inline]
pub async fn get_conn(&self) -> ResultType<deadpool::managed::Object<DbPool>> {
Ok(self.pool.get().await?)
}
pub async fn update_peer(&self, payload: MapValue, guid: &[u8]) -> ResultType<()> {
let mut conn = self.get_conn().await?;
let mut tx = conn.begin().await?;
if let Some(v) = payload.get("note") {
let v = get_str(v);
sqlx::query!("update peer set note = ? where guid = ?", v, guid)
.execute(&mut tx)
.await?;
}
tx.commit().await?;
Ok(())
}
pub async fn insert_peer(
&self,
id: &str,
uuid: &Vec<u8>,
pk: &Vec<u8>,
info: &str,
) -> ResultType<Vec<u8>> {
let guid = uuid::Uuid::new_v4().as_bytes().to_vec();
sqlx::query!(
"insert into peer(guid, id, uuid, pk, info) values(?, ?, ?, ?, ?)",
guid,
id,
uuid,
pk,
info
)
.execute(self.pool.get().await?.deref_mut())
.await?;
Ok(guid)
}
pub async fn update_pk(
&self,
guid: &Vec<u8>,
id: &str,
pk: &Vec<u8>,
info: &str,
) -> ResultType<()> {
sqlx::query!(
"update peer set id=?, pk=?, info=? where guid=?",
id,
pk,
info,
guid
)
.execute(self.pool.get().await?.deref_mut())
.await?;
Ok(())
}
}
#[cfg(test)]
mod tests {
use hbb_common::tokio;
#[test]
fn test_insert() {
insert();
}
#[tokio::main(flavor = "multi_thread")]
async fn insert() {
let db = super::Database::new("test.sqlite3").await.unwrap();
let mut jobs = vec![];
for i in 0..10000 {
let cloned = db.clone();
let id = i.to_string();
let a = tokio::spawn(async move {
let empty_vec = Vec::new();
cloned
.insert_peer(&id, &empty_vec, &empty_vec, "")
.await
.unwrap();
});
jobs.push(a);
}
for i in 0..10000 {
let cloned = db.clone();
let id = i.to_string();
let a = tokio::spawn(async move {
cloned.get_peer(&id).await.unwrap();
});
jobs.push(a);
}
hbb_common::futures::future::join_all(jobs).await;
}
}
#[inline]
pub fn guid2str(guid: &Vec<u8>) -> String {
let mut bytes = [0u8; 16];
bytes[..].copy_from_slice(&guid);
uuid::Uuid::from_bytes(bytes).to_string()
}
pub(crate) fn get_str(v: &Value) -> Option<&str> {
match v {
Value::String(v) => {
let v = v.trim();
if v.is_empty() {
None
} else {
Some(v)
}
}
_ => None,
}
}
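The new Database type wraps a deadpool-managed pool of sqlx SqliteConnections and exposes a small async API (new, get_peer, get_peer_id, insert_peer, update_pk, update_peer). A minimal usage sketch from inside this crate, mirroring the test module above (the id and info values are arbitrary examples):
```rust
// Sketch only: open the database and round-trip one peer record using the
// API defined in src/database.rs above. Assumes it runs inside this crate.
use hbb_common::tokio;

#[tokio::main(flavor = "multi_thread")]
async fn example() -> hbb_common::ResultType<()> {
    let db = crate::database::Database::new("db_v2.sqlite3").await?; // file is created if missing
    let empty = Vec::new();
    let guid = db.insert_peer("12345678", &empty, &empty, "{}").await?;
    let peer = db.get_peer("12345678").await?;
    assert_eq!(peer.map(|p| p.guid), Some(guid));
    Ok(())
}
```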

hbbr main source (relay server entry point)

@ -1,34 +1,41 @@
use clap::App;
mod common;
mod relay_server;
use hbb_common::{env_logger::*, ResultType};
use flexi_logger::*;
use hbb_common::{config::RELAY_PORT, ResultType};
use relay_server::*;
use std::sync::{Arc, Mutex};
mod lic;
mod version;
fn main() -> ResultType<()> {
init_from_env(Env::default().filter_or(DEFAULT_FILTER_ENV, "info"));
let _logger = Logger::try_with_env_or_str("info")?
.log_to_stdout()
.format(opt_format)
.write_mode(WriteMode::Async)
.start()?;
let args = format!(
"-p, --port=[NUMBER(default={})] 'Sets the listening port'
-k, --key=[KEY] 'Only allow the client with the same key'
{}
",
DEFAULT_PORT,
lic::EMAIL_ARG
RELAY_PORT,
);
let matches = App::new("hbbr")
.version(hbbs::VERSION)
.author("CarrieZ Studio<info@rustdesk.com>")
.version(version::VERSION)
.author("Purslane Ltd. <info@rustdesk.com>")
.about("RustDesk Relay Server")
.args_from_usage(&args)
.get_matches();
if !lic::check_lic(matches.value_of("email").unwrap_or(""), hbbs::VERSION) {
if let Ok(v) = ini::Ini::load_from_file(".env") {
if let Some(section) = v.section(None::<String>) {
section.iter().for_each(|(k, v)| std::env::set_var(k, v));
}
}
#[cfg(not(debug_assertions))]
if !lic::check_lic(matches.value_of("email").unwrap_or(""), version::VERSION) {
return Ok(());
}
let stop: Arc<Mutex<bool>> = Default::default();
start(
matches.value_of("port").unwrap_or(DEFAULT_PORT),
matches.value_of("port").unwrap_or(&RELAY_PORT.to_string()),
matches.value_of("key").unwrap_or(""),
stop,
)?;
Ok(())
}

library root (module declarations)

@ -1,6 +1,6 @@
mod rendezvous_server;
mod sled_async;
pub use rendezvous_server::*;
use sled_async::*;
pub mod common;
mod database;
mod peer;
mod version;
pub use version::*;

license-check module lic.rs (deleted)

@ -1,170 +0,0 @@
use hbb_common::{bail, log, ResultType, rand::{self, Rng}};
use serde_derive::{Deserialize, Serialize};
use std::io::prelude::*;
use std::path::Path;
#[derive(Debug, PartialEq, Default, Serialize, Deserialize, Clone)]
pub struct Machine {
#[serde(default)]
hostname: String,
#[serde(default)]
uid: String,
#[serde(default)]
mac: String,
}
#[derive(Debug, PartialEq, Default, Serialize, Deserialize, Clone)]
pub struct Post {
#[serde(default)]
machine: String,
#[serde(default)]
email: String,
#[serde(default)]
status: String,
#[serde(default)]
version: String,
#[serde(default)]
next_check_time: u64,
#[serde(default)]
nonce: String,
#[serde(default)]
tip: String,
}
const LICENSE_FILE: &'static str = ".license.txt";
pub fn check_lic(email: &str, version: &str) -> bool {
if email.is_empty() {
log::error!("Registered email required (-m option). Please pay and register on https://rustdesk.com/server.");
return false;
}
let is_docker = std::path::Path::new("/.dockerenv").exists();
let machine = if is_docker { "".to_owned() } else { get_lic() };
if !is_docker {
let path = Path::new(LICENSE_FILE);
if Path::is_file(&path) {
let contents = std::fs::read_to_string(&path).unwrap_or("".to_owned());
if verify(&contents, &machine) {
async_check_email(&machine, email, version, 0);
return true;
}
}
}
match check_email(machine.clone(), email.to_owned(), version.to_owned()) {
Ok(v) => {
async_check_email(&machine, email, version, v);
return true;
}
Err(err) => {
log::error!("{}", err);
return false;
}
}
}
fn async_check_email(machine: &str, email: &str, version: &str, wait: u64) {
let machine = machine.to_owned();
let email = email.to_owned();
let version = version.to_owned();
std::thread::spawn(move || {
let mut wait = wait;
loop {
let machine = machine.clone();
let email = email.clone();
let version = version.clone();
std::thread::sleep(std::time::Duration::from_secs(wait));
match check_email(machine, email, version) {
Ok(v) => {
wait = v;
}
Err(err) => {
log::error!("{}", err);
std::process::exit(-1);
}
}
}
});
}
fn write_lic(lic: &str) {
if let Ok(mut f) = std::fs::File::create(LICENSE_FILE) {
f.write_all(lic.as_bytes()).ok();
f.sync_all().ok();
}
}
fn check_email(machine: String, email: String, version: String) -> ResultType<u64> {
log::info!("Checking email with the license server ...");
let mut rng = rand::thread_rng();
let nonce: usize = rng.gen();
let nonce = nonce.to_string();
let resp = minreq::post("http://rustdesk.com/api/check-email")
.with_body(
serde_json::to_string(&Post {
machine: machine.clone(),
version,
email,
nonce: nonce.clone(),
..Default::default()
})
.unwrap(),
)
.send()?;
if resp.reason_phrase == "OK" {
let p: Post = serde_json::from_str(&resp.as_str()?)?;
if !p.status.is_empty() {
std::fs::remove_file(LICENSE_FILE).ok();
bail!("{}", p.status);
}
if p.nonce.is_empty() {
bail!("Verification failure: nonce required");
}
if !verify(&p.nonce, &nonce) {
bail!("Verification failure: nonce mismatch");
}
if !machine.is_empty() {
if !verify(&p.machine, &machine) {
bail!("Verification failure");
}
write_lic(&p.machine);
}
log::info!("License OK");
if !p.tip.is_empty() {
log::info!("{}", p.tip);
}
let mut wait = p.next_check_time;
if wait == 0 {
wait = 3600 * 24 * 30;
}
Ok(wait)
} else {
bail!("Server error: {}", resp.reason_phrase);
}
}
fn get_lic() -> String {
let hostname = whoami::hostname();
let uid = machine_uid::get().unwrap_or("".to_owned());
let mac = if let Ok(Some(ma)) = mac_address::get_mac_address() {
base64::encode(ma.bytes())
} else {
"".to_owned()
};
serde_json::to_string(&Machine { hostname, uid, mac }).unwrap()
}
fn verify(enc_str: &str, msg: &str) -> bool {
if let Ok(data) = base64::decode(enc_str) {
let key =
b"\xf1T\xc0\x1c\xffee\x86,S*\xd9.\x91\xcd\x85\x12:\xec\xa9 \x99:\x8a\xa2S\x1f Yy\x93R";
cryptoxide::ed25519::verify(msg.as_bytes(), &key[..], &data)
} else {
false
}
}
pub const EMAIL_ARG: &'static str =
"-m, --email=[EMAIL] 'Sets your email address registered with RustDesk'";

hbbs main source (ID/rendezvous server entry point)

@ -1,15 +1,18 @@
// https://tools.ietf.org/rfc/rfc5128.txt
// https://blog.csdn.net/bytxl/article/details/44344855
use clap::App;
use hbb_common::{env_logger::*, log, ResultType};
use hbbs::*;
mod lic;
use ini::Ini;
use std::sync::{Arc, Mutex};
use flexi_logger::*;
use hbb_common::{bail, config::RENDEZVOUS_PORT, ResultType};
use hbbs::{common::*, *};
const RMEM: usize = 0;
fn main() -> ResultType<()> {
init_from_env(Env::default().filter_or(DEFAULT_FILTER_ENV, "info"));
let _logger = Logger::try_with_env_or_str("info")?
.log_to_stdout()
.format(opt_format)
.write_mode(WriteMode::Async)
.start()?;
let args = format!(
"-c --config=[FILE] +takes_value 'Sets a custom config file'
-p, --port=[NUMBER(default={})] 'Sets the listening port'
@ -18,66 +21,19 @@ fn main() -> ResultType<()> {
-u, --software-url=[URL] 'Sets download url of RustDesk software of newest version'
-r, --relay-servers=[HOST] 'Sets the default relay servers, separated by comma'
-C, --change-id=[BOOL(default=Y)] 'Sets if support to change id'
{}
-M, --rmem=[NUMBER(default={})] 'Sets UDP recv buffer size; raise the system rmem_max first, e.g., sudo sysctl -w net.core.rmem_max=52428800, or add net.core.rmem_max=52428800 to /etc/sysctl.conf and run sudo sysctl -p'
-k, --key=[KEY] 'Only allow the client with the same key'",
DEFAULT_PORT,
lic::EMAIL_ARG
RENDEZVOUS_PORT,
RMEM,
);
let matches = App::new("hbbs")
.version(crate::VERSION)
.author("CarrieZ Studio<info@rustdesk.com>")
.about("RustDesk ID/Rendezvous Server")
.args_from_usage(&args)
.get_matches();
let mut section = None;
let conf; // for holding section
if let Some(config) = matches.value_of("config") {
if let Ok(v) = Ini::load_from_file(config) {
conf = v;
section = conf.section(None::<String>);
}
init_args(&args, "hbbs", "RustDesk ID/Rendezvous Server");
let port = get_arg_or("port", RENDEZVOUS_PORT.to_string()).parse::<i32>()?;
if port < 3 {
bail!("Invalid port");
}
let get_arg = |name: &str, default: &str| -> String {
if let Some(v) = matches.value_of(name) {
return v.to_owned();
} else if let Some(section) = section {
if let Some(v) = section.get(name) {
return v.to_owned();
}
}
return default.to_owned();
};
if !lic::check_lic(&get_arg("email", ""), crate::VERSION) {
return Ok(());
}
let port = get_arg("port", DEFAULT_PORT);
let relay_servers: Vec<String> = get_arg("relay-servers", "")
.split(",")
.filter(|x| !x.is_empty() && test_if_valid_server(x, "relay-server").is_ok())
.map(|x| x.to_owned())
.collect();
let serial: i32 = get_arg("serial", "").parse().unwrap_or(0);
let id_change_support: bool = get_arg("change-id", "Y").to_uppercase() == "Y";
let rendezvous_servers: Vec<String> = get_arg("rendezvous-servers", "")
.split(",")
.filter(|x| !x.is_empty() && test_if_valid_server(x, "rendezvous-server").is_ok())
.map(|x| x.to_owned())
.collect();
let addr = format!("0.0.0.0:{}", port);
let addr2 = format!("0.0.0.0:{}", port.parse::<i32>().unwrap_or(0) - 1);
log::info!("serial={}", serial);
log::info!("rendezvous-servers={:?}", rendezvous_servers);
let stop: Arc<Mutex<bool>> = Default::default();
RendezvousServer::start(
&addr,
&addr2,
relay_servers,
serial,
rendezvous_servers,
get_arg("software-url", ""),
&get_arg("key", ""),
stop,
id_change_support,
)?;
let rmem = get_arg("rmem").parse::<usize>().unwrap_or(RMEM);
let serial: i32 = get_arg("serial").parse().unwrap_or(0);
let id_change_support: bool = get_arg_or("change-id", "Y".to_owned()).to_uppercase() == "Y";
RendezvousServer::start(port, serial, &get_arg("key"), id_change_support, rmem)?;
Ok(())
}

src/peer.rs (new file, 185 lines)

@ -0,0 +1,185 @@
use crate::common::*;
use crate::database;
use hbb_common::{
log,
rendezvous_proto::*,
tokio::sync::{Mutex, RwLock},
ResultType,
};
use serde_derive::{Deserialize, Serialize};
use std::{collections::HashMap, collections::HashSet, net::SocketAddr, sync::Arc, time::Instant};
lazy_static::lazy_static! {
pub(crate) static ref IP_BLOCKER: Mutex<HashMap<String, ((u32, Instant), (HashSet<String>, Instant))>> = Default::default();
pub(crate) static ref USER_STATUS: RwLock<HashMap<Vec<u8>, Arc<(Option<Vec<u8>>, bool)>>> = Default::default();
pub(crate) static ref IP_CHANGES: Mutex<HashMap<String, (Instant, HashMap<String, i32>)>> = Default::default();
}
pub static IP_CHANGE_DUR: u64 = 180;
pub static IP_CHANGE_DUR_X2: u64 = IP_CHANGE_DUR * 2;
pub static DAY_SECONDS: u64 = 3600 * 24;
pub static IP_BLOCK_DUR: u64 = 60;
#[derive(Debug, Default, Serialize, Deserialize, Clone)]
pub(crate) struct PeerInfo {
#[serde(default)]
pub(crate) ip: String,
}
#[derive(Clone, Debug)]
pub(crate) struct Peer {
pub(crate) socket_addr: SocketAddr,
pub(crate) last_reg_time: Instant,
pub(crate) guid: Vec<u8>,
pub(crate) uuid: Vec<u8>,
pub(crate) pk: Vec<u8>,
pub(crate) user: Option<Vec<u8>>,
pub(crate) info: PeerInfo,
pub(crate) disabled: bool,
pub(crate) reg_pk: (u32, Instant), // how often register_pk
}
impl Default for Peer {
fn default() -> Self {
Self {
socket_addr: "0.0.0.0:0".parse().unwrap(),
last_reg_time: get_expired_time(),
guid: Vec::new(),
uuid: Vec::new(),
pk: Vec::new(),
info: Default::default(),
user: None,
disabled: false,
reg_pk: (0, get_expired_time()),
}
}
}
pub(crate) type LockPeer = Arc<RwLock<Peer>>;
#[derive(Clone)]
pub(crate) struct PeerMap {
map: Arc<RwLock<HashMap<String, LockPeer>>>,
pub(crate) db: database::Database,
}
impl PeerMap {
pub(crate) async fn new() -> ResultType<Self> {
let db = std::env::var("DB_URL").unwrap_or({
#[allow(unused_mut)]
let mut db = "db_v2.sqlite3".to_owned();
#[cfg(all(windows, not(debug_assertions)))]
{
if let Some(path) = hbb_common::config::Config::icon_path().parent() {
db = format!("{}\\{}", path.to_str().unwrap_or("."), db);
}
}
#[cfg(not(windows))]
{
db = format!("./{}", db);
}
db
});
log::info!("DB_URL={}", db);
let pm = Self {
map: Default::default(),
db: database::Database::new(&db).await?,
};
Ok(pm)
}
#[inline]
pub(crate) async fn update_pk(
&mut self,
id: String,
peer: LockPeer,
addr: SocketAddr,
uuid: Vec<u8>,
pk: Vec<u8>,
ip: String,
) -> register_pk_response::Result {
log::info!("update_pk {} {:?} {:?} {:?}", id, addr, uuid, pk);
let (info_str, guid) = {
let mut w = peer.write().await;
w.socket_addr = addr;
w.uuid = uuid.clone();
w.pk = pk.clone();
w.last_reg_time = Instant::now();
w.info.ip = ip;
(
serde_json::to_string(&w.info).unwrap_or_default(),
w.guid.clone(),
)
};
if guid.is_empty() {
match self.db.insert_peer(&id, &uuid, &pk, &info_str).await {
Err(err) => {
log::error!("db.insert_peer failed: {}", err);
return register_pk_response::Result::SERVER_ERROR;
}
Ok(guid) => {
peer.write().await.guid = guid;
}
}
} else {
if let Err(err) = self.db.update_pk(&guid, &id, &pk, &info_str).await {
log::error!("db.update_pk failed: {}", err);
return register_pk_response::Result::SERVER_ERROR;
}
log::info!("pk updated instead of insert");
}
register_pk_response::Result::OK
}
#[inline]
pub(crate) async fn get(&self, id: &str) -> Option<LockPeer> {
let p = self.map.read().await.get(id).map(|x| x.clone());
if p.is_some() {
return p;
} else {
if let Ok(Some(v)) = self.db.get_peer(id).await {
let peer = Peer {
guid: v.guid,
uuid: v.uuid,
pk: v.pk,
user: v.user,
info: serde_json::from_str::<PeerInfo>(&v.info).unwrap_or_default(),
disabled: v.status == Some(0),
..Default::default()
};
let peer = Arc::new(RwLock::new(peer));
self.map.write().await.insert(id.to_owned(), peer.clone());
return Some(peer);
}
}
None
}
#[inline]
pub(crate) async fn get_or(&self, id: &str) -> LockPeer {
if let Some(p) = self.get(id).await {
return p;
}
let mut w = self.map.write().await;
if let Some(p) = w.get(id) {
return p.clone();
}
let tmp = LockPeer::default();
w.insert(id.to_owned(), tmp.clone());
tmp
}
#[inline]
pub(crate) async fn get_in_memory(&self, id: &str) -> Option<LockPeer> {
self.map.read().await.get(id).map(|x| x.clone())
}
#[inline]
pub(crate) async fn is_in_memory(&self, id: &str) -> bool {
self.map.read().await.contains_key(id)
}
#[inline]
pub(crate) async fn remove(&self, id: &str) {
self.map.write().await.remove(id);
}
}
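PeerMap::get above is a read-through cache: it consults the in-memory RwLock<HashMap> first and only falls back to the SQLite-backed Database on a miss, inserting the loaded peer into the map before returning it. A stripped-down, standalone sketch of that pattern (a generic String payload instead of the real Peer, and tokio::sync::RwLock as re-exported by hbb_common):
```rust
use hbb_common::tokio::sync::RwLock;
use std::{collections::HashMap, sync::Arc};

// Sketch only: the in-memory-first lookup used by PeerMap::get;
// `load_from_db` stands in for Database::get_peer.
async fn get_cached(
    map: &Arc<RwLock<HashMap<String, Arc<String>>>>,
    id: &str,
    load_from_db: impl std::future::Future<Output = Option<String>>,
) -> Option<Arc<String>> {
    if let Some(hit) = map.read().await.get(id).cloned() {
        return Some(hit); // cache hit, no database round trip
    }
    let loaded = Arc::new(load_from_db.await?); // miss: ask the database
    map.write().await.insert(id.to_owned(), loaded.clone()); // populate the cache
    Some(loaded)
}
```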

generated protobuf bindings for message.proto (deleted)

@ -1,930 +0,0 @@
// This file is generated by rust-protobuf 2.10.2. Do not edit
// @generated
// https://github.com/rust-lang/rust-clippy/issues/702
#![allow(unknown_lints)]
#![allow(clippy::all)]
#![cfg_attr(rustfmt, rustfmt_skip)]
#![allow(box_pointers)]
#![allow(dead_code)]
#![allow(missing_docs)]
#![allow(non_camel_case_types)]
#![allow(non_snake_case)]
#![allow(non_upper_case_globals)]
#![allow(trivial_casts)]
#![allow(unsafe_code)]
#![allow(unused_imports)]
#![allow(unused_results)]
//! Generated file from `message.proto`
use protobuf::Message as Message_imported_for_functions;
use protobuf::ProtobufEnum as ProtobufEnum_imported_for_functions;
/// Generated files are compatible only with the same version
/// of protobuf runtime.
// const _PROTOBUF_VERSION_CHECK: () = ::protobuf::VERSION_2_10_2;
#[derive(PartialEq,Clone,Default)]
pub struct RegisterPeer {
// message fields
pub hbb_addr: ::std::string::String,
// special fields
pub unknown_fields: ::protobuf::UnknownFields,
pub cached_size: ::protobuf::CachedSize,
}
impl<'a> ::std::default::Default for &'a RegisterPeer {
fn default() -> &'a RegisterPeer {
<RegisterPeer as ::protobuf::Message>::default_instance()
}
}
impl RegisterPeer {
pub fn new() -> RegisterPeer {
::std::default::Default::default()
}
// string hbb_addr = 1;
pub fn get_hbb_addr(&self) -> &str {
&self.hbb_addr
}
pub fn clear_hbb_addr(&mut self) {
self.hbb_addr.clear();
}
// Param is passed by value, moved
pub fn set_hbb_addr(&mut self, v: ::std::string::String) {
self.hbb_addr = v;
}
// Mutable pointer to the field.
// If field is not initialized, it is initialized with default value first.
pub fn mut_hbb_addr(&mut self) -> &mut ::std::string::String {
&mut self.hbb_addr
}
// Take field
pub fn take_hbb_addr(&mut self) -> ::std::string::String {
::std::mem::replace(&mut self.hbb_addr, ::std::string::String::new())
}
}
impl ::protobuf::Message for RegisterPeer {
fn is_initialized(&self) -> bool {
true
}
fn merge_from(&mut self, is: &mut ::protobuf::CodedInputStream<'_>) -> ::protobuf::ProtobufResult<()> {
while !is.eof()? {
let (field_number, wire_type) = is.read_tag_unpack()?;
match field_number {
1 => {
::protobuf::rt::read_singular_proto3_string_into(wire_type, is, &mut self.hbb_addr)?;
},
_ => {
::protobuf::rt::read_unknown_or_skip_group(field_number, wire_type, is, self.mut_unknown_fields())?;
},
};
}
::std::result::Result::Ok(())
}
// Compute sizes of nested messages
#[allow(unused_variables)]
fn compute_size(&self) -> u32 {
let mut my_size = 0;
if !self.hbb_addr.is_empty() {
my_size += ::protobuf::rt::string_size(1, &self.hbb_addr);
}
my_size += ::protobuf::rt::unknown_fields_size(self.get_unknown_fields());
self.cached_size.set(my_size);
my_size
}
fn write_to_with_cached_sizes(&self, os: &mut ::protobuf::CodedOutputStream<'_>) -> ::protobuf::ProtobufResult<()> {
if !self.hbb_addr.is_empty() {
os.write_string(1, &self.hbb_addr)?;
}
os.write_unknown_fields(self.get_unknown_fields())?;
::std::result::Result::Ok(())
}
fn get_cached_size(&self) -> u32 {
self.cached_size.get()
}
fn get_unknown_fields(&self) -> &::protobuf::UnknownFields {
&self.unknown_fields
}
fn mut_unknown_fields(&mut self) -> &mut ::protobuf::UnknownFields {
&mut self.unknown_fields
}
fn as_any(&self) -> &dyn (::std::any::Any) {
self as &dyn (::std::any::Any)
}
fn as_any_mut(&mut self) -> &mut dyn (::std::any::Any) {
self as &mut dyn (::std::any::Any)
}
fn into_any(self: Box<Self>) -> ::std::boxed::Box<dyn (::std::any::Any)> {
self
}
fn descriptor(&self) -> &'static ::protobuf::reflect::MessageDescriptor {
Self::descriptor_static()
}
fn new() -> RegisterPeer {
RegisterPeer::new()
}
fn descriptor_static() -> &'static ::protobuf::reflect::MessageDescriptor {
static mut descriptor: ::protobuf::lazy::Lazy<::protobuf::reflect::MessageDescriptor> = ::protobuf::lazy::Lazy {
lock: ::protobuf::lazy::ONCE_INIT,
ptr: 0 as *const ::protobuf::reflect::MessageDescriptor,
};
unsafe {
descriptor.get(|| {
let mut fields = ::std::vec::Vec::new();
fields.push(::protobuf::reflect::accessor::make_simple_field_accessor::<_, ::protobuf::types::ProtobufTypeString>(
"hbb_addr",
|m: &RegisterPeer| { &m.hbb_addr },
|m: &mut RegisterPeer| { &mut m.hbb_addr },
));
::protobuf::reflect::MessageDescriptor::new::<RegisterPeer>(
"RegisterPeer",
fields,
file_descriptor_proto()
)
})
}
}
fn default_instance() -> &'static RegisterPeer {
static mut instance: ::protobuf::lazy::Lazy<RegisterPeer> = ::protobuf::lazy::Lazy {
lock: ::protobuf::lazy::ONCE_INIT,
ptr: 0 as *const RegisterPeer,
};
unsafe {
instance.get(RegisterPeer::new)
}
}
}
impl ::protobuf::Clear for RegisterPeer {
fn clear(&mut self) {
self.hbb_addr.clear();
self.unknown_fields.clear();
}
}
impl ::std::fmt::Debug for RegisterPeer {
fn fmt(&self, f: &mut ::std::fmt::Formatter<'_>) -> ::std::fmt::Result {
::protobuf::text_format::fmt(self, f)
}
}
impl ::protobuf::reflect::ProtobufValue for RegisterPeer {
fn as_ref(&self) -> ::protobuf::reflect::ProtobufValueRef {
::protobuf::reflect::ProtobufValueRef::Message(self)
}
}
#[derive(PartialEq,Clone,Default)]
pub struct PeekPeer {
// message fields
pub hbb_addr: ::std::string::String,
// special fields
pub unknown_fields: ::protobuf::UnknownFields,
pub cached_size: ::protobuf::CachedSize,
}
impl<'a> ::std::default::Default for &'a PeekPeer {
fn default() -> &'a PeekPeer {
<PeekPeer as ::protobuf::Message>::default_instance()
}
}
impl PeekPeer {
pub fn new() -> PeekPeer {
::std::default::Default::default()
}
// string hbb_addr = 1;
pub fn get_hbb_addr(&self) -> &str {
&self.hbb_addr
}
pub fn clear_hbb_addr(&mut self) {
self.hbb_addr.clear();
}
// Param is passed by value, moved
pub fn set_hbb_addr(&mut self, v: ::std::string::String) {
self.hbb_addr = v;
}
// Mutable pointer to the field.
// If field is not initialized, it is initialized with default value first.
pub fn mut_hbb_addr(&mut self) -> &mut ::std::string::String {
&mut self.hbb_addr
}
// Take field
pub fn take_hbb_addr(&mut self) -> ::std::string::String {
::std::mem::replace(&mut self.hbb_addr, ::std::string::String::new())
}
}
impl ::protobuf::Message for PeekPeer {
fn is_initialized(&self) -> bool {
true
}
fn merge_from(&mut self, is: &mut ::protobuf::CodedInputStream<'_>) -> ::protobuf::ProtobufResult<()> {
while !is.eof()? {
let (field_number, wire_type) = is.read_tag_unpack()?;
match field_number {
1 => {
::protobuf::rt::read_singular_proto3_string_into(wire_type, is, &mut self.hbb_addr)?;
},
_ => {
::protobuf::rt::read_unknown_or_skip_group(field_number, wire_type, is, self.mut_unknown_fields())?;
},
};
}
::std::result::Result::Ok(())
}
// Compute sizes of nested messages
#[allow(unused_variables)]
fn compute_size(&self) -> u32 {
let mut my_size = 0;
if !self.hbb_addr.is_empty() {
my_size += ::protobuf::rt::string_size(1, &self.hbb_addr);
}
my_size += ::protobuf::rt::unknown_fields_size(self.get_unknown_fields());
self.cached_size.set(my_size);
my_size
}
fn write_to_with_cached_sizes(&self, os: &mut ::protobuf::CodedOutputStream<'_>) -> ::protobuf::ProtobufResult<()> {
if !self.hbb_addr.is_empty() {
os.write_string(1, &self.hbb_addr)?;
}
os.write_unknown_fields(self.get_unknown_fields())?;
::std::result::Result::Ok(())
}
fn get_cached_size(&self) -> u32 {
self.cached_size.get()
}
fn get_unknown_fields(&self) -> &::protobuf::UnknownFields {
&self.unknown_fields
}
fn mut_unknown_fields(&mut self) -> &mut ::protobuf::UnknownFields {
&mut self.unknown_fields
}
fn as_any(&self) -> &dyn (::std::any::Any) {
self as &dyn (::std::any::Any)
}
fn as_any_mut(&mut self) -> &mut dyn (::std::any::Any) {
self as &mut dyn (::std::any::Any)
}
fn into_any(self: Box<Self>) -> ::std::boxed::Box<dyn (::std::any::Any)> {
self
}
fn descriptor(&self) -> &'static ::protobuf::reflect::MessageDescriptor {
Self::descriptor_static()
}
fn new() -> PeekPeer {
PeekPeer::new()
}
fn descriptor_static() -> &'static ::protobuf::reflect::MessageDescriptor {
static mut descriptor: ::protobuf::lazy::Lazy<::protobuf::reflect::MessageDescriptor> = ::protobuf::lazy::Lazy {
lock: ::protobuf::lazy::ONCE_INIT,
ptr: 0 as *const ::protobuf::reflect::MessageDescriptor,
};
unsafe {
descriptor.get(|| {
let mut fields = ::std::vec::Vec::new();
fields.push(::protobuf::reflect::accessor::make_simple_field_accessor::<_, ::protobuf::types::ProtobufTypeString>(
"hbb_addr",
|m: &PeekPeer| { &m.hbb_addr },
|m: &mut PeekPeer| { &mut m.hbb_addr },
));
::protobuf::reflect::MessageDescriptor::new::<PeekPeer>(
"PeekPeer",
fields,
file_descriptor_proto()
)
})
}
}
fn default_instance() -> &'static PeekPeer {
static mut instance: ::protobuf::lazy::Lazy<PeekPeer> = ::protobuf::lazy::Lazy {
lock: ::protobuf::lazy::ONCE_INIT,
ptr: 0 as *const PeekPeer,
};
unsafe {
instance.get(PeekPeer::new)
}
}
}
impl ::protobuf::Clear for PeekPeer {
fn clear(&mut self) {
self.hbb_addr.clear();
self.unknown_fields.clear();
}
}
impl ::std::fmt::Debug for PeekPeer {
fn fmt(&self, f: &mut ::std::fmt::Formatter<'_>) -> ::std::fmt::Result {
::protobuf::text_format::fmt(self, f)
}
}
impl ::protobuf::reflect::ProtobufValue for PeekPeer {
fn as_ref(&self) -> ::protobuf::reflect::ProtobufValueRef {
::protobuf::reflect::ProtobufValueRef::Message(self)
}
}
#[derive(PartialEq,Clone,Default)]
pub struct PeekPeerResponse {
// message fields
pub socket_addr: ::std::vec::Vec<u8>,
// special fields
pub unknown_fields: ::protobuf::UnknownFields,
pub cached_size: ::protobuf::CachedSize,
}
impl<'a> ::std::default::Default for &'a PeekPeerResponse {
fn default() -> &'a PeekPeerResponse {
<PeekPeerResponse as ::protobuf::Message>::default_instance()
}
}
impl PeekPeerResponse {
pub fn new() -> PeekPeerResponse {
::std::default::Default::default()
}
// bytes socket_addr = 1;
pub fn get_socket_addr(&self) -> &[u8] {
&self.socket_addr
}
pub fn clear_socket_addr(&mut self) {
self.socket_addr.clear();
}
// Param is passed by value, moved
pub fn set_socket_addr(&mut self, v: ::std::vec::Vec<u8>) {
self.socket_addr = v;
}
// Mutable pointer to the field.
// If field is not initialized, it is initialized with default value first.
pub fn mut_socket_addr(&mut self) -> &mut ::std::vec::Vec<u8> {
&mut self.socket_addr
}
// Take field
pub fn take_socket_addr(&mut self) -> ::std::vec::Vec<u8> {
::std::mem::replace(&mut self.socket_addr, ::std::vec::Vec::new())
}
}
impl ::protobuf::Message for PeekPeerResponse {
fn is_initialized(&self) -> bool {
true
}
fn merge_from(&mut self, is: &mut ::protobuf::CodedInputStream<'_>) -> ::protobuf::ProtobufResult<()> {
while !is.eof()? {
let (field_number, wire_type) = is.read_tag_unpack()?;
match field_number {
1 => {
::protobuf::rt::read_singular_proto3_bytes_into(wire_type, is, &mut self.socket_addr)?;
},
_ => {
::protobuf::rt::read_unknown_or_skip_group(field_number, wire_type, is, self.mut_unknown_fields())?;
},
};
}
::std::result::Result::Ok(())
}
// Compute sizes of nested messages
#[allow(unused_variables)]
fn compute_size(&self) -> u32 {
let mut my_size = 0;
if !self.socket_addr.is_empty() {
my_size += ::protobuf::rt::bytes_size(1, &self.socket_addr);
}
my_size += ::protobuf::rt::unknown_fields_size(self.get_unknown_fields());
self.cached_size.set(my_size);
my_size
}
fn write_to_with_cached_sizes(&self, os: &mut ::protobuf::CodedOutputStream<'_>) -> ::protobuf::ProtobufResult<()> {
if !self.socket_addr.is_empty() {
os.write_bytes(1, &self.socket_addr)?;
}
os.write_unknown_fields(self.get_unknown_fields())?;
::std::result::Result::Ok(())
}
fn get_cached_size(&self) -> u32 {
self.cached_size.get()
}
fn get_unknown_fields(&self) -> &::protobuf::UnknownFields {
&self.unknown_fields
}
fn mut_unknown_fields(&mut self) -> &mut ::protobuf::UnknownFields {
&mut self.unknown_fields
}
fn as_any(&self) -> &dyn (::std::any::Any) {
self as &dyn (::std::any::Any)
}
fn as_any_mut(&mut self) -> &mut dyn (::std::any::Any) {
self as &mut dyn (::std::any::Any)
}
fn into_any(self: Box<Self>) -> ::std::boxed::Box<dyn (::std::any::Any)> {
self
}
fn descriptor(&self) -> &'static ::protobuf::reflect::MessageDescriptor {
Self::descriptor_static()
}
fn new() -> PeekPeerResponse {
PeekPeerResponse::new()
}
fn descriptor_static() -> &'static ::protobuf::reflect::MessageDescriptor {
static mut descriptor: ::protobuf::lazy::Lazy<::protobuf::reflect::MessageDescriptor> = ::protobuf::lazy::Lazy {
lock: ::protobuf::lazy::ONCE_INIT,
ptr: 0 as *const ::protobuf::reflect::MessageDescriptor,
};
unsafe {
descriptor.get(|| {
let mut fields = ::std::vec::Vec::new();
fields.push(::protobuf::reflect::accessor::make_simple_field_accessor::<_, ::protobuf::types::ProtobufTypeBytes>(
"socket_addr",
|m: &PeekPeerResponse| { &m.socket_addr },
|m: &mut PeekPeerResponse| { &mut m.socket_addr },
));
::protobuf::reflect::MessageDescriptor::new::<PeekPeerResponse>(
"PeekPeerResponse",
fields,
file_descriptor_proto()
)
})
}
}
fn default_instance() -> &'static PeekPeerResponse {
static mut instance: ::protobuf::lazy::Lazy<PeekPeerResponse> = ::protobuf::lazy::Lazy {
lock: ::protobuf::lazy::ONCE_INIT,
ptr: 0 as *const PeekPeerResponse,
};
unsafe {
instance.get(PeekPeerResponse::new)
}
}
}
impl ::protobuf::Clear for PeekPeerResponse {
fn clear(&mut self) {
self.socket_addr.clear();
self.unknown_fields.clear();
}
}
impl ::std::fmt::Debug for PeekPeerResponse {
fn fmt(&self, f: &mut ::std::fmt::Formatter<'_>) -> ::std::fmt::Result {
::protobuf::text_format::fmt(self, f)
}
}
impl ::protobuf::reflect::ProtobufValue for PeekPeerResponse {
fn as_ref(&self) -> ::protobuf::reflect::ProtobufValueRef {
::protobuf::reflect::ProtobufValueRef::Message(self)
}
}
#[derive(PartialEq,Clone,Default)]
pub struct Message {
// message oneof groups
pub union: ::std::option::Option<Message_oneof_union>,
// special fields
pub unknown_fields: ::protobuf::UnknownFields,
pub cached_size: ::protobuf::CachedSize,
}
impl<'a> ::std::default::Default for &'a Message {
fn default() -> &'a Message {
<Message as ::protobuf::Message>::default_instance()
}
}
#[derive(Clone,PartialEq,Debug)]
pub enum Message_oneof_union {
register_peer(RegisterPeer),
peek_peer(PeekPeer),
peek_peer_response(PeekPeerResponse),
}
impl Message {
pub fn new() -> Message {
::std::default::Default::default()
}
// .hbb.RegisterPeer register_peer = 6;
pub fn get_register_peer(&self) -> &RegisterPeer {
match self.union {
::std::option::Option::Some(Message_oneof_union::register_peer(ref v)) => v,
_ => RegisterPeer::default_instance(),
}
}
pub fn clear_register_peer(&mut self) {
self.union = ::std::option::Option::None;
}
pub fn has_register_peer(&self) -> bool {
match self.union {
::std::option::Option::Some(Message_oneof_union::register_peer(..)) => true,
_ => false,
}
}
// Param is passed by value, moved
pub fn set_register_peer(&mut self, v: RegisterPeer) {
self.union = ::std::option::Option::Some(Message_oneof_union::register_peer(v))
}
// Mutable pointer to the field.
pub fn mut_register_peer(&mut self) -> &mut RegisterPeer {
if let ::std::option::Option::Some(Message_oneof_union::register_peer(_)) = self.union {
} else {
self.union = ::std::option::Option::Some(Message_oneof_union::register_peer(RegisterPeer::new()));
}
match self.union {
::std::option::Option::Some(Message_oneof_union::register_peer(ref mut v)) => v,
_ => panic!(),
}
}
// Take field
pub fn take_register_peer(&mut self) -> RegisterPeer {
if self.has_register_peer() {
match self.union.take() {
::std::option::Option::Some(Message_oneof_union::register_peer(v)) => v,
_ => panic!(),
}
} else {
RegisterPeer::new()
}
}
// .hbb.PeekPeer peek_peer = 7;
pub fn get_peek_peer(&self) -> &PeekPeer {
match self.union {
::std::option::Option::Some(Message_oneof_union::peek_peer(ref v)) => v,
_ => PeekPeer::default_instance(),
}
}
pub fn clear_peek_peer(&mut self) {
self.union = ::std::option::Option::None;
}
pub fn has_peek_peer(&self) -> bool {
match self.union {
::std::option::Option::Some(Message_oneof_union::peek_peer(..)) => true,
_ => false,
}
}
// Param is passed by value, moved
pub fn set_peek_peer(&mut self, v: PeekPeer) {
self.union = ::std::option::Option::Some(Message_oneof_union::peek_peer(v))
}
// Mutable pointer to the field.
pub fn mut_peek_peer(&mut self) -> &mut PeekPeer {
if let ::std::option::Option::Some(Message_oneof_union::peek_peer(_)) = self.union {
} else {
self.union = ::std::option::Option::Some(Message_oneof_union::peek_peer(PeekPeer::new()));
}
match self.union {
::std::option::Option::Some(Message_oneof_union::peek_peer(ref mut v)) => v,
_ => panic!(),
}
}
// Take field
pub fn take_peek_peer(&mut self) -> PeekPeer {
if self.has_peek_peer() {
match self.union.take() {
::std::option::Option::Some(Message_oneof_union::peek_peer(v)) => v,
_ => panic!(),
}
} else {
PeekPeer::new()
}
}
// .hbb.PeekPeerResponse peek_peer_response = 8;
pub fn get_peek_peer_response(&self) -> &PeekPeerResponse {
match self.union {
::std::option::Option::Some(Message_oneof_union::peek_peer_response(ref v)) => v,
_ => PeekPeerResponse::default_instance(),
}
}
pub fn clear_peek_peer_response(&mut self) {
self.union = ::std::option::Option::None;
}
pub fn has_peek_peer_response(&self) -> bool {
match self.union {
::std::option::Option::Some(Message_oneof_union::peek_peer_response(..)) => true,
_ => false,
}
}
// Param is passed by value, moved
pub fn set_peek_peer_response(&mut self, v: PeekPeerResponse) {
self.union = ::std::option::Option::Some(Message_oneof_union::peek_peer_response(v))
}
// Mutable pointer to the field.
pub fn mut_peek_peer_response(&mut self) -> &mut PeekPeerResponse {
if let ::std::option::Option::Some(Message_oneof_union::peek_peer_response(_)) = self.union {
} else {
self.union = ::std::option::Option::Some(Message_oneof_union::peek_peer_response(PeekPeerResponse::new()));
}
match self.union {
::std::option::Option::Some(Message_oneof_union::peek_peer_response(ref mut v)) => v,
_ => panic!(),
}
}
// Take field
pub fn take_peek_peer_response(&mut self) -> PeekPeerResponse {
if self.has_peek_peer_response() {
match self.union.take() {
::std::option::Option::Some(Message_oneof_union::peek_peer_response(v)) => v,
_ => panic!(),
}
} else {
PeekPeerResponse::new()
}
}
}
impl ::protobuf::Message for Message {
fn is_initialized(&self) -> bool {
if let Some(Message_oneof_union::register_peer(ref v)) = self.union {
if !v.is_initialized() {
return false;
}
}
if let Some(Message_oneof_union::peek_peer(ref v)) = self.union {
if !v.is_initialized() {
return false;
}
}
if let Some(Message_oneof_union::peek_peer_response(ref v)) = self.union {
if !v.is_initialized() {
return false;
}
}
true
}
fn merge_from(&mut self, is: &mut ::protobuf::CodedInputStream<'_>) -> ::protobuf::ProtobufResult<()> {
while !is.eof()? {
let (field_number, wire_type) = is.read_tag_unpack()?;
match field_number {
6 => {
if wire_type != ::protobuf::wire_format::WireTypeLengthDelimited {
return ::std::result::Result::Err(::protobuf::rt::unexpected_wire_type(wire_type));
}
self.union = ::std::option::Option::Some(Message_oneof_union::register_peer(is.read_message()?));
},
7 => {
if wire_type != ::protobuf::wire_format::WireTypeLengthDelimited {
return ::std::result::Result::Err(::protobuf::rt::unexpected_wire_type(wire_type));
}
self.union = ::std::option::Option::Some(Message_oneof_union::peek_peer(is.read_message()?));
},
8 => {
if wire_type != ::protobuf::wire_format::WireTypeLengthDelimited {
return ::std::result::Result::Err(::protobuf::rt::unexpected_wire_type(wire_type));
}
self.union = ::std::option::Option::Some(Message_oneof_union::peek_peer_response(is.read_message()?));
},
_ => {
::protobuf::rt::read_unknown_or_skip_group(field_number, wire_type, is, self.mut_unknown_fields())?;
},
};
}
::std::result::Result::Ok(())
}
// Compute sizes of nested messages
#[allow(unused_variables)]
fn compute_size(&self) -> u32 {
let mut my_size = 0;
if let ::std::option::Option::Some(ref v) = self.union {
match v {
&Message_oneof_union::register_peer(ref v) => {
let len = v.compute_size();
my_size += 1 + ::protobuf::rt::compute_raw_varint32_size(len) + len;
},
&Message_oneof_union::peek_peer(ref v) => {
let len = v.compute_size();
my_size += 1 + ::protobuf::rt::compute_raw_varint32_size(len) + len;
},
&Message_oneof_union::peek_peer_response(ref v) => {
let len = v.compute_size();
my_size += 1 + ::protobuf::rt::compute_raw_varint32_size(len) + len;
},
};
}
my_size += ::protobuf::rt::unknown_fields_size(self.get_unknown_fields());
self.cached_size.set(my_size);
my_size
}
fn write_to_with_cached_sizes(&self, os: &mut ::protobuf::CodedOutputStream<'_>) -> ::protobuf::ProtobufResult<()> {
if let ::std::option::Option::Some(ref v) = self.union {
match v {
&Message_oneof_union::register_peer(ref v) => {
os.write_tag(6, ::protobuf::wire_format::WireTypeLengthDelimited)?;
os.write_raw_varint32(v.get_cached_size())?;
v.write_to_with_cached_sizes(os)?;
},
&Message_oneof_union::peek_peer(ref v) => {
os.write_tag(7, ::protobuf::wire_format::WireTypeLengthDelimited)?;
os.write_raw_varint32(v.get_cached_size())?;
v.write_to_with_cached_sizes(os)?;
},
&Message_oneof_union::peek_peer_response(ref v) => {
os.write_tag(8, ::protobuf::wire_format::WireTypeLengthDelimited)?;
os.write_raw_varint32(v.get_cached_size())?;
v.write_to_with_cached_sizes(os)?;
},
};
}
os.write_unknown_fields(self.get_unknown_fields())?;
::std::result::Result::Ok(())
}
fn get_cached_size(&self) -> u32 {
self.cached_size.get()
}
fn get_unknown_fields(&self) -> &::protobuf::UnknownFields {
&self.unknown_fields
}
fn mut_unknown_fields(&mut self) -> &mut ::protobuf::UnknownFields {
&mut self.unknown_fields
}
fn as_any(&self) -> &dyn (::std::any::Any) {
self as &dyn (::std::any::Any)
}
fn as_any_mut(&mut self) -> &mut dyn (::std::any::Any) {
self as &mut dyn (::std::any::Any)
}
fn into_any(self: Box<Self>) -> ::std::boxed::Box<dyn (::std::any::Any)> {
self
}
fn descriptor(&self) -> &'static ::protobuf::reflect::MessageDescriptor {
Self::descriptor_static()
}
fn new() -> Message {
Message::new()
}
fn descriptor_static() -> &'static ::protobuf::reflect::MessageDescriptor {
static mut descriptor: ::protobuf::lazy::Lazy<::protobuf::reflect::MessageDescriptor> = ::protobuf::lazy::Lazy {
lock: ::protobuf::lazy::ONCE_INIT,
ptr: 0 as *const ::protobuf::reflect::MessageDescriptor,
};
unsafe {
descriptor.get(|| {
let mut fields = ::std::vec::Vec::new();
fields.push(::protobuf::reflect::accessor::make_singular_message_accessor::<_, RegisterPeer>(
"register_peer",
Message::has_register_peer,
Message::get_register_peer,
));
fields.push(::protobuf::reflect::accessor::make_singular_message_accessor::<_, PeekPeer>(
"peek_peer",
Message::has_peek_peer,
Message::get_peek_peer,
));
fields.push(::protobuf::reflect::accessor::make_singular_message_accessor::<_, PeekPeerResponse>(
"peek_peer_response",
Message::has_peek_peer_response,
Message::get_peek_peer_response,
));
::protobuf::reflect::MessageDescriptor::new::<Message>(
"Message",
fields,
file_descriptor_proto()
)
})
}
}
fn default_instance() -> &'static Message {
static mut instance: ::protobuf::lazy::Lazy<Message> = ::protobuf::lazy::Lazy {
lock: ::protobuf::lazy::ONCE_INIT,
ptr: 0 as *const Message,
};
unsafe {
instance.get(Message::new)
}
}
}
impl ::protobuf::Clear for Message {
fn clear(&mut self) {
self.union = ::std::option::Option::None;
self.union = ::std::option::Option::None;
self.union = ::std::option::Option::None;
self.unknown_fields.clear();
}
}
impl ::std::fmt::Debug for Message {
fn fmt(&self, f: &mut ::std::fmt::Formatter<'_>) -> ::std::fmt::Result {
::protobuf::text_format::fmt(self, f)
}
}
impl ::protobuf::reflect::ProtobufValue for Message {
fn as_ref(&self) -> ::protobuf::reflect::ProtobufValueRef {
::protobuf::reflect::ProtobufValueRef::Message(self)
}
}
static file_descriptor_proto_data: &'static [u8] = b"\
\n\rmessage.proto\x12\x03hbb\"$\n\x0cRegisterPeer\x12\x12\n\x08hbb_addr\
\x18\x01\x20\x01(\tB\0:\0\"\x20\n\x08PeekPeer\x12\x12\n\x08hbb_addr\x18\
\x01\x20\x01(\tB\0:\0\"+\n\x10PeekPeerResponse\x12\x15\n\x0bsocket_addr\
\x18\x01\x20\x01(\x0cB\0:\0\"\x9f\x01\n\x07Message\x12,\n\rregister_peer\
\x18\x06\x20\x01(\x0b2\x11.hbb.RegisterPeerH\0B\0\x12$\n\tpeek_peer\x18\
\x07\x20\x01(\x0b2\r.hbb.PeekPeerH\0B\0\x125\n\x12peek_peer_response\x18\
\x08\x20\x01(\x0b2\x15.hbb.PeekPeerResponseH\0B\0B\x07\n\x05union:\0B\0b\
\x06proto3\
";
static mut file_descriptor_proto_lazy: ::protobuf::lazy::Lazy<::protobuf::descriptor::FileDescriptorProto> = ::protobuf::lazy::Lazy {
lock: ::protobuf::lazy::ONCE_INIT,
ptr: 0 as *const ::protobuf::descriptor::FileDescriptorProto,
};
fn parse_descriptor_proto() -> ::protobuf::descriptor::FileDescriptorProto {
::protobuf::parse_from_bytes(file_descriptor_proto_data).unwrap()
}
pub fn file_descriptor_proto() -> &'static ::protobuf::descriptor::FileDescriptorProto {
unsafe {
file_descriptor_proto_lazy.get(|| {
parse_descriptor_proto()
})
}
}
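
For orientation, a minimal sketch of how the generated oneof accessors above are typically driven (rust-protobuf 2.x; `PeekPeer::set_hbb_addr` is assumed from the `hbb_addr` field in the descriptor, since that struct's generated code appears earlier in this file):

```
// Sketch only: builds a Message carrying a PeekPeer and round-trips it through bytes.
// Assumes rust-protobuf 2.x and that PeekPeer (generated earlier in this file) has the
// usual set_hbb_addr setter for its `hbb_addr` string field.
use protobuf::Message as _; // brings write_to_bytes / parse_from_bytes into scope

fn encode_peek_peer(hbb_addr: &str) -> protobuf::ProtobufResult<Vec<u8>> {
    let mut peek = PeekPeer::new();
    peek.set_hbb_addr(hbb_addr.to_owned());

    let mut msg = Message::new();
    msg.set_peek_peer(peek); // selects the peek_peer arm of the oneof union
    debug_assert!(msg.has_peek_peer());
    msg.write_to_bytes()
}

fn decode(bytes: &[u8]) -> protobuf::ProtobufResult<Message> {
    // Same pattern the relay code below uses for RendezvousMessage::parse_from_bytes.
    Message::parse_from_bytes(bytes)
}
```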

View File

@@ -1,114 +1,638 @@
use async_speed_limit::Limiter;
use async_trait::async_trait;
use hbb_common::{
allow_err, bail,
bytes::{Bytes, BytesMut},
futures_util::{sink::SinkExt, stream::StreamExt},
log,
protobuf::Message as _,
rendezvous_proto::*,
sleep,
tcp::{new_listener, FramedStream},
timeout,
tokio::{
self,
net::TcpListener,
io::{AsyncReadExt, AsyncWriteExt},
net::{TcpListener, TcpStream},
sync::{Mutex, RwLock},
time::{interval, Duration},
},
ResultType,
};
use sodiumoxide::crypto::sign;
use std::{
collections::HashMap,
collections::{HashMap, HashSet},
io::prelude::*,
io::Error,
net::SocketAddr,
sync::{Arc, Mutex},
};
type Usage = (usize, usize, usize, usize);
lazy_static::lazy_static! {
static ref PEERS: Arc<Mutex<HashMap<String, FramedStream>>> = Arc::new(Mutex::new(HashMap::new()));
static ref PEERS: Mutex<HashMap<String, Box<dyn StreamTrait>>> = Default::default();
static ref USAGE: RwLock<HashMap<String, Usage>> = Default::default();
static ref BLACKLIST: RwLock<HashSet<String>> = Default::default();
static ref BLOCKLIST: RwLock<HashSet<String>> = Default::default();
}
pub const DEFAULT_PORT: &'static str = "21117";
static mut DOWNGRADE_THRESHOLD: f64 = 0.66;
static mut DOWNGRADE_START_CHECK: usize = 1800_000; // in ms
static mut LIMIT_SPEED: usize = 4 * 1024 * 1024; // in bit/s
static mut TOTAL_BANDWIDTH: usize = 1024 * 1024 * 1024; // in bit/s
static mut SINGLE_BANDWIDTH: usize = 16 * 1024 * 1024; // in bit/s
const BLACKLIST_FILE: &'static str = "blacklist.txt";
const BLOCKLIST_FILE: &'static str = "blocklist.txt";
#[tokio::main(basic_scheduler)]
pub async fn start(port: &str, key: &str, stop: Arc<Mutex<bool>>) -> ResultType<()> {
if !key.is_empty() {
log::info!("Key: {}", key);
#[tokio::main(flavor = "multi_thread")]
pub async fn start(port: &str, key: &str) -> ResultType<()> {
let key = get_server_sk(key);
if let Ok(mut file) = std::fs::File::open(BLACKLIST_FILE) {
let mut contents = String::new();
if file.read_to_string(&mut contents).is_ok() {
for x in contents.split("\n") {
if let Some(ip) = x.trim().split(' ').nth(0) {
BLACKLIST.write().await.insert(ip.to_owned());
}
}
}
}
log::info!(
"#blacklist({}): {}",
BLACKLIST_FILE,
BLACKLIST.read().await.len()
);
if let Ok(mut file) = std::fs::File::open(BLOCKLIST_FILE) {
let mut contents = String::new();
if file.read_to_string(&mut contents).is_ok() {
for x in contents.split("\n") {
if let Some(ip) = x.trim().split(' ').nth(0) {
BLOCKLIST.write().await.insert(ip.to_owned());
}
}
}
}
log::info!(
"#blocklist({}): {}",
BLOCKLIST_FILE,
BLOCKLIST.read().await.len()
);
let addr = format!("0.0.0.0:{}", port);
log::info!("Listening on tcp {}", addr);
let mut listener = new_listener(addr, false).await?;
let addr2 = format!("0.0.0.0:{}", port.parse::<u16>().unwrap() + 2);
log::info!("Listening on websocket {}", addr2);
loop {
if *stop.lock().unwrap() {
sleep(0.1).await;
continue;
}
log::info!("Start");
io_loop(&mut listener, key, stop.clone()).await;
io_loop(
new_listener(&addr, false).await?,
new_listener(&addr2, false).await?,
&key,
)
.await;
}
}
async fn io_loop(listener: &mut TcpListener, key: &str, stop: Arc<Mutex<bool>>) {
let mut timer = interval(Duration::from_millis(100));
fn check_params() {
let tmp = std::env::var("DOWNGRADE_THRESHOLD")
.map(|x| x.parse::<f64>().unwrap_or(0.))
.unwrap_or(0.);
if tmp > 0. {
unsafe {
DOWNGRADE_THRESHOLD = tmp;
}
}
unsafe { log::info!("DOWNGRADE_THRESHOLD: {}", DOWNGRADE_THRESHOLD) };
let tmp = std::env::var("DOWNGRADE_START_CHECK")
.map(|x| x.parse::<usize>().unwrap_or(0))
.unwrap_or(0);
if tmp > 0 {
unsafe {
DOWNGRADE_START_CHECK = tmp * 1000;
}
}
unsafe { log::info!("DOWNGRADE_START_CHECK: {}s", DOWNGRADE_START_CHECK / 1000) };
let tmp = std::env::var("LIMIT_SPEED")
.map(|x| x.parse::<f64>().unwrap_or(0.))
.unwrap_or(0.);
if tmp > 0. {
unsafe {
LIMIT_SPEED = (tmp * 1024. * 1024.) as usize;
}
}
unsafe { log::info!("LIMIT_SPEED: {}Mb/s", LIMIT_SPEED as f64 / 1024. / 1024.) };
let tmp = std::env::var("TOTAL_BANDWIDTH")
.map(|x| x.parse::<f64>().unwrap_or(0.))
.unwrap_or(0.);
if tmp > 0. {
unsafe {
TOTAL_BANDWIDTH = (tmp * 1024. * 1024.) as usize;
}
}
unsafe {
log::info!(
"TOTAL_BANDWIDTH: {}Mb/s",
TOTAL_BANDWIDTH as f64 / 1024. / 1024.
)
};
let tmp = std::env::var("SINGLE_BANDWIDTH")
.map(|x| x.parse::<f64>().unwrap_or(0.))
.unwrap_or(0.);
if tmp > 0. {
unsafe {
SINGLE_BANDWIDTH = (tmp * 1024. * 1024.) as usize;
}
}
unsafe {
log::info!(
"SINGLE_BANDWIDTH: {}Mb/s",
SINGLE_BANDWIDTH as f64 / 1024. / 1024.
)
};
}
async fn check_cmd(cmd: &str, limiter: Limiter) -> String {
let mut res = "".to_owned();
let mut fds = cmd.trim().split(" ");
match fds.next() {
Some("h") => {
res = format!(
"{}\n{}\n{}\n{}\n{}\n{}\n{}\n{}\n{}\n{}\n{}\n{}\n",
"blacklist-add(ba) <ip>",
"blacklist-remove(br) <ip>",
"blacklist(b) <ip>",
"blocklist-add(Ba) <ip>",
"blocklist-remove(Br) <ip>",
"blocklist(B) <ip>",
"downgrade-threshold(dt) [value]",
"downgrade-start-check(t) [value(second)]",
"limit-speed(ls) [value(Mb/s)]",
"total-bandwidth(tb) [value(Mb/s)]",
"single-bandwidth(sb) [value(Mb/s)]",
"usage(u)"
)
}
Some("blacklist-add" | "ba") => {
if let Some(ip) = fds.next() {
for ip in ip.split("|") {
BLACKLIST.write().await.insert(ip.to_owned());
}
}
}
Some("blacklist-remove" | "br") => {
if let Some(ip) = fds.next() {
if ip == "all" {
BLACKLIST.write().await.clear();
} else {
for ip in ip.split("|") {
BLACKLIST.write().await.remove(ip);
}
}
}
}
Some("blacklist" | "b") => {
if let Some(ip) = fds.next() {
res = format!("{}\n", BLACKLIST.read().await.get(ip).is_some());
} else {
for ip in BLACKLIST.read().await.clone().into_iter() {
res += &format!("{}\n", ip);
}
}
}
Some("blocklist-add" | "Ba") => {
if let Some(ip) = fds.next() {
for ip in ip.split("|") {
BLOCKLIST.write().await.insert(ip.to_owned());
}
}
}
Some("blocklist-remove" | "Br") => {
if let Some(ip) = fds.next() {
if ip == "all" {
BLOCKLIST.write().await.clear();
} else {
for ip in ip.split("|") {
BLOCKLIST.write().await.remove(ip);
}
}
}
}
Some("blocklist" | "B") => {
if let Some(ip) = fds.next() {
res = format!("{}\n", BLOCKLIST.read().await.get(ip).is_some());
} else {
for ip in BLOCKLIST.read().await.clone().into_iter() {
res += &format!("{}\n", ip);
}
}
}
Some("downgrade-threshold" | "dt") => {
if let Some(v) = fds.next() {
if let Ok(v) = v.parse::<f64>() {
if v > 0. {
unsafe {
DOWNGRADE_THRESHOLD = v;
}
}
}
} else {
unsafe {
res = format!("{}\n", DOWNGRADE_THRESHOLD);
}
}
}
Some("downgrade-start-check" | "t") => {
if let Some(v) = fds.next() {
if let Ok(v) = v.parse::<usize>() {
if v > 0 {
unsafe {
DOWNGRADE_START_CHECK = v * 1000;
}
}
}
} else {
unsafe {
res = format!("{}s\n", DOWNGRADE_START_CHECK / 1000);
}
}
}
Some("limit-speed" | "ls") => {
if let Some(v) = fds.next() {
if let Ok(v) = v.parse::<f64>() {
if v > 0. {
unsafe {
LIMIT_SPEED = (v * 1024. * 1024.) as _;
}
}
}
} else {
unsafe {
res = format!("{}Mb/s\n", LIMIT_SPEED as f64 / 1024. / 1024.);
}
}
}
Some("total-bandwidth" | "tb") => {
if let Some(v) = fds.next() {
if let Ok(v) = v.parse::<f64>() {
if v > 0. {
unsafe {
TOTAL_BANDWIDTH = (v * 1024. * 1024.) as _;
limiter.set_speed_limit(TOTAL_BANDWIDTH as _);
}
}
}
} else {
unsafe {
res = format!("{}Mb/s\n", TOTAL_BANDWIDTH as f64 / 1024. / 1024.);
}
}
}
Some("single-bandwidth" | "sb") => {
if let Some(v) = fds.next() {
if let Ok(v) = v.parse::<f64>() {
if v > 0. {
unsafe {
SINGLE_BANDWIDTH = (v * 1024. * 1024.) as _;
}
}
}
} else {
unsafe {
res = format!("{}Mb/s\n", SINGLE_BANDWIDTH as f64 / 1024. / 1024.);
}
}
}
Some("usage" | "u") => {
let mut tmp: Vec<(String, Usage)> = USAGE
.read()
.await
.iter()
.map(|x| (x.0.clone(), x.1.clone()))
.collect();
tmp.sort_by(|a, b| ((b.1).1).partial_cmp(&(a.1).1).unwrap());
for (ip, (elapsed, total, highest, speed)) in tmp {
if elapsed <= 0 {
continue;
}
res += &format!(
"{}: {}s {:.2}MB {}kb/s {}kb/s {}kb/s\n",
ip,
elapsed / 1000,
total as f64 / 1024. / 1024. / 8.,
highest,
total / elapsed,
speed
);
}
}
_ => {}
}
res
}
async fn io_loop(listener: TcpListener, listener2: TcpListener, key: &str) {
check_params();
let limiter = <Limiter>::new(unsafe { TOTAL_BANDWIDTH as _ });
loop {
tokio::select! {
Ok((stream, addr)) = listener.accept() => {
let key = key.to_owned();
tokio::spawn(async move {
make_pair(FramedStream::from(stream), addr, &key).await.ok();
});
}
_ = timer.tick() => {
if *stop.lock().unwrap() {
log::info!("Stopped");
break;
res = listener.accept() => {
match res {
Ok((stream, addr)) => {
stream.set_nodelay(true).ok();
handle_connection(stream, addr, &limiter, key, false).await;
}
Err(err) => {
log::error!("listener.accept failed: {}", err);
break;
}
}
}
}
}
}
async fn make_pair(stream: FramedStream, addr: SocketAddr, key: &str) -> ResultType<()> {
let mut stream = stream;
if let Some(Ok(bytes)) = stream.next_timeout(30_000).await {
if let Ok(msg_in) = RendezvousMessage::parse_from_bytes(&bytes) {
if let Some(rendezvous_message::Union::request_relay(rf)) = msg_in.union {
if !key.is_empty() && rf.licence_key != key {
return Ok(());
}
if !rf.uuid.is_empty() {
let peer = PEERS.lock().unwrap().remove(&rf.uuid);
if let Some(peer) = peer {
log::info!("Forward request {} from {} got paired", rf.uuid, addr);
return relay(stream, peer).await;
} else {
log::info!("New relay request {} from {}", rf.uuid, addr);
PEERS.lock().unwrap().insert(rf.uuid.clone(), stream);
sleep(30.).await;
PEERS.lock().unwrap().remove(&rf.uuid);
res = listener2.accept() => {
match res {
Ok((stream, addr)) => {
stream.set_nodelay(true).ok();
handle_connection(stream, addr, &limiter, key, true).await;
}
Err(err) => {
log::error!("listener2.accept failed: {}", err);
break;
}
}
}
}
}
}
async fn handle_connection(
stream: TcpStream,
addr: SocketAddr,
limiter: &Limiter,
key: &str,
ws: bool,
) {
let ip = addr.ip().to_string();
if !ws && ip == "127.0.0.1" {
let limiter = limiter.clone();
tokio::spawn(async move {
let mut stream = stream;
let mut buffer = [0; 64];
if let Ok(Ok(n)) = timeout(1000, stream.read(&mut buffer[..])).await {
if let Ok(data) = std::str::from_utf8(&buffer[..n]) {
let res = check_cmd(data, limiter).await;
stream.write(res.as_bytes()).await.ok();
}
}
});
return;
}
if BLOCKLIST.read().await.get(&ip).is_some() {
log::info!("{} blocked", ip);
return;
}
let key = key.to_owned();
let limiter = limiter.clone();
tokio::spawn(async move {
allow_err!(make_pair(stream, addr, &key, limiter, ws).await);
});
}
async fn make_pair(
stream: TcpStream,
addr: SocketAddr,
key: &str,
limiter: Limiter,
ws: bool,
) -> ResultType<()> {
if ws {
make_pair_(
tokio_tungstenite::accept_async(stream).await?,
addr,
key,
limiter,
)
.await;
} else {
make_pair_(FramedStream::from(stream, addr), addr, key, limiter).await;
}
Ok(())
}
async fn relay(stream: FramedStream, peer: FramedStream) -> ResultType<()> {
let mut peer = peer;
async fn make_pair_(stream: impl StreamTrait, addr: SocketAddr, key: &str, limiter: Limiter) {
let mut stream = stream;
peer.set_raw();
stream.set_raw();
if let Ok(Some(Ok(bytes))) = timeout(30_000, stream.recv()).await {
if let Ok(msg_in) = RendezvousMessage::parse_from_bytes(&bytes) {
if let Some(rendezvous_message::Union::request_relay(rf)) = msg_in.union {
if !key.is_empty() && rf.licence_key != key {
return;
}
if !rf.uuid.is_empty() {
let mut peer = PEERS.lock().await.remove(&rf.uuid);
if let Some(peer) = peer.as_mut() {
log::info!("Relayrequest {} from {} got paired", rf.uuid, addr);
let id = format!("{}:{}", addr.ip(), addr.port());
USAGE.write().await.insert(id.clone(), Default::default());
if !stream.is_ws() && !peer.is_ws() {
peer.set_raw();
stream.set_raw();
log::info!("Both are raw");
}
if let Err(err) = relay(addr, &mut stream, peer, limiter, id.clone()).await
{
log::info!("Relay of {} closed: {}", addr, err);
} else {
log::info!("Relay of {} closed", addr);
}
USAGE.write().await.remove(&id);
} else {
log::info!("New relay request {} from {}", rf.uuid, addr);
PEERS.lock().await.insert(rf.uuid.clone(), Box::new(stream));
sleep(30.).await;
PEERS.lock().await.remove(&rf.uuid);
}
}
}
}
}
}
async fn relay(
addr: SocketAddr,
stream: &mut impl StreamTrait,
peer: &mut Box<dyn StreamTrait>,
total_limiter: Limiter,
id: String,
) -> ResultType<()> {
let ip = addr.ip().to_string();
let mut tm = std::time::Instant::now();
let mut elapsed = 0;
let mut total = 0;
let mut total_s = 0;
let mut highest_s = 0;
let mut downgrade: bool = false;
let mut blacked: bool = false;
let limiter = <Limiter>::new(unsafe { SINGLE_BANDWIDTH as _ });
let blacklist_limiter = <Limiter>::new(unsafe { LIMIT_SPEED as _ });
let downgrade_threshold =
(unsafe { SINGLE_BANDWIDTH as f64 * DOWNGRADE_THRESHOLD } / 1000.) as usize; // in bit/ms
let mut timer = interval(Duration::from_secs(3));
let mut last_recv_time = std::time::Instant::now();
loop {
tokio::select! {
res = peer.next() => {
res = peer.recv() => {
if let Some(Ok(bytes)) = res {
stream.send_bytes(bytes.into()).await?;
last_recv_time = std::time::Instant::now();
let nb = bytes.len() * 8;
if blacked || downgrade {
blacklist_limiter.consume(nb).await;
} else {
limiter.consume(nb).await;
}
total_limiter.consume(nb).await;
total += nb;
total_s += nb;
if bytes.len() > 0 {
stream.send_raw(bytes.into()).await?;
}
} else {
break;
}
},
res = stream.next() => {
res = stream.recv() => {
if let Some(Ok(bytes)) = res {
peer.send_bytes(bytes.into()).await?;
last_recv_time = std::time::Instant::now();
let nb = bytes.len() * 8;
if blacked || downgrade {
blacklist_limiter.consume(nb).await;
} else {
limiter.consume(nb).await;
}
total_limiter.consume(nb).await;
total += nb;
total_s += nb;
if bytes.len() > 0 {
peer.send_raw(bytes.into()).await?;
}
} else {
break;
}
},
_ = timer.tick() => {
if last_recv_time.elapsed().as_secs() > 30 {
bail!("Timeout");
}
}
}
let n = tm.elapsed().as_millis() as usize;
if n >= 1_000 {
if BLOCKLIST.read().await.get(&ip).is_some() {
log::info!("{} blocked", ip);
break;
}
blacked = BLACKLIST.read().await.get(&ip).is_some();
tm = std::time::Instant::now();
let speed = total_s / (n as usize);
if speed > highest_s {
highest_s = speed;
}
elapsed += n;
USAGE.write().await.insert(
id.clone(),
(elapsed as _, total as _, highest_s as _, speed as _),
);
total_s = 0;
if elapsed > unsafe { DOWNGRADE_START_CHECK } && !downgrade {
if total > elapsed * downgrade_threshold {
downgrade = true;
log::info!(
"Downgrade {}, exceed downgrade threshold {}bit/ms in {}ms",
id,
downgrade_threshold,
elapsed
);
}
}
}
}
Ok(())
}
fn get_server_sk(key: &str) -> String {
let mut key = key.to_owned();
if let Ok(sk) = base64::decode(&key) {
if sk.len() == sign::SECRETKEYBYTES {
log::info!("The key is a crypto private key");
key = base64::encode(&sk[(sign::SECRETKEYBYTES / 2)..]);
}
}
if key == "-" || key == "_" {
let (pk, _) = crate::common::gen_sk();
key = pk;
}
if !key.is_empty() {
log::info!("Key: {}", key);
}
key
}
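
A note on `get_server_sk` above: a sodiumoxide ed25519 secret key is 64 bytes (`sign::SECRETKEYBYTES`), laid out as a 32-byte seed followed by the 32-byte public key, so re-encoding the second half yields the public key that gets logged and handed out. A standalone sketch of just that step:

```
// Sketch only: standalone version of the key handling in get_server_sk.
// A sodiumoxide ed25519 secret key is SECRETKEYBYTES = 64 bytes: a 32-byte seed
// followed by the 32-byte public key, so the second half *is* the public key.
use sodiumoxide::crypto::sign;

fn public_from_secret(key_b64: &str) -> Option<String> {
    let sk = base64::decode(key_b64).ok()?;
    if sk.len() != sign::SECRETKEYBYTES {
        return None; // not a private key; treat the input as an ordinary key string
    }
    Some(base64::encode(&sk[sign::SECRETKEYBYTES / 2..]))
}
```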
#[async_trait]
trait StreamTrait: Send + Sync + 'static {
async fn recv(&mut self) -> Option<Result<BytesMut, Error>>;
async fn send_raw(&mut self, bytes: Bytes) -> ResultType<()>;
fn is_ws(&self) -> bool;
fn set_raw(&mut self);
}
#[async_trait]
impl StreamTrait for FramedStream {
async fn recv(&mut self) -> Option<Result<BytesMut, Error>> {
self.next().await
}
async fn send_raw(&mut self, bytes: Bytes) -> ResultType<()> {
self.send_bytes(bytes).await
}
fn is_ws(&self) -> bool {
false
}
fn set_raw(&mut self) {
self.set_raw();
}
}
#[async_trait]
impl StreamTrait for tokio_tungstenite::WebSocketStream<TcpStream> {
async fn recv(&mut self) -> Option<Result<BytesMut, Error>> {
if let Some(msg) = self.next().await {
match msg {
Ok(msg) => {
match msg {
tungstenite::Message::Binary(bytes) => {
Some(Ok(bytes[..].into())) // to-do: poor performance
}
_ => Some(Ok(BytesMut::new())),
}
}
Err(err) => Some(Err(Error::new(std::io::ErrorKind::Other, err.to_string()))),
}
} else {
None
}
}
async fn send_raw(&mut self, bytes: Bytes) -> ResultType<()> {
Ok(self
.send(tungstenite::Message::Binary(bytes.to_vec()))
.await?) // to-do: poor performance
}
fn is_ws(&self) -> bool {
true
}
fn set_raw(&mut self) {}
}
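
The `127.0.0.1` branch in `handle_connection` above doubles as a small admin interface: the first (up to 64) bytes of a local, non-websocket connection are passed to `check_cmd` and the reply is written back. A minimal sketch of driving it, assuming hbbr is listening on the default relay port 21117 on the same host:

```
// Sketch only: sends one admin command to a locally running hbbr and returns the reply.
// Assumes the default relay port 21117; the server reads at most 64 bytes, so keep
// commands short ("h" lists them, "u" prints per-connection usage).
use tokio::io::{AsyncReadExt, AsyncWriteExt};
use tokio::net::TcpStream;

async fn relay_cmd(cmd: &str) -> std::io::Result<String> {
    let mut stream = TcpStream::connect("127.0.0.1:21117").await?;
    stream.write_all(cmd.as_bytes()).await?;
    let mut buf = vec![0u8; 4096];
    let n = stream.read(&mut buf).await?; // check_cmd's text response
    Ok(String::from_utf8_lossy(&buf[..n]).into_owned())
}
```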

File diff suppressed because it is too large Load Diff

View File

@@ -1,101 +0,0 @@
use hbb_common::{
allow_err, log,
tokio::{self, sync::mpsc},
ResultType,
};
use rocksdb::DB;
#[derive(Debug)]
enum Action {
Insert((String, Vec<u8>)),
Get((String, mpsc::Sender<Option<Vec<u8>>>)),
_Close,
}
#[derive(Clone)]
pub struct SledAsync {
tx: Option<mpsc::UnboundedSender<Action>>,
path: String,
}
impl SledAsync {
pub fn new(path: &str, run: bool) -> ResultType<Self> {
let mut res = Self {
tx: None,
path: path.to_owned(),
};
if run {
res.run()?;
}
Ok(res)
}
pub fn run(&mut self) -> ResultType<std::thread::JoinHandle<()>> {
let (tx, rx) = mpsc::unbounded_channel::<Action>();
self.tx = Some(tx);
let db = DB::open_default(&self.path)?;
Ok(std::thread::spawn(move || {
Self::io_loop(db, rx);
log::debug!("Exit SledAsync loop");
}))
}
#[tokio::main(basic_scheduler)]
async fn io_loop(db: DB, rx: mpsc::UnboundedReceiver<Action>) {
let mut rx = rx;
while let Some(x) = rx.recv().await {
match x {
Action::Insert((key, value)) => {
allow_err!(db.put(&key, &value));
}
Action::Get((key, sender)) => {
let mut sender = sender;
allow_err!(
sender
.send(if let Ok(v) = db.get(key) { v } else { None })
.await
);
}
Action::_Close => break,
}
}
}
pub fn _close(self, j: std::thread::JoinHandle<()>) {
if let Some(tx) = &self.tx {
allow_err!(tx.send(Action::_Close));
}
allow_err!(j.join());
}
pub async fn get(&mut self, key: String) -> Option<Vec<u8>> {
if let Some(tx) = &self.tx {
let (tx_once, mut rx) = mpsc::channel::<Option<Vec<u8>>>(1);
allow_err!(tx.send(Action::Get((key, tx_once))));
if let Some(v) = rx.recv().await {
return v;
}
}
None
}
#[inline]
pub fn deserialize<'a, T: serde::Deserialize<'a>>(v: &'a Option<Vec<u8>>) -> Option<T> {
if let Some(v) = v {
if let Ok(v) = std::str::from_utf8(v) {
if let Ok(v) = serde_json::from_str::<T>(&v) {
return Some(v);
}
}
}
None
}
pub fn insert<T: serde::Serialize>(&mut self, key: String, v: T) {
if let Some(tx) = &self.tx {
if let Ok(v) = serde_json::to_vec(&v) {
allow_err!(tx.send(Action::Insert((key, v))));
}
}
}
}
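
For reference (this is the sled/rocksdb wrapper the commit removes in favour of sqlite), a minimal sketch of how the `SledAsync` API above was used; the `Peer` value type and the key are illustrative:

```
// Sketch only: the Peer value type and the key are made up; only the SledAsync
// methods defined above are used. Values are stored as serde_json bytes.
#[derive(serde::Serialize, serde::Deserialize)]
struct Peer {
    socket_addr: String,
}

async fn demo() -> hbb_common::ResultType<()> {
    let mut db = SledAsync::new("sled.db", true)?; // `true` spawns the io_loop thread
    db.insert(
        "some_id".to_owned(),
        Peer { socket_addr: "1.2.3.4:21116".to_owned() },
    );
    let raw = db.get("some_id".to_owned()).await; // Option<Vec<u8>>
    let _peer: Option<Peer> = SledAsync::deserialize(&raw); // JSON-decoded back
    Ok(())
}
```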

1
src/version.rs Normal file
View File

@@ -0,0 +1 @@
pub const VERSION: &str = "1.1.5";