mdadm -v -C /dev/md0 -l 0 -n 2 /dev/sdb /dev/sdc
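Optional check that the array came up cleanly:
cat /proc/mdstat
mdadm --detail /dev/md0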
apt install -y sshpass
sshpass -p 'jm' scp root@139.198.124.161:/lotus.7z .
apt install -y p7zip-full
7z x lotus.7z
cp lotu* /usr/local/sbin
# Create a RAID 0 array and mount it at /workdir
mkfs.xfs /dev/md0        # filesystem choice is up to you; xfs shown here
mkdir -p /workdir
mount /dev/md0 /workdir
Keep an eye on how large these directories get:
.lotus
.lotusminer
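A quick way to check their size later (the directories are created below under /workdir):
du -sh /workdir/.lotus /workdir/.lotusminer /workdir/FIL_PROOFS_PARAMETER_CACHE /workdir/FIL_PROOFS_PARENT_CACHE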
mkdir /workdir/FIL_PROOFS_PARAMETER_CACHE
mkdir /workdir/FIL_PROOFS_PARENT_CACHE
mkdir /workdir/.lotus
mkdir /workdir/.lotusminer
cd /root
ln -s /workdir/.lotus
ln -s /workdir/.lotusminer
cd /var/tmp
ln -s /workdir/FIL_PROOFS_PARAMETER_CACHE
ln -s /workdir/FIL_PROOFS_PARENT_CACHE
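Optionally verify the links resolve to /workdir:
ls -l /root/.lotus /root/.lotusminer /var/tmp/FIL_PROOFS_PARAMETER_CACHE /var/tmp/FIL_PROOFS_PARENT_CACHE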

Start the lotus daemon:
nohup lotus daemon 2>daemon.log &
Initialize the miner; roughly 1 FIL needs to be in the owner wallet before this step.
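If the owner wallet does not exist yet, a minimal sketch with the plain lotus CLI to create one and confirm the funds arrived:
lotus wallet new bls
lotus wallet list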
lotus-miner init
nohup lotus-miner run 2>miner.log &
Once both are up:
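A few sanity checks before continuing (standard lotus CLI commands):
lotus sync wait      # blocks until the chain has caught up
lotus-miner info     # confirms the miner API is reachable
tail -n 50 daemon.log miner.log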

a = """
ulimit -SHn 1048576
export MINER_API_INFO=eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJBbGxvdyI6WyJyZWFkIiwid3JpdGUiLCJzaWduIiwiYWRtaW4iXX0.ryFr-RefJlrjE294Tnsp9-dkXZSyYRe9BumXfmzj1P0:/ip4/127.0.0.1/tcp/2345/http
export LOTUS_WORKER_PATH=/workdir/work/lotusworker-p1

export TMPDIR=/workdir
#export FIL_PROOFS_PARENT_CACHE=/mnt/ssd/cache/filecoin-parents
export FIL_PROOFS_MAXIMIZE_CACHING=0
export FIL_PROOFS_USE_MULTICORE_SDR=1
export FIL_PROOFS_USE_GPU_COLUMN_BUILDER=1
export FIL_PROOFS_USE_GPU_TREE_BUILDER=1
export BELLMAN_CPU_UTILIZATION=0
export FIL_PROOFS_MULTICORE_SDR_PRODUCERS=1
export RUST_LOG=Info
export p1_lock_group=7

export tree_path=/workdir/sc-02-data-tree-d.dat
export LOTUS_WORKER_PATH=/workdir/work/lotusworker-p0
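# wk-p0 handles AddPiece (AP), PreCommit1 (PC1) and Commit1 (C1) only; --ability and --workername
# come from the patched lotus-worker build used here, the numbers presumably capping concurrent tasks per type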
nohup lotus-worker run --listen=0.0.0.0:6610 --workername=wk-p0 --ability=AP:1,PC1:13,PC2:0,C1:1,C2:0,FIN:0,GET:0,UNS:0,RD:0 --parallel-fetch-limit=16 --addpiece=true --precommit1=true --precommit2=false --commit=false --no-swap=true >> ~/workerp0.log &

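# pin the :6610 worker to CPUs 0-47 / memory node 0 with a cgroup-v1 cpuset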
mkdir /sys/fs/cgroup/cpuset/a0
echo "0-47" > /sys/fs/cgroup/cpuset/a0/cpuset.cpus
echo 0 > /sys/fs/cgroup/cpuset/a0/cpuset.mems
sleep 1
ps -ef|grep lotus|grep :6610|awk '{print $2}'|xargs echo > /sys/fs/cgroup/cpuset/a0/tasks
export LOTUS_WORKER_PATH=/workdir/work/lotusworker-p1
nohup lotus-worker run --listen=0.0.0.0:6611 --workername=wk-p1 --ability=AP:1,PC1:13,PC2:0,C1:1,C2:0,FIN:0,GET:0,UNS:0,RD:0 --parallel-fetch-limit=16 --addpiece=true --precommit1=true --precommit2=false --commit=false --no-swap=true >> ~/workerp1.log &

last=$!
mkdir /sys/fs/cgroup/cpuset/a1
echo "48-95" > /sys/fs/cgroup/cpuset/a1/cpuset.cpus
echo 1 > /sys/fs/cgroup/cpuset/a1/cpuset.mems
sleep 1
ps -ef|grep lotus|grep :6611|awk '{print $2}'|xargs echo > /sys/fs/cgroup/cpuset/a1/tasks
export LOTUS_WORKER_PATH=/workdir/work/lotusworker-p2
export CUDA_VISIBLE_DEVICES=0
#nohup lotus-worker run --listen=0.0.0.0:6612 --workername=wk-p2 --ability=AP:1,PC1:0,PC2:1,C1:1,C2:1,FIN:1,GET:1,UNS:1,RD:1 --parallel-fetch-limit=16 --addpiece=true --precommit1=true --precommit2=true --commit=true --no-swap=true >> ~/workerp2.log &
mkdir /sys/fs/cgroup/cpuset/a2
echo "245-255" > /sys/fs/cgroup/cpuset/a2/cpuset.cpus
echo 1 > /sys/fs/cgroup/cpuset/a2/cpuset.mems
sleep 1
ps -ef|grep lotus|grep :6612|awk '{print $2}'|xargs echo > /sys/fs/cgroup/cpuset/a2/tasks
export LOTUS_WORKER_PATH=/workdir/work/lotusworker-p3
export CUDA_VISIBLE_DEVICES=1
#nohup lotus-worker run --listen=0.0.0.0:6613 --workername=wk-p3 --ability=AP:1,PC1:0,PC2:1,C1:1,C2:1,FIN:1,GET:1,UNS:1,RD:1 --parallel-fetch-limit=16 --addpiece=true --precommit1=true --precommit2=true --commit=true --no-swap=true >> ~/workerp3.log &
last=$!
mkdir /sys/fs/cgroup/cpuset/a3
echo "181-191" > /sys/fs/cgroup/cpuset/a3/cpuset.cpus
echo 0 > /sys/fs/cgroup/cpuset/a3/cpuset.mems
sleep 1
ps -ef|grep lotus|grep :6613|awk '{print $2}'|xargs echo > /sys/fs/cgroup/cpuset/a3/tasks
"""
import os

with open("/foo.txt", "w") as f:
    f.write(a)                     # write the shell script above to a file
d = os.popen("bash /foo.txt")      # run it; the workers are launched in the background

python -O -m py_compile a.py
chmod +x a.pyo
mv a.pyo lotusworker
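The a.pyo output is Python 2 behaviour; on Python 3 the bytecode goes to __pycache__, so a rough equivalent (still launched via python3) would be:
python3 -c "import py_compile; py_compile.compile('a.py', cfile='lotusworker', optimize=1)"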
Start the workers.

Memory settings for use across NUMA nodes:
sysctl -w vm.swappiness=1
echo 2048000 > /proc/sys/vm/min_free_kbytes
echo 1 > /proc/sys/vm/zone_reclaim_mode
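These do not survive a reboot; to persist them, one option is /etc/sysctl.conf:
echo 'vm.swappiness=1' >> /etc/sysctl.conf
echo 'vm.min_free_kbytes=2048000' >> /etc/sysctl.conf
echo 'vm.zone_reclaim_mode=1' >> /etc/sysctl.conf
sysctl -p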

  1. I looked at a few tools: systemtap, dtrace, perf and the like, and tried them during off-peak hours. systemtap's On-CPU/Off-CPU analysis and flame graphs are quite good; at least they show which kernel syscalls are involved, so I could then look for fixes targeting the expensive calls in the flame graph. systemtap is nice, but its sample-bt script never behaved well for me: under heavy load it hit its own resource limits, and tweaking parameters did not help much. So I switched to perf, which is lightweight; grab 60 seconds of samples, repeat a few times, and you actually get usable data.
     # perf record -F 99 -ag -o p1.data -- sleep 60
     # perf script -i p1.data | ./stackcollapse-perf.pl > out.perf-folded
     # cat out.perf-folded | ./flamegraph.pl > perf-kernel-1.svg
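stackcollapse-perf.pl and flamegraph.pl come from Brendan Gregg's FlameGraph repo:
git clone https://github.com/brendangregg/FlameGraph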

for i in $(seq 0 190) ;do cpufreq-set -c $i -d 3400MHz -u 3400MHz;done

for i in $(seq 0 190) ;do cpufreq-set -c $i -g performance ;done

cpufreq-set -g performance
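To confirm the governor and frequency cap took effect (sysfs paths, present when a cpufreq driver is loaded):
cat /sys/devices/system/cpu/cpu0/cpufreq/scaling_governor
cat /sys/devices/system/cpu/cpu0/cpufreq/scaling_cur_freq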