// index.js (forked from legalizemath/mybosbotexample), 2921 lines (2471 loc), 126 KB
// NOT SAFE TO RUN
// I KNOW WHAT I AM DOING
import fs from 'fs' // comes with nodejs, to read/write log files
import dns from 'dns' // comes with nodejs, to check if there's internet access
import bos from './bos.js' // my wrapper for bos, needs to be in same folder
import path from 'path';
import dotenv from 'dotenv';
import crypto from 'crypto'; // hash settings file
const { min, max, trunc, floor, abs, random, log2, pow, ceil, exp, PI } = Math // useful Math
const copy = item => JSON.parse(JSON.stringify(item)) // copy values to new item, useful
const env = process.env
dotenv.config({ path: path.resolve(process.cwd(), '.env.local') });
dotenv.config({ path: path.resolve(process.cwd(), '.env') });
// ## MANAGEMENT SWITCHES
// allow BOS reconnect
const ALLOW_BOS_RECONNECT = env.ALLOW_BOS_RECONNECT === 'true' ? true : false
// allow actually adjusting fees and max htlc sizes and updating peer records
// if false it will just print out what would've been to terminal & _feeChanges.txt
const ADJUST_POLICIES = env.ADJUST_POLICIES === 'true' ? true : false // set max htlc & fees
const ADJUST_POLICIES_FEES = env.ADJUST_POLICIES_FEES === 'true' ? true : false // if false, only max htlc sizes are set (only applies when ADJUST_POLICIES is true)
// allow rebalancing (false = dryrun)
const ALLOW_REBALANCING = env.ALLOW_REBALANCING === 'true' ? true : false
// try simple reconnecting to inactive or offline peers (safer to run frequently)
const ALLOW_SIMPLE_RECONNECT = env.ALLOW_SIMPLE_RECONNECT === 'true' ? true : false
// backup payments in jsons & then remove from database for speed
const ALLOW_DB_CLEANUP = env.ALLOW_DB_CLEANUP === 'true' ? true : false
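// an illustrative .env.local sketch for the switches above (values are examples only, not recommendations):
//   ALLOW_BOS_RECONNECT=false
//   ADJUST_POLICIES=true
//   ADJUST_POLICIES_FEES=true
//   ALLOW_REBALANCING=false
//   ALLOW_SIMPLE_RECONNECT=true
//   ALLOW_DB_CLEANUP=false
// each switch defaults to false unless its env variable is exactly the string 'true'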
// ######################
// ## GENERAL SETTINGS
// Telegram Settings
const TELEGRAM_CHATID = env.TELEGRAM_CHATID || ''
const TELEGRAM_TOKEN = env.TELEGRAM_TOKEN || ''
// Tor Proxy for Telegram Bot
const TELEGRAM_PROXY_HOST = env.TELEGRAM_PROXY_HOST || ''
const TELEGRAM_PROXY_PORT = env.TELEGRAM_PROXY_PORT || ''
// path to channel.db for size
const DB_PATH = env.DB_PATH || ''
// time to sleep between trying a bot step again
const MINUTES_BETWEEN_STEPS = parseFloat(env.MINUTES_BETWEEN_STEPS) || 10
// how far back to look for routing stats, must be longer than any other DAYS setting
const DAYS_FOR_STATS = parseFloat(env.DAYS_FOR_STATS) || 7
// channels smaller than this aren't worth balancing or adjusting fees for; they're usually special cases anyway
// (maybe use a proportional fee policy for them instead) ~2M for now
const MIN_CHAN_SIZE = parseInt(env.MIN_CHAN_SIZE) || 1.9e6
// smallest amount of sats necessary to consider a side not drained
const MIN_SATS_PER_SIDE = parseInt(env.MIN_SATS_PER_SIDE) || 500e3 //1e6
// local sats below this means channel is drained
const SATS_PER_SIDE_DRAINED_LIMIT = MIN_SATS_PER_SIDE * 0.25
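// e.g. with the default MIN_SATS_PER_SIDE of 500e3 above, a channel side counts as drained below 125e3 sats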
// how often to move payments from db to backup logs
const DAYS_BETWEEN_DB_CLEANING = parseFloat(env.DAYS_BETWEEN_DB_CLEANING) || 7
// minimum sats away from 0.5 balance to consider off-balance
const MIN_SATS_OFF_BALANCE = parseInt(env.MIN_SATS_OFF_BALANCE) || 750e3
// limit of sats to balance per attempt
// larger = faster rebalances, less for channels.db to store
// smaller = can use smaller liquidity/channels for cheaper/easier rebalances
// bos rebalance does probing + size up htlc strategy
// (bos rebalance requires >50k)
const MAX_REBALANCE_SATS = parseInt(env.MAX_REBALANCE_SATS) || 1e6 // 2e5
// unbalanced sats below this can stop (bos rebalance requires >50k)
const MIN_REBALANCE_SATS = parseInt(env.MIN_REBALANCE_SATS) || 1e5 //51e3
// maximum percent of peers allowed offline before restarting tor service
const PEERS_OFFLINE_PERCENT_MAXIMUM = parseInt(env.PEERS_OFFLINE_PERCENT_MAXIMUM) || 11
const INCLUDE_RECONNECTED_IN_OFFLINE = env.INCLUDE_RECONNECTED_IN_OFFLINE === 'false' ? false : true
// minutes between running bos reconnect (does some disable-based reconnections)
const MINUTES_BETWEEN_BOS_RECONNECTS = parseInt(env.MINUTES_BETWEEN_BOS_RECONNECTS) || 24 * 60
// minutes between running basic offline/inactive reconnect
const MINUTES_BETWEEN_SIMPLE_RECONNECTS = parseInt(env.MINUTES_BETWEEN_SIMPLE_RECONNECTS) || 2 * 60
// memory handling
const SHOW_RAM_USAGE = env.SHOW_RAM_USAGE === 'true' ? true : false
// more logs
const VERBOSE = env.VERBOSE === 'false' ? false : true
const DEBUG = env.DEBUG === 'true' ? true : false
// ## REBALANCING SETTINGS
// multiplier for proportional safety ppm margin
const SAFETY_MARGIN = parseFloat(env.SAFETY_MARGIN) || 1.25 //1.125
// maximum flat safety ppm margin (proportional below this value)
const SAFETY_MARGIN_FLAT_MAX = parseInt(env.SAFETY_MARGIN_FLAT_MAX) || 222
// suspect might cause tor issues if too much bandwidth being used
// setting to 1 makes it try just 1 rebalance at a time
const MAX_PARALLEL_REBALANCES = parseInt(env.MAX_PARALLEL_REBALANCES) || 5
// max minutes to spend per rebalance try
const MINUTES_FOR_REBALANCE = parseInt(env.MINUTES_FOR_REBALANCE) || 5
// max minutes to spend per keysend try
const MINUTES_FOR_KEYSEND = parseInt(env.MINUTES_FOR_KEYSEND) || 5
// rebalance with faster keysends after bos rebalance works (faster but higher risk of stuck sats so I send less)
const USE_KEYSENDS_AFTER_BALANCE = env.USE_KEYSENDS_AFTER_BALANCE === 'false' ? false : true
// only use keysends (I use for testing)
const ONLY_USE_KEYSENDS = env.ONLY_USE_KEYSENDS === 'false' ? false : true
// sats to balance via keysends
const MAX_REBALANCE_SATS_KEYSEND = parseInt(env.MAX_REBALANCE_SATS_KEYSEND) || 2e5
// show rebalancing printouts (very verbose routing info (BoS output))
const SHOW_REBALANCE_LOG = env.SHOW_REBALANCE_LOG === 'false' ? false : true
// whether to average in the earned/routed-out fee rate measured over DAYS_FOR_STATS
// when determining the fee rate to use for rebalancing
const INCLUDE_EARNED_FEE_RATE_FOR_REBALANCE = env.INCLUDE_EARNED_FEE_RATE_FOR_REBALANCE === 'false' ? false : true
// number of times to retry a rebalance on probe timeout while
// increasing fee for last hop to skip all depleted channels
// Only applies specifically on ProbeTimeout so unsearched routes remain
const RETRIES_ON_TIMEOUTS_REBALANCE = parseInt(env.RETRIES_ON_TIMEOUTS_REBALANCE) || 2
const RETRIES_ON_TIMEOUTS_SEND = parseInt(env.RETRIES_ON_TIMEOUTS_SEND) || 1
// time between retrying same good pair
const MIN_MINUTES_BETWEEN_SAME_PAIR = (MINUTES_BETWEEN_STEPS + MINUTES_FOR_REBALANCE) * 2
// max rebalance repeats while successful
// MAX_REBALANCE_REPEATS applies when the realized rebalance rate is > 1/2 the max rebalance rate,
// i.e. it limits repeats when there's no major discount
const MAX_REBALANCE_REPEATS = parseInt(env.MAX_REBALANCE_REPEATS) || 10 // without major discount
const MAX_REBALANCE_REPEATS_ANY = parseInt(env.MAX_REBALANCE_REPEATS_ANY) || 21 // with even discounts
// multiply max ppm rate after each rebalance for repeats by this
const REPEAT_MAX_RATE_RATIO = 0.98
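// rough illustration: after 10 successful repeats the allowed rate has shrunk to 0.98^10 ≈ 82%
// of the starting max, and to ≈ 65% by the 21st repeat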
// ms to put between each rebalance launch for safety
const STAGGERED_LAUNCH_MS = parseInt(env.STAGGERED_LAUNCH_MS) || 1111
// ## FEE SETTINGS
// how often to update fees and max htlc sizes (keep high to minimize network gossip)
// also time span of flow to look back at for deciding if and by how much to increase each fee rate
const MINUTES_BETWEEN_FEE_CHANGES = parseInt(env.MINUTES_BETWEEN_FEE_CHANGES) || 121
// minimum ppm ever possible
const MIN_PPM_ABSOLUTE = parseInt(env.MIN_PPM_ABSOLUTE) || 0
// maximum ppm ever possible
const MAX_PPM_ABSOLUTE = parseInt(env.MAX_PPM_ABSOLUTE) || 999
// max size of fee adjustment upward
const NUDGE_UP = parseFloat(env.NUDGE_UP) || 0.0314
// max size of fee adjustment downward
const NUDGE_DOWN_PER_DAY = parseFloat(env.NUDGE_DOWN_PER_DAY) || 0.01337
const NUDGE_DOWN = NUDGE_DOWN_PER_DAY / ((24 * 60) / MINUTES_BETWEEN_FEE_CHANGES) // per fee-change interval
// increase NUDGE_DOWN by this factor when channel has never been seen routing out
const NUDGE_DOWN_INACTIVE_MULTIPLIER = 2
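// rough worked example with the defaults above (illustration only):
//   fee updates per day ≈ (24 * 60) / 121 ≈ 11.9
//   NUDGE_DOWN ≈ 0.01337 / 11.9 ≈ 0.0011 per update
//   for a channel never seen routing out: ≈ 0.0011 * 2 ≈ 0.0022 per update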
// how much internal ppm setpoint has to change by to update what public sees as new fee rate
const FEE_CHANGE_TOLERANCE_FRACTION = 0.21 // by this fraction
const FEE_CHANGE_TOLERANCE_FLAT = 21 // or by this flat amount in ppm
// min days of no routing activity before allowing reduction in fees
const DAYS_FOR_FEE_REDUCTION = parseFloat(env.DAYS_FOR_FEE_REDUCTION) || 4.2
// rebalancing fee rates below this aren't considered for rebalancing
const MIN_FEE_RATE_FOR_REBALANCE = parseInt(env.MIN_FEE_RATE_FOR_REBALANCE) || 1
// max fee rate for rebalancing even if channel earns more
const MAX_FEE_RATE_FOR_REBALANCE = parseInt(env.MAX_FEE_RATE_FOR_REBALANCE) || 599
// fee rate to stop forwards out of drained channel
const ROUTING_STOPPING_FEE_RATE = parseInt(env.ROUTING_STOPPING_FEE_RATE) || 999
// available weighting strategies used for rebalancing
const WEIGHT_OPTIONS = {}
// WEIGHT_OPTIONS.FLAT = () => 1 // no preferences, totally random
// 2x more sats from balance is 2x more likely to be selected
// WEIGHT_OPTIONS.UNBALANCED_SATS = peer => peer.unbalancedSats
// 2x more sats from balance is ~1.4x more likely to be selected
// better for trying more channel combinations while still favoring unbalanced
// WEIGHT_OPTIONS.UNBALANCED_SATS_SQRT = peer => trunc(sqrt(peer.unbalancedSats))
// WEIGHT_OPTIONS.UNBALANCED_SATS_SQRTSQRT = peer => trunc(sqrt(sqrt(peer.unbalancedSats)))
// WEIGHT_OPTIONS.CHANNEL_SIZE = peer => peer.totalSats
// ensure highest priority if below MIN_SATS_PER_SIDE on any side and decay to 0 when balanced
WEIGHT_OPTIONS.MIN_LIQUIDITY = peer =>
1 - exp(-2 * pow(PI, 2) * pow((peer.outbound_liquidity - 0.5 * peer.capacity) / (peer.capacity - 2 * MIN_SATS_PER_SIDE), 2))
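// rough worked example, assuming a hypothetical 10M-sat channel and the default 500k MIN_SATS_PER_SIDE:
//   balanced (5M local / 5M remote)        -> weight 0
//   local side at 500k (MIN_SATS_PER_SIDE) -> weight ≈ 0.99
//   local side fully drained (0 sats)      -> weight ≈ 1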
const WEIGHT = WEIGHT_OPTIONS.MIN_LIQUIDITY // default weight
// just to prioritize more profitable channels including fee rate in random sorting
// give highest priority to higher fee rates on scale 0 - 1
WEIGHT_OPTIONS.FEE_RATE = peer => 1 - exp((-2 * PI * peer.fee_rate) / MAX_FEE_RATE_FOR_REBALANCE)
// combine fee rate and liquidity functions just for remote sorting
const WEIGHT_REMOTE = peer => 0.5 * WEIGHT_OPTIONS.MIN_LIQUIDITY(peer) + 0.5 * WEIGHT_OPTIONS.FEE_RATE(peer)
// fuzzy the amount being rebalanced to blend in better
const fuzzyAmount = (amount, fraction = 0.21) => trunc(amount * (1 - fraction * random()))
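// e.g. fuzzyAmount(200e3) returns somewhere between ~158,000 sats (random() near 1) and 200,000 sats (random() near 0)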
// array of public key strings to avoid in paths (avoids from settings.json added to it)
const AVOID_LIST = []
// ## FILESYSTEM SETTINGS
const SNAPSHOTS_PATH = './snapshots'
const PEERS_LOG_PATH = './peers'
const LOG_FILES = './logs'
const TIMERS_PATH = 'timers.json'
const SETTINGS_PATH = 'settings.json'
let SETTINGS_FILEHASH = ''
const LAST_SEEN_PATH = `${LOG_FILES}/lastSeen.json`
// Timers
const DEFAULT_TIMERS = {
lastReconnect: 0,
lastSimpleReconnect: 0,
lastFeeUpdate: 0,
lastCleaningUpdate: 0,
lastDailyReset: 0,
lastNodeReset: 0
}
// Global Node Info
const mynode = {
scriptStarted: Date.now(),
public_key: '',
restartFailures: 0,
offlineLimitPercentage: PEERS_OFFLINE_PERCENT_MAXIMUM,
peers: [],
htlcLimiter: {},
timers: copy(DEFAULT_TIMERS)
}
const runBot = async () => {
logDim('runBot()')
// force clean up memory if gc exposed with --expose-gc
global?.gc?.()
if (SHOW_RAM_USAGE) printMemoryUsage('(at start of runBot cycle)')
// reload settings
await runReloadSettings()
// check if time for bos reconnect
await runBotReconnectCheck()
// check if time for updating fees
await runUpdateFeesCheck()
// runCleaningCheck
await runCleaningCheck()
// simple reconnect
await runSimpleReconnect()
// do rebalancing
await runBotRebalanceOrganizer()
// long pause
await sleep(MINUTES_BETWEEN_STEPS * minutes)
// restart
runBot()
}
// starts everything
const initialize = async () => {
// get authorized access to node
const auth = await bos.initializeAuth()
// get your own public key
const identity = await bos.callAPI('getIdentity')
if (!identity.public_key || identity.public_key.length < 10) {
console.log()
throw new Error('unknown public key for this node')
}
mynode.public_key = identity.public_key
const feeUpdatesPerDay = +((60 * 24) / MINUTES_BETWEEN_FEE_CHANGES).toFixed(1)
const feeRateIncreaseString = (NUDGE_UP * 100).toFixed(2)
const feeRateDecreaseString = (NUDGE_DOWN * 100).toFixed(2)
const feeRateToleranceString = (FEE_CHANGE_TOLERANCE_FRACTION * 100).toFixed(0)
const feeRateToleranceFlatString = FEE_CHANGE_TOLERANCE_FLAT.toFixed(0)
const maxUpFeeChangePerDay = ((1 + NUDGE_UP) ** feeUpdatesPerDay - 1) * 100
const maxDownFeeChangePerDay = (1 - (1 - NUDGE_DOWN) ** feeUpdatesPerDay) * 100
const hoursBetweenFeeChanges = (MINUTES_BETWEEN_FEE_CHANGES / 60).toFixed(1)
// roughly how many decreases it takes to undo one increase
const decreasesToUndo = ceil(NUDGE_UP / NUDGE_DOWN)
// how much time is that including 2 periods before decreases are allowed
const minutesToUndo = (decreasesToUndo + 2) * MINUTES_BETWEEN_FEE_CHANGES
const daysToUndoString = (minutesToUndo / 60 / 24).toFixed(1)
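// rough worked example with the defaults (illustration only):
//   decreasesToUndo ≈ ceil(0.0314 / 0.0011) ≈ 28 decreases
//   minutesToUndo ≈ (28 + 2) * 121 ≈ 3630 minutes ≈ 2.5 days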
console.log(`${getDate()}
========================================================
this node's public key:
"${mynode.public_key}"
`+
(ADJUST_POLICIES_FEES ? ` There are at most ${feeUpdatesPerDay} fee updates per day.
UP: Channel fee rate set-point increases by max of +${feeRateIncreaseString}%
every ${hoursBetweenFeeChanges} hours or more
rising toward that max as outflow at that rate approaches the channel's local sats per day.
At constant high outflow absolute max is +${maxUpFeeChangePerDay.toFixed(1)}% / day.
DOWN: Fee rate set-point decreases by max of -${feeRateDecreaseString}%
every ${hoursBetweenFeeChanges} hours or more
if no outflow took place in at least 2x as long of time.
Channel must also have more than ${pretty(MIN_SATS_PER_SIDE)} sats local
to allow a decrease of fee rate, otherwise outflow numbers are unreliable.
At continuous 0 outflow, max decrease per day is -${maxDownFeeChangePerDay.toFixed(1)}%.
If never logged as outflowing, decreasing rate is increased by factor of ${NUDGE_DOWN_INACTIVE_MULTIPLIER}x.
One high increase in fee rate takes ${daysToUndoString} days to undo with decreases.
Actual fee rate in policy is only updated when internal set-point is more
than ${feeRateToleranceString}% or ${feeRateToleranceFlatString} ppm away from current public policy fee rate.
If channel has under ${pretty(SATS_PER_SIDE_DRAINED_LIMIT)} sats local
it's also considered drained and policy fee rate is temporarily increased
to ${ROUTING_STOPPING_FEE_RATE} ppm to discourage additional routing.`
: ``)
+`
IF THIS IS INCORRECT, ctrl + c
========================================================
`)
// make folders for all the files I use
if (!fs.existsSync(PEERS_LOG_PATH)) {
fs.mkdirSync(PEERS_LOG_PATH, { recursive: true })
}
if (!fs.existsSync(SNAPSHOTS_PATH)) {
fs.mkdirSync(SNAPSHOTS_PATH, { recursive: true })
}
if (!fs.existsSync(LOG_FILES)) {
fs.mkdirSync(LOG_FILES, { recursive: true })
}
// load settings file
if (fs.existsSync(SETTINGS_PATH)) {
const settingsFile = fs.readFileSync(SETTINGS_PATH)
const hashSum = crypto.createHash('sha256').update(settingsFile)
SETTINGS_FILEHASH = hashSum.digest('hex')
logDim(`initialize(): loading settings.json - hash: ${SETTINGS_FILEHASH}`)
mynode.settings = JSON.parse(fs.readFileSync(SETTINGS_PATH))
// add to avoid list from there
if (mynode.settings?.avoid?.length) {
mynode.settings.avoid.forEach(pk => {
if (!pk.startsWith('//')) AVOID_LIST.push(pk)
})
console.log(`${getDate()}`, { AVOID_LIST })
}
}
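// a minimal illustrative settings.json sketch based on the avoid-list handling above (the pubkey is a placeholder):
// {
//   "avoid": [
//     "// entries starting with // are treated as comments and skipped",
//     "02...full_public_key_of_node_to_avoid_in_paths..."
//   ]
// }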
if (
TELEGRAM_PROXY_HOST !== '' &&
TELEGRAM_PROXY_PORT !== '' &&
TELEGRAM_CHATID !== '' &&
TELEGRAM_TOKEN !== ''
// && mynode.settings?.telegram?.chat_id
// && mynode.settings?.telegram?.token
) {
logDim(`bos.sayWithTelegramBot(): Connecting via proxy: socks://${TELEGRAM_PROXY_HOST}:${TELEGRAM_PROXY_PORT}`)
} else {
logDim(`bos.sayWithTelegramBot(): Connecting without proxy`)
}
// small pause for friendly stop
await sleep(5 * seconds)
// timers
initializeBotTimers()
// generate snapshots at start to ensure recent data
await generatePeersSnapshots()
// small pause for friendly stop
await sleep(5 * seconds)
// start bot loop
runBot()
}
// update settings on the fly if necessary (compare file hashes)
const runReloadSettings = async () => {
logDim('runReloadSettings()')
// check and reload settings
const settingsFile = fs.existsSync(SETTINGS_PATH) ? fs.readFileSync(SETTINGS_PATH) : null
if (settingsFile) {
const hashSum = crypto.createHash('sha256').update(settingsFile)
const newHash = hashSum.digest('hex')
if(SETTINGS_FILEHASH !== newHash) {
// logDim(`runReloadSettings(): reloading settings.json\noldhash: ${SETTINGS_FILEHASH}\nnewhash: ${newHash}`)
logDim(`runReloadSettings(): reloading settings.json`)
mynode.settings = JSON.parse(fs.readFileSync(SETTINGS_PATH))
// empty and refill list
if (mynode.settings?.avoid?.length) {
AVOID_LIST.length = 0
mynode.settings.avoid.forEach(pk => {
if (!pk.startsWith('//')) AVOID_LIST.push(pk)
})
console.log(`${getDate()}`, { AVOID_LIST })
}
} else {
// logDim(`runReloadSettings(): settings.json unchanged\noldhash: ${SETTINGS_FILEHASH}\nnewhash: ${newHash}`)
logDim(`runReloadSettings(): settings.json unchanged`)
}
}
// small pause for friendly stop
await sleep(5 * seconds)
}
// my own reconnect method that doesn't disconnect channels based on just disables
// so can run frequently without risking instability
const runSimpleReconnect = async () => {
if (!ALLOW_SIMPLE_RECONNECT) return null
logDim('runSimpleReconnect()')
await sleep(5 * seconds)
const minutesSinceLast = minutesAgo(mynode.timers.lastSimpleReconnect || 0)
if (minutesSinceLast > MINUTES_BETWEEN_SIMPLE_RECONNECTS) {
logDim('runSimpleReconnect(): time to run')
updateBotTimers({ lastSimpleReconnect: Date.now() })
} else {
const timeUntil = (MINUTES_BETWEEN_SIMPLE_RECONNECTS - minutesSinceLast).toFixed(0)
logDim(`runSimpleReconnect(): not time to run yet. (Scheduled in ${timeUntil}+ minutes)`)
return null
}
// key to alias table
const pkToAlias = await bos.getPublicKeyToAliasTable()
// get offline peers pubkeys
const peers = await bos.peers({})
const peersTotal = peers?.length
const offline = peers?.filter(p => p.is_offline).map(p => p.public_key) || []
// get inactive peers pubkeys
const channels = (await bos.callAPI('getChannels'))?.channels || []
const inactive = unique(channels.filter(c => !c.is_active).map(c => c.partner_public_key))
// combine offline and inactive list
const listToReconnect = unique([...offline, ...inactive])
const finalReconnected = []
const finalOffline = []
// reconnect each one
const reconnectionTasks = []
for (const public_key of listToReconnect) {
const reconnection = bos.addPeer({ public_key }).then(res => {
if (res) {
finalReconnected.push(public_key)
logDim(`Reconnected to ${pkToAlias[public_key] || public_key.slice(0, 20)}`)
} else {
finalOffline.push(public_key)
logDim(`Failed to reconnect to ${pkToAlias[public_key] || public_key.slice(0, 20)}`)
}
})
reconnectionTasks.push(reconnection)
// launch in parallel but space apart starting reconnects by few seconds
const STAGGERED_RECONNECTS_MS = 7 * seconds
await sleep(STAGGERED_RECONNECTS_MS, { quiet: true })
}
// wait until all the reconnection tasks are complete
await Promise.all(reconnectionTasks)
// make a nice summary of results
const lastSeen = fs.existsSync(LAST_SEEN_PATH) ? JSON.parse(fs.readFileSync(LAST_SEEN_PATH)) : {}
const peersDisabledToMe = peers.filter(p => p.is_inbound_disabled).map(p => p.public_key)
// sort by offline time
finalOffline.sort((a, b) => (lastSeen[a] || 0) - (lastSeen[b] || 0))
const offlinePeerInfoList = []
for (const public_key of finalOffline) {
const alias = ca(pkToAlias[public_key]) || public_key.slice(0, 20)
const { countPeers, countDisabled } = await getPeersDisabledTowards({ public_key })
const percent = countPeers ? ((countDisabled / countPeers) * 100).toFixed(0) + '% 🚫' : ''
const daysOffline = lastSeen[public_key] ? daysAgo(lastSeen[public_key]).toFixed(1) + 'd offline' : ''
// const isReallyOffline = daysOffline > 1 || (countPeers && countDisabled / countPeers > 0.33)
// const icon = isReallyOffline ? '❌' : '⛔'
// offlinePeerInfoList.push(`${alias} ${icon} : ${percent} | ${daysOffline}`)
offlinePeerInfoList.push(`${alias} : ${percent} | ${daysOffline}`)
}
const message =
peers !== null
? `🔌 Simple Reconnect Statistics:\n`
+ ` ${finalOffline.length} / ${peersTotal} peers offline (${((finalOffline.length / peersTotal) * 100).toFixed(0)}%):\n -`
+ ` ${offlinePeerInfoList.join('\n - ') || 'n/a'}\n`
+ ` ${peersDisabledToMe.length} peers in-disabled (${((peersDisabledToMe.length / peersTotal) * 100).toFixed(0)}%):\n -`
+ ` ${peersDisabledToMe.map(pk => ca(pkToAlias[pk]) || pk.slice(0, 20)).join('\n - ') || 'n/a'}\n`
+ ` ${finalReconnected.length} peers reconnected: \n -`
+ ` ${finalReconnected.map(pk => ca(pkToAlias[pk]) || pk.slice(0, 20)).join('\n - ') || 'n/a'}\n`
+ `(Simple Reconnect every ${MINUTES_BETWEEN_SIMPLE_RECONNECTS} minutes).`
: 'BoS/LND issue detected'
// update user about offline peers just in case
console.log(`${getDate()} ${message.replaceAll(/<\/?.>/g, '')}`)
await telegramLog(message)
}
/*
// restart node if requested
const runNodeRestartCheck = async () => {
if (!(ALLOW_DAILY_RESET && ALLOW_NODE_RESET)) return null
logDim('runNodeRestartCheck()')
await sleep(5 * seconds)
const now = Date.now()
const timers = mynode.timers
const thisHour = new Date(now).getUTCHours()
// check if right hour
const isRightHour = UTC_HOUR_FOR_RESTART === thisHour
// check if at least 6 hours since last daily reset or since last reset from other sources
const HOURS_DELTA = 4
const hoursSinceDailyReset = (now - timers.lastDailyReset) / hours
const hoursSinceReset = (now - timers.lastNodeReset) / hours
const beenLongEnough = hoursSinceDailyReset > HOURS_DELTA && hoursSinceReset > HOURS_DELTA // just in case, checking both
const isReseting = isRightHour && beenLongEnough
// prettier-ignore
logDim(`runNodeRestartCheck() ${isRightHour && beenLongEnough ? 'resetting node processes' : 'not right time'}
${thisHour} UTC hour ${isRightHour ? 'matches' : 'is not'} the specified ${UTC_HOUR_FOR_RESTART} UTC hour for timed node reset.
It has been ${hoursSinceReset > 24 * 2 ? 'over 2 days' : hoursSinceReset.toFixed(1) + ' hours'} since last known reset.
It has been ${hoursSinceDailyReset > 24 * 2 ? 'over 2 days' : hoursSinceDailyReset.toFixed(1) + ' hours'} since last daily reset.
Both must be more than ${HOURS_DELTA} hours to reset again.
`)
if (!isReseting) return null
// seems time to restart node
logDim('runNodeRestartCheck() - right hour and been long enough so restarting node processes')
// update timers
updateBotTimers({ lastDailyReset: now })
await restartNodeProcess()
// run reconnect script to ensure everything is ready again
await runBotReconnect()
} */
const initializeBotTimers = () => {
if (!fs.existsSync(TIMERS_PATH)) {
// if no timer file, just generate timers file to keep track between runs
console.log(`${getDate()} creating timers file at ${TIMERS_PATH}`)
} else {
// if timer file exists, overwrite defaults with whatever is available in file
try {
const timersOnFile = JSON.parse(fs.readFileSync(TIMERS_PATH))
mynode.timers = { ...mynode.timers, ...(timersOnFile ?? {}) }
console.log(`${getDate()} found & updating timers file at ${TIMERS_PATH}`)
} catch (e) {
console.log(`${getDate()} timers file unreadable, writing to ${TIMERS_PATH}`)
}
}
console.log(
`${getDate()} current UTC timestamps: ${JSON.stringify(
Object.keys(mynode.timers).map(timerName => `${timerName.padStart(20)}: ${getDate(mynode.timers[timerName])}`),
null,
2
)}`
)
fs.writeFileSync(TIMERS_PATH, JSON.stringify(mynode.timers))
}
// setting/updating both bot global and written to file timers with newItems object item(s)
// getting is just mynode.timers as its updated from defaults+file during start up and w/ updates
const updateBotTimers = newItems => {
mynode.timers = {
...mynode.timers,
...newItems
}
console.log(`${getDate()} Updated ${TIMERS_PATH}`)
fs.writeFileSync(TIMERS_PATH, JSON.stringify(mynode.timers))
}
/*
// carefully shut down node if low on battery
const checkBattery = async () => {
if (!ALLOW_NODE_SHUTDOWN_ON_LOW_BATTERY) return null
logDim('checkBattery()')
await sleep(5 * seconds)
const battery = await getBattery()
logDim(`checkBattery(): ${battery ? battery + '%' : 'n/a'}`)
if (battery && +battery < 50) {
console.log(`${getDate()} checkBattery(): battery below 50%`)
// check internet connection
const isInternetConnected = await dns.promises
.lookup('google.com')
.then(() => true)
.catch(() => false)
if (isInternetConnected && ALLOW_HTLC_LIMITER) {
// if internet still connected can wait a little for existing forwards to clear
console.log(`${getDate()} checkBattery(): requesting blocking of all new forward requests`)
// if HTLClimiter used, should signal it to reject all NEW forward requests until node is down
mynode.htlcLimiter.stop = true
// giving it 2 min to clear old htlcs
await sleep(2 * minutes)
}
console.log(`${getDate()} checkBattery(): requesting node shut down`)
// now signaling node shut down, picked up by resetHandler.js
const requestTime = Date.now()
const SHUTDOWN_REQUEST_PATH = 'shutdownRequest.json'
fs.writeFileSync(SHUTDOWN_REQUEST_PATH, JSON.stringify({ requestTime }))
// giving lightning node 5 min to shut down
await sleep(5 * minutes)
// exit this bot
console.log(`${getDate()} checkBattery(): terminating bot processes`)
process.exit(0)
}
}
*/
// experimental parallel rebalancing function (unsplit, wip)
const runBotRebalanceOrganizer = async () => {
ALLOW_REBALANCING
? logDim('runBotRebalanceOrganizer()')
: logDim('runBotRebalanceOrganizer() - Dry-Running - No Rebalancing')
await sleep(5 * seconds)
// match up peers
// high weight lets channels pick good peers first (not always, so it occasionally searches for better matches)
// get active peers
const peers = await runBotGetPeers()
// make a list of remote heavy and local heavy peers via balance check
const remoteHeavyPeers = rndWeightedSort(
peers.filter(includeForRemoteHeavyRebalance),
// this one includes fee rate in weight so more profitable channels more likely to be tried more often
WEIGHT_REMOTE
)
const localHeavyPeers = rndWeightedSort(peers.filter(includeForLocalHeavyRebalance), WEIGHT)
// grab original number of peers for each side
const [nRHP, nLHP] = [remoteHeavyPeers.length, localHeavyPeers.length]
// /*
// print out all options of peers & their weight
if (VERBOSE) {
console.log(`${getDate()} Peer weight / balance / alias. Weight function: ${WEIGHT}`)
for (const p of localHeavyPeers) {
const weight = WEIGHT(p).toFixed(5)
const w = weight.padStart(13)
const b = p.balance.toFixed(2)
const local = (p.outbound_liquidity / 1e6).toFixed(1).padStart(4) + 'M'
const remote = (p.inbound_liquidity / 1e6).toFixed(1).padStart(4) + 'M'
console.log(`Local-heavy: ${ca(p.alias).padEnd(30)} ${w}w ${b}b ${local}|${remote}`)
}
console.log('')
for (const p of remoteHeavyPeers) {
const weight = WEIGHT(p).toFixed(5)
const w = weight.padStart(12)
const b = p.balance.toFixed(2)
const local = (p.outbound_liquidity / 1e6).toFixed(1).padStart(4) + 'M'
const remote = (p.inbound_liquidity / 1e6).toFixed(1).padStart(4) + 'M'
console.log(`Remote-heavy: ${ca(p.alias).padEnd(30)} ${w}w ${b}b ${local}|${remote}`)
}
console.log('')
}
// */
// if not actually rebalancing we end here
if (!ALLOW_REBALANCING) return null
// assemble list of matching peers and how much to rebalance
const matchups = []
// keep taking peers out of arrays to match until one side empty
while (localHeavyPeers.length > 0 && remoteHeavyPeers.length > 0) {
// get top lucky remote channel
const remoteHeavy = remoteHeavyPeers[0]
// try to see if there's good match in locals for this peer
// just do it half the time to discover more
const localHeavyIndexIdeal =
random() < 0.5 ? findGoodPeerMatch({ remoteChannel: remoteHeavy, peerOptions: localHeavyPeers }) : -1
// use localHeavyIndexIdeal if it returns an index, otherwise use top local channel
const isGoodPeer = localHeavyIndexIdeal > -1
const localHeavyIndexUsed = isGoodPeer ? localHeavyIndexIdeal : 0
const localHeavy = localHeavyPeers[localHeavyIndexUsed]
// max amount to rebalance is the smaller sats off-balance between the two
const maxSatsToRebalance = trunc(min(localHeavy.unbalancedSats, remoteHeavy.unbalancedSats))
// can also calculate fee rate used this week for routing instead of just current fee rate
// round down fees to nearest sat to get rid of base fee
const routedOut = remoteHeavy.routed_out_msats / 1000
const earnedOut = remoteHeavy.routed_out_fees_msats / 1000
// const capacity = remoteHeavy.capacity
// const remoteSats = remoteHeavy.inbound_liquidity
// grab my outgoing fee for remote heavy peer (from record if available)
const rateNowOutgoing = trunc(getReferenceFee(remoteHeavy))
// actual earning rate (how else to handle very small amounts giving incorrect fee rate?)
const effectiveFeeRate = trunc((floor(earnedOut) / routedOut) * 1e6) || 0
// near MIN_SATS_PER_SIDE routed out will use effective fee, otherwise channel setting
const routedOutFactor = 1 - exp((-routedOut * PI) / MIN_SATS_PER_SIDE)
// the more I route out the more reliable calculated fee rate is vs current channel fee rate
const usedRefFeeRate = trunc(effectiveFeeRate * routedOutFactor + rateNowOutgoing * (1 - routedOutFactor) || 0)
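// rough illustration of the blend above with assumed numbers: if routedOut = 250e3 sats and
// MIN_SATS_PER_SIDE = 500e3, then routedOutFactor = 1 - exp(-250e3 * PI / 500e3) ≈ 0.79,
// so usedRefFeeRate ≈ trunc(effectiveFeeRate * 0.79 + rateNowOutgoing * 0.21)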
// start calculating rebalance rate
const feeRateUsedForCalc = !INCLUDE_EARNED_FEE_RATE_FOR_REBALANCE
? rateNowOutgoing
: min(rateNowOutgoing, usedRefFeeRate)
// level of emergency decided by highest need of either channel 0-1
const weightRemote = WEIGHT(remoteHeavy)
const weightLocal = WEIGHT(localHeavy)
const levelOfEmergency = max(weightRemote, weightLocal)
// time dependence starts at 0 and ~1 after DAYS_FOR_STATS
const channelsAgeRemote = min(...(remoteHeavy.ids?.map(c => c.channel_age_days || 0) || [0]))
if (DEBUG && !remoteHeavy.ids) console.log('unknown channel ids on remote heavy peer', remoteHeavy)
const timeFactor = 1 - exp((-PI * channelsAgeRemote) / DAYS_FOR_STATS)
// fee via simple subtraction & division from reference
const safeRateBaseline = subtractSafety(feeRateUsedForCalc)
// new remoteHeavy channels can wait to be rebalanced
const safeRateForAge = trunc(timeFactor * feeRateUsedForCalc)
// low levels of emergency will try less hard
// high level of emergency will go as high as subtractSafety allows
// fee via weights from 0.5-1x of reference ppm
const rateBasedOnEmergency = trunc((0.33 + 0.67 * levelOfEmergency) * feeRateUsedForCalc)
// use smallest of 3 rebalance fee rate limits
const safeRate = min(rateBasedOnEmergency, safeRateBaseline, safeRateForAge)
// check against the absolute highest rebalance rate allowed
const maxRebalanceRate = min(safeRate, MAX_FEE_RATE_FOR_REBALANCE)
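// rough illustration with assumed numbers (subtractSafety is defined elsewhere in this file):
//   feeRateUsedForCalc = 400 ppm, levelOfEmergency = 0.8, timeFactor = 0.9
//   rateBasedOnEmergency = trunc((0.33 + 0.67 * 0.8) * 400) = 346
//   safeRateForAge = trunc(0.9 * 400) = 360
//   if subtractSafety(400) returned ~320, then safeRate = min(346, 320, 360) = 320
//   maxRebalanceRate = min(320, 599) = 320 ppm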
// console.log(remoteHeavy.alias, { effectiveFeeRate, rateNowOutgoing, maxRebalanceRate })
// check if rebalance rate is below absolute min fee rate for rebalance allowed or below inbound fee rate
if (maxRebalanceRate < MIN_FEE_RATE_FOR_REBALANCE || maxRebalanceRate < remoteHeavy.inbound_fee_rate) {
remoteHeavyPeers.splice(0, 1) // drop remote-heavy peer from consideration
continue // move onto next peer
}
// add this peer pair to matchups
// run keeps track of n times matchup ran
// done keeps track of done tasks
// started at keeps track of time taken
// results keeps 1+ return values from bos function
matchups.push({
localHeavy,
remoteHeavy,
maxSatsToRebalance,
maxRebalanceRate,
run: 1,
done: false,
startedAt: Date.now(),
results: [],
isGoodPeer,
rateNowOutgoing,
usedRefFeeRate,
effectiveFeeRate,
routedOut,
earnedOut,
routedOutFactor,
channelsAgeRemote,
timeFactor,
weightRemote,
weightLocal,
levelOfEmergency,
rateBasedOnEmergency,
safeRateBaseline,
safeRate,
safeRateForAge
})
// remove these peers from peer lists
localHeavyPeers.splice(localHeavyIndexUsed, 1)
remoteHeavyPeers.splice(0, 1)
// stop if limit reached
if (matchups.length >= MAX_PARALLEL_REBALANCES) break
}
if (VERBOSE) {
console.log(
`${getDate()} ${matchups.length} rebalance matchups from ${nRHP} remote-heavy & ${nLHP} local-heavy peers
sorted with offbalance-weighted randomness of ${WEIGHT}
${dim}weighting factors: wL = local-offbalance, wR = remote-offbalance, wT = aged weight, wO = outflow weight${undim}
${dim}rebalance ppm's considered: eff = effective, safe = max safe, rush = offbalance emergency${undim}
`
)
for (const match of matchups) {
const outOf = ca(match.localHeavy.alias).padStart(30)
const into = ca(match.remoteHeavy.alias).padEnd(30)
const meAtLH = (match.localHeavy.outbound_liquidity / 1e6).toFixed(1).padStart(5) + 'M'
const remAtLH = (match.localHeavy.inbound_liquidity / 1e6).toFixed(1).padStart(5) + 'M'
const meAtRH = (match.remoteHeavy.outbound_liquidity / 1e6).toFixed(1).padStart(5) + 'M'
const remAtRH = (match.remoteHeavy.inbound_liquidity / 1e6).toFixed(1).padStart(5) + 'M'
// show ppm used for routing in channel regularly and not temporary high ppm used on very drained channels as former is used for rebalancing reference
// const myFeeAtLH = `(${match.localHeavy.fee_rate})`.padStart(6)
const myFeeAtLH = `(${getReferenceFee(match.localHeavy)})`.padStart(6)
const remFeeAtLH = `(${match.localHeavy.inbound_fee_rate})`.padEnd(6)
// const myFeeAtRH = `(${match.remoteHeavy.fee_rate})`.padEnd(6)
const myFeeAtRH = `(${getReferenceFee(match.remoteHeavy)})`.padEnd(6)
const remFeeAtRH = `(${match.remoteHeavy.inbound_fee_rate})`.padStart(6)
const factorsUsed = [
`${match.weightLocal.toFixed(1)}wL`,
`${match.weightRemote.toFixed(1)}wR`,
`${match.timeFactor.toFixed(1)}wT`, // `-${match.channelsAgeRemote}d`
`${match.routedOutFactor.toFixed(1)}wO`,
`${match.levelOfEmergency.toFixed(1)}wE`,
`${match.usedRefFeeRate}eff`.padStart(7),
`${match.safeRateBaseline}safe`.padStart(8),
`${match.rateBasedOnEmergency}rush`.padStart(8)
].join(' ')
const isGoodPeer = match.isGoodPeer ? '💚' : ''
console.log(
` 🕺(me) ${dim}${myFeeAtLH} ${meAtLH} [ ||||-> ] ${remAtLH} ${remFeeAtLH}${undim} ${outOf} ${dim}--> ⚡` +
` -->${undim} ${into} ${dim}${remFeeAtRH} ${remAtRH} [ ||||-> ] ${meAtRH} ${myFeeAtRH}${undim} 🕺(me) ` +
`${dim}${factorsUsed}${undim} ${isGoodPeer}`
)
}
console.log('')
}
// to keep track of list of launched rebalancing tasks
const rebalanceTasks = []
// function to launch every rebalance task for a matched pair with
const handleRebalance = async matchedPair => {
const { localHeavy, remoteHeavy, maxSatsToRebalance, maxRebalanceRate, run, startedAt } = matchedPair
const localString = ca(localHeavy.alias).padStart(30)
const remoteString = ca(remoteHeavy.alias).padEnd(30)
const maxRebalanceRateString = ('<' + maxRebalanceRate + ' ppm').padStart(9)
// ONLY_USE_KEYSENDS - always does bos send instead of bos rebalance
// USE_KEYSENDS_AFTER_BALANCE - always does bos send after 1 bos rebalance works
const useRegularRebalance = !(run > 1 && USE_KEYSENDS_AFTER_BALANCE) && !ONLY_USE_KEYSENDS
const maxSatsToRebalanceAfterRules = useRegularRebalance
? fuzzyAmount(min(maxSatsToRebalance, MAX_REBALANCE_SATS))
: fuzzyAmount(min(maxSatsToRebalance, MAX_REBALANCE_SATS_KEYSEND))
// task launch message
console.log(
`${getDate()} Starting ${localString} --> ${remoteString} run #${run}` +
` rebalance @ ${maxRebalanceRateString}, ${pretty(maxSatsToRebalance).padStart(10)} sats left to balance ` +
`${dim}(${useRegularRebalance ? 'via bos rebalance' : 'via bos send'})${undim}`
)
const resBalance = useRegularRebalance
? await bos.rebalance(
{
fromChannel: localHeavy.public_key,
toChannel: remoteHeavy.public_key,
// bos rebalance probes with small # of sats and then increases
// amount up to this value until probe fails
// so then it uses the largest size that worked
maxSats: maxSatsToRebalanceAfterRules,
maxMinutes: MINUTES_FOR_REBALANCE,
maxFeeRate: maxRebalanceRate,
avoid: copy(AVOID_LIST), // avoid these nodes in paths
retryAvoidsOnTimeout: RETRIES_ON_TIMEOUTS_REBALANCE
},
undefined,
// {} // no terminal output, too many things happening
{ details: SHOW_REBALANCE_LOG }
)
: await bos.keysendRebalance(
{
destination: mynode.public_key,
fromChannel: localHeavy.public_key,
toChannel: remoteHeavy.public_key,
// add randomness to amt (downward only)
sats: maxSatsToRebalanceAfterRules,
maxMinutes: MINUTES_FOR_KEYSEND,
maxFeeRate: maxRebalanceRate,
avoid: copy(AVOID_LIST), // avoid these nodes in paths
retryAvoidsOnTimeout: RETRIES_ON_TIMEOUTS_SEND
},
// {} // no terminal output, too many things happening
{ details: SHOW_REBALANCE_LOG }
)
const taskLength = ((Date.now() - startedAt) / minutes).toFixed(1) + ' minutes'
matchedPair.results.push(resBalance)
if (resBalance.failed) {
// fail:
matchedPair.done = true
const tasksDone = matchups.reduce((count, m) => (m.done ? count + 1 : count), 0)
const reason = resBalance.msg[1] // 2nd item in error array from bos
const reasonString = resBalance.ppmSuggested
? `(Reason: needed ${String(resBalance.ppmSuggested).padStart(4)} ppm) `
: `(Reason: ${reason}) `
console.log(
`${getDate()} Stopping ${localString} --> ${remoteString} run #${run} ${maxRebalanceRateString} ` +
`rebalance failed ${reasonString}` +
`${dim}(${tasksDone}/${matchups.length} done after ${taskLength})${undim}`
)
// fails are to be logged only when there's a useful suggested fee rate
if (resBalance.ppmSuggested) {
appendRecord({
peer: remoteHeavy,
newRebalance: {
t: Date.now(),
ppm: resBalance.ppmSuggested,
failed: true,
peer: localHeavy.public_key,
peerAlias: localHeavy.alias,
sats: maxSatsToRebalanceAfterRules
}
})
}
// return matchedPair
} else {
// just in case both fields are missing from the response for some reason, let's stop
if (!resBalance.rebalanced && !resBalance.sent) {
console.error(`${getDate()} shouldn't happen: missing resBalance.rebalanced & resBalance.sent`)
return matchedPair
}
const rebalanced = resBalance.rebalanced ?? resBalance.sent
// success:
matchedPair.maxSatsToRebalance -= rebalanced
matchedPair.run++
appendRecord({
peer: remoteHeavy,
newRebalance: {
t: Date.now(),
ppm: resBalance.fee_rate,
failed: false,
peer: localHeavy.public_key,
peerAlias: localHeavy.alias,
sats: rebalanced
}
})
// more than 1 smiley = huge discount
const discount = floor(maxRebalanceRate / resBalance.fee_rate)
const yays = '🍀'.repeat(min(5, discount))
if (matchedPair.maxSatsToRebalance < MIN_REBALANCE_SATS) {
// successful & stopping - rebalanced "enough" as sats off-balance below minimum
matchedPair.done = true
const tasksDone = matchups.reduce((count, m) => (m.done ? count + 1 : count), 0)
console.log(
`${getDate()} Completed ${localString} --> ${remoteString} at #${run} ${maxRebalanceRateString} ` +
`rebalance done for ${pretty(rebalanced)} sats @ ${resBalance.fee_rate} ppm ${yays}` +
` & completed! 🏆 ${dim}(${tasksDone}/${matchups.length} done after ${taskLength})${undim}`
)
// return matchedPair
} else if (
// if reached max # of rebalances without a discount
(run >= MAX_REBALANCE_REPEATS && discount < 2) ||
// if reached max # of rebalances even with major discount
run >= MAX_REBALANCE_REPEATS_ANY
) {
// successful & stopping - at max repeats without a major discount (realized rate above 1/2 of attempted max fee rate)
matchedPair.done = true
const tasksDone = matchups.reduce((count, m) => (m.done ? count + 1 : count), 0)
console.log(
`${getDate()} Completed ${localString} --> ${remoteString} at #${run} ${maxRebalanceRateString} ` +
`rebalance done for ${pretty(rebalanced)} sats @ ${resBalance.fee_rate} ppm ${yays}` +
` & reached max number of repeats. ${dim}(${tasksDone}/${matchups.length} done after ${taskLength})${undim}`
)
// return matchedPair
} else {
// successful & keep doing rebalances
console.log(
`${getDate()} Updating ${localString} --> ${remoteString} run #${run} ${maxRebalanceRateString} ` +
`rebalance succeeded for ${pretty(rebalanced)} sats @ ${resBalance.fee_rate} ppm ${yays}` +
` & moving onto run #${run + 1} ${dim}(${taskLength})${undim}`
)
// need to rebalance drops off as we get closer to balanced so decreasing risk
matchedPair.maxRebalanceRate = trunc(REPEAT_MAX_RATE_RATIO * matchedPair.maxRebalanceRate) // temporary, ideally eval the scores each pass in future
return await handleRebalance(matchedPair)
}
}
return matchedPair
}
// launch every task near-simultaneously, small stagger between launch
for (const matchedPair of matchups) {
// no await before handleRebalance to not wait
rebalanceTasks.push(handleRebalance(matchedPair))
await sleep(STAGGERED_LAUNCH_MS, { quiet: true })
}
if(rebalanceTasks.length > 0) {
console.log(
`${getDate()}\n\n runBotRebalanceOrganizer(): All ${rebalanceTasks.length} parallel rebalances launched!\n`
)
// now we wait until every rebalance task is done & returns a value
const rebalanceResults = await Promise.all(rebalanceTasks)
rebalanceResults.sort((a, b) => b.run - a.run)
console.log(
`${getDate()} ALL TASKS COMPLETED:\n` +
rebalanceResults