diff --git a/app/cli.js b/app/cli.js index 16702d1fe1e9cfe7693d590512c2f650c60583ec..e0c32c2d429dff54fb5133f2546a4d75e1f16002 100644 --- a/app/cli.js +++ b/app/cli.js @@ -48,10 +48,6 @@ module.exports = () => { .option('-c, --currency <name>', 'Name of the currency managed by this node.') - .option('--nointeractive', 'Disable interactive sync UI') - .option('--nocautious', 'Do not check blocks validity during sync') - .option('--cautious', 'Check blocks validity during sync (overrides --nocautious option)') - .option('--nopeers', 'Do not retrieve peers during sync') .option('--nostdout', 'Disable stdout printing for `export-bc` command') .option('--noshuffle', 'Disable peers shuffling for `sync` command') diff --git a/app/lib/computation/blockProver.js b/app/lib/computation/blockProver.js index 96f17ff433c75240f0583e03bcd67ed4c4a3fcbb..66c320633ee8b8cb35195a0173ad09797ada4686 100644 --- a/app/lib/computation/blockProver.js +++ b/app/lib/computation/blockProver.js @@ -4,7 +4,7 @@ const _ = require('underscore'); const constants = require('../constants'); const engine = require('../pow/engine'); const Block = require('../entity/block'); -const querablep = require('../querablep'); +const querablep = require('querablep'); const POW_FOUND = true; const POW_NOT_FOUND_YET = false; diff --git a/app/lib/constants.js b/app/lib/constants.js index 091b6ef1129aacba45f8d5839ca855537f1c67e0..f9268c12fcb1b1d10343df8abd97b986a1500dde 100644 --- a/app/lib/constants.js +++ b/app/lib/constants.js @@ -222,19 +222,15 @@ module.exports = { MAX_NON_MEMBERS_TO_FORWARD_TO_FOR_SELF_DOCUMENTS: 6, MAX_NON_MEMBERS_TO_FORWARD_TO: 4, MAX_MEMBERS_TO_FORWARD_TO: 6, - COUNT_FOR_ENOUGH_PEERS: 4, MAX_CONCURRENT_POST: 3, DEFAULT_TIMEOUT: 10 * 1000, // 10 seconds - SYNC_LONG_TIMEOUT: 30 * 1000, // 30 seconds SYNC: { MAX: 20 }, STATUS_INTERVAL: { UPDATE: 2, // Every X blocks MAX: 20 // MAX Y blocks - }, - SYNC_PEERS_INTERVAL: 3, // Every 3 block average generation time - TEST_PEERS_INTERVAL: 10 // In seconds + } }, PROOF_OF_WORK: { MINIMAL_TO_SHOW: 2, @@ -260,16 +256,6 @@ module.exports = { ] }, - DURATIONS: { - TEN_SECONDS: 10, - A_MINUTE: 60, - TEN_MINUTES: 600, - AN_HOUR: 3600, - A_DAY: 3600 * 24, - A_WEEK: 3600 * 24 * 7, - A_MONTH: (3600 * 24 * 365.25) / 12 - }, - DEFAULT_CPU: 0.6, DEFAULT_CURRENCY_NAME: "no_currency", @@ -301,15 +287,13 @@ module.exports = { }, BRANCHES: { - DEFAULT_WINDOW_SIZE: 100, - SWITCH_ON_BRANCH_AHEAD_BY_X_MINUTES: 30 + DEFAULT_WINDOW_SIZE: 100 }, INVALIDATE_CORE_CACHE: true, WITH_SIGNATURES_AND_POW: true, NO_FORK_ALLOWED: false, - FORK_ALLOWED: true, SAFE_FACTOR: 3, BLOCKS_COLLECT_THRESHOLD: 30, // Blocks to collect from memory and persist @@ -322,7 +306,6 @@ module.exports = { SANDBOX_SIZE_MEMBERSHIPS: 200, MAXIMUM_LEN_OF_COMPACT_TX: MAXIMUM_LEN_OF_COMPACT_TX, - MAX_NUMBER_OF_PEERS_FOR_PULLING: 4, CURRENT_BLOCK_CACHE_DURATION: 10 * 1000, // 30 seconds CORES_MAXIMUM_USE_IN_PARALLEL: 8, // For more cores, we need to use a better PoW synchronization algorithm @@ -346,10 +329,7 @@ module.exports = { S_INDEX: 'SINDEX', C_INDEX: 'CINDEX', IDX_CREATE: 'CREATE', - IDX_UPDATE: 'UPDATE', - - PULLING_MINIMAL_DELAY: 20, - PULLING_INTERVAL_TARGET: 240 + IDX_UPDATE: 'UPDATE' }; function exact (regexpContent) { diff --git a/app/lib/contacter.js b/app/lib/contacter.js deleted file mode 100644 index fcb34cfdda18f61b1ab3ef9f6d335e3d3c9ebac5..0000000000000000000000000000000000000000 --- a/app/lib/contacter.js +++ /dev/null @@ -1,105 +0,0 @@ -"use strict"; - -const co = require('co'); -const rp = 
require('request-promise'); -const constants = require('./constants'); -const sanitize = require('duniter-bma').duniter.methods.sanitize; -const dtos = require('duniter-bma').duniter.methods.dtos; - -/** - * Created by cgeek on 16/10/16. - */ - -const contacter = module.exports = (host, port, opts) => new Contacter(host, port, opts); - -function Contacter(host, port, opts) { - - opts = opts || {}; - const options = { - timeout: opts.timeout || constants.NETWORK.DEFAULT_TIMEOUT - }; - - this.host = host; - this.port = port; - - this.getSummary = () => get('/node/summary/', dtos.Summary); - this.getLookup = (search) => get('/wot/lookup/', dtos.Lookup, search); - this.getBlock = (number) => get('/blockchain/block/', dtos.Block, number); - this.getCurrent = () => get('/blockchain/current', dtos.Block); - this.getPeer = () => get('/network/peering', dtos.Peer); - this.getPeers = (obj) => get('/network/peering/peers', dtos.MerkleOfPeers, obj); - this.getSources = (pubkey) => get('/tx/sources/', dtos.Sources, pubkey); - this.getBlocks = (count, fromNumber) => get('/blockchain/blocks/', dtos.Blocks, [count, fromNumber].join('/')); - this.postPeer = (peer) => post('/network/peering/peers', dtos.Peer, { peer: peer }); - this.processTransaction = (rawTX) => post('/tx/process', dtos.Transaction, { transaction: rawTX }); - - // We suppose that IPv6 is already wrapped by [], for example 'http://[::1]:80/index.html' - const fullyQualifiedHost = [host, port].join(':'); - - function get(url, dtoContract, param) { - if (typeof param === 'object') { - // Classical URL params (a=1&b=2&...) - param = '?' + Object.keys(param).map((k) => [k, param[k]].join('=')).join('&'); - } - return co(function*() { - try { - const json = yield rp.get({ - url: 'http://' + fullyQualifiedHost + url + (param !== undefined ? 
param : ''), - json: true, - timeout: options.timeout - }); - // Prevent JSON injection - return sanitize(json, dtoContract); - } catch (e) { - throw e.error; - } - }); - } - - function post(url, dtoContract, data) { - return co(function*() { - try { - const json = yield rp.post({ - url: 'http://' + fullyQualifiedHost + url, - body: data, - json: true, - timeout: options.timeout - }); - // Prevent JSON injection - return sanitize(json, dtoContract); - } catch (e) { - throw e.error; - } - }); - } -} - -contacter.statics = {}; - -contacter.statics.quickly = (host, port, opts, callbackPromise) => co(function*() { - const node = contacter(host, port, opts); - return callbackPromise(node); -}); - -contacter.statics.quickly2 = (peer, opts, callbackPromise) => co(function*() { - const Peer = require('./entity/peer'); - const p = Peer.statics.fromJSON(peer); - const node = new Contacter(p.getHostPreferDNS(), p.getPort(), opts); - return callbackPromise(node); -}); - -contacter.statics.fetchPeer = (host, port, opts) => contacter.statics.quickly(host, port, opts, (node) => node.getPeer()); - -contacter.statics.fetchBlock = (number, peer, opts) => contacter.statics.quickly2(peer, opts, (node) => node.getBlock(number)); - -contacter.statics.isReachableFromTheInternet = (peer, opts) => co(function*() { - const Peer = require('./entity/peer'); - const p = Peer.statics.fromJSON(peer); - const node = new Contacter(p.getHostPreferDNS(), p.getPort(), opts); - try { - yield node.getPeer(); - return true; - } catch (e) { - return false; - } -}); diff --git a/app/lib/entity/peer.js b/app/lib/entity/peer.js index b54e4563bd26be4a903a0763ca67a256285e6183..faea69aa9dc9fde2b17e5bccc181e20cc71cb7cf 100644 --- a/app/lib/entity/peer.js +++ b/app/lib/entity/peer.js @@ -1,6 +1,6 @@ "use strict"; const _ = require('underscore'); -const contacter = require('../contacter'); +const contacter = require('duniter-crawler').duniter.methods.contacter; const rawer = require('../ucp/rawer'); const constants = require('../constants'); diff --git a/app/lib/pow/engine.js b/app/lib/pow/engine.js index c2f9416fdec2bdfe0bf17132ecdc713bb659dac8..0bcbb0f3fee49e074961e960632018726d0e2abe 100644 --- a/app/lib/pow/engine.js +++ b/app/lib/pow/engine.js @@ -5,7 +5,7 @@ const path = require('path'); const co = require('co'); const os = require('os'); const nuuid = require('node-uuid'); -const querablep = require('../querablep'); +const querablep = require('querablep'); module.exports = function (opts) { return new PowEngine(opts); diff --git a/app/lib/pow/proof.js b/app/lib/pow/proof.js index 35de280845db216f16366acf16c2486992bc5d4b..026f8243d8d1b6f6771f8fa8e924b287398d4319 100644 --- a/app/lib/pow/proof.js +++ b/app/lib/pow/proof.js @@ -3,7 +3,7 @@ const co = require('co'); const moment = require('moment'); const hashf = require('./../ucp/hashf'); const dos2unix = require('./../system/dos2unix'); -const querablep = require('./../querablep'); +const querablep = require('querablep'); const rules = require('./../rules/index'); const constants = require('./../constants'); const keyring = require('./../crypto/keyring'); diff --git a/app/lib/querablep.js b/app/lib/querablep.js deleted file mode 100644 index e9653c9a7a2016d263e93851fff7075c5a304736..0000000000000000000000000000000000000000 --- a/app/lib/querablep.js +++ /dev/null @@ -1,17 +0,0 @@ -"use strict"; - -module.exports = function makeQuerablePromise(promise) { - - // Don't create a wrapper for promises that can already be queried. 
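// The local querable-promise helper being deleted here is replaced throughout this
// patch by the npm `querablep` package (see package.json below), assumed to be a
// drop-in for this implementation. A minimal usage sketch of the polling API:
const querablep = require('querablep');

const p = querablep(new Promise((resolve) => setTimeout(() => resolve(42), 100)));
console.log(p.isFulfilled()); // false: the underlying promise has not settled yet
p.then((value) => console.log(p.isResolved(), value)); // true 42, once it resolves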
- if (promise.isResolved) return promise; - - var isResolved = false; - var isRejected = false; - - // Observe the promise, saving the fulfillment in a closure scope. - var result = promise.then((v) => { isResolved = true; return v; }, (e) => { isRejected = true; throw e; }); - result.isFulfilled = () => isResolved || isRejected; - result.isResolved = () => isResolved; - result.isRejected = () => isRejected; - return result; -}; diff --git a/app/lib/sync.js b/app/lib/sync.js deleted file mode 100644 index 6180d2b26f7346fdc4d4d6c956e6dbb2c888d0b6..0000000000000000000000000000000000000000 --- a/app/lib/sync.js +++ /dev/null @@ -1,930 +0,0 @@ -"use strict"; -const util = require('util'); -const stream = require('stream'); -const co = require('co'); -const _ = require('underscore'); -const moment = require('moment'); -const contacter = require('./contacter'); -const hashf = require('./ucp/hashf'); -const indexer = require('./dup/indexer'); -const dos2unix = require('./system/dos2unix'); -const logger = require('./logger')('sync'); -const rawer = require('./ucp/rawer'); -const constants = require('../lib/constants'); -const Block = require('../lib/entity/block'); -const Transaction = require('../lib/entity/transaction'); -const Peer = require('../lib/entity/peer'); -const multimeter = require('multimeter'); -const pulling = require('../lib/pulling'); -const makeQuerablePromise = require('../lib/querablep'); - -const CONST_BLOCKS_CHUNK = 250; -const EVAL_REMAINING_INTERVAL = 1000; -const INITIAL_DOWNLOAD_SLOTS = 1; - - -module.exports = Synchroniser; - -function Synchroniser (server, host, port, conf, interactive) { - - const that = this; - - let speed = 0, blocksApplied = 0; - const baseWatcher = interactive ? new MultimeterWatcher() : new LoggerWatcher(); - - // Wrapper to also push event stream - const watcher = { - writeStatus: baseWatcher.writeStatus, - downloadPercent: (pct) => { - if (pct !== undefined && baseWatcher.downloadPercent() < pct) { - that.push({ download: pct }); - } - return baseWatcher.downloadPercent(pct); - }, - appliedPercent: (pct) => { - if (pct !== undefined && baseWatcher.appliedPercent() < pct) { - that.push({ applied: pct }); - } - return baseWatcher.appliedPercent(pct); - }, - end: baseWatcher.end - }; - - stream.Duplex.call(this, { objectMode: true }); - - // Unused, but made mandatory by Duplex interface - this._read = () => null; - this._write = () => null; - - if (interactive) { - logger.mute(); - } - - // Services - const PeeringService = server.PeeringService; - const BlockchainService = server.BlockchainService; - - const contacterOptions = { - timeout: constants.NETWORK.SYNC_LONG_TIMEOUT - }; - - const dal = server.dal; - - const logRemaining = (to) => co(function*() { - const lCurrent = yield dal.getCurrentBlockOrNull(); - const localNumber = lCurrent ? 
lCurrent.number : -1; - - if (to > 1 && speed > 0) { - const remain = (to - (localNumber + 1 + blocksApplied)); - const secondsLeft = remain / speed; - const momDuration = moment.duration(secondsLeft * 1000); - watcher.writeStatus('Remaining ' + momDuration.humanize() + ''); - } - }); - - this.test = (to, chunkLen, askedCautious) => co(function*() { - const peering = yield contacter.statics.fetchPeer(host, port, contacterOptions); - const peer = new Peer(peering); - const node = yield peer.connect(); - return node.getCurrent(); - }); - - this.sync = (to, chunkLen, askedCautious, nopeers, noShufflePeers) => co(function*() { - - try { - - const peering = yield contacter.statics.fetchPeer(host, port, contacterOptions); - - let peer = new Peer(peering); - logger.info("Try with %s %s", peer.getURL(), peer.pubkey.substr(0, 6)); - let node = yield peer.connect(); - node.pubkey = peer.pubkey; - logger.info('Sync started.'); - - const fullSync = !to; - - //============ - // Blockchain headers - //============ - logger.info('Getting remote blockchain info...'); - watcher.writeStatus('Connecting to ' + host + '...'); - const lCurrent = yield dal.getCurrentBlockOrNull(); - const localNumber = lCurrent ? lCurrent.number : -1; - let rCurrent; - if (isNaN(to)) { - rCurrent = yield node.getCurrent(); - } else { - rCurrent = yield node.getBlock(to); - } - to = rCurrent.number; - - //======= - // Peers (just for P2P download) - //======= - let peers = []; - if (!nopeers && (to - localNumber > 1000)) { // P2P download if more than 1000 blocs - watcher.writeStatus('Peers...'); - const merkle = yield dal.merkleForPeers(); - const getPeers = node.getPeers.bind(node); - const json2 = yield getPeers({}); - const rm = new NodesMerkle(json2); - if(rm.root() != merkle.root()){ - const leavesToAdd = []; - const json = yield getPeers({ leaves: true }); - _(json.leaves).forEach((leaf) => { - if(merkle.leaves().indexOf(leaf) == -1){ - leavesToAdd.push(leaf); - } - }); - peers = yield leavesToAdd.map((leaf) => co(function*() { - try { - const json3 = yield getPeers({ "leaf": leaf }); - const jsonEntry = json3.leaf.value; - const endpoint = jsonEntry.endpoints[0]; - watcher.writeStatus('Peer ' + endpoint); - return jsonEntry; - } catch (e) { - logger.warn("Could not get peer of leaf %s, continue...", leaf); - return null; - } - })); - } - else { - watcher.writeStatus('Peers already known'); - } - } - - if (!peers.length) { - peers.push(peer); - } - peers = peers.filter((p) => p); - - //============ - // Blockchain - //============ - logger.info('Downloading Blockchain...'); - - // We use cautious mode if it is asked, or not particulary asked but blockchain has been started - const cautious = (askedCautious === true || localNumber >= 0); - const shuffledPeers = noShufflePeers ? 
peers : _.shuffle(peers); - const downloader = new P2PDownloader(localNumber, to, rCurrent.hash, shuffledPeers, watcher); - - downloader.start(); - - let lastPullBlock = null; - - let bindex = []; - let iindex = []; - let mindex = []; - let cindex = []; - let sindex = []; - let currConf = {}; - let bindexSize = 0; - let allBlocks = []; - - let dao = pulling.abstractDao({ - - // Get the local blockchain current block - localCurrent: () => co(function*() { - if (cautious) { - return yield dal.getCurrentBlockOrNull(); - } else { - if (lCurrent && !lastPullBlock) { - lastPullBlock = lCurrent; - } - return lastPullBlock; - } - }), - - // Get the remote blockchain (bc) current block - remoteCurrent: () => Promise.resolve(rCurrent), - - // Get the remote peers to be pulled - remotePeers: () => co(function*() { - return [node]; - }), - - // Get block of given peer with given block number - getLocalBlock: (number) => dal.getBlock(number), - - // Get block of given peer with given block number - getRemoteBlock: (thePeer, number) => co(function *() { - let block = null; - try { - block = yield node.getBlock(number); - Transaction.statics.cleanSignatories(block.transactions); - } catch (e) { - if (e.httpCode != 404) { - throw e; - } - } - return block; - }), - - downloadBlocks: (thePeer, number) => co(function *() { - // Note: we don't care about the particular peer asked by the method. We use the network instead. - const numberOffseted = number - (localNumber + 1); - const targetChunk = Math.floor(numberOffseted / CONST_BLOCKS_CHUNK); - // Return the download promise! Simple. - return downloader.getChunk(targetChunk); - }), - - - applyBranch: (blocks) => co(function *() { - if (cautious) { - for (const block of blocks) { - if (block.number == 0) { - yield BlockchainService.saveParametersForRootBlock(block); - currConf = Block.statics.getConf(block); - } - yield dao.applyMainBranch(block); - } - } else { - const ctx = BlockchainService.getContext(); - let blocksToSave = []; - - for (const block of blocks) { - allBlocks.push(block); - - if (block.number == 0) { - currConf = Block.statics.getConf(block); - } - - if (block.number != to) { - blocksToSave.push(block); - const index = indexer.localIndex(block, currConf); - const local_iindex = indexer.iindex(index); - const local_cindex = indexer.cindex(index); - iindex = iindex.concat(local_iindex); - cindex = cindex.concat(local_cindex); - sindex = sindex.concat(indexer.sindex(index)); - mindex = mindex.concat(indexer.mindex(index)); - const HEAD = yield indexer.quickCompleteGlobalScope(block, currConf, bindex, iindex, mindex, cindex, sindex, { - getBlock: (number) => { - return Promise.resolve(allBlocks[number - 1]); - }, - getBlockByBlockstamp: (blockstamp) => { - return Promise.resolve(allBlocks[parseInt(blockstamp) - 1]); - } - }); - bindex.push(HEAD); - - yield ctx.createNewcomers(local_iindex); - - if (block.dividend - || block.joiners.length - || block.actives.length - || block.revoked.length - || block.excluded.length - || block.certifications.length) { - // Flush the INDEX (not bindex, which is particular) - yield dal.mindexDAL.insertBatch(mindex); - yield dal.iindexDAL.insertBatch(iindex); - yield dal.sindexDAL.insertBatch(sindex); - yield dal.cindexDAL.insertBatch(cindex); - mindex = []; - iindex = []; - cindex = []; - sindex = yield indexer.ruleIndexGenDividend(HEAD, dal); - sindex = sindex.concat(yield indexer.ruleIndexGarbageSmallAccounts(HEAD, sindex, dal)); - - // Create/Update nodes in wotb - yield ctx.updateMembers(block); - - // --> 
Update links - yield dal.updateWotbLinks(local_cindex); - } - - // Trim the bindex - bindexSize = [ - block.issuersCount, - block.issuersFrame, - conf.medianTimeBlocks, - conf.dtDiffEval, - CONST_BLOCKS_CHUNK - ].reduce((max, value) => { - return Math.max(max, value); - }, 0); - - if (bindexSize && bindex.length >= 2 * bindexSize) { - // We trim it, not necessary to store it all (we already store the full blocks) - bindex.splice(0, bindexSize); - - // Process triming continuously to avoid super long ending of sync - yield dal.trimIndexes(bindex[0].number); - } - } else { - - if (blocksToSave.length) { - yield server.BlockchainService.saveBlocksInMainBranch(blocksToSave); - } - blocksToSave = []; - - // Save the INDEX - yield dal.bindexDAL.insertBatch(bindex); - yield dal.mindexDAL.insertBatch(mindex); - yield dal.iindexDAL.insertBatch(iindex); - yield dal.sindexDAL.insertBatch(sindex); - yield dal.cindexDAL.insertBatch(cindex); - - // Last block: cautious mode to trigger all the INDEX expiry mechanisms - yield dao.applyMainBranch(block); - } - } - if (blocksToSave.length) { - yield server.BlockchainService.saveBlocksInMainBranch(blocksToSave); - } - } - lastPullBlock = blocks[blocks.length - 1]; - watcher.appliedPercent(Math.floor(blocks[blocks.length - 1].number / to * 100)); - return true; - }), - - applyMainBranch: (block) => co(function *() { - const addedBlock = yield server.BlockchainService.submitBlock(block, true, constants.FORK_ALLOWED); - server.streamPush(addedBlock); - watcher.appliedPercent(Math.floor(block.number / to * 100)); - }), - - // Eventually remove forks later on - removeForks: () => co(function*() {}), - - // Tells wether given peer is a member peer - isMemberPeer: (thePeer) => co(function *() { - let idty = yield dal.getWrittenIdtyByPubkey(thePeer.pubkey); - return (idty && idty.member) || false; - }) - }); - - const logInterval = setInterval(() => logRemaining(to), EVAL_REMAINING_INTERVAL); - yield pulling.pull(conf, dao); - - // Finished blocks - watcher.downloadPercent(100.0); - watcher.appliedPercent(100.0); - - if (logInterval) { - clearInterval(logInterval); - } - - // Save currency parameters given by root block - const rootBlock = yield server.dal.getBlock(0); - yield BlockchainService.saveParametersForRootBlock(rootBlock); - server.dal.blockDAL.cleanCache(); - - //======= - // Peers - //======= - if (!nopeers && fullSync) { - watcher.writeStatus('Peers...'); - yield syncPeer(node); - const merkle = yield dal.merkleForPeers(); - const getPeers = node.getPeers.bind(node); - const json2 = yield getPeers({}); - const rm = new NodesMerkle(json2); - if(rm.root() != merkle.root()){ - const leavesToAdd = []; - const json = yield getPeers({ leaves: true }); - _(json.leaves).forEach((leaf) => { - if(merkle.leaves().indexOf(leaf) == -1){ - leavesToAdd.push(leaf); - } - }); - for (const leaf of leavesToAdd) { - try { - const json3 = yield getPeers({ "leaf": leaf }); - const jsonEntry = json3.leaf.value; - const sign = json3.leaf.value.signature; - const entry = {}; - ["version", "currency", "pubkey", "endpoints", "block"].forEach((key) => { - entry[key] = jsonEntry[key]; - }); - entry.signature = sign; - watcher.writeStatus('Peer ' + entry.pubkey); - yield PeeringService.submitP(entry, false, to === undefined); - } catch (e) { - logger.warn(e); - } - } - } - else { - watcher.writeStatus('Peers already known'); - } - } - - watcher.end(); - that.push({ sync: true }); - logger.info('Sync finished.'); - } catch (err) { - that.push({ sync: false, msg: err }); - err && 
watcher.writeStatus(err.message || (err.uerr && err.uerr.message) || String(err)); - watcher.end(); - throw err; - } - }); - - //============ - // Peer - //============ - function syncPeer (node) { - - // Global sync vars - const remotePeer = new Peer({}); - let remoteJsonPeer = {}; - - return co(function *() { - const json = yield node.getPeer(); - remotePeer.copyValuesFrom(json); - const entry = remotePeer.getRaw(); - const signature = dos2unix(remotePeer.signature); - // Parameters - if(!(entry && signature)){ - throw 'Requires a peering entry + signature'; - } - - remoteJsonPeer = json; - remoteJsonPeer.pubkey = json.pubkey; - let signatureOK = PeeringService.checkPeerSignature(remoteJsonPeer); - if (!signatureOK) { - watcher.writeStatus('Wrong signature for peer #' + remoteJsonPeer.pubkey); - } - try { - yield PeeringService.submitP(remoteJsonPeer); - } catch (err) { - if (err.indexOf !== undefined && err.indexOf(constants.ERRORS.NEWER_PEER_DOCUMENT_AVAILABLE.uerr.message) !== -1 && err != constants.ERROR.PEER.UNKNOWN_REFERENCE_BLOCK) { - throw err; - } - } - }); - } -} - -function NodesMerkle (json) { - - const that = this; - ["depth", "nodesCount", "leavesCount"].forEach(function (key) { - that[key] = json[key]; - }); - - this.merkleRoot = json.root; - - // var i = 0; - // this.levels = []; - // while(json && json.levels[i]){ - // this.levels.push(json.levels[i]); - // i++; - // } - - this.root = function () { - return this.merkleRoot; - }; -} - -function MultimeterWatcher() { - - const multi = multimeter(process); - const charm = multi.charm; - charm.on('^C', process.exit); - charm.reset(); - - multi.write('Progress:\n\n'); - - multi.write("Download: \n"); - const downloadBar = multi("Download: \n".length, 3, { - width : 20, - solid : { - text : '|', - foreground : 'white', - background : 'blue' - }, - empty : { text : ' ' } - }); - - multi.write("Apply: \n"); - const appliedBar = multi("Apply: \n".length, 4, { - width : 20, - solid : { - text : '|', - foreground : 'white', - background : 'blue' - }, - empty : { text : ' ' } - }); - - multi.write('\nStatus: '); - - let xPos, yPos; - charm.position( (x, y) => { - xPos = x; - yPos = y; - }); - - const writtens = []; - this.writeStatus = (str) => { - writtens.push(str); - //require('fs').writeFileSync('writtens.json', JSON.stringify(writtens)); - charm - .position(xPos, yPos) - .erase('end') - .write(str) - ; - }; - - this.downloadPercent = (pct) => downloadBar.percent(pct); - - this.appliedPercent = (pct) => appliedBar.percent(pct); - - this.end = () => { - multi.write('\nAll done.\n'); - multi.destroy(); - }; - - downloadBar.percent(0); - appliedBar.percent(0); -} - -function LoggerWatcher() { - - let downPct = 0, appliedPct = 0, lastMsg; - - this.showProgress = () => logger.info('Downloaded %s%, Applied %s%', downPct, appliedPct); - - this.writeStatus = (str) => { - if (str != lastMsg) { - lastMsg = str; - logger.info(str); - } - }; - - this.downloadPercent = (pct) => { - if (pct !== undefined) { - let changed = pct > downPct; - downPct = pct; - if (changed) this.showProgress(); - } - return downPct; - }; - - this.appliedPercent = (pct) => { - if (pct !== undefined) { - let changed = pct > appliedPct; - appliedPct = pct; - if (changed) this.showProgress(); - } - return appliedPct; - }; - - this.end = () => { - }; - -} - -function P2PDownloader(localNumber, to, toHash, peers, watcher) { - - const that = this; - const PARALLEL_PER_CHUNK = 1; - const MAX_DELAY_PER_DOWNLOAD = 15000; - const NO_NODES_AVAILABLE = "No node available for 
download"; - const TOO_LONG_TIME_DOWNLOAD = "No answer after " + MAX_DELAY_PER_DOWNLOAD + "ms, will retry download later."; - const nbBlocksToDownload = Math.max(0, to - localNumber); - const numberOfChunksToDownload = Math.ceil(nbBlocksToDownload / CONST_BLOCKS_CHUNK); - const chunks = Array.from({ length: numberOfChunksToDownload }).map(() => null); - const processing = Array.from({ length: numberOfChunksToDownload }).map(() => false); - const handler = Array.from({ length: numberOfChunksToDownload }).map(() => null); - const resultsDeferers = Array.from({ length: numberOfChunksToDownload }).map(() => null); - const resultsData = Array.from({ length: numberOfChunksToDownload }).map((unused, index) => new Promise((resolve, reject) => { - resultsDeferers[index] = { resolve, reject }; - })); - - // Create slots of download, in a ready stage - let downloadSlots = Math.min(INITIAL_DOWNLOAD_SLOTS, peers.length); - - let nodes = {}; - - let nbDownloadsTried = 0, nbDownloading = 0; - let lastAvgDelay = MAX_DELAY_PER_DOWNLOAD; - let aSlotWasAdded = false; - - /** - * Get a list of P2P nodes to use for download. - * If a node is not yet correctly initialized (we can test a node before considering it good for downloading), then - * this method would not return it. - */ - const getP2Pcandidates = () => co(function*() { - let promises = peers.reduce((chosens, other, index) => { - if (!nodes[index]) { - // Create the node - let p = new Peer(peers[index]); - nodes[index] = makeQuerablePromise(co(function*() { - // We wait for the download process to be triggered - // yield downloadStarter; - // if (nodes[index - 1]) { - // try { yield nodes[index - 1]; } catch (e) {} - // } - const node = yield p.connect(); - // We initialize nodes with the near worth possible notation - node.tta = 1; - node.nbSuccess = 0; - return node; - })); - chosens.push(nodes[index]); - } else { - chosens.push(nodes[index]); - } - // Continue - return chosens; - }, []); - let candidates = yield promises; - candidates.forEach((c) => { - c.tta = c.tta || 0; // By default we say a node is super slow to answer - c.ttas = c.ttas || []; // Memorize the answer delays - }); - if (candidates.length === 0) { - throw NO_NODES_AVAILABLE; - } - // We remove the nodes impossible to reach (timeout) - let withGoodDelays = _.filter(candidates, (c) => c.tta <= MAX_DELAY_PER_DOWNLOAD); - if (withGoodDelays.length === 0) { - // No node can be reached, we can try to lower the number of nodes on which we download - downloadSlots = Math.floor(downloadSlots / 2); - // We reinitialize the nodes - nodes = {}; - // And try it all again - return getP2Pcandidates(); - } - const parallelMax = Math.min(PARALLEL_PER_CHUNK, withGoodDelays.length); - withGoodDelays = _.sortBy(withGoodDelays, (c) => c.tta); - withGoodDelays = withGoodDelays.slice(0, parallelMax); - // We temporarily augment the tta to avoid asking several times to the same node in parallel - withGoodDelays.forEach((c) => c.tta = MAX_DELAY_PER_DOWNLOAD); - return withGoodDelays; - }); - - /** - * Download a chunk of blocks using P2P network through BMA API. - * @param from The starting block to download - * @param count The number of blocks to download. 
- * @param chunkIndex The # of the chunk in local algorithm (logging purposes only) - */ - const p2pDownload = (from, count, chunkIndex) => co(function*() { - let candidates = yield getP2Pcandidates(); - // Book the nodes - return yield raceOrCancelIfTimeout(MAX_DELAY_PER_DOWNLOAD, candidates.map((node) => co(function*() { - try { - const start = Date.now(); - handler[chunkIndex] = node; - node.downloading = true; - nbDownloading++; - watcher.writeStatus('Getting chunck #' + chunkIndex + '/' + (numberOfChunksToDownload - 1) + ' from ' + from + ' to ' + (from + count - 1) + ' on peer ' + [node.host, node.port].join(':')); - let blocks = yield node.getBlocks(count, from); - node.ttas.push(Date.now() - start); - // Only keep a flow of 5 ttas for the node - if (node.ttas.length > 5) node.ttas.shift(); - // Average time to answer - node.tta = Math.round(node.ttas.reduce((sum, tta) => sum + tta, 0) / node.ttas.length); - watcher.writeStatus('GOT chunck #' + chunkIndex + '/' + (numberOfChunksToDownload - 1) + ' from ' + from + ' to ' + (from + count - 1) + ' on peer ' + [node.host, node.port].join(':')); - node.nbSuccess++; - - // Opening/Closing slots depending on the Interne connection - if (slots.length == downloadSlots) { - const peers = yield Object.values(nodes); - const downloading = _.filter(peers, (p) => p.downloading && p.ttas.length); - const currentAvgDelay = downloading.reduce((sum, c) => { - const tta = Math.round(c.ttas.reduce((sum, tta) => sum + tta, 0) / c.ttas.length); - return sum + tta; - }, 0) / downloading.length; - // Check the impact of an added node (not first time) - if (!aSlotWasAdded) { - // We try to add a node - const newValue = Math.min(peers.length, downloadSlots + 1); - if (newValue !== downloadSlots) { - downloadSlots = newValue; - aSlotWasAdded = true; - logger.info('AUGMENTED DOWNLOAD SLOTS! Now has %s slots', downloadSlots); - } - } else { - aSlotWasAdded = false; - const decelerationPercent = currentAvgDelay / lastAvgDelay - 1; - const addedNodePercent = 1 / nbDownloading; - logger.info('Deceleration = %s (%s/%s), AddedNodePercent = %s', decelerationPercent, currentAvgDelay, lastAvgDelay, addedNodePercent); - if (decelerationPercent > addedNodePercent) { - downloadSlots = Math.max(1, downloadSlots - 1); // We reduce the number of slots, but we keep at least 1 slot - logger.info('REDUCED DOWNLOAD SLOT! Now has %s slots', downloadSlots); - } - } - lastAvgDelay = currentAvgDelay; - } - - nbDownloadsTried++; - nbDownloading--; - node.downloading = false; - - return blocks; - } catch (e) { - nbDownloading--; - node.downloading = false; - nbDownloadsTried++; - node.ttas.push(MAX_DELAY_PER_DOWNLOAD + 1); // No more ask on this node - // Average time to answer - node.tta = Math.round(node.ttas.reduce((sum, tta) => sum + tta, 0) / node.ttas.length); - throw e; - } - }))); - }); - - /** - * Function for downloading a chunk by its number. - * @param index Number of the chunk. 
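// A worked example of this downloader's chunk arithmetic, using hypothetical values
// (localNumber = -1 for an empty local chain, a remote head at to = 620, and the
// CONST_BLOCKS_CHUNK = 250 defined at the top of this file):
const CONST_BLOCKS_CHUNK = 250;
const localNumber = -1;
const to = 620;
const nbBlocksToDownload = Math.max(0, to - localNumber); // 621 blocks: #0 .. #620
const numberOfChunks = Math.ceil(nbBlocksToDownload / CONST_BLOCKS_CHUNK); // 3 chunks
// chunk #0: from = 0, count = 250; chunk #1: from = 250, count = 250;
// chunk #2 (last): from = 500, count = 621 % 250 = 121 remaining blocks.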
- */ - const downloadChunk = (index) => co(function*() { - // The algorithm to download a chunk - const from = localNumber + 1 + index * CONST_BLOCKS_CHUNK; - let count = CONST_BLOCKS_CHUNK; - if (index == numberOfChunksToDownload - 1) { - count = nbBlocksToDownload % CONST_BLOCKS_CHUNK || CONST_BLOCKS_CHUNK; - } - try { - return yield p2pDownload(from, count, index); - } catch (e) { - logger.error(e); - return downloadChunk(index); - } - }); - - const slots = []; - const downloads = {}; - - /** - * Utility function that starts a race between promises but cancels it if no answer is found before `timeout` - * @param timeout - * @param races - * @returns {Promise} - */ - const raceOrCancelIfTimeout = (timeout, races) => { - return Promise.race([ - // Process the race, but cancel it if we don't get an anwser quickly enough - new Promise((resolve, reject) => { - setTimeout(() => { - reject(TOO_LONG_TIME_DOWNLOAD); - }, MAX_DELAY_PER_DOWNLOAD); - }) - ].concat(races)); - }; - - /** - * Triggers for starting the download. - */ - let startResolver; - const downloadStarter = new Promise((resolve) => startResolver = resolve); - - const chainsCorrectly = (blocks, index) => co(function*() { - - if (!blocks.length) { - logger.error('No block was downloaded'); - return false; - } - - for (let i = blocks.length - 1; i > 0; i--) { - if (blocks[i].number !== blocks[i - 1].number + 1 || blocks[i].previousHash !== blocks[i - 1].hash) { - logger.error("Blocks do not chaing correctly", blocks[i].number); - return false; - } - if (blocks[i].version != blocks[i - 1].version && blocks[i].version != blocks[i - 1].version + 1) { - logger.error("Version cannot be downgraded", blocks[i].number); - return false; - } - } - - // Check hashes - for (let i = 0; i < blocks.length; i++) { - // Note: the hash, in Duniter, is made only on the **signing part** of the block: InnerHash + Nonce - if (blocks[i].version >= 6) { - for (const tx of blocks[i].transactions) { - tx.version = constants.TRANSACTION_VERSION; - } - } - if (blocks[i].inner_hash !== hashf(rawer.getBlockInnerPart(blocks[i])).toUpperCase()) { - logger.error("Inner hash of block#%s from %s does not match", blocks[i].number); - return false; - } - if (blocks[i].hash !== hashf(rawer.getBlockInnerHashAndNonceWithSignature(blocks[i])).toUpperCase()) { - logger.error("Hash of block#%s from %s does not match", blocks[i].number); - return false; - } - } - - const lastBlockOfChunk = blocks[blocks.length - 1]; - if (lastBlockOfChunk.number == to && lastBlockOfChunk.hash != toHash) { - // Top chunk - logger.error('Top block is not on the right chain'); - return false; - } else { - // Chaining between downloads - const previousChunk = yield that.getChunk(index + 1); - const blockN = blocks[blocks.length - 1]; // The block n - const blockNp1 = previousChunk[0]; // The block n + 1 - if (blockN && blockNp1 && (blockN.number + 1 !== blockNp1.number || blockN.hash != blockNp1.previousHash)) { - logger.error('Chunk is not referenced by the upper one'); - return false; - } - } - return true; - }); - - /** - * Download worker - * @type {*|Promise} When finished. 
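// The download worker below keeps several chunk downloads in flight and polls them
// with querable promises rather than awaiting any single one. The bare pattern,
// reduced to dummy timer tasks (assuming the npm `querablep` package this patch uses):
const querablep = require('querablep');

const tasks = [1, 2, 3].map((n) => querablep(new Promise((resolve) => setTimeout(resolve, n * 10))));
const timer = setInterval(() => {
  const done = tasks.filter((t) => t.isFulfilled()).length; // synchronous state check
  console.log('%s/%s tasks settled', done, tasks.length);
  if (done === tasks.length) clearInterval(timer);
}, 5);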
- */ - co(function*() { - try { - yield downloadStarter; - let doneCount = 0, resolvedCount = 0; - while (resolvedCount < chunks.length) { - doneCount = 0; - resolvedCount = 0; - // Add as much possible downloads as possible, and count the already done ones - for (let i = chunks.length - 1; i >= 0; i--) { - if (chunks[i] === null && !processing[i] && slots.indexOf(i) === -1 && slots.length < downloadSlots) { - slots.push(i); - processing[i] = true; - downloads[i] = makeQuerablePromise(downloadChunk(i)); // Starts a new download - } else if (downloads[i] && downloads[i].isFulfilled() && processing[i]) { - doneCount++; - } - // We count the number of perfectly downloaded & validated chunks - if (chunks[i]) { - resolvedCount++; - } - } - watcher.downloadPercent(Math.round(doneCount / numberOfChunksToDownload * 100)); - let races = slots.map((i) => downloads[i]); - if (races.length) { - try { - yield raceOrCancelIfTimeout(MAX_DELAY_PER_DOWNLOAD, races); - } catch (e) { - logger.warn(e); - } - for (let i = 0; i < slots.length; i++) { - // We must know the index of what resolved/rejected to free the slot - const doneIndex = slots.reduce((found, realIndex, index) => { - if (found !== null) return found; - if (downloads[realIndex].isFulfilled()) return index; - return null; - }, null); - if (doneIndex !== null) { - const realIndex = slots[doneIndex]; - if (downloads[realIndex].isResolved()) { - co(function*() { - const blocks = yield downloads[realIndex]; - if (realIndex < chunks.length - 1) { - // We must wait for NEXT blocks to be STRONGLY validated before going any further, otherwise we - // could be on the wrong chain - yield that.getChunk(realIndex + 1); - } - const chainsWell = yield chainsCorrectly(blocks, realIndex); - if (chainsWell) { - // Chunk is COMPLETE - logger.warn("Chunk #%s is COMPLETE from %s", realIndex, [handler[realIndex].host, handler[realIndex].port].join(':')); - chunks[realIndex] = blocks; - resultsDeferers[realIndex].resolve(chunks[realIndex]); - } else { - logger.warn("Chunk #%s DOES NOT CHAIN CORRECTLY from %s", realIndex, [handler[realIndex].host, handler[realIndex].port].join(':')); - // Penality on this node to avoid its usage - handler[realIndex].tta += MAX_DELAY_PER_DOWNLOAD; - // Need a retry - processing[realIndex] = false; - } - }); - } else { - processing[realIndex] = false; // Need a retry - } - slots.splice(doneIndex, 1); - } - } - } - // Wait a bit - yield new Promise((resolve, reject) => setTimeout(resolve, 10)); - } - } catch (e) { - logger.error('Fatal error in the downloader:'); - logger.error(e); - } - }); - - /** - * PUBLIC API - */ - - /*** - * Triggers the downloading - */ - this.start = () => startResolver(); - - /*** - * Promises a chunk to be downloaded and returned - * @param index The number of the chunk to download & return - */ - this.getChunk = (index) => resultsData[index] || Promise.resolve([]); -} - -util.inherits(Synchroniser, stream.Duplex); diff --git a/app/modules/crawler.js b/app/modules/crawler.js deleted file mode 100644 index 463ccefc38b0eebe697902fc0577d187e92328ca..0000000000000000000000000000000000000000 --- a/app/modules/crawler.js +++ /dev/null @@ -1,440 +0,0 @@ -"use strict"; - -const _ = require('underscore'); -const Q = require('q'); -const co = require('co'); -const async = require('async'); -const constants = require('../lib/constants'); -const Peer = require('../lib/entity/peer'); -const querablep = require('../lib/querablep'); -const pulling = require('../lib/pulling'); -const Transaction = 
require('../lib/entity/transaction'); - -module.exports = { - duniter: { - service: { - neutral: (server, conf, logger) => new Crawler(server, conf, logger) - }, - - methods: { - - pullBlocks: (server, pubkey) => co(function*() { - const crawler = new Crawler(server, server.conf, server.logger); - return crawler.pullBlocks(server, pubkey); - }) - } - } -} - -/** - * Service which triggers the server's peering generation (actualization of the Peer document). - * @constructor - */ -function Crawler(server, conf, logger) { - - const peerCrawler = new PeerCrawler(server, conf, logger); - const peerTester = new PeerTester(server, conf, logger); - const blockCrawler = new BlockCrawler(server, logger); - - this.pullBlocks = blockCrawler.pullBlocks; - - this.startService = () => co(function*() { - return yield [ - peerCrawler.startService(), - peerTester.startService(), - blockCrawler.startService() - ]; - }); - - this.stopService = () => co(function*() { - return yield [ - peerCrawler.stopService(), - peerTester.stopService(), - blockCrawler.stopService() - ]; - }); -} - -function PeerCrawler(server, conf, logger) { - - const DONT_IF_MORE_THAN_FOUR_PEERS = true; - - let crawlPeersInterval = null; - - const crawlPeersFifo = async.queue((task, callback) => task(callback), 1); - - this.startService = () => co(function*() { - if (crawlPeersInterval) - clearInterval(crawlPeersInterval); - crawlPeersInterval = setInterval(() => crawlPeersFifo.push(() => crawlPeers(server, conf)), 1000 * conf.avgGenTime * constants.NETWORK.SYNC_PEERS_INTERVAL); - yield crawlPeers(server, conf, DONT_IF_MORE_THAN_FOUR_PEERS); - }); - - this.stopService = () => co(function*() { - crawlPeersFifo.kill(); - clearInterval(crawlPeersInterval); - }); - - const crawlPeers = (server, conf, dontCrawlIfEnoughPeers) => { - logger.info('Crawling the network...'); - return co(function *() { - const peers = yield server.dal.listAllPeersWithStatusNewUPWithtout(conf.pair.pub); - if (peers.length > constants.NETWORK.COUNT_FOR_ENOUGH_PEERS && dontCrawlIfEnoughPeers == DONT_IF_MORE_THAN_FOUR_PEERS) { - return; - } - let peersToTest = peers.slice().map((p) => Peer.statics.peerize(p)); - let tested = []; - const found = []; - while (peersToTest.length > 0) { - const results = yield peersToTest.map((p) => crawlPeer(server, p)); - tested = tested.concat(peersToTest.map((p) => p.pubkey)); - // End loop condition - peersToTest.splice(0); - // Eventually continue the loop - for (let i = 0, len = results.length; i < len; i++) { - const res = results[i]; - for (let j = 0, len2 = res.length; j < len2; j++) { - try { - const subpeer = res[j].leaf.value; - if (subpeer.currency && tested.indexOf(subpeer.pubkey) === -1) { - const p = Peer.statics.peerize(subpeer); - peersToTest.push(p); - found.push(p); - } - } catch (e) { - logger.warn('Invalid peer %s', res[j]); - } - } - } - // Make unique list - peersToTest = _.uniq(peersToTest, false, (p) => p.pubkey); - } - logger.info('Crawling done.'); - for (let i = 0, len = found.length; i < len; i++) { - let p = found[i]; - try { - // Try to write it - p.documentType = 'peer'; - yield server.singleWritePromise(p); - } catch(e) { - // Silent error - } - } - }); - }; - - const crawlPeer = (server, aPeer) => co(function *() { - let subpeers = []; - try { - logger.debug('Crawling peers of %s %s', aPeer.pubkey.substr(0, 6), aPeer.getNamedURL()); - const node = yield aPeer.connect(); - yield checkPeerValidity(server, aPeer, node); - //let remotePeer = yield Q.nbind(node.network.peering.get)(); - const json = yield 
node.getPeers.bind(node)({ leaves: true }); - for (let i = 0, len = json.leaves.length; i < len; i++) { - let leaf = json.leaves[i]; - let subpeer = yield node.getPeers.bind(node)({ leaf: leaf }); - subpeers.push(subpeer); - } - return subpeers; - } catch (e) { - return subpeers; - } - }); -} - -function PeerTester(server, conf, logger) { - - const FIRST_CALL = true; - - const testPeerFifo = async.queue((task, callback) => task(callback), 1); - let testPeerFifoInterval = null; - - this.startService = () => co(function*() { - if (testPeerFifoInterval) - clearInterval(testPeerFifoInterval); - testPeerFifoInterval = setInterval(() => testPeerFifo.push(testPeers.bind(null, server, conf, !FIRST_CALL)), 1000 * constants.NETWORK.TEST_PEERS_INTERVAL); - yield testPeers(server, conf, FIRST_CALL); - }); - - this.stopService = () => co(function*() { - clearInterval(testPeerFifoInterval); - testPeerFifo.kill(); - }); - - const testPeers = (server, conf, displayDelays) => co(function *() { - let peers = yield server.dal.listAllPeers(); - let now = (new Date().getTime()); - peers = _.filter(peers, (p) => p.pubkey != conf.pair.pub); - for (let i = 0, len = peers.length; i < len; i++) { - let p = new Peer(peers[i]); - if (p.status == 'DOWN') { - let shouldDisplayDelays = displayDelays; - let downAt = p.first_down || now; - let waitRemaining = getWaitRemaining(now, downAt, p.last_try); - let nextWaitRemaining = getWaitRemaining(now, downAt, now); - let testIt = waitRemaining <= 0; - if (testIt) { - // We try to reconnect only with peers marked as DOWN - try { - logger.trace('Checking if node %s is UP... (%s:%s) ', p.pubkey.substr(0, 6), p.getHostPreferDNS(), p.getPort()); - // We register the try anyway - yield server.dal.setPeerDown(p.pubkey); - // Now we test - let node = yield p.connect(); - let peering = yield node.getPeer(); - yield checkPeerValidity(server, p, node); - // The node answered, it is no more DOWN! 
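// A peer document's `block` field is a blockstamp of the form "NUMBER-HASH"; the code
// below splits it to detect whether the peering record has changed. In isolation
// (the blockstamp value is made up for illustration):
const blockstamp = '3632-00000E2B1BCE56BE52D54A6EF1E2EB9CFBBC0D1E972070E2B9D466D3D60EEFA7';
const [blockNumber, blockHash] = blockstamp.split('-');
console.log(blockNumber); // '3632'
console.log(blockHash);   // the 64-hex-character block hash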
- logger.info('Node %s (%s:%s) is UP!', p.pubkey.substr(0, 6), p.getHostPreferDNS(), p.getPort()); - yield server.dal.setPeerUP(p.pubkey); - // We try to forward its peering entry - let sp1 = peering.block.split('-'); - let currentBlockNumber = sp1[0]; - let currentBlockHash = sp1[1]; - let sp2 = peering.block.split('-'); - let blockNumber = sp2[0]; - let blockHash = sp2[1]; - if (!(currentBlockNumber == blockNumber && currentBlockHash == blockHash)) { - // The peering changed - yield server.PeeringService.submitP(peering); - } - // Do not need to display when next check will occur: the node is now UP - shouldDisplayDelays = false; - } catch (err) { - // Error: we set the peer as DOWN - logger.trace("Peer %s is DOWN (%s)", p.pubkey, (err.httpCode && 'HTTP ' + err.httpCode) || err.code || err.message || err); - yield server.dal.setPeerDown(p.pubkey); - shouldDisplayDelays = true; - } - } - if (shouldDisplayDelays) { - logger.debug('Will check that node %s (%s:%s) is UP in %s min...', p.pubkey.substr(0, 6), p.getHostPreferDNS(), p.getPort(), (nextWaitRemaining / 60).toFixed(0)); - } - } - } - }); - - function getWaitRemaining(now, downAt, last_try) { - let downDelay = Math.floor((now - downAt) / 1000); - let waitedSinceLastTest = Math.floor((now - (last_try || now)) / 1000); - let waitRemaining = 1; - if (downDelay <= constants.DURATIONS.A_MINUTE) { - waitRemaining = constants.DURATIONS.TEN_SECONDS - waitedSinceLastTest; - } - else if (downDelay <= constants.DURATIONS.TEN_MINUTES) { - waitRemaining = constants.DURATIONS.A_MINUTE - waitedSinceLastTest; - } - else if (downDelay <= constants.DURATIONS.AN_HOUR) { - waitRemaining = constants.DURATIONS.TEN_MINUTES - waitedSinceLastTest; - } - else if (downDelay <= constants.DURATIONS.A_DAY) { - waitRemaining = constants.DURATIONS.AN_HOUR - waitedSinceLastTest; - } - else if (downDelay <= constants.DURATIONS.A_WEEK) { - waitRemaining = constants.DURATIONS.A_DAY - waitedSinceLastTest; - } - else if (downDelay <= constants.DURATIONS.A_MONTH) { - waitRemaining = constants.DURATIONS.A_WEEK - waitedSinceLastTest; - } - // Else do not check it, DOWN for too long - return waitRemaining; - } -} - -function BlockCrawler(server, logger) { - - const CONST_BLOCKS_CHUNK = 50; - - const programStart = Date.now(); - - let pullingActualIntervalDuration = constants.PULLING_MINIMAL_DELAY; - const syncBlockFifo = async.queue((task, callback) => task(callback), 1); - let syncBlockInterval = null; - - this.startService = () => co(function*() { - if (syncBlockInterval) - clearInterval(syncBlockInterval); - syncBlockInterval = setInterval(() => syncBlockFifo.push(() => syncBlock(server)), 1000 * pullingActualIntervalDuration); - syncBlock(server); - }); - - this.stopService = () => co(function*() { - clearInterval(syncBlockInterval); - syncBlockFifo.kill(); - }); - - this.pullBlocks = syncBlock; - - function syncBlock(server, pubkey) { - - // Eventually change the interval duration - const minutesElapsed = Math.ceil((Date.now() - programStart) / (60 * 1000)); - const FACTOR = Math.sin((minutesElapsed / constants.PULLING_INTERVAL_TARGET) * (Math.PI / 2)); - // Make the interval always higher than before - const pullingTheoreticalIntervalNow = Math.max(parseInt(Math.max(FACTOR * constants.PULLING_INTERVAL_TARGET, constants.PULLING_MINIMAL_DELAY)), pullingActualIntervalDuration); - if (pullingTheoreticalIntervalNow !== pullingActualIntervalDuration) { - pullingActualIntervalDuration = pullingTheoreticalIntervalNow; - // Change the interval - if (syncBlockInterval) - 
clearInterval(syncBlockInterval); - syncBlockInterval = setInterval(() => syncBlockFifo.push(() => syncBlock(server)), 1000 * pullingActualIntervalDuration); - } - - return co(function *() { - try { - let current = yield server.dal.getCurrentBlockOrNull(); - if (current) { - pullingEvent(server, 'start', current.number); - logger && logger.info("Pulling blocks from the network..."); - let peers = yield server.dal.findAllPeersNEWUPBut([server.conf.pair.pub]); - peers = _.shuffle(peers); - if (pubkey) { - _(peers).filter((p) => p.pubkey == pubkey); - } - // Shuffle the peers - peers = _.shuffle(peers); - // Only take at max X of them - peers = peers.slice(0, constants.MAX_NUMBER_OF_PEERS_FOR_PULLING); - for (let i = 0, len = peers.length; i < len; i++) { - let p = new Peer(peers[i]); - pullingEvent(server, 'peer', _.extend({number: i, length: peers.length}, p)); - logger && logger.trace("Try with %s %s", p.getURL(), p.pubkey.substr(0, 6)); - try { - let node = yield p.connect(); - node.pubkey = p.pubkey; - yield checkPeerValidity(server, p, node); - let lastDownloaded; - let dao = pulling.abstractDao({ - - // Get the local blockchain current block - localCurrent: () => server.dal.getCurrentBlockOrNull(), - - // Get the remote blockchain (bc) current block - remoteCurrent: (thePeer) => thePeer.getCurrent(), - - // Get the remote peers to be pulled - remotePeers: () => Q([node]), - - // Get block of given peer with given block number - getLocalBlock: (number) => server.dal.getBlock(number), - - // Get block of given peer with given block number - getRemoteBlock: (thePeer, number) => co(function *() { - let block = null; - try { - block = yield thePeer.getBlock(number); - Transaction.statics.cleanSignatories(block.transactions); - } catch (e) { - if (e.httpCode != 404) { - throw e; - } - } - return block; - }), - - // Simulate the adding of a single new block on local blockchain - applyMainBranch: (block) => co(function *() { - let addedBlock = yield server.BlockchainService.submitBlock(block, true, constants.FORK_ALLOWED); - if (!lastDownloaded) { - lastDownloaded = yield dao.remoteCurrent(node); - } - pullingEvent(server, 'applying', {number: block.number, last: lastDownloaded.number}); - if (addedBlock) { - current = addedBlock; - server.streamPush(addedBlock); - } - }), - - // Eventually remove forks later on - removeForks: () => Q(), - - // Tells wether given peer is a member peer - isMemberPeer: (thePeer) => co(function *() { - let idty = yield server.dal.getWrittenIdtyByPubkey(thePeer.pubkey); - return (idty && idty.member) || false; - }), - - // Simulates the downloading of blocks from a peer - downloadBlocks: (thePeer, fromNumber, count) => co(function*() { - if (!count) { - count = CONST_BLOCKS_CHUNK; - } - let blocks = yield thePeer.getBlocks(count, fromNumber); - // Fix for #734 - for (const block of blocks) { - for (const tx of block.transactions) { - tx.version = constants.TRANSACTION_VERSION; - } - } - return blocks; - }) - }); - - yield pulling.pull(server.conf, dao); - } catch (e) { - if (isConnectionError(e)) { - logger && logger.info("Peer %s unreachable: now considered as DOWN.", p.pubkey); - yield server.dal.setPeerDown(p.pubkey); - } - else if (e.httpCode == 404) { - logger && logger.trace("No new block from %s %s", p.pubkey.substr(0, 6), p.getURL()); - } - else { - logger && logger.warn(e); - } - } - } - pullingEvent(server, 'end', current.number); - } - logger && logger.info('Will pull blocks from the network in %s min %s sec', Math.floor(pullingActualIntervalDuration / 
60), Math.floor(pullingActualIntervalDuration % 60)); - } catch(err) { - pullingEvent(server, 'error'); - logger && logger.warn(err.code || err.stack || err.message || err); - } - }); - } - - function pullingEvent(server, type, number) { - server.push({ - pulling: { - type: type, - data: number - } - }); - } - - function isConnectionError(err) { - return err && ( - err.code == "E_DUNITER_PEER_CHANGED" - || err.code == "EINVAL" - || err.code == "ECONNREFUSED" - || err.code == "ETIMEDOUT" - || (err.httpCode !== undefined && err.httpCode !== 404)); - } -} - -const checkPeerValidity = (server, p, node) => co(function *() { - try { - let document = yield node.getPeer(); - let thePeer = Peer.statics.peerize(document); - let goodSignature = server.PeeringService.checkPeerSignature(thePeer); - if (!goodSignature) { - throw 'Signature from a peer must match'; - } - if (p.currency !== thePeer.currency) { - throw 'Currency has changed from ' + p.currency + ' to ' + thePeer.currency; - } - if (p.pubkey !== thePeer.pubkey) { - throw 'Public key of the peer has changed from ' + p.pubkey + ' to ' + thePeer.pubkey; - } - let sp1 = p.block.split('-'); - let sp2 = thePeer.block.split('-'); - let blockNumber1 = parseInt(sp1[0]); - let blockNumber2 = parseInt(sp2[0]); - if (blockNumber2 < blockNumber1) { - throw 'Signature date has changed from block ' + blockNumber1 + ' to older block ' + blockNumber2; - } - } catch (e) { - throw { code: "E_DUNITER_PEER_CHANGED" }; - } -}); diff --git a/app/modules/peer.js b/app/modules/peer.js deleted file mode 100644 index 1ebf00e96a6b5d94cdd114b3e381403fdf29ede7..0000000000000000000000000000000000000000 --- a/app/modules/peer.js +++ /dev/null @@ -1,43 +0,0 @@ -"use strict"; - -const Q = require('q'); -const co = require('co'); -const multicaster = require('../lib/streams/multicaster'); -const contacter = require('../lib/contacter'); -const Peer = require('../lib/entity/peer'); - -const ERASE_IF_ALREADY_RECORDED = true; - -module.exports = { - duniter: { - cli: [{ - name: 'peer [host] [port]', - desc: 'Exchange peerings with another node', - onDatabaseExecute: (server, conf, program, params) => co(function*() { - const host = params[0]; - const port = params[1]; - const logger = server.logger; - try { - logger.info('Fetching peering record at %s:%s...', host, port); - let peering = yield contacter.statics.fetchPeer(host, port); - logger.info('Apply peering ...'); - yield server.PeeringService.submitP(peering, ERASE_IF_ALREADY_RECORDED, !program.nocautious); - logger.info('Applied'); - let selfPeer = yield server.dal.getPeer(server.PeeringService.pubkey); - if (!selfPeer) { - yield Q.nfcall(server.PeeringService.generateSelfPeer, server.conf, 0); - selfPeer = yield server.dal.getPeer(server.PeeringService.pubkey); - } - logger.info('Send self peering ...'); - const caster = multicaster(); - yield caster.sendPeering(Peer.statics.peerize(peering), Peer.statics.peerize(selfPeer)); - logger.info('Sent.'); - yield server.disconnect(); - } catch(e) { - logger.error(e.code || e.message || e); - throw Error("Exiting"); - } - }) - }] - } -} diff --git a/app/modules/synchronization.js b/app/modules/synchronization.js deleted file mode 100644 index 5c58eba2635a870cca0901afe0b2c49193e68a2b..0000000000000000000000000000000000000000 --- a/app/modules/synchronization.js +++ /dev/null @@ -1,34 +0,0 @@ -"use strict"; - -const co = require('co'); - -module.exports = { - duniter: { - cli: [{ - name: 'sync [host] [port] [to]', - desc: 'Synchronize blockchain from a remote Duniter node', - 
onDatabaseExecute: (server, conf, program, params) => co(function*() { - const host = params[0]; - const port = params[1]; - const to = params[2]; - if (!host) { - throw 'Host is required.'; - } - if (!port) { - throw 'Port is required.'; - } - let cautious; - if (program.nocautious) { - cautious = false; - } - if (program.cautious) { - cautious = true; - } - yield server.synchronize(host, port, parseInt(to), 0, !program.nointeractive, cautious, program.nopeers, program.noshuffle); - if (server) { - yield server.disconnect(); - } - }) - }] - } -} diff --git a/app/service/BlockchainService.js b/app/service/BlockchainService.js index 9be39b22b8b5fc72bb3a399523024ce8b53f3bb7..2e6ed4c41b7a071a3b53766dce9be834f5e7ee4f 100644 --- a/app/service/BlockchainService.js +++ b/app/service/BlockchainService.js @@ -179,8 +179,8 @@ function BlockchainService (server) { const eventuallySwitchOnSideChain = (current) => co(function *() { const branches = yield that.branches(); - const blocksAdvance = constants.BRANCHES.SWITCH_ON_BRANCH_AHEAD_BY_X_MINUTES / (conf.avgGenTime / 60); - const timeAdvance = constants.BRANCHES.SWITCH_ON_BRANCH_AHEAD_BY_X_MINUTES * 60; + const blocksAdvance = conf.swichOnTimeAheadBy / (conf.avgGenTime / 60); + const timeAdvance = conf.swichOnTimeAheadBy * 60; let potentials = _.without(branches, current); // We switch only to blockchain with X_MIN advance considering both theoretical time by block + written time potentials = _.filter(potentials, (p) => p.number - current.number >= blocksAdvance diff --git a/index.js b/index.js index b6737e3986ce3f2845762f7bc23799a6f2e50a85..0bba06f98d01ff59b70461596c7addc484578d89 100644 --- a/index.js +++ b/index.js @@ -15,16 +15,14 @@ const dkeypairDependency = require('duniter-keypair'); const configDependency = require('./app/modules/config'); const wizardDependency = require('./app/modules/wizard'); const genDependency = require('./app/modules/gen'); -const syncDependency = require('./app/modules/synchronization'); const resetDependency = require('./app/modules/reset'); const checkConfDependency = require('./app/modules/check-config'); const exportBcDependency = require('./app/modules/export-bc'); const reapplyDependency = require('./app/modules/reapply'); const revertDependency = require('./app/modules/revert'); -const peerDependency = require('./app/modules/peer'); const daemonDependency = require('./app/modules/daemon'); const pSignalDependency = require('./app/modules/peersignal'); -const crawlerDependency = require('./app/modules/crawler'); +const crawlerDependency = require('duniter-crawler'); const proverDependency = require('./app/modules/prover'); const bmapiDependency = require('duniter-bma'); const routerDependency = require('./app/modules/router'); @@ -34,7 +32,6 @@ const MINIMAL_DEPENDENCIES = [ ]; const DEFAULT_DEPENDENCIES = MINIMAL_DEPENDENCIES.concat([ - { name: 'duniter-sync', required: syncDependency }, { name: 'duniter-wizard', required: wizardDependency }, { name: 'duniter-gen', required: genDependency }, { name: 'duniter-reset', required: resetDependency }, @@ -42,7 +39,6 @@ const DEFAULT_DEPENDENCIES = MINIMAL_DEPENDENCIES.concat([ { name: 'duniter-exportbc', required: exportBcDependency }, { name: 'duniter-reapply', required: reapplyDependency }, { name: 'duniter-revert', required: revertDependency }, - { name: 'duniter-peer', required: peerDependency }, { name: 'duniter-daemon', required: daemonDependency }, { name: 'duniter-psignal', required: pSignalDependency }, { name: 'duniter-crawler', required: crawlerDependency 
diff --git a/package.json b/package.json
index fe9ee7dc93439c5fe8681c044378dab824adfb96..2d1374520d9cbcdb2aefc702be85dc9145f7cd26 100644
--- a/package.json
+++ b/package.json
@@ -42,14 +42,14 @@
     "colors": "1.1.2",
     "commander": "2.9.0",
     "daemonize2": "0.4.2",
-    "duniter-keypair": "duniter/duniter-keypair",
     "duniter-bma": "duniter/duniter-bma",
+    "duniter-crawler": "duniter/duniter-crawler",
+    "duniter-keypair": "duniter/duniter-keypair",
     "event-stream": "3.3.4",
     "inquirer": "0.8.5",
     "jison": "0.4.17",
     "merkle": "0.5.1",
     "moment": "2.15.1",
-    "multimeter": "0.1.1",
     "naclb": "1.3.7",
     "node-pre-gyp": "0.6.32",
     "node-uuid": "1.4.7",
@@ -57,6 +57,7 @@
     "parallelshell": "2.0.0",
     "q": "1.4.1",
     "q-io": "1.13.2",
+    "querablep": "^0.1.0",
    "request": "2.75.0",
    "request-promise": "4.1.1",
    "sha1": "1.1.1",
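package.json now pulls querablep from npm instead of the in-tree app/lib/querablep, matching the blockProver.js change. A minimal usage sketch, assuming the npm package exposes the same wrap-and-inspect API as the removed local module (method names are from the package's documentation and worth double-checking):

const querablep = require('querablep');

// Wrap any promise so its state can be polled synchronously.
const task = querablep(new Promise((resolve) => setTimeout(resolve, 100, 'done')));

console.log(task.isResolved());                   // false while still pending
task.then(() => console.log(task.isFulfilled())); // true once settled successfully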
diff --git a/server.js b/server.js
index f9566ccbf26e9cabf4b973ff3f43876378ab0ff3..768bdb7ed9599a2c70a72a162a950a7225af2f9c 100644
--- a/server.js
+++ b/server.js
@@ -15,7 +15,6 @@ const jsonpckg = require('./package.json');
 const keyring = require('./app/lib/crypto/keyring');
 const directory = require('./app/lib/system/directory');
 const dos2unix = require('./app/lib/system/dos2unix');
-const Synchroniser = require('./app/lib/sync');
 const rawer = require('./app/lib/ucp/rawer');
 
 function Server (home, memoryOnly, overrideConf) {
@@ -43,11 +42,13 @@ function Server (home, memoryOnly, overrideConf) {
   that.lib.rawer = require('./app/lib/ucp/rawer');
   that.lib.http2raw = require('duniter-bma').duniter.methods.http2raw;
   that.lib.dos2unix = require('./app/lib/system/dos2unix');
-  that.lib.contacter = require('./app/lib/contacter');
+  that.lib.contacter = require('duniter-crawler').duniter.methods.contacter;
   that.lib.bma = require('duniter-bma').duniter.methods.bma;
   that.lib.network = require('./app/lib/system/network');
   that.lib.constants = require('./app/lib/constants');
   that.lib.ucp = require('./app/lib/ucp/buid');
+  that.lib.hashf = require('./app/lib/ucp/hashf');
+  that.lib.indexer = require('./app/lib/dup/indexer');
 
   that.MerkleService = require("./app/lib/helpers/merkle");
   that.IdentityService = require('./app/service/IdentityService')();
@@ -352,35 +353,6 @@ function Server (home, memoryOnly, overrideConf) {
 
   this.singleWritePromise = (obj) => that.submit(obj);
 
-  /**
-   * Synchronize the server with another server.
-   *
-   * If local server's blockchain is empty, process a fast sync: **no block is verified in such a case**, unless
-   * you force value `askedCautious` to true.
-   *
-   * @param onHost Syncs on given host.
-   * @param onPort Syncs on given port.
-   * @param upTo Sync up to this number, if `upTo` value is a positive integer.
-   * @param chunkLength Length of each chunk of blocks to download. Kind of buffer size.
-   * @param interactive Tell if the loading bars should be used for console output.
-   * @param askedCautious If true, force the verification of each downloaded block. This is the right way to have a valid blockchain for sure.
-   * @param nopeers If true, sync will omit to retrieve peer documents.
-   * @param noShufflePeers If true, sync will NOT shuffle the retrieved peers before downloading on them.
-   */
-  this.synchronize = (onHost, onPort, upTo, chunkLength, interactive, askedCautious, nopeers, noShufflePeers) => {
-    const remote = new Synchroniser(that, onHost, onPort, that.conf, interactive === true);
-    const syncPromise = remote.sync(upTo, chunkLength, askedCautious, nopeers, noShufflePeers === true);
-    return {
-      flow: remote,
-      syncPromise: syncPromise
-    };
-  };
-
-  this.testForSync = (onHost, onPort) => {
-    const remote = new Synchroniser(that, onHost, onPort);
-    return remote.test();
-  };
-
   this.applyCPU = (cpu) => that.BlockchainService.changeProverCPUSetting(cpu);
 
   this.rawer = rawer;
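With app/lib/contacter.js deleted, the server.js hunk above re-exports the crawler's contacter under that.lib.contacter, so module authors keep the same entry point. A minimal sketch of the new import path, assuming the crawler's contacter keeps the (host, port, opts) signature and getCurrent() helper of the module it replaces (host and port below are illustrative):

const contacter = require('duniter-crawler').duniter.methods.contacter;

// Build a client pointed at an (assumed) local node.
const node = contacter('127.0.0.1', 10901, { timeout: 5000 });

node.getCurrent()
  .then((block) => console.log('current block:', block.number))
  .catch((err) => console.error(err));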
diff --git a/test/fast/block_pulling.js b/test/fast/block_pulling.js
deleted file mode 100644
index ae497c9d450d00eb2423a9d68c19a3f9ed8e9bd7..0000000000000000000000000000000000000000
--- a/test/fast/block_pulling.js
+++ /dev/null
@@ -1,257 +0,0 @@
-"use strict";
-var should = require('should');
-var _ = require('underscore');
-var co = require('co');
-var Q = require('q');
-var pulling = require('../../app/lib/pulling');
-var constants = require("../../app/lib/constants.js");
-
-let commonConf = {
-  avgGenTime: constants.BRANCHES.SWITCH_ON_BRANCH_AHEAD_BY_X_MINUTES * 60,
-  forksize: 100
-};
-
-describe('Pulling blocks', () => {
-
-  it('from genesis with good sidechain should work', pullinTest({
-    blockchain: [
-      newBlock(0, 'A')
-    ],
-    sidechains: [
-      [
-        newBlock(0, 'A'),
-        newBlock(1, 'A') // <-- 1) checks this block: is good, we add it
-      ]
-    ],
-    expectHash: 'A1'
-  }));
-
-  it('from genesis with fork sidechain should not work', pullinTest({
-    blockchain: [
-      newBlock(0, 'A')
-    ],
-    sidechains: [
-      [
-        newBlock(0, 'B'), // <-- 2) oh no this not common with blockchain A, leave this blockchain B alone
-        newBlock(1, 'B') // <-- 1) checks this block: ah, a fork! let's find common root ...
-      ]
-    ],
-    expectHash: 'A0'
-  }));
-
-  it('from genesis with multiple good sidechains should work', pullinTest({
-    blockchain: [
-      newBlock(0, 'A')
-    ],
-    sidechains: [
-      [
-        newBlock(0, 'A'),
-        newBlock(1, 'A'), // <-- 1) checks this block: is good, we add it
-        newBlock(2, 'A') // <-- 2) checks this block: is good, we add it
-      ],
-      [
-        newBlock(0, 'A'),
-        newBlock(1, 'A') // <-- 3) you are a bit late ... we are on A2 yet!
-      ],
-      [
-        newBlock(0, 'A'),
-        newBlock(1, 'A'),
-        newBlock(2, 'A'),
-        newBlock(3, 'A') // <-- 4) checks this block: is good, we add it
-      ],
-      [
-        newBlock(0, 'A'),
-        newBlock(1, 'A') // <-- 5 really too late
-      ]
-    ],
-    expectHash: 'A3'
-  }));
-
-  it('sync with a single fork', pullinTest({
-    blockchain: [
-      newBlock(0, 'A'),
-      newBlock(1, 'A'),
-      newBlock(2, 'A'),
-      newBlock(3, 'A')
-    ],
-    sidechains: [
-      [
-        newBlock(0, 'A'), // <-- 2) sees a common root, yet not *the* common root (A1 is not a fork block)
-        newBlock(1, 'A'), // <-- 4) yep this is the good one! sync from B2 to B5
-        newBlock(2, 'B'), // <-- 3) check the middle, not the common root
-        newBlock(3, 'B'),
-        newBlock(4, 'B'), // <-- 1) checks this block: a fork, let's find common root
-        newBlock(5, 'B')
-      ]
-    ],
-    expectHash: 'B5'
-  }));
-
-  it('sync with multiple forks', pullinTest({
-    blockchain: [
-      newBlock(0, 'A'),
-      newBlock(1, 'A'),
-      newBlock(2, 'A'),
-      newBlock(3, 'A')
-    ],
-    sidechains: [
-      [
-        newBlock(0, 'A'), // <-- 2) sees a common root, yet not *the* common root (A1 is not a fork block)
-        newBlock(1, 'A'), // <-- 4) yep this is the good one! sync from B2 to B5
-        newBlock(2, 'B'), // <-- 3) check the middle, not the common root
-        newBlock(3, 'B'),
-        newBlock(4, 'B'), // <-- 1) checks this block: a fork, let's find common root
-        newBlock(5, 'B')
-      ],
-      // This fork should not be followed because we switch only one time per pulling, and B5 is already OK
-      [
-        newBlock(0, 'A'),
-        newBlock(1, 'A'),
-        newBlock(2, 'B'),
-        newBlock(3, 'B'),
-        newBlock(4, 'B'),
-        newBlock(5, 'B'),
-        newBlock(6, 'B')
-      ]
-    ],
-    expectHash: 'B5'
-  }));
-
-  it('sync with inconsistant fork should skip it', pullinTest({
-    blockchain: [
-      newBlock(0, 'A'),
-      newBlock(1, 'A'),
-      newBlock(2, 'A'),
-      newBlock(3, 'A')
-    ],
-    sidechains: [
-      [
-        newBlock(0, 'A'), // <-- 2) sees a common root, yet not *the* common root (A1 is not a fork block)
-        qwaBlock(1, 'A'), // <-- 4) checks the middle: the block has changed and now displays C! this is inconsistent
-        newBlock(2, 'C'), // <-- 3) checks the middle (binary search): too high, go downwards
-        newBlock(3, 'C'),
-        newBlock(4, 'C'), // <-- 1) sees a fork, try to find common root
-        newBlock(5, 'C')
-      ]
-    ],
-    expectHash: 'A3'
-  }));
-});
-
-function newBlock(number, branch, rootBranch, quantum) {
-  let previousNumber = number - 1;
-  let previousBranch = rootBranch || branch;
-  let previousHash = previousNumber >= 0 ? previousBranch + previousNumber : '';
-  return {
-    number: number,
-    medianTime: number * constants.BRANCHES.SWITCH_ON_BRANCH_AHEAD_BY_X_MINUTES * 60,
-    hash: branch + number,
-    previousHash: previousHash,
-    // this is not a real field, just here for the sake of demonstration: a quantum block changes itself
-    // when we consult it, making the chain inconsistent
-    quantum: quantum
-  };
-}
-
-function qwaBlock(number, branch, rootBranch) {
-  return newBlock(number, branch, rootBranch, true);
-}
-
-function pullinTest(testConfiguration) {
-  return () => co(function *() {
-
-    // The blockchains we are testing against
-    let blockchain = testConfiguration.blockchain;
-    let sidechains = testConfiguration.sidechains;
-
-    // The data access object simulating network access
-    let dao = mockDao(blockchain, sidechains);
-
-    // The very last block of a blockchain should have the good number
-    (yield dao.localCurrent()).should.have.property('number').equal(blockchain[blockchain.length - 1].number);
-
-    // And after we make a pulling...
-    yield pulling.pull(commonConf, dao);
-
-    // We test the new local blockchain current block (it should have changed in case of successful pull)
-    let localCurrent = yield dao.localCurrent();
-    if (testConfiguration.expectHash !== undefined && testConfiguration.expectHash !== null) {
-      localCurrent.should.have.property('hash').equal(testConfiguration.expectHash);
-    }
-    if (testConfiguration.expectFunc !== undefined && testConfiguration.expectFunc !== null) {
-      testConfiguration.expectFunc(dao);
-    }
-  });
-}
-
-/**
- * Network mocker
- * @param blockchain
- * @param sideChains
- * @returns {{localCurrent: (function(): (*|Q.Promise<*>|Q.Promise<T>)), remoteCurrent: (function(): (*|Q.Promise<*>|Q.Promise<T>)), remotePeers: (function(): (*|Q.Promise<*>|Q.Promise<T>)), getRemoteBlock: (function(): (*|Q.Promise<*|null>|Q.Promise<T>)), applyMainBranch: (function(): (*|Q.Promise<Number|*|_Chain<*>>|Q.Promise<T>)), removeForks: (function(): (*|Q.Promise<T>)), isMemberPeer: (function(): (*|Q.Promise<boolean>|Q.Promise<T>)), findCommonRoot: (function(): (*|Promise)), downloadBlocks: (function(): (*|Q.Promise<Buffer|ArrayBuffer|Array.<any>|string|*|_Chain<any>>|Q.Promise<T>)), applyBranch: (function())}}
- */
-function mockDao(blockchain, sideChains) {
-  const dao = pulling.abstractDao({
-
-    // Get the local blockchain current block
-    localCurrent: () => co(function*() {
-      return blockchain[blockchain.length - 1]
-    }),
-
-    // Get the remote blockchain (bc) current block
-    remoteCurrent: (bc) => co(function*() {
-      return bc[bc.length - 1]
-    }),
-
-    // Get the remote peers to be pulled
-    remotePeers: () => Q(sideChains.map((sc, index) => {
-      sc.pubkey = 'PUBK' + index;
-      return sc;
-    })),
-
-    // Get block of given peer with given block number
-    getLocalBlock: (number) => co(function*() {
-      return blockchain[number] || null
-    }),
-
-    // Get block of given peer with given block number
-    getRemoteBlock: (bc, number) => co(function *() {
-      let block = bc[number] || null;
-      // Quantum block implementation
-      if (block && block.quantum) {
-        bc[number] = _.clone(block);
-        bc[number].hash = 'Q' + block.hash;
-      }
-      return block;
-    }),
-
-    // Simulate the adding of a single new block on local blockchain
-    applyMainBranch: (block) => co(function*() {
-      return blockchain.push(block)
-    }),
-
-    // Clean the eventual fork blocks already registered in DB (since real fork mechanism uses them, so we want
-    // every old fork block to be removed)
-    removeForks: () => co(function*() {}),
-
-    // Tells wether given peer is a member peer
-    isMemberPeer: (peer) => co(function*() {
-      return true;
-    }),
-
-    // Simulates the downloading of blocks from a peer
-    downloadBlocks: (bc, fromNumber, count) => co(function*() {
-      if (!count) {
-        const block = yield dao.getRemoteBlock(bc, fromNumber);
-        if (block) {
-          return [block];
-        }
-        else {
-          return [];
-        }
-      }
-      return bc.slice(fromNumber, fromNumber + count);
-    }),
-  });
-  return dao;
-}
diff --git a/test/integration/branches2.js b/test/integration/branches2.js
index e3161951489d07967f892ce3dbe4519e7515244d..e5265a9a265681991ffcc9878ccea278c3027be3 100644
--- a/test/integration/branches2.js
+++ b/test/integration/branches2.js
@@ -30,7 +30,8 @@ const commonConf = {
   currency: 'bb',
   httpLogs: true,
   forksize: 10,
-  avgGenTime: constants.BRANCHES.SWITCH_ON_BRANCH_AHEAD_BY_X_MINUTES * 60,
+  swichOnTimeAheadBy: 30,
+  avgGenTime: 30 * 60,
   sigQty: 1
 };
 
@@ -103,7 +104,7 @@ describe("SelfFork", function() {
     yield s1.singleWritePromise(s2p);
 
     // Forking S1 from S2
-    return require('../../app/modules/crawler').duniter.methods.pullBlocks(s1, s2p.pubkey);
+    return require('duniter-crawler').duniter.methods.pullBlocks(s1, s2p.pubkey);
   }));
 
   describe("Server 1 /blockchain", function() {
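In the integration tests the switch threshold is now ordinary server configuration, so each node carries its own value instead of the tests mutating a shared constant. A hedged sketch of the pattern (the require path and all values are illustrative, not from this diff):

const duniter = require('../../index');

// Memory-mode test server; swichOnTimeAheadBy is plain conf now, so no
// global constant has to be patched and restored around the test.
const server = duniter('/tmp/test-node', true, {
  currency: 'bb',
  forksize: 10,
  avgGenTime: 30 * 60,     // 30-minute average block time
  swichOnTimeAheadBy: 30   // switch when a branch is about 30 minutes ahead
});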
diff --git a/test/integration/branches_switch.js b/test/integration/branches_switch.js
index d19a35aa60be964a2d1d32457e7b34090166e4cf..b2463e2ec3733801580956c088182931efc6381c 100644
--- a/test/integration/branches_switch.js
+++ b/test/integration/branches_switch.js
@@ -27,6 +27,7 @@ const s1 = duniter(
   '/bb11',
   MEMORY_MODE,
   _.extend({
+    swichOnTimeAheadBy: 0,
     port: '7788',
     pair: {
       pub: 'HgTTJLAQ5sqfknMq7yLPZbehtuLSsKj9CxWN7k8QvYJd',
@@ -40,6 +41,7 @@ const s2 = duniter(
   '/bb12',
   MEMORY_MODE,
   _.extend({
+    swichOnTimeAheadBy: 0,
     port: '7789',
     pair: {
       pub: 'DKpQPUL4ckzXYdnDRvCRKAm1gNvSdmAXnTrJZ7LvM5Qo',
@@ -80,14 +82,10 @@ describe("Switch", function() {
 
     // So we now have:
     // S1 01234
    // S2 `3456789
-    let oldVal = constants.BRANCHES.SWITCH_ON_BRANCH_AHEAD_BY_X_MINUTES = 0;
-
     yield s1.singleWritePromise(s2p);
 
     // Forking S1 from S2
-    yield require('../../app/modules/crawler').duniter.methods.pullBlocks(s1, s2p.pubkey);
-
-    constants.BRANCHES.SWITCH_ON_BRANCH_AHEAD_BY_X_MINUTES = oldVal;
+    yield require('duniter-crawler').duniter.methods.pullBlocks(s1, s2p.pubkey);
 
     // S1 should have switched to the other branch
diff --git a/test/integration/continuous-proof.js b/test/integration/continuous-proof.js
index 865ea9ede7d3ba9f584947c02dffb5d5b0402fff..f2bb6c8a78a42fa7e2a1535d9d4c927e87640314 100644
--- a/test/integration/continuous-proof.js
+++ b/test/integration/continuous-proof.js
@@ -123,7 +123,7 @@ describe("Continous proof-of-work", function() {
     yield s2.until('block', 15);
     s2.stopBlockComputation();
     yield [
-      require('../../app/modules/crawler').duniter.methods.pullBlocks(s3),
+      require('duniter-crawler').duniter.methods.pullBlocks(s3),
       s3.startBlockComputation()
     ];
     yield s3.expectJSON('/blockchain/current', { number: 15 });
diff --git a/test/integration/peerings.js b/test/integration/peerings.js
index b296c807d310b92e92fbb377d1b9f8361981458f..b8aa83f37ca984ccf28c60a9a0b6a5fd92684810 100644
--- a/test/integration/peerings.js
+++ b/test/integration/peerings.js
@@ -12,7 +12,7 @@ const rp = require('request-promise');
 const httpTest = require('./tools/http');
 const commit = require('./tools/commit');
 const sync = require('./tools/sync');
-const contacter = require('../../app/lib/contacter');
+const contacter = require('duniter-crawler').duniter.methods.contacter;
 const until = require('./tools/until');
 const multicaster = require('../../app/lib/streams/multicaster');
 const Peer = require('../../app/lib/entity/peer');
diff --git a/test/integration/start_generate_blocks.js b/test/integration/start_generate_blocks.js
index ede15accc5db85e8428b8a7524cd568e5e2e3de6..61e1959acfff1ff93907429b637e9cf906cc9f16 100644
--- a/test/integration/start_generate_blocks.js
+++ b/test/integration/start_generate_blocks.js
@@ -11,7 +11,7 @@ const commit = require('./tools/commit');
 const until = require('./tools/until');
 const multicaster = require('../../app/lib/streams/multicaster');
 const Peer = require('../../app/lib/entity/peer');
-const contacter = require('../../app/lib/contacter');
+const contacter = require('duniter-crawler').duniter.methods.contacter;
 const sync = require('./tools/sync');
 
 const expectJSON = httpTest.expectJSON;
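All forced pulls in the tests above now go through the published duniter-crawler module. A minimal helper showing the call shape, assuming only what the hunks show: pullBlocks takes a running Server and an optional peer pubkey (continuous-proof.js calls it without one):

const co = require('co');
const crawler = require('duniter-crawler');

// `server` is a running duniter Server instance; `peerPubkey` restricts the
// pull to a single peer and may be omitted to pull from known peers.
const forcePull = (server, peerPubkey) => co(function*() {
  yield crawler.duniter.methods.pullBlocks(server, peerPubkey);
});

module.exports = forcePull;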
diff --git a/test/integration/tools/node.js b/test/integration/tools/node.js
index 63b2be1adc6374b9d170bf52fa8e758202a86330..c66b56f53cf247da1bb1027cb8fb323e939ccb4a 100644
--- a/test/integration/tools/node.js
+++ b/test/integration/tools/node.js
@@ -6,7 +6,7 @@ var _ = require('underscore');
 var async = require('async');
 var request = require('request');
 var rules = require('../../../app/lib/rules');
-var contacter = require('../../../app/lib/contacter');
+var contacter = require('duniter-crawler').duniter.methods.contacter;
 var duniter = require('../../../index');
 var multicaster = require('../../../app/lib/streams/multicaster');
 var Configuration = require('../../../app/lib/entity/configuration');
diff --git a/test/integration/tools/user.js b/test/integration/tools/user.js
index bf632dd9f2210659ac65f39805eb44403a2e08e1..14d774e016e66dcdadb425199a1da75d08e94b54 100644
--- a/test/integration/tools/user.js
+++ b/test/integration/tools/user.js
@@ -4,7 +4,7 @@ const Q = require('q');
 const _ = require('underscore');
 const async = require('async');
 const request = require('request');
-const contacter = require('../../../app/lib/contacter');
+const contacter = require('duniter-crawler').duniter.methods.contacter;
 const ucp = require('../../../app/lib/ucp/buid');
 const parsers = require('../../../app/lib/streams/parsers');
 const keyring = require('../../../app/lib/crypto/keyring');
diff --git a/test/integration/v1.0-modules-api.js b/test/integration/v1.0-modules-api.js
index e0a8462a0d04a958988e93d63ba94649a16f7856..c2591618e17fdfa18dd54c256633fbda008c5205 100644
--- a/test/integration/v1.0-modules-api.js
+++ b/test/integration/v1.0-modules-api.js
@@ -7,7 +7,7 @@ const util = require('util');
 const stream = require('stream');
 const duniter = require('../../index');
 const parsers = require('../../app/lib/streams/parsers/index');
-const querablep = require('../../app/lib/querablep');
+const querablep = require('querablep');
 
 describe("v1.0 Module API", () => {