@@ -288,24 +288,21 @@ export class ChronikBlockchainClient {
288288 return ( await this . chronik . script ( type , hash160 ) . history ( page , pageSize ) ) . txs
289289 }
290290
291- // For each address, fetch PAGE_CONCURRENCY pages in parallel (“burst”),
292- // then use the burst’s newest/oldest timestamps to decide whether to continue.
293- // Yields happen only in the generator body (after each slice finishes, and at final flush).
291+ /*
292+ * For each address, fetch PAGE_CONCURRENCY pages in parallel (“burst”),
293+ * then use the burst’s newest/oldest timestamps to decide whether to continue.
294+ * Yields happen only in the generator body (after each slice finishes, and at final flush).
295+ */
294296 private async * fetchLatestTxsForAddresses (
295297 addresses : Address [ ]
296298 ) : AsyncGenerator < Prisma . TransactionUncheckedCreateInput [ ] > {
297- // 1024 -> 7:10m
298- // 512 -> 7:34.175
299- // 256 -> 7:47.876
300- const pagesPerBurstPerAddress = 1 // pageConcurrency
301299 const logPrefix = `${ this . CHRONIK_MSG_PREFIX } [PARALLEL FETCHING]`
302300
303301 console . log (
304302 `${ logPrefix } : Will fetch latest txs for ${ addresses . length } addresses ` +
305- `(addressConcurrency=${ INITIAL_ADDRESS_SYNC_FETCH_CONCURRENTLY } , pageConcurrency=${ pagesPerBurstPerAddress } ).`
303+ `(addressConcurrency=${ INITIAL_ADDRESS_SYNC_FETCH_CONCURRENTLY } , pageConcurrency=1 ).`
306304 )
307305
308- // Shared accumulation buffer — emitted only at the top-level generator yields
309306 let transactionsToEmitBuffer : Prisma . TransactionUncheckedCreateInput [ ] = [ ]
310307
311308 for ( let i = 0 ; i < addresses . length ; i += INITIAL_ADDRESS_SYNC_FETCH_CONCURRENTLY ) {
@@ -321,82 +318,59 @@ export class ChronikBlockchainClient {
321318 let hasReachedStoppingCondition = false
322319
323320 while ( ! hasReachedStoppingCondition ) {
324- const pageIndicesInBurst = Array . from (
325- { length : pagesPerBurstPerAddress } ,
326- ( _ , k ) => nextBurstBasePageIndex + k
327- )
328-
329- // Fetch one "burst" of pages for this address in parallel.
330- // Swallow individual page errors so one failing page doesn't cancel the address worker.
331- const burstFetchResults = await Promise . all (
332- pageIndicesInBurst . map ( async ( pageIndex ) => {
333- try {
334- const value = await this . getPaginatedTxs ( address . address , pageIndex , CHRONIK_FETCH_N_TXS_PER_PAGE )
335- return { page : pageIndex , value }
336- } catch ( err : any ) {
337- console . warn ( `${ addrLogPrefix } page=${ pageIndex } failed: ${ err ?. message as string ?? err as string } ` )
338- return { page : pageIndex , value : [ ] as any [ ] }
339- }
340- } )
341- )
321+ const pageIndex = nextBurstBasePageIndex
322+
323+ // Fetch the single page for this burst; swallow per-page errors so one failing page doesn't abort this address's worker.
324+ let pageTxs : any [ ] = [ ]
325+ try {
326+ pageTxs = await this . getPaginatedTxs ( address . address , pageIndex , CHRONIK_FETCH_N_TXS_PER_PAGE )
327+ } catch ( err : any ) {
328+ console . warn ( `${ addrLogPrefix } page=${ pageIndex } failed: ${ err ?. message as string ?? err as string } ` )
329+ pageTxs = [ ]
330+ }
342331
343- // Only consider non-empty pages for timestamp bounds and processing.
344- const nonEmptyPageResults = burstFetchResults . filter ( r => r . value . length > 0 )
345- if ( nonEmptyPageResults . length === 0 ) {
332+ // If the page is empty, treat as "EMPTY ADDRESS" and stop.
333+ if ( pageTxs . length === 0 ) {
346334 console . log ( `${ addrLogPrefix } EMPTY ADDRESS` )
347- break // nothing in this burst -> done with this address
335+ break
348336 }
349337
350- // Determine burst-level timestamp bounds
351- const newestTimestampAcrossBurst = Math . max (
352- ...nonEmptyPageResults . map ( r => Number ( r . value [ 0 ] . block ?. timestamp ?? r . value [ 0 ] . timeFirstSeen ) )
353- )
354- const oldestTimestampAcrossBurst = Math . min (
355- ...nonEmptyPageResults . map ( r => {
356- const pageTxs = r . value
357- const lastTx = pageTxs [ pageTxs . length - 1 ]
358- return Number ( lastTx . block ?. timestamp ?? lastTx . timeFirstSeen )
359- } )
360- )
338+ // With one page per burst, the burst-level timestamp bounds are simply this page's first (newest) and last (oldest) tx.
339+ const newestTimestampAcrossBurst = Number ( pageTxs [ 0 ] . block ?. timestamp ?? pageTxs [ 0 ] . timeFirstSeen )
340+ const lastTxInPage = pageTxs [ pageTxs . length - 1 ]
341+ const oldestTimestampAcrossBurst = Number ( lastTxInPage . block ?. timestamp ?? lastTxInPage . timeFirstSeen )
361342
362- // If the newest tx across the entire burst is older than our last sync, we can quit immediately .
343+ // If even the newest is older than lastSync, stop .
363344 if ( newestTimestampAcrossBurst < lastSyncedTimestampSeconds ) {
364345 console . log ( `${ addrLogPrefix } NO NEW TXS` )
365346 break
366347 }
367348
368- // Process each non-empty page in ascending page order.
349+ // Process this page: filter its txs, convert the keepers, and accumulate them into the emit buffer.
369350 let keptTransactionsInBurstCount = 0
370- for ( const pageResult of nonEmptyPageResults . sort ( ( a , b ) => a . page - b . page ) ) {
371- const pageTxs = pageResult . value
372-
373- const filteredTxs = pageTxs
374- . filter ( txThresholdFilter )
375- . filter ( t => t . block === undefined || t . block . timestamp >= lastSyncedTimestampSeconds )
376-
377- if ( filteredTxs . length > 0 ) {
378- const txRowsToCreate = await Promise . all (
379- filteredTxs . map ( async t => await this . getTransactionFromChronikTransaction ( t , address ) )
380- )
381- transactionsToEmitBuffer . push ( ...txRowsToCreate )
382- keptTransactionsInBurstCount += txRowsToCreate . length
383- }
384351
385- // If this page’s oldest tx is already older than our last sync, we can stop after the burst.
386- const oldestTimestampInPage = Number (
387- pageTxs [ pageTxs . length - 1 ] . block ?. timestamp ?? pageTxs [ pageTxs . length - 1 ] . timeFirstSeen
352+ const filteredTxs = pageTxs
353+ . filter ( txThresholdFilter )
354+ . filter ( t => t . block === undefined || t . block . timestamp >= lastSyncedTimestampSeconds )
355+
356+ if ( filteredTxs . length > 0 ) {
357+ const txRowsToCreate = await Promise . all (
358+ filteredTxs . map ( async t => await this . getTransactionFromChronikTransaction ( t , address ) )
388359 )
389- if ( oldestTimestampInPage < lastSyncedTimestampSeconds ) {
390- hasReachedStoppingCondition = true
391- // continue the for-loop to finish logging consistently
392- }
360+ transactionsToEmitBuffer . push ( ...txRowsToCreate )
361+ keptTransactionsInBurstCount += txRowsToCreate . length
362+ }
363+
364+ // If the page’s oldest tx is older than lastSync, stop after this burst.
365+ const oldestTimestampInPage = oldestTimestampAcrossBurst
366+ if ( oldestTimestampInPage < lastSyncedTimestampSeconds ) {
367+ hasReachedStoppingCondition = true
393368 }
394369
395370 console . log ( `${ addrLogPrefix } ${ keptTransactionsInBurstCount } new txs...` )
396371
397- nextBurstBasePageIndex += pagesPerBurstPerAddress
372+ nextBurstBasePageIndex += 1
398373
399- // Fast-stop: if we kept nothing in this burst and the burst’s oldest is older than lastSync, we’re done.
400374 if ( keptTransactionsInBurstCount === 0 && oldestTimestampAcrossBurst < lastSyncedTimestampSeconds ) {
401375 hasReachedStoppingCondition = true
402376 }
@@ -719,19 +693,16 @@ export class ChronikBlockchainClient {
719693 console . time ( `${ this . CHRONIK_MSG_PREFIX } syncAddresses` )
720694 await setSyncingBatch ( addresses . map ( a => a . address ) , true )
721695
722- // per-address counters
723696 const perAddrCount = new Map < string , number > ( )
724697 addresses . forEach ( a => perAddrCount . set ( a . id , 0 ) )
725698
726- // commit buffer
727699 let toCommit : Prisma . TransactionUncheckedCreateInput [ ] = [ ]
728700
729701 try {
730702 const pfx = `${ this . CHRONIK_MSG_PREFIX } [PARALLEL FETCHING]`
731- // consume generator: it yields batches of prepared txs
732- console . log ( `${ pfx } will fetch batches of ${ INITIAL_ADDRESS_SYNC_FETCH_CONCURRENTLY } txs from chronik` )
703+ console . log ( `${ pfx } will fetch batches of ${ INITIAL_ADDRESS_SYNC_FETCH_CONCURRENTLY } addresses from chronik` )
733704 for await ( const batch of this . fetchLatestTxsForAddresses ( addresses ) ) {
734- console . log ( `${ pfx } fetched batch of ${ batch . length } tx from chronik` )
705+ console . log ( `${ pfx } fetched batch of ${ batch . length } txs from chronik` )
735706 // count per address before committing
736707 for ( const tx of batch ) {
737708 perAddrCount . set ( tx . addressId , ( perAddrCount . get ( tx . addressId ) ?? 0 ) + 1 )
@@ -753,13 +724,11 @@ export class ChronikBlockchainClient {
753724
754725 // broadcast/triggers after commit
755726 if ( created . length > 0 ) {
756- // Always broadcast per tx (keeps existing behavior)
757727 const triggerBatch : BroadcastTxData [ ] = [ ]
758728 for ( const tx of created ) {
759729 const bd = this . broadcastIncomingTx ( tx . address . address , tx , [ ] ) // inputAddresses left empty in bulk
760730 triggerBatch . push ( bd )
761731 }
762- // Then, if enabled, execute triggers **in batch**
763732 if ( runTriggers ) {
764733 console . log ( `${ this . CHRONIK_MSG_PREFIX } executing trigger batch — broadcasts=${ triggerBatch . length } ` )
765734 await executeTriggersBatch ( triggerBatch , this . networkId )
0 commit comments