Skip to content

Commit baf3324

Browse files
committed
graph, store: Remove GRAPH_STORE_LAST_ROLLUP_FROM_POI
This flag was only meant as a safety switch in case the fixed behavior caused trouble. Since it's not been needed in several months, it's safe to remove it.
1 parent bafc3dd commit baf3324

File tree

4 files changed: +6 additions, −54 deletions

graph/src/env/store.rs

Lines changed: 0 additions & 11 deletions
@@ -129,14 +129,6 @@ pub struct EnvVarsStore {
     pub use_brin_for_all_query_types: bool,
     /// Temporary env var to disable certain lookups in the chain store
     pub disable_block_cache_for_lookup: bool,
-    /// Temporary env var to fall back to the old broken way of determining
-    /// the time of the last rollup from the POI table instead of the new
-    /// way that fixes
-    /// https://github.com/graphprotocol/graph-node/issues/5530 Remove this
-    /// and all code that is dead as a consequence once this has been vetted
-    /// sufficiently, probably after 2024-12-01
-    /// Defaults to `false`, i.e. using the new fixed behavior
-    pub last_rollup_from_poi: bool,
     /// Safety switch to increase the number of columns used when
     /// calculating the chunk size in `InsertQuery::chunk_size`. This can be
     /// used to work around Postgres errors complaining 'number of
@@ -197,7 +189,6 @@ impl TryFrom<InnerStore> for EnvVarsStore {
             create_gin_indexes: x.create_gin_indexes,
             use_brin_for_all_query_types: x.use_brin_for_all_query_types,
             disable_block_cache_for_lookup: x.disable_block_cache_for_lookup,
-            last_rollup_from_poi: x.last_rollup_from_poi,
             insert_extra_cols: x.insert_extra_cols,
             fdw_fetch_size: x.fdw_fetch_size,
         };
@@ -276,8 +267,6 @@ pub struct InnerStore {
     use_brin_for_all_query_types: bool,
     #[envconfig(from = "GRAPH_STORE_DISABLE_BLOCK_CACHE_FOR_LOOKUP", default = "false")]
     disable_block_cache_for_lookup: bool,
-    #[envconfig(from = "GRAPH_STORE_LAST_ROLLUP_FROM_POI", default = "false")]
-    last_rollup_from_poi: bool,
     #[envconfig(from = "GRAPH_STORE_INSERT_EXTRA_COLS", default = "0")]
     insert_extra_cols: usize,
     #[envconfig(from = "GRAPH_STORE_FDW_FETCH_SIZE", default = "1000")]

store/postgres/src/deployment_store.rs

Lines changed: 2 additions & 10 deletions
@@ -904,20 +904,12 @@ impl DeploymentStore {
             .await
     }

-    pub(crate) fn block_time(
-        &self,
-        site: Arc<Site>,
-        block: BlockNumber,
-    ) -> Result<Option<BlockTime>, StoreError> {
+    pub(crate) fn block_time(&self, site: Arc<Site>) -> Result<Option<BlockTime>, StoreError> {
         let store = self.cheap_clone();

         let mut conn = self.get_conn()?;
         let layout = store.layout(&mut conn, site.cheap_clone())?;
-        if ENV_VARS.store.last_rollup_from_poi {
-            layout.block_time(&mut conn, block)
-        } else {
-            layout.last_rollup(&mut conn)
-        }
+        layout.last_rollup(&mut conn)
     }

     pub(crate) async fn get_proof_of_indexing(

store/postgres/src/relational.rs

Lines changed: 1 addition & 28 deletions
@@ -32,7 +32,6 @@ use graph::blockchain::block_stream::{EntityOperationKind, EntitySourceOperation
 use graph::blockchain::BlockTime;
 use graph::cheap_clone::CheapClone;
 use graph::components::store::write::{RowGroup, WriteChunk};
-use graph::components::subgraph::PoICausalityRegion;
 use graph::constraint_violation;
 use graph::data::graphql::TypeExt as _;
 use graph::data::query::Trace;
@@ -69,7 +68,7 @@ use crate::{
     },
 };
 use graph::components::store::{AttributeNames, DerivedEntityQuery};
-use graph::data::store::{Id, IdList, IdType, BYTES_SCALAR};
+use graph::data::store::{IdList, IdType, BYTES_SCALAR};
 use graph::data::subgraph::schema::POI_TABLE;
 use graph::prelude::{
     anyhow, info, BlockNumber, DeploymentHash, Entity, EntityOperation, Logger,
@@ -1113,32 +1112,6 @@ impl Layout {
         Ok(Arc::new(layout))
     }

-    pub(crate) fn block_time(
-        &self,
-        conn: &mut PgConnection,
-        block: BlockNumber,
-    ) -> Result<Option<BlockTime>, StoreError> {
-        let block_time_name = self.input_schema.poi_block_time();
-        let poi_type = self.input_schema.poi_type();
-        let id = Id::String(Word::from(PoICausalityRegion::from_network(
-            &self.site.network,
-        )));
-        let key = poi_type.key(id);
-
-        let block_time = self
-            .find(conn, &key, block)?
-            .and_then(|entity| {
-                entity.get(&block_time_name).map(|value| {
-                    value
-                        .as_int8()
-                        .ok_or_else(|| constraint_violation!("block_time must have type Int8"))
-                })
-            })
-            .transpose()?
-            .map(|value| BlockTime::since_epoch(value, 0));
-        Ok(block_time)
-    }
-
     /// Find the time of the last rollup for the subgraph. We do this by
     /// looking for the maximum timestamp in any aggregation table and
     /// adding a little bit more than the corresponding interval to it. This

store/postgres/src/writable.rs

Lines changed: 3 additions & 5 deletions
@@ -95,8 +95,8 @@ impl LastRollup {
         let kind = match (has_aggregations, block) {
             (false, _) => LastRollup::NotNeeded,
             (true, None) => LastRollup::Unknown,
-            (true, Some(block)) => {
-                let block_time = store.block_time(site, block)?;
+            (true, Some(_)) => {
+                let block_time = store.block_time(site)?;
                 block_time
                     .map(|b| LastRollup::Some(b))
                     .unwrap_or(LastRollup::Unknown)
@@ -240,9 +240,7 @@ impl SyncStore {
                 firehose_cursor,
             )?;

-            let block_time = self
-                .writable
-                .block_time(self.site.cheap_clone(), block_ptr_to.number)?;
+            let block_time = self.writable.block_time(self.site.cheap_clone())?;
             self.last_rollup.set(block_time)
         })

0 commit comments

Comments
 (0)