diff --git a/indexer/src/blocks_chunks.rs b/indexer/src/blocks_chunks.rs
index 0cfeb006766875117f92333068b48fe755c732d9..3afdf70e7ec64326eb4761c7f50575b0ff21d1ee 100644
--- a/indexer/src/blocks_chunks.rs
+++ b/indexer/src/blocks_chunks.rs
@@ -29,33 +29,72 @@ pub fn apply_block_blocks_chunk<B: Backend>(
 ) -> KvResult<()> {
     let block_number = block.number().0;
     let chunks_folder_path = profile_path.join("data/gva_v1_blocks_chunks");
-    gva_db.write(|mut db| {
-        db.current_blocks_chunk.upsert(
-            U32BE(block_number),
-            GvaBlockDbV1(DubpBlock::V10(block.clone())),
-        );
 
+    gva_db.current_blocks_chunk_write().upsert(
+        U32BE(block_number),
+        GvaBlockDbV1(DubpBlock::V10(block.clone())),
+    )?;
+
+    if (block_number + 1) % CHUNK_SIZE == 0 {
+        let current_chunk: Vec<GvaBlockDbV1> = gva_db
+            .current_blocks_chunk()
+            .iter(.., |it| it.values().collect::<Result<Vec<_>, _>>())?;
+        let current_chunk_bin = bincode_db()
+            .serialize(&current_chunk)
+            .map_err(|e| KvError::DeserError(e.into()))?;
+        let chunk_hash = Hash::compute_blake3(current_chunk_bin.as_ref());
+        let chunk_index = U32BE(block_number / CHUNK_SIZE);
+        gva_db
+            .blocks_chunk_hash_write()
+            .upsert(chunk_index, HashDb(chunk_hash))?;
+
+        write_and_compress_chunk_in_file(
+            current_chunk_bin.as_ref(),
+            chunk_index.0,
+            chunks_folder_path.as_path(),
+        )
+        .map_err(|e| KvError::Custom(e.into()))?;
+        gva_db.current_blocks_chunk_write().clear()?;
+    }
+
+    Ok(())
+}
+
+pub fn revert_block_blocks_chunk<B: Backend>(
+    block: &DubpBlockV10,
+    gva_db: &GvaV1Db<B>,
+    profile_path: &Path,
+) -> KvResult<()> {
+    let block_number = block.number().0;
+    let chunks_folder_path = profile_path.join("data/gva_v1_blocks_chunks");
+    gva_db.write(|mut db| {
         if (block_number + 1) % CHUNK_SIZE == 0 {
-            let current_chunk: Vec<GvaBlockDbV1> = db
-                .current_blocks_chunk
-                .iter(.., |it| it.values().collect::<Result<Vec<_>, _>>())?;
-            let current_chunk_bin = bincode_db()
-                .serialize(&current_chunk)
-                .map_err(|e| KvError::DeserError(e.into()))?;
-            let chunk_hash = Hash::compute_blake3(current_chunk_bin.as_ref());
+            // Decompress the last compressed chunk and restore its blocks into the current chunk
             let chunk_index = U32BE(block_number / CHUNK_SIZE);
-            db.blocks_chunk_hash.upsert(chunk_index, HashDb(chunk_hash));
+            if let Some(current_chunk_bin) =
+                read_and_remove_compressed_chunk(chunk_index.0, chunks_folder_path.as_path())?
+            {
+                db.blocks_chunk_hash.remove(chunk_index);
 
-            write_and_compress_chunk_in_file(
-                current_chunk_bin.as_ref(),
-                chunk_index.0,
-                chunks_folder_path.as_path(),
-            )
-            .map_err(|e| KvError::Custom(e.into()))?;
+                let current_chunk: Vec<GvaBlockDbV1> = bincode_db()
+                    .deserialize(current_chunk_bin.as_ref())
+                    .map_err(|e| KvError::DeserError(e.into()))?;
+                let current_chunk_begin = block_number - CHUNK_SIZE + 1;
+                for (i, block) in current_chunk.into_iter().enumerate() {
+                    db.current_blocks_chunk
+                        .upsert(U32BE(current_chunk_begin + i as u32), block);
+                }
+            } else {
+                return Err(KvError::DbCorrupted(
+                    "Not found last compressed chunk".to_owned(),
+                ));
+            }
+        } else {
+            db.current_blocks_chunk.remove(U32BE(block_number));
         }
+
         Ok(())
-    })?;
-    gva_db.current_blocks_chunk_write().clear()
+    })
 }
 
 /// Read and decompress bytes from file
@@ -102,40 +141,3 @@ fn write_and_compress_chunk_in_file(
 
     Ok(())
 }
-
-pub fn revert_block_blocks_chunk<B: Backend>(
-    block: &DubpBlockV10,
-    gva_db: &GvaV1Db<B>,
-    profile_path: &Path,
-) -> KvResult<()> {
-    let block_number = block.number().0;
-    let chunks_folder_path = profile_path.join("data/gva_v1_blocks_chunks");
-    gva_db.write(|mut db| {
-        if (block_number + 1) % CHUNK_SIZE == 0 {
-            // Uncompress last compressed chunk and replace it in current chunk
-            let chunk_index = U32BE(block_number / CHUNK_SIZE);
-            if let Some(current_chunk_bin) =
-                read_and_remove_compressed_chunk(chunk_index.0, chunks_folder_path.as_path())?
-            {
-                db.blocks_chunk_hash.remove(chunk_index);
-
-                let current_chunk: Vec<GvaBlockDbV1> = bincode_db()
-                    .deserialize(current_chunk_bin.as_ref())
-                    .map_err(|e| KvError::DeserError(e.into()))?;
-                let current_chunk_begin = block_number - CHUNK_SIZE + 1;
-                for (i, block) in current_chunk.into_iter().enumerate() {
-                    db.current_blocks_chunk
-                        .upsert(U32BE(current_chunk_begin + i as u32), block);
-                }
-            } else {
-                return Err(KvError::DbCorrupted(
-                    "Not found last compressed chunk".to_owned(),
-                ));
-            }
-        } else {
-            db.current_blocks_chunk.remove(U32BE(block_number));
-        }
-
-        Ok(())
-    })
-}