From 939cbdd46224ae8297d8956a8d41ca8bb0ed8762 Mon Sep 17 00:00:00 2001
From: undercover-cactus
Date: Mon, 10 Feb 2025 17:23:22 +0100
Subject: [PATCH 1/6] fail when user fails to send file to MSP after several retries

---
 node/src/tasks/user_sends_file.rs | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)

diff --git a/node/src/tasks/user_sends_file.rs b/node/src/tasks/user_sends_file.rs
index 6aa88ab0f..2c9e0fc95 100644
--- a/node/src/tasks/user_sends_file.rs
+++ b/node/src/tasks/user_sends_file.rs
@@ -383,7 +383,8 @@ where
                         tokio::time::sleep(std::time::Duration::from_secs(1)).await;
                     }
                     Err(RequestError::RequestFailure(RequestFailure::Refused)) => {
-                        // TODO: Handle MSP not receiving file after multiple retries.
+                        // If MSP doesnt receive file, the burden of downloading the file will be on the MSP.
+                        return Err(anyhow::anyhow!("Failed to send file {:?}", file_key));
                     }
                     Err(e) => {
                         error!(

From 544ca5e07008ee676c47775345db710adfc720b8 Mon Sep 17 00:00:00 2001
From: undercover-cactus
Date: Mon, 17 Feb 2025 17:08:32 +0100
Subject: [PATCH 2/6] fix the return error bug I created; added 2 integration tests to test different scenarios;

---
 node/src/tasks/user_sends_file.rs             | 390 ++++++++++--------
 .../user/send-file-to-provider.test.ts        | 144 +++++++
 test/util/bspNet/docker.ts                    |  98 +++++
 test/util/pjsKeyring.ts                       |   2 +
 4 files changed, 451 insertions(+), 183 deletions(-)
 create mode 100644 test/suites/integration/user/send-file-to-provider.test.ts

diff --git a/node/src/tasks/user_sends_file.rs b/node/src/tasks/user_sends_file.rs
index 2c9e0fc95..3bd6c8801 100644
--- a/node/src/tasks/user_sends_file.rs
+++ b/node/src/tasks/user_sends_file.rs
@@ -1,4 +1,4 @@
-use log::{debug, error, info, warn};
+use log::{debug, info, warn};
 use sc_network::{PeerId, RequestFailure};
 use shc_actors_framework::event_bus::EventHandler;
 use shc_blockchain_service::{
@@ -12,6 +12,7 @@ use shc_file_manager::traits::FileStorage;
 use shc_file_transfer_service::commands::{FileTransferServiceInterface, RequestError};
 use shp_constants::FILE_CHUNK_SIZE;
 use shp_file_metadata::ChunkId;
+use sp_core::H256;
 use sp_runtime::AccountId32;
 
 use crate::services::{handler::StorageHubHandler, types::ShNodeType};
@@ -136,7 +137,8 @@ where
             info!(target: LOG_TARGET, "No peers were found to receive file key {:?}", file_key);
         }
 
-        self.send_chunks_to_provider(peer_ids, &file_metadata).await
+        self.send_chunks_to_providers(peer_ids, &file_metadata)
+            .await
     }
 }
 
@@ -182,7 +184,8 @@ where
             info!(target: LOG_TARGET, "No peers were found to receive file key {:?}", file_key);
         }
 
-        self.send_chunks_to_provider(peer_ids, &file_metadata).await
+        self.send_chunks_to_providers(peer_ids, &file_metadata)
+            .await
     }
 }
 
@@ -190,7 +193,7 @@ impl<NT> UserSendsFileTask<NT>
 where
     NT: ShNodeType,
 {
-    async fn send_chunks_to_provider(
+    async fn send_chunks_to_providers(
         &mut self,
         peer_ids: Vec<PeerId>,
         file_metadata: &FileMetadata,
@@ -201,212 +204,233 @@ where
         // Iterates and tries to send file to peer.
         // Breaks loop after first successful attempt since all peer ids belong to the same provider.
        for peer_id in peer_ids {
-            debug!(target: LOG_TARGET, "Attempting to send chunks of file key {:?} to peer {:?}", file_key, peer_id);
+            match self
+                .send_chunks(peer_id, file_metadata, file_key, chunk_count)
+                .await
+            {
+                Err(err) => {
+                    // if sending chunk failed with one peer id, we try with the next one.
+                    warn!(target: LOG_TARGET, "{:?}", err);
+                    continue;
+                }
+                Ok(()) => {
+                    // if successfull our job is done. No need to try with the next peer id.
+ return Ok(()); + } + }; + } - let mut current_batch = Vec::new(); - let mut current_batch_size = 0; + Err(anyhow::anyhow!( + "Failed to send file {:?} to any of the peers", + file_metadata.fingerprint + )) + } - for chunk_id in 0..chunk_count { - // Calculate the size of the current chunk - let chunk_size = if chunk_id == chunk_count - 1 { - file_metadata.file_size % FILE_CHUNK_SIZE as u64 - } else { - FILE_CHUNK_SIZE as u64 + async fn send_chunks( + &mut self, + peer_id: PeerId, + file_metadata: &FileMetadata, + file_key: H256, + chunk_count: u64, + ) -> Result<(), anyhow::Error> { + debug!(target: LOG_TARGET, "Attempting to send chunks of file key {:?} to peer {:?}", file_key, peer_id); + + let mut current_batch = Vec::new(); + let mut current_batch_size = 0; + + for chunk_id in 0..chunk_count { + // Calculate the size of the current chunk + let chunk_size = if chunk_id == chunk_count - 1 { + file_metadata.file_size % FILE_CHUNK_SIZE as u64 + } else { + FILE_CHUNK_SIZE as u64 + }; + + // Check if adding this chunk would exceed the batch size limit + if current_batch_size + (chunk_size as usize) > BATCH_CHUNK_FILE_TRANSFER_MAX_SIZE { + // Send current batch before adding new chunk + debug!( + target: LOG_TARGET, + "Sending batch of {} chunks (total size: {} bytes) for file {:?} to peer {:?}", + current_batch.len(), + current_batch_size, + file_key, + peer_id + ); + + // Generate proof for the entire batch + let proof = match self + .storage_hub_handler + .file_storage + .read() + .await + .generate_proof(&file_key, ¤t_batch) + { + Ok(proof) => proof, + Err(e) => { + return Err(anyhow::anyhow!( + "Failed to generate proof for batch of file {:?}\n Error: {:?}", + file_key, + e + )); + } }; - // Check if adding this chunk would exceed the batch size limit - if current_batch_size + (chunk_size as usize) > BATCH_CHUNK_FILE_TRANSFER_MAX_SIZE { - // Send current batch before adding new chunk - debug!( - target: LOG_TARGET, - "Sending batch of {} chunks (total size: {} bytes) for file {:?} to peer {:?}", - current_batch.len(), - current_batch_size, - file_key, - peer_id - ); - - // Generate proof for the entire batch - let proof = match self + let mut retry_attempts = 0; + loop { + let upload_response = self .storage_hub_handler - .file_storage - .read() - .await - .generate_proof(&file_key, ¤t_batch) - { - Ok(proof) => proof, + .file_transfer + .upload_request(peer_id, file_key.as_ref().into(), proof.clone(), None) + .await; + + match upload_response { + Ok(r) => { + debug!( + target: LOG_TARGET, + "Successfully uploaded batch for file {:?} to peer {:?}", + file_metadata.fingerprint, + peer_id + ); + + // If the provider signals they have the entire file, we can stop + if r.file_complete { + info!( + target: LOG_TARGET, + "Stopping file upload process. Peer {:?} has the entire file {:?}", + peer_id, + file_metadata.fingerprint + ); + return Ok(()); + } + + break; + } + Err(RequestError::RequestFailure(RequestFailure::Refused)) + if retry_attempts < 3 => + { + warn!( + target: LOG_TARGET, + "Batch upload rejected by peer {:?}, retrying... (attempt {})", + peer_id, + retry_attempts + 1 + ); + retry_attempts += 1; + + // Wait for a short time before retrying + tokio::time::sleep(std::time::Duration::from_secs(1)).await; + } + Err(RequestError::RequestFailure(RequestFailure::Refused)) => { + // If MSP doesn't receive file, the burden of downloading the file will be on the MSP. 
+ return Err(anyhow::anyhow!("Failed to send file {:?}", file_key)); + } Err(e) => { return Err(anyhow::anyhow!( - "Failed to generate proof for batch of file {:?}\n Error: {:?}", - file_key, + "Failed to upload batch to peer {:?} (Error: {:?})", + peer_id, e )); } - }; - - let mut retry_attempts = 0; - loop { - let upload_response = self - .storage_hub_handler - .file_transfer - .upload_request(peer_id, file_key.as_ref().into(), proof.clone(), None) - .await; - - match upload_response { - Ok(r) => { - debug!( - target: LOG_TARGET, - "Successfully uploaded batch for file {:?} to peer {:?}", - file_metadata.fingerprint, - peer_id - ); + } + } - // If the provider signals they have the entire file, we can stop - if r.file_complete { - info!( - target: LOG_TARGET, - "Stopping file upload process. Peer {:?} has the entire file {:?}", - peer_id, - file_metadata.fingerprint - ); - return Ok(()); - } - - break; - } - Err(RequestError::RequestFailure(RequestFailure::Refused)) - if retry_attempts < 3 => - { - warn!( - target: LOG_TARGET, - "Batch upload rejected by peer {:?}, retrying... (attempt {})", - peer_id, - retry_attempts + 1 - ); - retry_attempts += 1; + // Clear the batch for next iteration + current_batch.clear(); + current_batch_size = 0; + } - // Wait for a short time before retrying - tokio::time::sleep(std::time::Duration::from_secs(1)).await; - } - Err(RequestError::RequestFailure(RequestFailure::Refused)) => { - // TODO: Handle MSP not receiving file after multiple retries. - } - Err(e) => { - error!( + // Add chunk to current batch + current_batch.push(ChunkId::new(chunk_id)); + current_batch_size += chunk_size as usize; + + // If this is the last chunk, send the final batch + if chunk_id == chunk_count - 1 && !current_batch.is_empty() { + debug!( + target: LOG_TARGET, + "Sending final batch of {} chunks (total size: {} bytes) for file {:?} to peer {:?}", + current_batch.len(), + current_batch_size, + file_key, + peer_id + ); + + // Generate proof for the final batch + let proof = match self + .storage_hub_handler + .file_storage + .read() + .await + .generate_proof(&file_key, ¤t_batch) + { + Ok(proof) => proof, + Err(e) => { + return Err(anyhow::anyhow!( + "Failed to generate proof for final batch of file {:?}\n Error: {:?}", + file_key, + e + )); + } + }; + + let mut retry_attempts = 0; + loop { + let upload_response = self + .storage_hub_handler + .file_transfer + .upload_request(peer_id, file_key.as_ref().into(), proof.clone(), None) + .await; + + match upload_response { + Ok(r) => { + debug!( + target: LOG_TARGET, + "Successfully uploaded final batch for file {:?} to peer {:?}", + file_metadata.fingerprint, + peer_id + ); + + if r.file_complete { + info!( target: LOG_TARGET, - "Failed to upload batch to peer {:?}\n Error: {:?}", + "File upload complete. Peer {:?} has the entire file {:?}", peer_id, - e + file_metadata.fingerprint ); - break; } + break; } - } - - // Clear the batch for next iteration - current_batch.clear(); - current_batch_size = 0; - } + Err(RequestError::RequestFailure(RequestFailure::Refused)) + if retry_attempts < 3 => + { + warn!( + target: LOG_TARGET, + "Final batch upload rejected by peer {:?}, retrying... (attempt {})", + peer_id, + retry_attempts + 1 + ); + retry_attempts += 1; + + // Wait for a short time before retrying + tokio::time::sleep(std::time::Duration::from_secs(1)).await; + } + Err(RequestError::RequestFailure(RequestFailure::Refused)) => { + // If MSP doesn't receive file, the burden of downloading the file will be on the MSP. 
- // Add chunk to current batch - current_batch.push(ChunkId::new(chunk_id)); - current_batch_size += chunk_size as usize; - - // If this is the last chunk, send the final batch - if chunk_id == chunk_count - 1 && !current_batch.is_empty() { - debug!( - target: LOG_TARGET, - "Sending final batch of {} chunks (total size: {} bytes) for file {:?} to peer {:?}", - current_batch.len(), - current_batch_size, - file_key, - peer_id - ); - - // Generate proof for the final batch - let proof = match self - .storage_hub_handler - .file_storage - .read() - .await - .generate_proof(&file_key, ¤t_batch) - { - Ok(proof) => proof, + return Err(anyhow::anyhow!("Failed to send file {:?}", file_key)); + } Err(e) => { return Err(anyhow::anyhow!( - "Failed to generate proof for final batch of file {:?}\n Error: {:?}", - file_key, + "Failed to upload final batch to peer {:?} (Error: {:?})", + peer_id, e )); } - }; - - let mut retry_attempts = 0; - loop { - let upload_response = self - .storage_hub_handler - .file_transfer - .upload_request(peer_id, file_key.as_ref().into(), proof.clone(), None) - .await; - - match upload_response { - Ok(r) => { - debug!( - target: LOG_TARGET, - "Successfully uploaded final batch for file {:?} to peer {:?}", - file_metadata.fingerprint, - peer_id - ); - - if r.file_complete { - info!( - target: LOG_TARGET, - "File upload complete. Peer {:?} has the entire file {:?}", - peer_id, - file_metadata.fingerprint - ); - } - break; - } - Err(RequestError::RequestFailure(RequestFailure::Refused)) - if retry_attempts < 3 => - { - warn!( - target: LOG_TARGET, - "Final batch upload rejected by peer {:?}, retrying... (attempt {})", - peer_id, - retry_attempts + 1 - ); - retry_attempts += 1; - - // Wait for a short time before retrying - tokio::time::sleep(std::time::Duration::from_secs(1)).await; - } - Err(RequestError::RequestFailure(RequestFailure::Refused)) => { - // If MSP doesnt receive file, the burden of downloading the file will be on the MSP. 
- return Err(anyhow::anyhow!("Failed to send file {:?}", file_key)); - } - Err(e) => { - error!( - target: LOG_TARGET, - "Failed to upload final batch to peer {:?}\n Error: {:?}", - peer_id, - e - ); - break; - } - } } } } - - info!(target: LOG_TARGET, "Successfully sent file {:?} to peer {:?}", file_metadata.fingerprint, peer_id); - return Ok(()); } - Err(anyhow::anyhow!( - "Failed to send file {:?} to any of the peers", - file_metadata.fingerprint - )) + info!(target: LOG_TARGET, "Successfully sent file {:?} to peer {:?}", file_metadata.fingerprint, peer_id); + return Ok(()); } } diff --git a/test/suites/integration/user/send-file-to-provider.test.ts b/test/suites/integration/user/send-file-to-provider.test.ts new file mode 100644 index 000000000..55677418f --- /dev/null +++ b/test/suites/integration/user/send-file-to-provider.test.ts @@ -0,0 +1,144 @@ +import { describeMspNet, shUser, type EnrichedBspApi, getContainerIp, addMspContainer, mspThreeKey, createBucket } from "../../../util"; +import { CAPACITY, MAX_STORAGE_CAPACITY } from "../../../util/bspNet/consts.ts"; + +describeMspNet("User: Send file to provider", ({ before, createUserApi, it }) => { + let userApi: EnrichedBspApi; + + before(async () => { + userApi = await createUserApi(); + }); + + it("MSP is down and user should show error logs", async () => { + const source = "res/smile.jpg"; + const destination = "test/smile.jpg"; + const bucketName = "enron"; + + const newBucketEventEvent = await userApi.createBucket(bucketName); + const newBucketEventDataBlob = + userApi.events.fileSystem.NewBucket.is(newBucketEventEvent) && newBucketEventEvent.data; + + if (!newBucketEventDataBlob) { + throw new Error("Event doesn't match Type"); + } + + await userApi.rpc.storagehubclient.loadFileInStorage( + source, + destination, + userApi.shConsts.NODE_INFOS.user.AddressId, + newBucketEventDataBlob.bucketId + ); + + await userApi.docker.pauseBspContainer(userApi.shConsts.NODE_INFOS.msp1.containerName); + + await userApi.block.seal({ + calls: [ + userApi.tx.fileSystem.issueStorageRequest( + newBucketEventDataBlob.bucketId, + destination, + userApi.shConsts.TEST_ARTEFACTS["res/smile.jpg"].fingerprint, + userApi.shConsts.TEST_ARTEFACTS["res/smile.jpg"].size, + userApi.shConsts.DUMMY_MSP_ID, + [userApi.shConsts.NODE_INFOS.user.expectedPeerId], + { + Basic: null + } + ) + ], + signer: shUser + }); + + await userApi.assert.eventPresent("fileSystem", "NewStorageRequest"); + + await userApi.docker.waitForLog({ searchString: "Failed to send file", containerName: userApi.shConsts.NODE_INFOS.user.containerName }); + }); + + it("MSP first multihash is wrong and second should be correct. User will be able to connect and send file.", async () => { + const { containerName, rpcPort, p2pPort, peerId } = await addMspContainer({ + name: "lola1", + additionalArgs: [ + "--database=rocksdb", + `--max-storage-capacity=${MAX_STORAGE_CAPACITY}`, + `--jump-capacity=${CAPACITY[1024]}`, + "--msp-charging-period=12" + ] + }); + + //Give it some balance. 
+ const amount = 10000n * 10n ** 12n; + await userApi.block.seal({ calls: [userApi.tx.sudo.sudo(userApi.tx.balances.forceSetBalance(mspThreeKey.address, amount))] }); + + const mspIp = await getContainerIp(containerName); + const multiAddressMsp = `/ip4/${mspIp}/tcp/${p2pPort}/p2p/${peerId}`; + await userApi.block.seal({ + calls: [ + userApi.tx.sudo.sudo( + userApi.tx.providers.forceMspSignUp( + mspThreeKey.address, + mspThreeKey.publicKey, + userApi.shConsts.CAPACITY_512, + [`/ip4/51.75.30.194/tcp/30350/p2p/${peerId}`, multiAddressMsp], + 100 * 1024 * 1024, + "Terms of Service...", + 9999999, + mspThreeKey.address + ) + ) + ] + }); + + const source = "res/smile.jpg"; + const destination = "test/smile.jpg"; + const bucketName = "theranos"; + + const valueProps = await userApi.call.storageProvidersApi.queryValuePropositionsForMsp( + mspThreeKey.publicKey + ); + + const localValuePropId = valueProps[0].id; + const newBucketEventEvent = await createBucket( + userApi, + bucketName, // Bucket name + localValuePropId, // Value proposition ID from MSP + `0xc0647914b37034d861ddc3f0750ded6defec0823de5c782f3ca7c64ba29a4a2e`, // We got with cyberchef + shUser // Owner (the user) + ); + const newBucketEventDataBlob = + userApi.events.fileSystem.NewBucket.is(newBucketEventEvent) && newBucketEventEvent.data; + + if (!newBucketEventDataBlob) { + throw new Error("Event doesn't match Type"); + } + + await userApi.rpc.storagehubclient.loadFileInStorage( + source, + destination, + userApi.shConsts.NODE_INFOS.user.AddressId, + newBucketEventDataBlob.bucketId + ); + + await userApi.block.seal({ + calls: [ + userApi.tx.fileSystem.issueStorageRequest( + newBucketEventDataBlob.bucketId, + destination, + userApi.shConsts.TEST_ARTEFACTS["res/smile.jpg"].fingerprint, + userApi.shConsts.TEST_ARTEFACTS["res/smile.jpg"].size, + mspThreeKey.publicKey, + [userApi.shConsts.NODE_INFOS.user.expectedPeerId], + { + Basic: null + } + ) + ], + signer: shUser + }); + + await userApi.assert.eventPresent("fileSystem", "NewStorageRequest"); + + // Fail to connect to the first libp2p address because it is a phony one. + await userApi.docker.waitForLog({ searchString: "Failed to upload batch to peer", containerName: userApi.shConsts.NODE_INFOS.user.containerName }); + + // Second libp2p address is the right one so we should successfully send the file through this one. 
+ await userApi.docker.waitForLog({ searchString: "File upload complete.", containerName: userApi.shConsts.NODE_INFOS.user.containerName }); + }) +}); diff --git a/test/util/bspNet/docker.ts b/test/util/bspNet/docker.ts index 82e1e8e7e..fa6156b7a 100644 --- a/test/util/bspNet/docker.ts +++ b/test/util/bspNet/docker.ts @@ -140,6 +140,104 @@ export const addBspContainer = async (options?: { return { containerName, rpcPort, p2pPort, peerId }; }; +export const addMspContainer = async (options?: { + name?: string; + connectToPeer?: boolean; // unused + additionalArgs?: string[]; +}) => { + const docker = new Docker(); + const existingMsps = ( + await docker.listContainers({ + filters: { ancestor: [DOCKER_IMAGE] } + }) + ) + .flatMap(({ Command }) => Command) + .filter((cmd) => cmd.includes("--provider-type=msp")); + + const bspNum = existingMsps.length; + + assert(bspNum > 0, "No existing MSP containers"); + + const p2pPort = 30350 + bspNum; + const rpcPort = 9888 + bspNum * 7; + const containerName = options?.name || `docker-sh-msp-${bspNum + 1}`; + // get bootnode from docker args + + const { Args } = await docker.getContainer("docker-sh-user-1").inspect(); + + const bootNodeArg = Args.find((arg) => arg.includes("--bootnodes=")); + + assert(bootNodeArg, "No bootnode found in docker args"); + + let keystorePath: string; + const keystoreArg = Args.find((arg) => arg.includes("--keystore-path=")); + if (keystoreArg) { + keystorePath = keystoreArg.split("=")[1]; + } else { + keystorePath = "/keystore"; + } + + const container = await docker.createContainer({ + Image: DOCKER_IMAGE, + name: containerName, + platform: "linux/amd64", + NetworkingConfig: { + EndpointsConfig: { + docker_default: {} + } + }, + HostConfig: { + PortBindings: { + "9944/tcp": [{ HostPort: rpcPort.toString() }], + [`${p2pPort}/tcp`]: [{ HostPort: p2pPort.toString() }] + }, + Binds: [`${process.cwd()}/../docker/dev-keystores:${keystorePath}:rw`] + }, + Cmd: [ + "--dev", + "--sealing=manual", + "--provider", + "--provider-type=msp", + `--name=${containerName}`, + "--no-hardware-benchmarks", + "--unsafe-rpc-external", + "--rpc-methods=unsafe", + "--rpc-cors=all", + `--port=${p2pPort}`, + "--base-path=/data", + bootNodeArg, + ...(options?.additionalArgs || []) + ] + }); + await container.start(); + + let peerId: string | undefined; + for (let i = 0; i < 200; i++) { + try { + peerId = await sendCustomRpc(`http://127.0.0.1:${rpcPort}`, "system_localPeerId"); + break; + } catch { + await sleep(50); + } + } + + assert(peerId, "Failed to connect after 10s. 
Exiting..."); + + const api = await BspNetTestApi.create(`ws://127.0.0.1:${rpcPort}`); + + const chainName = api.consts.system.version.specName.toString(); + + assert(chainName === "storage-hub-runtime", `Error connecting to MSP via api ${containerName}`); + + await api.disconnect(); + + console.log( + `▶️ MSP container started with name: ${containerName}, rpc port: ${rpcPort}, p2p port: ${p2pPort}, peerId: ${peerId}` + ); + + return { containerName, rpcPort, p2pPort, peerId }; +}; + // Make this a rusty style OO function with api contexts export const pauseBspContainer = async (containerName: string) => { const docker = new Docker(); diff --git a/test/util/pjsKeyring.ts b/test/util/pjsKeyring.ts index b1b19d630..f67220fa0 100644 --- a/test/util/pjsKeyring.ts +++ b/test/util/pjsKeyring.ts @@ -36,6 +36,8 @@ export const mspDownSeed = "//Sh-MSP-Down"; export const mspDownKey = keyring.addFromUri(mspDownSeed, { name: "Sh-MSP-Down" }); export const mspTwoSeed = "//Sh-MSP-Two"; export const mspTwoKey = keyring.addFromUri(mspTwoSeed, { name: "Sh-MSP-Two" }); +export const mspThreeSeed = "//Sh-MSP-Three"; +export const mspThreeKey = keyring.addFromUri(mspThreeSeed, { name: "Sh-MSP-Three" }); export const collator = keyring.addFromUri("//Sh-collator", { name: "Sh-collator" From 37cdf9564a83b6524507610f2f46789d796ac1a4 Mon Sep 17 00:00:00 2001 From: undercover-cactus Date: Mon, 17 Feb 2025 17:14:56 +0100 Subject: [PATCH 3/6] linter --- .../user/send-file-to-provider.test.ts | 291 ++++++++++-------- 1 file changed, 156 insertions(+), 135 deletions(-) diff --git a/test/suites/integration/user/send-file-to-provider.test.ts b/test/suites/integration/user/send-file-to-provider.test.ts index 55677418f..e7558d09e 100644 --- a/test/suites/integration/user/send-file-to-provider.test.ts +++ b/test/suites/integration/user/send-file-to-provider.test.ts @@ -1,144 +1,165 @@ -import { describeMspNet, shUser, type EnrichedBspApi, getContainerIp, addMspContainer, mspThreeKey, createBucket } from "../../../util"; +import { + describeMspNet, + shUser, + type EnrichedBspApi, + getContainerIp, + addMspContainer, + mspThreeKey, + createBucket +} from "../../../util"; import { CAPACITY, MAX_STORAGE_CAPACITY } from "../../../util/bspNet/consts.ts"; describeMspNet("User: Send file to provider", ({ before, createUserApi, it }) => { - let userApi: EnrichedBspApi; + let userApi: EnrichedBspApi; + + before(async () => { + userApi = await createUserApi(); + }); + + it("MSP is down and user should show error logs", async () => { + const source = "res/smile.jpg"; + const destination = "test/smile.jpg"; + const bucketName = "enron"; + + const newBucketEventEvent = await userApi.createBucket(bucketName); + const newBucketEventDataBlob = + userApi.events.fileSystem.NewBucket.is(newBucketEventEvent) && newBucketEventEvent.data; + + if (!newBucketEventDataBlob) { + throw new Error("Event doesn't match Type"); + } + + await userApi.rpc.storagehubclient.loadFileInStorage( + source, + destination, + userApi.shConsts.NODE_INFOS.user.AddressId, + newBucketEventDataBlob.bucketId + ); + + await userApi.docker.pauseBspContainer(userApi.shConsts.NODE_INFOS.msp1.containerName); + + await userApi.block.seal({ + calls: [ + userApi.tx.fileSystem.issueStorageRequest( + newBucketEventDataBlob.bucketId, + destination, + userApi.shConsts.TEST_ARTEFACTS["res/smile.jpg"].fingerprint, + userApi.shConsts.TEST_ARTEFACTS["res/smile.jpg"].size, + userApi.shConsts.DUMMY_MSP_ID, + [userApi.shConsts.NODE_INFOS.user.expectedPeerId], + { + Basic: null + } + ) + 
], + signer: shUser + }); + + await userApi.assert.eventPresent("fileSystem", "NewStorageRequest"); + + await userApi.docker.waitForLog({ + searchString: "Failed to send file", + containerName: userApi.shConsts.NODE_INFOS.user.containerName + }); + }); + + it("MSP first multihash is wrong and second should be correct. User will be able to connect and send file.", async () => { + const { containerName, rpcPort, p2pPort, peerId } = await addMspContainer({ + name: "lola1", + additionalArgs: [ + "--database=rocksdb", + `--max-storage-capacity=${MAX_STORAGE_CAPACITY}`, + `--jump-capacity=${CAPACITY[1024]}`, + "--msp-charging-period=12" + ] + }); - before(async () => { - userApi = await createUserApi(); + //Give it some balance. + const amount = 10000n * 10n ** 12n; + await userApi.block.seal({ + calls: [ + userApi.tx.sudo.sudo(userApi.tx.balances.forceSetBalance(mspThreeKey.address, amount)) + ] }); - it("MSP is down and user should show error logs", async () => { - const source = "res/smile.jpg"; - const destination = "test/smile.jpg"; - const bucketName = "enron"; - - const newBucketEventEvent = await userApi.createBucket(bucketName); - const newBucketEventDataBlob = - userApi.events.fileSystem.NewBucket.is(newBucketEventEvent) && newBucketEventEvent.data; - - if (!newBucketEventDataBlob) { - throw new Error("Event doesn't match Type"); - } - - await userApi.rpc.storagehubclient.loadFileInStorage( - source, - destination, - userApi.shConsts.NODE_INFOS.user.AddressId, - newBucketEventDataBlob.bucketId - ); - - await userApi.docker.pauseBspContainer(userApi.shConsts.NODE_INFOS.msp1.containerName); - - await userApi.block.seal({ - calls: [ - userApi.tx.fileSystem.issueStorageRequest( - newBucketEventDataBlob.bucketId, - destination, - userApi.shConsts.TEST_ARTEFACTS["res/smile.jpg"].fingerprint, - userApi.shConsts.TEST_ARTEFACTS["res/smile.jpg"].size, - userApi.shConsts.DUMMY_MSP_ID, - [userApi.shConsts.NODE_INFOS.user.expectedPeerId], - { - Basic: null - } - ) - ], - signer: shUser - }); - - await userApi.assert.eventPresent("fileSystem", "NewStorageRequest"); - - await userApi.docker.waitForLog({ searchString: "Failed to send file", containerName: userApi.shConsts.NODE_INFOS.user.containerName }); + const mspIp = await getContainerIp(containerName); + const multiAddressMsp = `/ip4/${mspIp}/tcp/${p2pPort}/p2p/${peerId}`; + await userApi.block.seal({ + calls: [ + userApi.tx.sudo.sudo( + userApi.tx.providers.forceMspSignUp( + mspThreeKey.address, + mspThreeKey.publicKey, + userApi.shConsts.CAPACITY_512, + [`/ip4/51.75.30.194/tcp/30350/p2p/${peerId}`, multiAddressMsp], + 100 * 1024 * 1024, + "Terms of Service...", + 9999999, + mspThreeKey.address + ) + ) + ] }); - it("MSP first multihash is wrong and second should be correct. User will be able to connect and send file.", async () => { - const { containerName, rpcPort, p2pPort, peerId } = await addMspContainer({ - name: "lola1", - additionalArgs: [ - "--database=rocksdb", - `--max-storage-capacity=${MAX_STORAGE_CAPACITY}`, - `--jump-capacity=${CAPACITY[1024]}`, - "--msp-charging-period=12" - ] - }); - - //Give it some balance. 
- const amount = 10000n * 10n ** 12n; - await userApi.block.seal({ calls: [userApi.tx.sudo.sudo(userApi.tx.balances.forceSetBalance(mspThreeKey.address, amount))] }); - - const mspIp = await getContainerIp(containerName); - const multiAddressMsp = `/ip4/${mspIp}/tcp/${p2pPort}/p2p/${peerId}`; - await userApi.block.seal({ - calls: [ - userApi.tx.sudo.sudo( - userApi.tx.providers.forceMspSignUp( - mspThreeKey.address, - mspThreeKey.publicKey, - userApi.shConsts.CAPACITY_512, - [`/ip4/51.75.30.194/tcp/30350/p2p/${peerId}`, multiAddressMsp], - 100 * 1024 * 1024, - "Terms of Service...", - 9999999, - mspThreeKey.address - ) - ) - ] - }); - - const source = "res/smile.jpg"; - const destination = "test/smile.jpg"; - const bucketName = "theranos"; - - const valueProps = await userApi.call.storageProvidersApi.queryValuePropositionsForMsp( - mspThreeKey.publicKey - ); - - const localValuePropId = valueProps[0].id; - const newBucketEventEvent = await createBucket( - userApi, - bucketName, // Bucket name - localValuePropId, // Value proposition ID from MSP - `0xc0647914b37034d861ddc3f0750ded6defec0823de5c782f3ca7c64ba29a4a2e`, // We got with cyberchef - shUser // Owner (the user) - ); - const newBucketEventDataBlob = - userApi.events.fileSystem.NewBucket.is(newBucketEventEvent) && newBucketEventEvent.data; - - if (!newBucketEventDataBlob) { - throw new Error("Event doesn't match Type"); - } - - await userApi.rpc.storagehubclient.loadFileInStorage( - source, - destination, - userApi.shConsts.NODE_INFOS.user.AddressId, - newBucketEventDataBlob.bucketId - ); - - await userApi.block.seal({ - calls: [ - userApi.tx.fileSystem.issueStorageRequest( - newBucketEventDataBlob.bucketId, - destination, - userApi.shConsts.TEST_ARTEFACTS["res/smile.jpg"].fingerprint, - userApi.shConsts.TEST_ARTEFACTS["res/smile.jpg"].size, - mspThreeKey.publicKey, - [userApi.shConsts.NODE_INFOS.user.expectedPeerId], - { - Basic: null - } - ) - ], - signer: shUser - }); - - await userApi.assert.eventPresent("fileSystem", "NewStorageRequest"); - - // Fail to connect to the first libp2p address because it is a phony one. - await userApi.docker.waitForLog({ searchString: "Failed to upload batch to peer", containerName: userApi.shConsts.NODE_INFOS.user.containerName }); - - // Second libp2p address is the right one so we should successfully send the file through this one. 
- await userApi.docker.waitForLog({ searchString: "File upload complete.", containerName: userApi.shConsts.NODE_INFOS.user.containerName }); - }) + const source = "res/smile.jpg"; + const destination = "test/smile.jpg"; + const bucketName = "theranos"; + + const valueProps = await userApi.call.storageProvidersApi.queryValuePropositionsForMsp( + mspThreeKey.publicKey + ); + + const localValuePropId = valueProps[0].id; + const newBucketEventEvent = await createBucket( + userApi, + bucketName, // Bucket name + localValuePropId, // Value proposition ID from MSP + "0xc0647914b37034d861ddc3f0750ded6defec0823de5c782f3ca7c64ba29a4a2e", // We got with cyberchef + shUser // Owner (the user) + ); + const newBucketEventDataBlob = + userApi.events.fileSystem.NewBucket.is(newBucketEventEvent) && newBucketEventEvent.data; + + if (!newBucketEventDataBlob) { + throw new Error("Event doesn't match Type"); + } + + await userApi.rpc.storagehubclient.loadFileInStorage( + source, + destination, + userApi.shConsts.NODE_INFOS.user.AddressId, + newBucketEventDataBlob.bucketId + ); + + await userApi.block.seal({ + calls: [ + userApi.tx.fileSystem.issueStorageRequest( + newBucketEventDataBlob.bucketId, + destination, + userApi.shConsts.TEST_ARTEFACTS["res/smile.jpg"].fingerprint, + userApi.shConsts.TEST_ARTEFACTS["res/smile.jpg"].size, + mspThreeKey.publicKey, + [userApi.shConsts.NODE_INFOS.user.expectedPeerId], + { + Basic: null + } + ) + ], + signer: shUser + }); + + await userApi.assert.eventPresent("fileSystem", "NewStorageRequest"); + + // Fail to connect to the first libp2p address because it is a phony one. + await userApi.docker.waitForLog({ + searchString: "Failed to upload batch to peer", + containerName: userApi.shConsts.NODE_INFOS.user.containerName + }); + + // Second libp2p address is the right one so we should successfully send the file through this one. 
+ await userApi.docker.waitForLog({ + searchString: "File upload complete.", + containerName: userApi.shConsts.NODE_INFOS.user.containerName + }); + }); }); From 33a5b2dd3ce091e2196b421390dea3321fa34110 Mon Sep 17 00:00:00 2001 From: undercover-cactus Date: Tue, 18 Feb 2025 09:22:52 +0100 Subject: [PATCH 4/6] clean unused variable --- .../user/send-file-to-provider.test.ts | 306 +++++++++--------- 1 file changed, 153 insertions(+), 153 deletions(-) diff --git a/test/suites/integration/user/send-file-to-provider.test.ts b/test/suites/integration/user/send-file-to-provider.test.ts index e7558d09e..60dfe2dae 100644 --- a/test/suites/integration/user/send-file-to-provider.test.ts +++ b/test/suites/integration/user/send-file-to-provider.test.ts @@ -1,165 +1,165 @@ import { - describeMspNet, - shUser, - type EnrichedBspApi, - getContainerIp, - addMspContainer, - mspThreeKey, - createBucket + describeMspNet, + shUser, + type EnrichedBspApi, + getContainerIp, + addMspContainer, + mspThreeKey, + createBucket } from "../../../util"; import { CAPACITY, MAX_STORAGE_CAPACITY } from "../../../util/bspNet/consts.ts"; describeMspNet("User: Send file to provider", ({ before, createUserApi, it }) => { - let userApi: EnrichedBspApi; - - before(async () => { - userApi = await createUserApi(); - }); - - it("MSP is down and user should show error logs", async () => { - const source = "res/smile.jpg"; - const destination = "test/smile.jpg"; - const bucketName = "enron"; - - const newBucketEventEvent = await userApi.createBucket(bucketName); - const newBucketEventDataBlob = - userApi.events.fileSystem.NewBucket.is(newBucketEventEvent) && newBucketEventEvent.data; - - if (!newBucketEventDataBlob) { - throw new Error("Event doesn't match Type"); - } - - await userApi.rpc.storagehubclient.loadFileInStorage( - source, - destination, - userApi.shConsts.NODE_INFOS.user.AddressId, - newBucketEventDataBlob.bucketId - ); - - await userApi.docker.pauseBspContainer(userApi.shConsts.NODE_INFOS.msp1.containerName); - - await userApi.block.seal({ - calls: [ - userApi.tx.fileSystem.issueStorageRequest( - newBucketEventDataBlob.bucketId, - destination, - userApi.shConsts.TEST_ARTEFACTS["res/smile.jpg"].fingerprint, - userApi.shConsts.TEST_ARTEFACTS["res/smile.jpg"].size, - userApi.shConsts.DUMMY_MSP_ID, - [userApi.shConsts.NODE_INFOS.user.expectedPeerId], - { - Basic: null - } - ) - ], - signer: shUser - }); - - await userApi.assert.eventPresent("fileSystem", "NewStorageRequest"); - - await userApi.docker.waitForLog({ - searchString: "Failed to send file", - containerName: userApi.shConsts.NODE_INFOS.user.containerName - }); - }); - - it("MSP first multihash is wrong and second should be correct. User will be able to connect and send file.", async () => { - const { containerName, rpcPort, p2pPort, peerId } = await addMspContainer({ - name: "lola1", - additionalArgs: [ - "--database=rocksdb", - `--max-storage-capacity=${MAX_STORAGE_CAPACITY}`, - `--jump-capacity=${CAPACITY[1024]}`, - "--msp-charging-period=12" - ] - }); + let userApi: EnrichedBspApi; - //Give it some balance. 
- const amount = 10000n * 10n ** 12n; - await userApi.block.seal({ - calls: [ - userApi.tx.sudo.sudo(userApi.tx.balances.forceSetBalance(mspThreeKey.address, amount)) - ] + before(async () => { + userApi = await createUserApi(); }); - const mspIp = await getContainerIp(containerName); - const multiAddressMsp = `/ip4/${mspIp}/tcp/${p2pPort}/p2p/${peerId}`; - await userApi.block.seal({ - calls: [ - userApi.tx.sudo.sudo( - userApi.tx.providers.forceMspSignUp( - mspThreeKey.address, - mspThreeKey.publicKey, - userApi.shConsts.CAPACITY_512, - [`/ip4/51.75.30.194/tcp/30350/p2p/${peerId}`, multiAddressMsp], - 100 * 1024 * 1024, - "Terms of Service...", - 9999999, - mspThreeKey.address - ) - ) - ] - }); - - const source = "res/smile.jpg"; - const destination = "test/smile.jpg"; - const bucketName = "theranos"; - - const valueProps = await userApi.call.storageProvidersApi.queryValuePropositionsForMsp( - mspThreeKey.publicKey - ); - - const localValuePropId = valueProps[0].id; - const newBucketEventEvent = await createBucket( - userApi, - bucketName, // Bucket name - localValuePropId, // Value proposition ID from MSP - "0xc0647914b37034d861ddc3f0750ded6defec0823de5c782f3ca7c64ba29a4a2e", // We got with cyberchef - shUser // Owner (the user) - ); - const newBucketEventDataBlob = - userApi.events.fileSystem.NewBucket.is(newBucketEventEvent) && newBucketEventEvent.data; - - if (!newBucketEventDataBlob) { - throw new Error("Event doesn't match Type"); - } - - await userApi.rpc.storagehubclient.loadFileInStorage( - source, - destination, - userApi.shConsts.NODE_INFOS.user.AddressId, - newBucketEventDataBlob.bucketId - ); - - await userApi.block.seal({ - calls: [ - userApi.tx.fileSystem.issueStorageRequest( - newBucketEventDataBlob.bucketId, - destination, - userApi.shConsts.TEST_ARTEFACTS["res/smile.jpg"].fingerprint, - userApi.shConsts.TEST_ARTEFACTS["res/smile.jpg"].size, - mspThreeKey.publicKey, - [userApi.shConsts.NODE_INFOS.user.expectedPeerId], - { - Basic: null - } - ) - ], - signer: shUser - }); - - await userApi.assert.eventPresent("fileSystem", "NewStorageRequest"); - - // Fail to connect to the first libp2p address because it is a phony one. 
- await userApi.docker.waitForLog({ - searchString: "Failed to upload batch to peer", - containerName: userApi.shConsts.NODE_INFOS.user.containerName + it("MSP is down and user should show error logs", async () => { + const source = "res/smile.jpg"; + const destination = "test/smile.jpg"; + const bucketName = "enron"; + + const newBucketEventEvent = await userApi.createBucket(bucketName); + const newBucketEventDataBlob = + userApi.events.fileSystem.NewBucket.is(newBucketEventEvent) && newBucketEventEvent.data; + + if (!newBucketEventDataBlob) { + throw new Error("Event doesn't match Type"); + } + + await userApi.rpc.storagehubclient.loadFileInStorage( + source, + destination, + userApi.shConsts.NODE_INFOS.user.AddressId, + newBucketEventDataBlob.bucketId + ); + + await userApi.docker.pauseBspContainer(userApi.shConsts.NODE_INFOS.msp1.containerName); + + await userApi.block.seal({ + calls: [ + userApi.tx.fileSystem.issueStorageRequest( + newBucketEventDataBlob.bucketId, + destination, + userApi.shConsts.TEST_ARTEFACTS["res/smile.jpg"].fingerprint, + userApi.shConsts.TEST_ARTEFACTS["res/smile.jpg"].size, + userApi.shConsts.DUMMY_MSP_ID, + [userApi.shConsts.NODE_INFOS.user.expectedPeerId], + { + Basic: null + } + ) + ], + signer: shUser + }); + + await userApi.assert.eventPresent("fileSystem", "NewStorageRequest"); + + await userApi.docker.waitForLog({ + searchString: "Failed to send file", + containerName: userApi.shConsts.NODE_INFOS.user.containerName + }); }); - // Second libp2p address is the right one so we should successfully send the file through this one. - await userApi.docker.waitForLog({ - searchString: "File upload complete.", - containerName: userApi.shConsts.NODE_INFOS.user.containerName + it("MSP first multihash is wrong and second should be correct. User will be able to connect and send file.", async () => { + const { containerName, p2pPort, peerId } = await addMspContainer({ + name: "lola1", + additionalArgs: [ + "--database=rocksdb", + `--max-storage-capacity=${MAX_STORAGE_CAPACITY}`, + `--jump-capacity=${CAPACITY[1024]}`, + "--msp-charging-period=12" + ] + }); + + //Give it some balance. 
+ const amount = 10000n * 10n ** 12n; + await userApi.block.seal({ + calls: [ + userApi.tx.sudo.sudo(userApi.tx.balances.forceSetBalance(mspThreeKey.address, amount)) + ] + }); + + const mspIp = await getContainerIp(containerName); + const multiAddressMsp = `/ip4/${mspIp}/tcp/${p2pPort}/p2p/${peerId}`; + await userApi.block.seal({ + calls: [ + userApi.tx.sudo.sudo( + userApi.tx.providers.forceMspSignUp( + mspThreeKey.address, + mspThreeKey.publicKey, + userApi.shConsts.CAPACITY_512, + [`/ip4/51.75.30.194/tcp/30350/p2p/${peerId}`, multiAddressMsp], + 100 * 1024 * 1024, + "Terms of Service...", + 9999999, + mspThreeKey.address + ) + ) + ] + }); + + const source = "res/smile.jpg"; + const destination = "test/smile.jpg"; + const bucketName = "theranos"; + + const valueProps = await userApi.call.storageProvidersApi.queryValuePropositionsForMsp( + mspThreeKey.publicKey + ); + + const localValuePropId = valueProps[0].id; + const newBucketEventEvent = await createBucket( + userApi, + bucketName, // Bucket name + localValuePropId, // Value proposition ID from MSP + "0xc0647914b37034d861ddc3f0750ded6defec0823de5c782f3ca7c64ba29a4a2e", // We got with cyberchef + shUser // Owner (the user) + ); + const newBucketEventDataBlob = + userApi.events.fileSystem.NewBucket.is(newBucketEventEvent) && newBucketEventEvent.data; + + if (!newBucketEventDataBlob) { + throw new Error("Event doesn't match Type"); + } + + await userApi.rpc.storagehubclient.loadFileInStorage( + source, + destination, + userApi.shConsts.NODE_INFOS.user.AddressId, + newBucketEventDataBlob.bucketId + ); + + await userApi.block.seal({ + calls: [ + userApi.tx.fileSystem.issueStorageRequest( + newBucketEventDataBlob.bucketId, + destination, + userApi.shConsts.TEST_ARTEFACTS["res/smile.jpg"].fingerprint, + userApi.shConsts.TEST_ARTEFACTS["res/smile.jpg"].size, + mspThreeKey.publicKey, + [userApi.shConsts.NODE_INFOS.user.expectedPeerId], + { + Basic: null + } + ) + ], + signer: shUser + }); + + await userApi.assert.eventPresent("fileSystem", "NewStorageRequest"); + + // Fail to connect to the first libp2p address because it is a phony one. + await userApi.docker.waitForLog({ + searchString: "Failed to upload batch to peer", + containerName: userApi.shConsts.NODE_INFOS.user.containerName + }); + + // Second libp2p address is the right one so we should successfully send the file through this one. 
+ await userApi.docker.waitForLog({ + searchString: "File upload complete.", + containerName: userApi.shConsts.NODE_INFOS.user.containerName + }); }); - }); }); From 95eff04d89b8bf02fa93317f73c38aad4e2f06a6 Mon Sep 17 00:00:00 2001 From: undercover-cactus Date: Tue, 18 Feb 2025 14:48:27 +0100 Subject: [PATCH 5/6] linter --- .../user/send-file-to-provider.test.ts | 306 +++++++++--------- 1 file changed, 153 insertions(+), 153 deletions(-) diff --git a/test/suites/integration/user/send-file-to-provider.test.ts b/test/suites/integration/user/send-file-to-provider.test.ts index 60dfe2dae..2504e641d 100644 --- a/test/suites/integration/user/send-file-to-provider.test.ts +++ b/test/suites/integration/user/send-file-to-provider.test.ts @@ -1,165 +1,165 @@ import { - describeMspNet, - shUser, - type EnrichedBspApi, - getContainerIp, - addMspContainer, - mspThreeKey, - createBucket + describeMspNet, + shUser, + type EnrichedBspApi, + getContainerIp, + addMspContainer, + mspThreeKey, + createBucket } from "../../../util"; import { CAPACITY, MAX_STORAGE_CAPACITY } from "../../../util/bspNet/consts.ts"; describeMspNet("User: Send file to provider", ({ before, createUserApi, it }) => { - let userApi: EnrichedBspApi; + let userApi: EnrichedBspApi; + + before(async () => { + userApi = await createUserApi(); + }); + + it("MSP is down and user should show error logs", async () => { + const source = "res/smile.jpg"; + const destination = "test/smile.jpg"; + const bucketName = "enron"; + + const newBucketEventEvent = await userApi.createBucket(bucketName); + const newBucketEventDataBlob = + userApi.events.fileSystem.NewBucket.is(newBucketEventEvent) && newBucketEventEvent.data; + + if (!newBucketEventDataBlob) { + throw new Error("Event doesn't match Type"); + } + + await userApi.rpc.storagehubclient.loadFileInStorage( + source, + destination, + userApi.shConsts.NODE_INFOS.user.AddressId, + newBucketEventDataBlob.bucketId + ); + + await userApi.docker.pauseBspContainer(userApi.shConsts.NODE_INFOS.msp1.containerName); + + await userApi.block.seal({ + calls: [ + userApi.tx.fileSystem.issueStorageRequest( + newBucketEventDataBlob.bucketId, + destination, + userApi.shConsts.TEST_ARTEFACTS["res/smile.jpg"].fingerprint, + userApi.shConsts.TEST_ARTEFACTS["res/smile.jpg"].size, + userApi.shConsts.DUMMY_MSP_ID, + [userApi.shConsts.NODE_INFOS.user.expectedPeerId], + { + Basic: null + } + ) + ], + signer: shUser + }); + + await userApi.assert.eventPresent("fileSystem", "NewStorageRequest"); + + await userApi.docker.waitForLog({ + searchString: "Failed to send file", + containerName: userApi.shConsts.NODE_INFOS.user.containerName + }); + }); + + it("MSP first multihash is wrong and second should be correct. User will be able to connect and send file.", async () => { + const { containerName, p2pPort, peerId } = await addMspContainer({ + name: "lola1", + additionalArgs: [ + "--database=rocksdb", + `--max-storage-capacity=${MAX_STORAGE_CAPACITY}`, + `--jump-capacity=${CAPACITY[1024]}`, + "--msp-charging-period=12" + ] + }); - before(async () => { - userApi = await createUserApi(); + //Give it some balance. 
+ const amount = 10000n * 10n ** 12n; + await userApi.block.seal({ + calls: [ + userApi.tx.sudo.sudo(userApi.tx.balances.forceSetBalance(mspThreeKey.address, amount)) + ] }); - it("MSP is down and user should show error logs", async () => { - const source = "res/smile.jpg"; - const destination = "test/smile.jpg"; - const bucketName = "enron"; - - const newBucketEventEvent = await userApi.createBucket(bucketName); - const newBucketEventDataBlob = - userApi.events.fileSystem.NewBucket.is(newBucketEventEvent) && newBucketEventEvent.data; - - if (!newBucketEventDataBlob) { - throw new Error("Event doesn't match Type"); - } - - await userApi.rpc.storagehubclient.loadFileInStorage( - source, - destination, - userApi.shConsts.NODE_INFOS.user.AddressId, - newBucketEventDataBlob.bucketId - ); - - await userApi.docker.pauseBspContainer(userApi.shConsts.NODE_INFOS.msp1.containerName); - - await userApi.block.seal({ - calls: [ - userApi.tx.fileSystem.issueStorageRequest( - newBucketEventDataBlob.bucketId, - destination, - userApi.shConsts.TEST_ARTEFACTS["res/smile.jpg"].fingerprint, - userApi.shConsts.TEST_ARTEFACTS["res/smile.jpg"].size, - userApi.shConsts.DUMMY_MSP_ID, - [userApi.shConsts.NODE_INFOS.user.expectedPeerId], - { - Basic: null - } - ) - ], - signer: shUser - }); - - await userApi.assert.eventPresent("fileSystem", "NewStorageRequest"); - - await userApi.docker.waitForLog({ - searchString: "Failed to send file", - containerName: userApi.shConsts.NODE_INFOS.user.containerName - }); + const mspIp = await getContainerIp(containerName); + const multiAddressMsp = `/ip4/${mspIp}/tcp/${p2pPort}/p2p/${peerId}`; + await userApi.block.seal({ + calls: [ + userApi.tx.sudo.sudo( + userApi.tx.providers.forceMspSignUp( + mspThreeKey.address, + mspThreeKey.publicKey, + userApi.shConsts.CAPACITY_512, + [`/ip4/51.75.30.194/tcp/30350/p2p/${peerId}`, multiAddressMsp], + 100 * 1024 * 1024, + "Terms of Service...", + 9999999, + mspThreeKey.address + ) + ) + ] + }); + + const source = "res/smile.jpg"; + const destination = "test/smile.jpg"; + const bucketName = "theranos"; + + const valueProps = await userApi.call.storageProvidersApi.queryValuePropositionsForMsp( + mspThreeKey.publicKey + ); + + const localValuePropId = valueProps[0].id; + const newBucketEventEvent = await createBucket( + userApi, + bucketName, // Bucket name + localValuePropId, // Value proposition ID from MSP + "0xc0647914b37034d861ddc3f0750ded6defec0823de5c782f3ca7c64ba29a4a2e", // We got with cyberchef + shUser // Owner (the user) + ); + const newBucketEventDataBlob = + userApi.events.fileSystem.NewBucket.is(newBucketEventEvent) && newBucketEventEvent.data; + + if (!newBucketEventDataBlob) { + throw new Error("Event doesn't match Type"); + } + + await userApi.rpc.storagehubclient.loadFileInStorage( + source, + destination, + userApi.shConsts.NODE_INFOS.user.AddressId, + newBucketEventDataBlob.bucketId + ); + + await userApi.block.seal({ + calls: [ + userApi.tx.fileSystem.issueStorageRequest( + newBucketEventDataBlob.bucketId, + destination, + userApi.shConsts.TEST_ARTEFACTS["res/smile.jpg"].fingerprint, + userApi.shConsts.TEST_ARTEFACTS["res/smile.jpg"].size, + mspThreeKey.publicKey, + [userApi.shConsts.NODE_INFOS.user.expectedPeerId], + { + Basic: null + } + ) + ], + signer: shUser + }); + + await userApi.assert.eventPresent("fileSystem", "NewStorageRequest"); + + // Fail to connect to the first libp2p address because it is a phony one. 
+ await userApi.docker.waitForLog({ + searchString: "Failed to upload batch to peer", + containerName: userApi.shConsts.NODE_INFOS.user.containerName }); - it("MSP first multihash is wrong and second should be correct. User will be able to connect and send file.", async () => { - const { containerName, p2pPort, peerId } = await addMspContainer({ - name: "lola1", - additionalArgs: [ - "--database=rocksdb", - `--max-storage-capacity=${MAX_STORAGE_CAPACITY}`, - `--jump-capacity=${CAPACITY[1024]}`, - "--msp-charging-period=12" - ] - }); - - //Give it some balance. - const amount = 10000n * 10n ** 12n; - await userApi.block.seal({ - calls: [ - userApi.tx.sudo.sudo(userApi.tx.balances.forceSetBalance(mspThreeKey.address, amount)) - ] - }); - - const mspIp = await getContainerIp(containerName); - const multiAddressMsp = `/ip4/${mspIp}/tcp/${p2pPort}/p2p/${peerId}`; - await userApi.block.seal({ - calls: [ - userApi.tx.sudo.sudo( - userApi.tx.providers.forceMspSignUp( - mspThreeKey.address, - mspThreeKey.publicKey, - userApi.shConsts.CAPACITY_512, - [`/ip4/51.75.30.194/tcp/30350/p2p/${peerId}`, multiAddressMsp], - 100 * 1024 * 1024, - "Terms of Service...", - 9999999, - mspThreeKey.address - ) - ) - ] - }); - - const source = "res/smile.jpg"; - const destination = "test/smile.jpg"; - const bucketName = "theranos"; - - const valueProps = await userApi.call.storageProvidersApi.queryValuePropositionsForMsp( - mspThreeKey.publicKey - ); - - const localValuePropId = valueProps[0].id; - const newBucketEventEvent = await createBucket( - userApi, - bucketName, // Bucket name - localValuePropId, // Value proposition ID from MSP - "0xc0647914b37034d861ddc3f0750ded6defec0823de5c782f3ca7c64ba29a4a2e", // We got with cyberchef - shUser // Owner (the user) - ); - const newBucketEventDataBlob = - userApi.events.fileSystem.NewBucket.is(newBucketEventEvent) && newBucketEventEvent.data; - - if (!newBucketEventDataBlob) { - throw new Error("Event doesn't match Type"); - } - - await userApi.rpc.storagehubclient.loadFileInStorage( - source, - destination, - userApi.shConsts.NODE_INFOS.user.AddressId, - newBucketEventDataBlob.bucketId - ); - - await userApi.block.seal({ - calls: [ - userApi.tx.fileSystem.issueStorageRequest( - newBucketEventDataBlob.bucketId, - destination, - userApi.shConsts.TEST_ARTEFACTS["res/smile.jpg"].fingerprint, - userApi.shConsts.TEST_ARTEFACTS["res/smile.jpg"].size, - mspThreeKey.publicKey, - [userApi.shConsts.NODE_INFOS.user.expectedPeerId], - { - Basic: null - } - ) - ], - signer: shUser - }); - - await userApi.assert.eventPresent("fileSystem", "NewStorageRequest"); - - // Fail to connect to the first libp2p address because it is a phony one. - await userApi.docker.waitForLog({ - searchString: "Failed to upload batch to peer", - containerName: userApi.shConsts.NODE_INFOS.user.containerName - }); - - // Second libp2p address is the right one so we should successfully send the file through this one. - await userApi.docker.waitForLog({ - searchString: "File upload complete.", - containerName: userApi.shConsts.NODE_INFOS.user.containerName - }); + // Second libp2p address is the right one so we should successfully send the file through this one. 
+    await userApi.docker.waitForLog({
+      searchString: "File upload complete.",
+      containerName: userApi.shConsts.NODE_INFOS.user.containerName
+    });
+  });
 });

From b4b53c7b62a46cca7bd5398ee0e62138ab71c3a7 Mon Sep 17 00:00:00 2001
From: undercover-cactus
Date: Wed, 19 Feb 2025 13:01:11 +0100
Subject: [PATCH 6/6] minor changes

---
 node/src/tasks/user_sends_file.rs             | 21 ++++++++--------
 .../user/send-file-to-provider.test.ts        |  2 +-
 2 files changed, 10 insertions(+), 13 deletions(-)

diff --git a/node/src/tasks/user_sends_file.rs b/node/src/tasks/user_sends_file.rs
index 3bd6c8801..4e24764dd 100644
--- a/node/src/tasks/user_sends_file.rs
+++ b/node/src/tasks/user_sends_file.rs
@@ -137,8 +137,7 @@ where
             info!(target: LOG_TARGET, "No peers were found to receive file key {:?}", file_key);
         }
 
-        self.send_chunks_to_providers(peer_ids, &file_metadata)
-            .await
+        self.send_chunks_to_provider(peer_ids, &file_metadata).await
     }
 }
 
@@ -184,8 +183,7 @@ where
             info!(target: LOG_TARGET, "No peers were found to receive file key {:?}", file_key);
         }
 
-        self.send_chunks_to_providers(peer_ids, &file_metadata)
-            .await
+        self.send_chunks_to_provider(peer_ids, &file_metadata).await
     }
 }
 
@@ -193,7 +191,7 @@ impl<NT> UserSendsFileTask<NT>
 where
     NT: ShNodeType,
 {
-    async fn send_chunks_to_providers(
+    async fn send_chunks_to_provider(
         &mut self,
         peer_ids: Vec<PeerId>,
         file_metadata: &FileMetadata,
@@ -209,12 +207,12 @@ where
                 .await
             {
                 Err(err) => {
-                    // if sending chunk failed with one peer id, we try with the next one.
+                    // If sending chunk failed with one peer id, we try with the next one.
                     warn!(target: LOG_TARGET, "{:?}", err);
                     continue;
                 }
                 Ok(()) => {
-                    // if successfull our job is done. No need to try with the next peer id.
+                    // If successful our job is done. No need to try with the next peer id.
                     return Ok(());
                 }
             };
@@ -321,12 +319,12 @@ where
                         tokio::time::sleep(std::time::Duration::from_secs(1)).await;
                     }
                     Err(RequestError::RequestFailure(RequestFailure::Refused)) => {
-                        // If MSP doesn't receive file, the burden of downloading the file will be on the MSP.
+                        // Return an error if the provider refused to answer.
                         return Err(anyhow::anyhow!("Failed to send file {:?}", file_key));
                     }
                     Err(e) => {
                         return Err(anyhow::anyhow!(
-                            "Failed to upload batch to peer {:?} (Error: {:?})",
+                            "Unexpected error while trying to upload batch to peer {:?} (Error: {:?})",
                             peer_id,
                             e
                         ));
@@ -414,13 +412,12 @@ where
                         tokio::time::sleep(std::time::Duration::from_secs(1)).await;
                     }
                     Err(RequestError::RequestFailure(RequestFailure::Refused)) => {
-                        // If MSP doesn't receive file, the burden of downloading the file will be on the MSP.
-
+                        // Return an error if the provider refused to answer.
                         return Err(anyhow::anyhow!("Failed to send file {:?}", file_key));
                     }
                     Err(e) => {
                         return Err(anyhow::anyhow!(
-                            "Failed to upload final batch to peer {:?} (Error: {:?})",
+                            "Unexpected error while trying to upload final batch to peer {:?} (Error: {:?})",
                             peer_id,
                             e
                         ));
diff --git a/test/suites/integration/user/send-file-to-provider.test.ts b/test/suites/integration/user/send-file-to-provider.test.ts
index 2504e641d..96669cae2 100644
--- a/test/suites/integration/user/send-file-to-provider.test.ts
+++ b/test/suites/integration/user/send-file-to-provider.test.ts
@@ -63,7 +63,7 @@ describeMspNet("User: Send file to provider", ({ before, createUserApi, it }) =>
     });
   });
 
-  it("MSP first multihash is wrong and second should be correct. User will be able to connect and send file.", async () => {
+  it("MSP first libp2p multiaddress is wrong and second should be correct. User will be able to connect and send file on the second attempt.", async () => {
    const { containerName, p2pPort, peerId } = await addMspContainer({
      name: "lola1",
      additionalArgs: [