
I'm trying to upload a file to AWS in Rust; for this I'm using the rusoto_s3 S3 Rust client. I managed to get the multipart upload code working when the parts are sent from a single thread. However, that's not what I want: I want to upload big files, and I'd like to be able to send the parts from multiple threads. To do that, I did a bit of googling and came across rayon.

For information, multipart upload works as follows (a minimal sketch follows the list):

  1. Initiate the multipart upload -> AWS returns an upload ID
  2. Using this ID, send the individual parts, passing the file chunk and the part number -> AWS returns an ETag for each part
  3. Once all parts have been sent, send a complete-upload request containing all the completed parts as an array of ETags and part numbers.
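
A minimal, sequential sketch of those three steps with rusoto_s3 0.46 (bucket and key are placeholders, a single one-part upload, errors just bubbled up):

use rusoto_core::Region;
use rusoto_s3::{
    CompleteMultipartUploadRequest, CompletedMultipartUpload, CompletedPart,
    CreateMultipartUploadRequest, S3Client, UploadPartRequest, S3,
};

async fn multipart_upload_sketch(chunk: Vec<u8>) -> Result<(), Box<dyn std::error::Error>> {
    let s3 = S3Client::new(Region::UsEast1);

    // 1. Start the multipart upload -> AWS returns an upload ID.
    let created = s3
        .create_multipart_upload(CreateMultipartUploadRequest {
            bucket: "my-bucket".to_owned(), // placeholder
            key: "my-key".to_owned(),       // placeholder
            ..Default::default()
        })
        .await?;
    let upload_id = created.upload_id.expect("no upload_id returned");

    // 2. Send each part (file chunk + part number) -> AWS returns an ETag.
    let uploaded = s3
        .upload_part(UploadPartRequest {
            body: Some(chunk.into()),
            bucket: "my-bucket".to_owned(),
            key: "my-key".to_owned(),
            upload_id: upload_id.clone(),
            part_number: 1,
            ..Default::default()
        })
        .await?;

    // 3. Complete the upload with the (ETag, part number) pairs,
    //    sorted by part number.
    s3.complete_multipart_upload(CompleteMultipartUploadRequest {
        bucket: "my-bucket".to_owned(),
        key: "my-key".to_owned(),
        upload_id,
        multipart_upload: Some(CompletedMultipartUpload {
            parts: Some(vec![CompletedPart {
                e_tag: uploaded.e_tag,
                part_number: Some(1),
            }]),
        }),
        ..Default::default()
    })
    .await?;
    Ok(())
}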

I'm new to Rust, coming from a C++ and Java background. Here is my code:

#[tokio::test]
async fn if_multipart_then_upload_multiparts_dicom() {
    let now = Instant::now();
    dotenv().ok();
    let local_filename = "./files/test_big.DCM";
    let destination_filename = "24_time_test.dcm";

    let mut file = std::fs::File::open(local_filename).unwrap();
    const CHUNK_SIZE: usize = 7_000_000;
    let mut buffer = Vec::with_capacity(CHUNK_SIZE);

    let client = super::get_client().await;
    let create_multipart_request = CreateMultipartUploadRequest {
        bucket: client.bucket_name.to_owned(),
        key: destination_filename.to_owned(),
        ..Default::default()
    };

    // Start the multipart upload and note the upload_id generated
    let response = client
        .s3
        .create_multipart_upload(create_multipart_request)
        .await
        .expect("Couldn't create multipart upload");
    let upload_id = response.upload_id.unwrap();

    // Create upload parts
    let create_upload_part = |body: Vec<u8>, part_number: i64| -> UploadPartRequest {
        UploadPartRequest {
            body: Some(body.into()),
            bucket: client.bucket_name.to_owned(),
            key: destination_filename.to_owned(),
            upload_id: upload_id.to_owned(),
            part_number: part_number,
            ..Default::default()
        }
    };

    let completed_parts = Arc::new(Mutex::new(vec![]));

    rayon::scope(|scope| {
        let mut part_number = 1;
        loop {
            let maximum_bytes_to_read = CHUNK_SIZE - buffer.len();
            println!("maximum_bytes_to_read: {}", maximum_bytes_to_read);
            file.by_ref()
                .take(maximum_bytes_to_read as u64)
                .read_to_end(&mut buffer)
                .unwrap();

            println!("length: {}", buffer.len());
            println!("part_number: {}", part_number);
            if buffer.len() == 0 {
                // The file has ended.
                break;
            }

            let next_buffer = Vec::with_capacity(CHUNK_SIZE);
            let data_to_send = buffer;
            let completed_parts_cloned = completed_parts.clone();
            scope.spawn(move |_| {
                let part = create_upload_part(data_to_send.to_vec(), part_number);
                {
                    let part_number = part.part_number;
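                    // Note: this rayon worker thread has no tokio runtime, and
                    // futures::executor::block_on does not start one; these
                    // block_on calls are where the "no reactor running" panic
                    // below comes from.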
                    let client = executor::block_on(super::get_client());
                    let response = executor::block_on(client.s3.upload_part(part));

                    completed_parts_cloned.lock().unwrap().push(CompletedPart {
                        e_tag: response
                            .expect("Couldn't complete multipart upload")
                            .e_tag
                            .clone(),
                        part_number: Some(part_number),
                    });
                }
            });

            buffer = next_buffer;
            part_number = part_number + 1;
        }
    });

    let completed_upload = CompletedMultipartUpload {
        parts: Some(completed_parts.lock().unwrap().to_vec()),
    };

    let complete_req = CompleteMultipartUploadRequest {
        bucket: client.bucket_name.to_owned(),
        key: destination_filename.to_owned(),
        upload_id: upload_id.to_owned(),
        multipart_upload: Some(completed_upload),
        ..Default::default()
    };
    client
        .s3
        .complete_multipart_upload(complete_req)
        .await
        .expect("Couldn't complete multipart upload");
    println!(
        "time taken: {}, with chunk:: {}",
        now.elapsed().as_secs(),
        CHUNK_SIZE
    );
}

Here is a sample of the output, and the error I get:

maximum_bytes_to_read: 7000000
length: 7000000
part_number: 1
maximum_bytes_to_read: 7000000
length: 7000000
part_number: 2
maximum_bytes_to_read: 7000000
thread '<unnamed>' panicked at 'there is no reactor running, must be called from the context of a Tokio 1.x runtime', C:\Users\DNDT\.cargo\registry\src\github.com-1ecc6299db9ec823\tokio-1.2.0\src\runtime\blocking\pool.rs:85:33
note: run with `RUST_BACKTRACE=1` environment variable to display a backtrace
thread '<unnamed>' panicked at 'there is no reactor running, must be called from the context of a Tokio 1.x runtime', C:\Users\DNDT\.cargo\registry\src\github.com-1ecc6299db9ec823\tokio-1.2.0\src\runtime\blocking\pool.rs:85:33
length: 7000000

I googled this error, but it wasn't clear to me what it actually means:

"there is no reactor running, must be called from the context of a Tokio 1.x runtime"

Here is what I found: another question with the same error

and another question

This seems like it could be a compatibility issue: rusoto_s3 might use a version of tokio that is incompatible with the tokio version I have.
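
For reference, the same panic can be reproduced without rusoto at all: polling any tokio-dependent future with futures::executor::block_on from a plain thread hits the same check. A minimal sketch, assuming tokio 1.x with the "time" feature plus the futures crate:

use std::time::Duration;

fn main() {
    std::thread::spawn(|| {
        // This thread has no associated tokio runtime, and
        // futures::executor::block_on does not provide one, so the timer
        // panics with "there is no reactor running, must be called from
        // the context of a Tokio 1.x runtime".
        futures::executor::block_on(async {
            tokio::time::sleep(Duration::from_millis(1)).await;
        });
    })
    .join()
    .unwrap();
}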

Here are some of the relevant dependencies:

tokio = { version = "1", features = ["full"] }
tokio-compat-02 = "0.1.2"
rusoto_s3 = "0.46.0"
rusoto_core = "0.46.0"
rusoto_credential = "0.46.0"
rayon = "1.5.0"

I think the main issue is that I'm trying to run async code inside rayon threads. I tried turning my async code into blocking code using executor::block_on. I also spent some time trying to make the compiler happy: I have multiple threads that all want to write to let completed_parts = Arc::new(Mutex::new(vec![]));, so I did some cloning there to satisfy the compiler.
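
The sharing pattern itself is the standard one: each thread gets its own clone of the Arc handle, and the Mutex guards the shared vector. A minimal standalone sketch with plain threads, unrelated to S3:

use std::sync::{Arc, Mutex};

fn main() {
    let completed_parts = Arc::new(Mutex::new(Vec::new()));
    let mut handles = Vec::new();
    for part_number in 1..=3 {
        let parts = Arc::clone(&completed_parts); // one handle per thread
        handles.push(std::thread::spawn(move || {
            parts.lock().unwrap().push(part_number);
        }));
    }
    for handle in handles {
        handle.join().unwrap();
    }
    assert_eq!(completed_parts.lock().unwrap().len(), 3);
}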

Also, in case the crates I used matter, here they are:

extern crate dotenv;
extern crate tokio;
use bytes::Bytes;
use dotenv::dotenv;
use futures::executor;
use futures::*;
use rusoto_core::credential::{EnvironmentProvider, ProvideAwsCredentials};
use rusoto_s3::util::{PreSignedRequest, PreSignedRequestOption};
use rusoto_s3::PutObjectRequest;
use rusoto_s3::StreamingBody;
use rusoto_s3::{
    CompleteMultipartUploadRequest, CompletedMultipartUpload, CompletedPart,
    CreateMultipartUploadRequest, UploadPartRequest, S3,
};

use std::io::Read;
use std::sync::{Arc, Mutex};
use std::time::Duration;
use std::time::Instant;
use tokio::fs;

I'm new to Rust, so there are a lot of moving parts to get right!


1 Answer


Thanks to the discussion with @Jmb, I got rid of the threads and spawn tokio tasks instead, as follows:

Create a vector to hold the futures so that we can wait for them later:

let mut multiple_parts_futures = Vec::new();

Spawn the async tasks:

loop { // loop over the file chunks
    ...
    let send_part_task_future = tokio::task::spawn(async move {
        // Upload part
        ...
    });
    multiple_parts_futures.push(send_part_task_future);
}

Then wait for all the futures (tokio::task::spawn returns a JoinHandle, which is itself a future):

let _results = futures::future::join_all(multiple_parts_futures).await;

It's worth mentioning that the completed parts need to be sorted by part number, because the spawned tasks can finish in any order while S3 expects the parts list in ascending order:

let mut completed_parts_vector = completed_parts.lock().unwrap().to_vec();
completed_parts_vector.sort_by_key(|part| part.part_number);

The whole code is:

#[tokio::test]
async fn if_multipart_then_upload_multiparts_dicom() {
    let now = Instant::now();
    dotenv().ok();
    let local_filename = "./files/test_big.DCM";
    let destination_filename = generate_unique_name();
    let destination_filename_clone = destination_filename.clone();
    let mut file = std::fs::File::open(local_filename).unwrap();
    const CHUNK_SIZE: usize = 6_000_000;
    let mut buffer = Vec::with_capacity(CHUNK_SIZE);

    let client = super::get_client().await;
    let create_multipart_request = CreateMultipartUploadRequest {
        bucket: client.bucket_name.to_owned(),
        key: destination_filename.to_owned(),
        ..Default::default()
    };

    // Start the multipart upload and note the upload_id generated
    let response = client
        .s3
        .create_multipart_upload(create_multipart_request)
        .await
        .expect("Couldn't create multipart upload");
    let upload_id = response.upload_id.unwrap();

    let upload_id_clone = upload_id.clone();
    // Create upload parts
    let create_upload_part = move |body: Vec<u8>, part_number: i64| -> UploadPartRequest {
        UploadPartRequest {
            body: Some(body.into()),
            bucket: client.bucket_name.to_owned(),
            key: destination_filename_clone.to_owned(),
            upload_id: upload_id_clone.to_owned(),
            part_number: part_number,
            ..Default::default()
        }
    };

    let create_upload_part_arc = Arc::new(create_upload_part);
    let completed_parts = Arc::new(Mutex::new(vec![]));

    let mut part_number = 1;

    let mut multiple_parts_futures = Vec::new();
    loop {
        let maximum_bytes_to_read = CHUNK_SIZE - buffer.len();
        println!("maximum_bytes_to_read: {}", maximum_bytes_to_read);
        file.by_ref()
            .take(maximum_bytes_to_read as u64)
            .read_to_end(&mut buffer)
            .unwrap();
        println!("length: {}", buffer.len());
        println!("part_number: {}", part_number);
        if buffer.len() == 0 {
            // The file has ended.
            break;
        }
        let next_buffer = Vec::with_capacity(CHUNK_SIZE);
        let data_to_send = buffer;
        let completed_parts_cloned = completed_parts.clone();
        let create_upload_part_arc_cloned = create_upload_part_arc.clone();
        let send_part_task_future = tokio::task::spawn(async move {
            let part = create_upload_part_arc_cloned(data_to_send.to_vec(), part_number);
            {
                let part_number = part.part_number;
                let client = super::get_client().await;
                let response = client.s3.upload_part(part).await;
                completed_parts_cloned.lock().unwrap().push(CompletedPart {
                    e_tag: response
                        .expect("Couldn't complete multipart upload")
                        .e_tag
                        .clone(),
                    part_number: Some(part_number),
                });
            }
        });
        multiple_parts_futures.push(send_part_task_future);
        buffer = next_buffer;
        part_number = part_number + 1;
    }
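    // `create_upload_part` above is a `move` closure that took ownership of
    // the first client, so fetch a fresh one for the final complete request.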
    let client = super::get_client().await;
    println!("waiting for futures");
    let _results = futures::future::join_all(multiple_parts_futures).await;

    let mut completed_parts_vector = completed_parts.lock().unwrap().to_vec();
    completed_parts_vector.sort_by_key(|part| part.part_number);
    println!("futures done");
    let completed_upload = CompletedMultipartUpload {
        parts: Some(completed_parts_vector),
    };

    let complete_req = CompleteMultipartUploadRequest {
        bucket: client.bucket_name.to_owned(),
        key: destination_filename.to_owned(),
        upload_id: upload_id.to_owned(),
        multipart_upload: Some(completed_upload),
        ..Default::default()
    };

    client
        .s3
        .complete_multipart_upload(complete_req)
        .await
        .expect("Couldn't complete multipart upload");
    println!(
        "time taken: {}, with chunk:: {}",
        now.elapsed().as_secs(),
        CHUNK_SIZE
    );
}
answered 2021-03-11T08:13:13