
I am trying to send a Revit file to my bucket chunk by chunk. My Revit file is almost 13 MB. Here is my code:

function handleFileSelect(evt) {
    var files = evt.target.files; 
    var file = files[0];

    var segmentSize = 1024 * 1024 * 5; //5 MB
    var startingByte = 0;
    var endingByte = startingByte + segmentSize - 1;
    var segments = Math.ceil(file.size / segmentSize);
    var session = Math.floor(100000000 + Math.random() * -900000000);
    

    for (var i = 0; i < segments; i ++)
    {
        var blob = file.slice(startingByte, endingByte);
        var url = 'https://developer.api.autodesk.com/oss/v2/buckets/' + 'linked_model' + '/objects/' + file.name + '/resumable';
        //console.log(url);
        var contentRange = 'bytes ' + startingByte + '-' + endingByte + '/' + file.size;

        $.ajax({
            type: 'PUT',
            url: url,
            data: blob,
            headers: {
                'Authorization':'Bearer ' + token,
                'Content-Type':'application/octet-stream',
                'Content-Range': contentRange,
                'Session-Id': session
            },
            crossDomain: true,
            processData: false,
            success: function (data) {
                console.log(i);
                startingByte = endingByte + 1;
                endingByte = startingByte + segmentSize - 1;
                },
            error: function (XMLHttpRequest, textStatus, errorThrown) {
                alert("Status: " + textStatus); alert("Error: " + errorThrown);
                console.log(startingByte);
                console.log(endingByte);
                console.log(file.size);
            }
        });
    }
}

It gives me the error: 416 (Requested Range Not Satisfiable)

Can anyone help?


1 Answer


I ran into the same 416 error, but in my case the problem was that I was trying to upload chunks smaller than 2 MB, which is not allowed (except for the last chunk).

It started working once I increased the chunk size to 5 MB. I just wrote a blog post about it: https://forge.autodesk.com/blog/nailing-large-files-uploads-forge-resumable-api
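
For reference, here is a small standalone sketch (not from the blog sample) of how the Content-Range values work out for a roughly 13 MB file with 5 MB chunks; only the last chunk is allowed to drop below 2 MB:

const fileSize = 13 * 1024 * 1024   // ~13 MB, as in the question
const chunkSize = 5 * 1024 * 1024   // 5 MB per chunk (>= 2 MB except the last)

const nbChunks = Math.ceil(fileSize / chunkSize)

for (let i = 0; i < nbChunks; ++i) {
  const start = i * chunkSize
  const end = Math.min(fileSize, (i + 1) * chunkSize) - 1
  // each PUT to the /resumable endpoint carries this header
  console.log(`Content-Range: bytes ${start}-${end}/${fileSize}`)
}
// bytes 0-5242879/13631488
// bytes 5242880-10485759/13631488
// bytes 10485760-13631487/13631488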

Below is the core code that handles the chunking and the upload (in Node.js).

By the way, I strongly discourage doing this kind of operation client-side as your snippet suggests: it means passing a write-enabled access token to the web page, which compromises the security of your application. You should first upload the file to your own server and then upload it securely to Forge, as described in the post and in my sample (a rough sketch of that server-side flow follows after the method below).

/////////////////////////////////////////////////////////
// Uploads object to bucket using resumable endpoint
//
// Assumes `fs` (Node.js built-in) and `eachLimit` (from the
// 'async' package) are imported, and that this._objectsAPI
// and this.guid() are provided by the enclosing class
/////////////////////////////////////////////////////////
uploadObjectChunked (getToken, bucketKey, objectKey,
                     file,  opts = {}) {

  return new Promise((resolve, reject) => {

    const chunkSize = opts.chunkSize || 5 * 1024 * 1024

    const nbChunks = Math.ceil(file.size / chunkSize)

    const chunksMap = Array.from({
      length: nbChunks
    }, (e, i) => i)

    // generates a unique session ID
    const sessionId = this.guid()

    // prepare the upload tasks
    const uploadTasks = chunksMap.map((chunkIdx) => {

      const start = chunkIdx * chunkSize

      const end = Math.min(
          file.size, (chunkIdx + 1) * chunkSize) - 1

      const range = `bytes ${start}-${end}/${file.size}`

      const length = end - start + 1

      const readStream =
        fs.createReadStream(file.path, {
          start, end: end
        })

      const run = async () => {

        const token = await getToken()

        return this._objectsAPI.uploadChunk(
          bucketKey, objectKey,
          length, range, sessionId,
          readStream, {},
          {autoRefresh: false}, token)
      }

      return {
        chunkIndex: chunkIdx,
        run
      }
    })

    let progress = 0

    // runs asynchronously in parallel the upload tasks
    // number of simultaneous uploads is defined by
    // opts.concurrentUploads
    eachLimit(uploadTasks, opts.concurrentUploads || 3,
      (task, callback) => {

        task.run().then((res) => {

          if (opts.onProgress) {

            progress += 100.0 / nbChunks

            opts.onProgress ({
              progress: Math.round(progress * 100) / 100,
              chunkIndex: task.chunkIndex
            })
          }

          callback ()

        }, (err) => {

          console.log('error')
          console.log(err)

          callback(err)
        })

    }, (err) => {

        if (err) {

          return reject(err)
        }

        return resolve({
          fileSize: file.size,
          bucketKey,
          objectKey,
          nbChunks
        })
    })
  })
}
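
To tie it together, here is a rough sketch (not part of the blog sample) of the server-side flow recommended above: the browser posts the file to your own server, and the server then calls uploadObjectChunked with a token that never reaches the client. Express, multer, forgeSvc (an instance of the class exposing the method above) and get2LeggedToken() are illustrative assumptions.

const express = require('express')
const multer = require('multer')

const app = express()
const upload = multer({ dest: 'uploads/' }) // browser posts the file here first

app.post('/api/upload', upload.single('model'), async (req, res) => {
  try {
    // req.file.path and req.file.size are provided by multer
    const result = await forgeSvc.uploadObjectChunked(
      () => get2LeggedToken(),           // write-scoped token stays on the server
      'linked_model',                    // bucketKey from the question
      req.file.originalname,             // objectKey
      { path: req.file.path, size: req.file.size },
      {
        chunkSize: 5 * 1024 * 1024,      // 5 MB, per the fix above
        concurrentUploads: 3,
        onProgress: ({ progress, chunkIndex }) => {
          console.log(`chunk ${chunkIndex}: ${progress}%`)
        }
      })

    res.json(result)

  } catch (err) {
    res.status(500).json(err)
  }
})

app.listen(3000)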
answered 2017-06-07T18:47:20.120