Manage files

This section gives examples for the following:

  • Upload a file
  • Initiate multi-chunks upload
  • Chunk upload
  • Download a file

Upload a file

This example demonstrates how to upload a new file named "MyNewFile.txt" to a previously created folder. You must initiate a multi-chunks upload before you can upload any chunk. In response to the initiate request, the API returns an upload URI that you must use in the subsequent chunk upload requests. In this example, the file 'MyNewFile.txt' is uploaded in 2 chunks of 1 KB each (you can choose your own chunk size).

Initiate multi-chunks upload

HTTP Method

POST

Request URL

https://{hostname}/rest/folders/{folder_id}/actions/initiateUpload

Request Body

filename - The new file name

totalSize - the total size of the file

totalChunks - the total number of chunks

{ "filename": "MyNewFile.txt", "totalSize": 2048, "totalChunks": 2 }

Response

If the upload is initiated successfully, the response is a 201 Created status code. The response body contains a JSON representation of the chunk upload info, including a uri property (dacfs_upload1/rest/uploads/7890 in this sample response) that you can use as the upload URI for subsequent requests.

Example Request

Copy
   -X POST 'https://{hostname}/rest/folders/1234/actions/initiateUpload' \
   -H 'Accept: application/json' \
   -H 'Content-Type: application/json' \
   -H 'X-Kiteworks-Version: 15' \
   -H 'Authorization: Bearer {access_token}' \
   -d '{"filename": "MyNewFile.txt", "totalSize": 2048, "totalChunks": 2}'

Example Response

Copy
{
    "error": "OK",
    "totalSize": 2048,
    "timestamp": "2020-05-15T08:11:07Z",
    "uri": "dacfs_upload1/rest/uploads/7890",
    "userId": 1,
    "lastTimestamp": "2020-05-15T08:11:07Z",
    "uploadedSize": 0,
    "clientName": "OAuth Playground",
    "fileUrl": "",
    "location": "{hostname}",
    "totalChunks": 2,
    "uploadedChunks": 0,
    "completeOk": 0,
    "svrUploadTime": 0,
    "id": 7890,
    "replaceId": null,
    "backend": "acfs"
}

Example Python Code

Copy
import requests
import os

# Common request headers. access_token comes from the OAuth flow and
# api_version is the X-Kiteworks-Version value (e.g. "15").
headers = {
    "Accept": "application/json",
    "X-Kiteworks-Version": api_version,
    "Authorization": "Bearer {access_token}".format(access_token=access_token)
}

# To initiate the upload we need the file's size and name.
file_size = os.path.getsize(upload_file_path)
file_name = os.path.basename(upload_file_path)

# Calculate the number of chunks to be uploaded in sequence.
total_num_of_chunks = file_size // chunk_size

# E.g. 7 // 2 = 3, but the actual number of chunks should be 4: [2, 2, 2, 1].
if file_size % chunk_size:
    total_num_of_chunks += 1

# The minimum number of chunks is 1, even when the file size is 0.
if total_num_of_chunks == 0:
    total_num_of_chunks = 1

payload = {"filename": file_name, "totalSize": file_size, "totalChunks": total_num_of_chunks}
url = "https://{hostname}/rest/folders/{folder_id}/actions/initiateUpload".format(
    hostname=hostname,
    folder_id=folder_id
)
response = requests.post(url=url, data=payload, headers=headers)
response_json = response.json()

# Get the upload uri returned by the initiate request.
upload_uri = response_json["uri"]

Chunk upload

HTTP Method

POST

Request URL

https://{hostname}/{upload_uri}

Request Body

compressionMode - The compression mode of the chunk (only NORMAL is supported).

compressionSize - The compressed size (it’s always the same as originalSize).

originalSize - The original size of the chunk.

index - The 1-based index of the chunk being uploaded.

{ "compressionMode": "NORMAL", "compressionSize": 1024, "originalSize": 1024, "index": 1 }

Response

If the individual chunk is completed, the response is a 200 OK status code. Once the whole multi-chunks upload is completed successfully, the response is a 201 Created status code. The response body contains a JSON representation of the chunk upload info, including a uri property (dacfs_upload1/rest/uploads/7890 in this sample response) that you can use as the URI for subsequent requests.

Example Request

Please note that the chunk file /tmp/MyNewFile.txt_chunk_1 was generated beforehand; it is not the actual full file.

Copy
   -X POST 'https://{hostname}/dacfs_upload1/rest/uploads/7890?returnEntity=true' \
   -H 'Accept: application/json' \
   -H 'X-Kiteworks-Version: 15' \
   -H 'Authorization: Bearer {access_token}' \
   -F 'compressionMode=NORMAL' \
   -F 'compressionSize=1024' \
   -F 'originalSize=1024' \
   -F 'index=1' \
   -F 'content=@/tmp/MyNewFile.txt_chunk_1'

Example Response (chunk)

Copy
{
    "error": "OK",
    "totalSize": 2048,
    "timestamp": "2020-05-15T08:11:07Z",
    "uri": "dacfs_upload1/rest/uploads/7890",
    "userId": 1,
    "lastTimestamp": "2020-05-15T08:11:07Z",
    "uploadedSize": 1024,
    "clientName": "OAuth Playground",
    "fileUrl": "",
    "location": "{hostname}",
    "totalChunks": 2,
    "uploadedChunks": 1,
    "completeOk": 0,
    "svrUploadTime": 0,
    "id": 7890,
    "replaceId": null,
    "backend": "acfs"
}

Example Response (Final chunk)

Copy
{
    "locked": false,
    "description": "",
    "created": "2020-05-15T06:25:16Z",
    "deleted": false,
    "clientModified": null,
    "fingerprint": "Generating...",
    "userId": 1,
    "modified": "2020-05-15T09:30:14Z",
    "clientCreated": null,
    "name": "MyNewFile.txt",
    "overriddenExpire": false,
    "expire": null,
    "mime": "text/plain",
    "permDeleted": false,
    "parentId": 1234,
    "type": "f",
    "id": 1240,
    "size": 2048
}

Example Python Code

Copy
class FileLimiter(object):
    """File-like wrapper exposing at most ``read_limit`` bytes of ``file_obj``
    starting at its current position.

    Passing an instance as the ``content`` part of a multipart upload makes
    ``requests`` send exactly one chunk of the file.
    """

    def __init__(self, file_obj, read_limit):
        self.read_limit = read_limit  # bytes still allowed to be read
        self.file_obj = file_obj

    def read(self, size=-1):
        """Read up to ``size`` bytes, never exceeding the remaining limit.

        Accepting the optional ``size`` argument matches the file protocol
        (callers may pass a block size); tracking the remaining limit keeps
        repeated calls from reading past ``read_limit`` bytes in total.
        """
        if size < 0 or size > self.read_limit:
            size = self.read_limit
        data = self.file_obj.read(size)
        self.read_limit -= len(data)
        return data

# Get the upload uri returned by the initiate request.
upload_url = response_json["uri"]

file_obj = None
with open(upload_file_path, "rb") as fn:
    url = "https://{hostname}/{upload_url}?returnEntity=true".format(hostname=hostname, upload_url=upload_url)

    # Upload the file in chunks; chunk indexes are 1-based.
    for index in range(1, total_num_of_chunks + 1):
        # The last chunk may be smaller than chunk_size.
        read_limit = chunk_size if index < total_num_of_chunks else (file_size - (chunk_size * (index - 1)))
        offset = (index - 1) * chunk_size
        fn.seek(offset)

        payload = {"compressionMode": "NORMAL", "compressionSize": read_limit, "originalSize": read_limit, "index": index}
        response = requests.post(url=url, files={"content": FileLimiter(fn, read_limit)}, data=payload, headers=headers)
        if index == total_num_of_chunks:
            # The final chunk's response contains the created file entity.
            file_obj = response.json()

# file_obj now holds the uploaded file's metadata (a `return` statement is
# only valid inside a function, not at module level).

Example whole multi-chunks

Copy
import requests
import os

class FileLimiter(object):
    """File-like wrapper exposing at most ``read_limit`` bytes of ``file_obj``
    starting at its current position.

    Passing an instance as the ``content`` part of a multipart upload makes
    ``requests`` send exactly one chunk of the file.
    """

    def __init__(self, file_obj, read_limit):
        self.read_limit = read_limit  # bytes still allowed to be read
        self.file_obj = file_obj

    def read(self, size=-1):
        """Read up to ``size`` bytes, never exceeding the remaining limit.

        Accepting the optional ``size`` argument matches the file protocol
        (callers may pass a block size); tracking the remaining limit keeps
        repeated calls from reading past ``read_limit`` bytes in total.
        """
        if size < 0 or size > self.read_limit:
            size = self.read_limit
        data = self.file_obj.read(size)
        self.read_limit -= len(data)
        return data

# Common request headers. access_token comes from the OAuth flow and
# api_version is the X-Kiteworks-Version value (e.g. "15").
headers = {
    "Accept": "application/json",
    "X-Kiteworks-Version": api_version,
    "Authorization": "Bearer {access_token}".format(access_token=access_token)
}

# To initiate the upload we need the file's size and name.
file_size = os.path.getsize(upload_file_path)
file_name = os.path.basename(upload_file_path)

# Calculate the number of chunks to be uploaded in sequence.
total_num_of_chunks = file_size // chunk_size

# E.g. 7 // 2 = 3, but the actual number of chunks should be 4: [2, 2, 2, 1].
if file_size % chunk_size:
    total_num_of_chunks += 1

# The minimum number of chunks is 1, even when the file size is 0.
if total_num_of_chunks == 0:
    total_num_of_chunks = 1

payload = {"filename": file_name, "totalSize": file_size, "totalChunks": total_num_of_chunks}
url = "https://{hostname}/rest/folders/{folder_id}/actions/initiateUpload".format(
    hostname=hostname,
    folder_id=folder_id
)
response = requests.post(url=url, data=payload, headers=headers)
response_json = response.json()
# Get the upload uri returned by the initiate request.
upload_url = response_json["uri"]

file_obj = None
with open(upload_file_path, "rb") as fn:
    url = "https://{hostname}/{upload_url}?returnEntity=true".format(hostname=hostname, upload_url=upload_url)

    # Upload the file in chunks; chunk indexes are 1-based.
    for index in range(1, total_num_of_chunks + 1):
        # The last chunk may be smaller than chunk_size.
        read_limit = chunk_size if index < total_num_of_chunks else (file_size - (chunk_size * (index - 1)))
        offset = (index - 1) * chunk_size
        fn.seek(offset)

        payload = {"compressionMode": "NORMAL", "compressionSize": read_limit, "originalSize": read_limit, "index": index}
        response = requests.post(url=url, files={"content": FileLimiter(fn, read_limit)}, data=payload, headers=headers)
        if index == total_num_of_chunks:
            # The final chunk's response contains the created file entity.
            file_obj = response.json()

# file_obj now holds the uploaded file's metadata (a `return` statement is
# only valid inside a function, not at module level).