Each user can upload a maximum of 1,000 files. Each file must not exceed 100 MB, and the total size of all uploaded files must not exceed 10 GB. The file parsing service is currently free, but rate limiting may be applied during peak traffic periods.
When uploading a file, use purpose="file-extract" if you want the model to use the extracted file contents as context.
python
curl
node.js
showLineNumbers
from pathlib import Path

from openai import OpenAI

# Point the OpenAI-compatible client at the Moonshot endpoint.
client = OpenAI(
    api_key="$MOONSHOT_API_KEY",
    base_url="https://api.moonshot.ai/v1",
)

# xlnet.pdf is an example file; we support pdf, doc, and image formats.
file_object = client.files.create(file=Path("xlnet.pdf"), purpose="file-extract")

# Note: retrieve_content is deprecated in the latest version.
# If you are using the latest SDK, use files.content instead.
file_content = client.files.content(file_id=file_object.id).text

# The extracted file text is injected as an extra system message so the
# model can use it as context for the user's question.
messages = [
    {
        "role": "system",
        "content": "You are Kimi, an AI assistant provided by Moonshot AI. You are particularly skilled in Chinese and English conversations. You provide users with safe, helpful, and accurate answers. You will refuse to answer any questions involving terrorism, racism, pornography, or violence. Moonshot AI is a proper noun and should not be translated into other languages.",
    },
    {
        "role": "system",
        "content": file_content,
    },
    {"role": "user", "content": "Please give a brief introduction of what xlnet.pdf is about"},
]

completion = client.chat.completions.create(
    model="kimi-k2-turbo-preview",
    messages=messages,
    temperature=0.6,
)
print(completion.choices[0].message)
showLineNumbers
# xlnet.pdf is a sample file
# Upload it to the /v1/files endpoint with purpose=file-extract so its
# contents can later be retrieved as model context.
curl https://api.moonshot.ai/v1/files \
  -H "Authorization: Bearer $MOONSHOT_API_KEY" \
  -F purpose="file-extract" \
  -F file="@xlnet.pdf"
showLineNumbers
const OpenAI = require("openai");
const fs = require("fs");

// Point the OpenAI-compatible client at the Moonshot endpoint.
const client = new OpenAI({
    apiKey: "$MOONSHOT_API_KEY",
    baseURL: "https://api.moonshot.ai/v1",
});

async function main() {
    // Upload the sample file for content extraction.
    let file_object = await client.files.create({
        file: fs.createReadStream("xlnet.pdf"),
        purpose: "file-extract",
    });

    // retrieve_content is deprecated in the latest version.
    let file_content = await (await client.files.content(file_object.id)).text();

    // The extracted text goes into an extra system message as context.
    let messages = [
        {
            "role": "system",
            "content": "You are Kimi, an AI assistant provided by Moonshot AI. You are more proficient in Chinese and English conversations. You provide users with safe, helpful, and accurate answers. You will refuse to answer any questions related to terrorism, racism, pornography, or violence. Moonshot AI is a proper noun and should not be translated into other languages.",
        },
        {
            "role": "system",
            "content": file_content,
        },
        {"role": "user", "content": "Please give a brief introduction of what xlnet.pdf is about"},
    ];

    const completion = await client.chat.completions.create({
        model: "kimi-k2-turbo-preview",
        messages: messages,
        temperature: 0.6,
    });
    console.log(completion.choices[0].message.content);
}

main();
Replace $MOONSHOT_API_KEY with your own API key, or set it as an environment variable before making the call.
Multi-file Chat Example
If you want to upload multiple files and have a conversation with Kimi based on these files, you can use the following pattern:
from typing import *

import os
import json
from pathlib import Path

from openai import OpenAI

# Point the OpenAI-compatible client at the Moonshot endpoint; the key is
# read from the environment rather than hard-coded.
client = OpenAI(
    base_url="https://api.moonshot.ai/v1",
    api_key=os.environ["MOONSHOT_DEMO_API_KEY"],
)


def upload_files(files: List[str]) -> List[Dict[str, Any]]:
    """
    upload_files uploads all provided files (paths) via the file upload API
    '/v1/files', retrieves the uploaded file content, and generates file
    messages. Each file becomes an independent message with role set to
    system. The Kimi model will correctly recognize the file content in
    these system messages.

    :param files: A list of file paths to upload. Paths can be absolute or
        relative, passed as strings.
    :return: A list of messages containing file content. Add these messages
        to the Context, i.e., the messages parameter when calling the
        `/v1/chat/completions` API.
    """
    messages = []
    for file in files:
        # Upload, then immediately pull back the extracted text for this file.
        file_object = client.files.create(file=Path(file), purpose="file-extract")
        file_content = client.files.content(file_id=file_object.id).text
        messages.append({
            "role": "system",
            "content": file_content,
        })
    return messages


def main():
    # One system message per uploaded file, prepended to the conversation.
    file_messages = upload_files(files=["upload_files.py"])
    messages = [
        *file_messages,
        {
            "role": "system",
            "content": "You are Kimi, an AI assistant provided by Moonshot AI. You are more proficient in Chinese and English conversations. You provide users with safe, helpful, and accurate answers. You will refuse to answer any questions related to terrorism, racism, pornography, or violence. Moonshot AI is a proper noun and should not be translated into other languages.",
        },
        {
            "role": "user",
            "content": "Summarize the content of these files.",
        },
    ]
    # Show the assembled context before sending it to the model.
    print(json.dumps(messages, indent=2, ensure_ascii=False))
    completion = client.chat.completions.create(
        model="kimi-k2-turbo-preview",
        messages=messages,
    )
    print(completion.choices[0].message.content)


if __name__ == '__main__':
    main()
Image or Video Understanding
When uploading image or video assets for native model understanding, use purpose="image" or purpose="video". Please refer to Using Vision Models for end-to-end examples.
The Authorization header expects a Bearer token. Use your MOONSHOT_API_KEY as the token. This is a server-side secret key. Generate one on the API keys page in your dashboard.
Specifies how the uploaded file will be processed. file-extract: extract file contents; image: upload images for vision understanding; video: upload videos for video understanding
Purpose used when uploading the file. file-extract: extract file contents; image: upload images for vision understanding; video: upload videos for video understanding