Skip to content

Commit 3c72b9c

Browse files
committed
Merge main
2 parents 5f00d95 + 271af98 commit 3c72b9c

15 files changed

Lines changed: 586 additions & 226 deletions

File tree

.github/workflows/check_code_quality.yml

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -20,6 +20,6 @@ jobs:
2020
runs-on: ubuntu-latest
2121
steps:
2222
- name: Checkout
23-
uses: actions/checkout@v3
23+
uses: actions/checkout@v4
2424

2525
- uses: pre-commit/action@v3.0.1

.github/workflows/upload-to-pypi.yml

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -12,7 +12,7 @@ jobs:
1212
python-version: ["3.10"]
1313

1414
steps:
15-
- uses: actions/checkout@v2
15+
- uses: actions/checkout@v4
1616
- name: Set up Python ${{ matrix.python-version }}
1717
uses: actions/setup-python@v2.2.2
1818
with:

README.md

Lines changed: 92 additions & 22 deletions
Original file line numberDiff line numberDiff line change
@@ -52,25 +52,101 @@ This repo contains both a Python Library and a CLI. We'll demonstrate how to use
5252
### Chat Completions
5353

5454
```python
55-
import os
5655
from together import Together
5756

58-
client = Together(api_key=os.environ.get("TOGETHER_API_KEY"))
57+
client = Together()
5958

59+
# Simple text message
6060
response = client.chat.completions.create(
6161
model="mistralai/Mixtral-8x7B-Instruct-v0.1",
6262
messages=[{"role": "user", "content": "tell me about new york"}],
6363
)
6464
print(response.choices[0].message.content)
65+
66+
# Multi-modal message with text and image
67+
response = client.chat.completions.create(
68+
model="meta-llama/Llama-3.2-11B-Vision-Instruct-Turbo",
69+
messages=[{
70+
"role": "user",
71+
"content": [
72+
{
73+
"type": "text",
74+
"text": "What's in this image?"
75+
},
76+
{
77+
"type": "image_url",
78+
"image_url": {
79+
"url": "https://huggingface.co/datasets/patrickvonplaten/random_img/resolve/main/yosemite.png"
80+
}
81+
}
82+
]
83+
}]
84+
)
85+
print(response.choices[0].message.content)
86+
87+
# Multi-modal message with multiple images
88+
response = client.chat.completions.create(
89+
model="Qwen/Qwen2.5-VL-72B-Instruct",
90+
messages=[{
91+
"role": "user",
92+
"content": [
93+
{
94+
"type": "text",
95+
"text": "Compare these two images."
96+
},
97+
{
98+
"type": "image_url",
99+
"image_url": {
100+
"url": "https://huggingface.co/datasets/patrickvonplaten/random_img/resolve/main/yosemite.png"
101+
}
102+
},
103+
{
104+
"type": "image_url",
105+
"image_url": {
106+
"url": "https://huggingface.co/datasets/patrickvonplaten/random_img/resolve/main/slack.png"
107+
}
108+
}
109+
]
110+
}]
111+
)
112+
print(response.choices[0].message.content)
113+
114+
# Multi-modal message with text and video
115+
response = client.chat.completions.create(
116+
model="Qwen/Qwen2.5-VL-72B-Instruct",
117+
messages=[{
118+
"role": "user",
119+
"content": [
120+
{
121+
"type": "text",
122+
"text": "What's happening in this video?"
123+
},
124+
{
125+
"type": "video_url",
126+
"video_url": {
127+
"url": "http://commondatastorage.googleapis.com/gtv-videos-bucket/sample/ForBiggerFun.mp4"
128+
}
129+
}
130+
]
131+
}]
132+
)
133+
print(response.choices[0].message.content)
65134
```
66135

136+
The chat completions API supports three types of content:
137+
- Plain text messages using the `content` field directly
138+
- Multi-modal messages with images using `type: "image_url"`
139+
- Multi-modal messages with videos using `type: "video_url"`
140+
141+
When using multi-modal content, the `content` field becomes an array of content objects, each with its own type and corresponding data.
142+
67143
#### Streaming
68144

69145
```python
70146
import os
71147
from together import Together
72148

73-
client = Together(api_key=os.environ.get("TOGETHER_API_KEY"))
149+
client = Together()
74150
stream = client.chat.completions.create(
75151
model="mistralai/Mixtral-8x7B-Instruct-v0.1",
76152
messages=[{"role": "user", "content": "tell me about new york"}],
@@ -84,17 +160,17 @@ for chunk in stream:
84160
#### Async usage
85161

86162
```python
87-
import os, asyncio
163+
import asyncio
88164
from together import AsyncTogether
89165

90-
async_client = AsyncTogether(api_key=os.environ.get("TOGETHER_API_KEY"))
166+
async_client = AsyncTogether()
91167
messages = [
92168
"What are the top things to do in San Francisco?",
93169
"What country is Paris in?",
94170
]
95171

96172
async def async_chat_completion(messages):
97-
async_client = AsyncTogether(api_key=os.environ.get("TOGETHER_API_KEY"))
173+
async_client = AsyncTogether()
98174
tasks = [
99175
async_client.chat.completions.create(
100176
model="mistralai/Mixtral-8x7B-Instruct-v0.1",
@@ -115,10 +191,9 @@ asyncio.run(async_chat_completion(messages))
115191
Completions are for code and language models shown [here](https://docs.together.ai/docs/inference-models). Below, a code model example is shown.
116192

117193
```python
118-
import os
119194
from together import Together
120195

121-
client = Together(api_key=os.environ.get("TOGETHER_API_KEY"))
196+
client = Together()
122197

123198
response = client.completions.create(
124199
model="codellama/CodeLlama-34b-Python-hf",
@@ -131,10 +206,9 @@ print(response.choices[0].text)
131206
#### Streaming
132207

133208
```python
134-
import os
135209
from together import Together
136210

137-
client = Together(api_key=os.environ.get("TOGETHER_API_KEY"))
211+
client = Together()
138212
stream = client.completions.create(
139213
model="codellama/CodeLlama-34b-Python-hf",
140214
prompt="Write a Next.js component with TailwindCSS for a header component.",
@@ -148,10 +222,10 @@ for chunk in stream:
148222
#### Async usage
149223

150224
```python
151-
import os, asyncio
225+
import asyncio
152226
from together import AsyncTogether
153227

154-
async_client = AsyncTogether(api_key=os.environ.get("TOGETHER_API_KEY"))
228+
async_client = AsyncTogether()
155229
prompts = [
156230
"Write a Next.js component with TailwindCSS for a header component.",
157231
"Write a python function for the fibonacci sequence",
@@ -176,10 +250,9 @@ asyncio.run(async_chat_completion(prompts))
176250
### Image generation
177251

178252
```python
179-
import os
180253
from together import Together
181254

182-
client = Together(api_key=os.environ.get("TOGETHER_API_KEY"))
255+
client = Together()
183256

184257
response = client.images.generate(
185258
prompt="space robots",
@@ -196,7 +269,7 @@ print(response.data[0].b64_json)
196269
from typing import List
197270
from together import Together
198271

199-
client = Together(api_key=os.environ.get("TOGETHER_API_KEY"))
272+
client = Together()
200273

201274
def get_embeddings(texts: List[str], model: str) -> List[List[float]]:
202275
texts = [text.replace("\n", " ") for text in texts]
@@ -215,7 +288,7 @@ print(embeddings)
215288
from typing import List
216289
from together import Together
217290

218-
client = Together(api_key=os.environ.get("TOGETHER_API_KEY"))
291+
client = Together()
219292

220293
def get_reranked_documents(query: str, documents: List[str], model: str, top_n: int = 3) -> List[str]:
221294
outputs = client.rerank.create(model=model, query=query, documents=documents, top_n=top_n)
@@ -237,10 +310,9 @@ Read more about Reranking [here](https://docs.together.ai/docs/rerank-overview).
237310
The files API is used for fine-tuning and allows developers to upload data to fine-tune on. It also has several methods to list all files, retrieve files, and delete files. Please refer to our fine-tuning docs [here](https://docs.together.ai/docs/fine-tuning-python).
238311

239312
```python
240-
import os
241313
from together import Together
242314

243-
client = Together(api_key=os.environ.get("TOGETHER_API_KEY"))
315+
client = Together()
244316

245317
client.files.upload(file="somedata.jsonl") # uploads a file
246318
client.files.list() # lists all uploaded files
@@ -254,10 +326,9 @@ client.files.delete(id="file-d0d318cb-b7d9-493a-bd70-1cfe089d3815") # deletes a
254326
The finetune API is used for fine-tuning and allows developers to create fine-tuning jobs. It also has several methods to list all jobs, retrieve statuses, and get checkpoints. Please refer to our fine-tuning docs [here](https://docs.together.ai/docs/fine-tuning-python).
255327

256328
```python
257-
import os
258329
from together import Together
259330

260-
client = Together(api_key=os.environ.get("TOGETHER_API_KEY"))
331+
client = Together()
261332

262333
client.fine_tuning.create(
263334
training_file = 'file-d0d318cb-b7d9-493a-bd70-1cfe089d3815',
@@ -281,10 +352,9 @@ client.fine_tuning.download(id="ft-c66a5c18-1d6d-43c9-94bd-32d756425b4b") # down
281352
This lists all the models that Together supports.
282353

283354
```python
284-
import os
285355
from together import Together
286356

287-
client = Together(api_key=os.environ.get("TOGETHER_API_KEY"))
357+
client = Together()
288358

289359
models = client.models.list()
290360

0 commit comments

Comments
 (0)