Update to Transformers.js v3.4
Browse files
README.md
CHANGED
|
@@ -7,14 +7,14 @@ https://huggingface.co/google/siglip-large-patch16-256 with ONNX weights to be c
|
|
| 7 |
|
| 8 |
## Usage (Transformers.js)
|
| 9 |
|
| 10 |
-
If you haven't already, you can install the [Transformers.js](https://huggingface.co/docs/transformers.js) JavaScript library from [NPM](https://www.npmjs.com/package/@xenova/transformers) using:
|
| 11 |
```bash
|
| 12 |
-
npm i @xenova/transformers
|
| 13 |
```
|
| 14 |
|
| 15 |
**Example:** Zero-shot image classification w/ `Xenova/siglip-large-patch16-256`:
|
| 16 |
```js
|
| 17 |
-
import { pipeline } from '@xenova/transformers';
|
| 18 |
|
| 19 |
const classifier = await pipeline('zero-shot-image-classification', 'Xenova/siglip-large-patch16-256');
|
| 20 |
const url = 'http://images.cocodataset.org/val2017/000000039769.jpg';
|
|
@@ -31,7 +31,7 @@ console.log(output);
|
|
| 31 |
**Example:** Compute text embeddings with `SiglipTextModel`.
|
| 32 |
|
| 33 |
```javascript
|
| 34 |
-
import { AutoTokenizer, SiglipTextModel } from '@xenova/transformers';
|
| 35 |
|
| 36 |
// Load tokenizer and text model
|
| 37 |
const tokenizer = await AutoTokenizer.from_pretrained('Xenova/siglip-large-patch16-256');
|
|
@@ -54,7 +54,7 @@ const { pooler_output } = await text_model(text_inputs);
|
|
| 54 |
**Example:** Compute vision embeddings with `SiglipVisionModel`.
|
| 55 |
|
| 56 |
```javascript
|
| 57 |
-
import { AutoProcessor, SiglipVisionModel, RawImage} from '@xenova/transformers';
|
| 58 |
|
| 59 |
// Load processor and vision model
|
| 60 |
const processor = await AutoProcessor.from_pretrained('Xenova/siglip-large-patch16-256');
|
|
|
|
| 7 |
|
| 8 |
## Usage (Transformers.js)
|
| 9 |
|
| 10 |
+
If you haven't already, you can install the [Transformers.js](https://huggingface.co/docs/transformers.js) JavaScript library from [NPM](https://www.npmjs.com/package/@huggingface/transformers) using:
|
| 11 |
```bash
|
| 12 |
+
npm i @huggingface/transformers
|
| 13 |
```
|
| 14 |
|
| 15 |
**Example:** Zero-shot image classification w/ `Xenova/siglip-large-patch16-256`:
|
| 16 |
```js
|
| 17 |
+
import { pipeline } from '@huggingface/transformers';
|
| 18 |
|
| 19 |
const classifier = await pipeline('zero-shot-image-classification', 'Xenova/siglip-large-patch16-256');
|
| 20 |
const url = 'http://images.cocodataset.org/val2017/000000039769.jpg';
|
|
|
|
| 31 |
**Example:** Compute text embeddings with `SiglipTextModel`.
|
| 32 |
|
| 33 |
```javascript
|
| 34 |
+
import { AutoTokenizer, SiglipTextModel } from '@huggingface/transformers';
|
| 35 |
|
| 36 |
// Load tokenizer and text model
|
| 37 |
const tokenizer = await AutoTokenizer.from_pretrained('Xenova/siglip-large-patch16-256');
|
|
|
|
| 54 |
**Example:** Compute vision embeddings with `SiglipVisionModel`.
|
| 55 |
|
| 56 |
```javascript
|
| 57 |
+
import { AutoProcessor, SiglipVisionModel, RawImage } from '@huggingface/transformers';
|
| 58 |
|
| 59 |
// Load processor and vision model
|
| 60 |
const processor = await AutoProcessor.from_pretrained('Xenova/siglip-large-patch16-256');
|