# CLIP

## Overview

The CLIP model was proposed in [Learning Transferable Visual Models From Natural Language Supervision](https://arxiv.org/abs/2103.00020) by Alec Radford, Jong Wook Kim, Chris Hallacy, Aditya Ramesh, Gabriel Goh, Sandhini Agarwal, Girish Sastry, Amanda Askell, Pamela Mishkin, Jack Clark, Gretchen Krueger, and Ilya Sutskever. CLIP (Contrastive Language-Image Pre-Training) is a neural network trained on a variety of (image, text) pairs. It can be instructed in natural language to predict the most relevant text snippet for a given image, without directly optimizing for the task, similarly to the zero-shot capabilities of GPT-2 and GPT-3.

## Export to Neuron

To deploy 🤗 [Transformers](https://huggingface.co/docs/transformers/index) models on Neuron devices, you first need to compile the models and export them to a serialized format for inference. Below are two approaches to compiling the model; choose the one that best suits your needs. Here we take the `feature-extraction` task as an example.

### Option 1: CLI

You can export the model using the Optimum command-line interface as follows:

```bash
optimum-cli export neuron --model openai/clip-vit-base-patch32 --task feature-extraction --text_batch_size 2 --sequence_length 77 --image_batch_size 1 --num_channels 3 --width 224 --height 224 clip_feature_extraction_neuronx/
```

> **Tip:** Execute `optimum-cli export neuron --help` to display all command-line options and their descriptions.

### Option 2: Python API

```python
from optimum.neuron import NeuronCLIPModel

input_shapes = {"text_batch_size": 2, "sequence_length": 77, "image_batch_size": 1, "num_channels": 3, "width": 224, "height": 224}
compiler_args = {"auto_cast": "matmul", "auto_cast_type": "bf16"}
neuron_model = NeuronCLIPModel.from_pretrained(
    "openai/clip-vit-base-patch32",
    export=True,
    **input_shapes,
    **compiler_args,
)
# Save locally
neuron_model.save_pretrained("clip_feature_extraction_neuronx/")

# Upload to the Hugging Face Hub
neuron_model.push_to_hub(
    "clip_feature_extraction_neuronx/", repository_id="optimum/clip-vit-base-patch32-neuronx"  # Replace with your HF Hub repo id
)
```

## NeuronCLIPModel

`class optimum.neuron.NeuronCLIPModel(model: ScriptModule, config: PretrainedConfig, model_save_dir=None, model_file_name=None, preprocessors=None, neuron_config=None, **kwargs)`

Bare CLIP model without any specific head on top, used for the `feature-extraction` task.

This model inherits from `~neuron.modeling.NeuronTracedModel`. Check the superclass documentation for the generic methods the library implements for all its models (such as downloading or saving).

Parameters:

- **config** (`transformers.PretrainedConfig`) — [PretrainedConfig](https://huggingface.co/docs/transformers/main_classes/configuration#transformers.PretrainedConfig) is the model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the `optimum.neuron.modeling.NeuronTracedModel.from_pretrained` method to load the model weights.
- **model** (`torch.jit._script.ScriptModule`) — [torch.jit._script.ScriptModule](https://pytorch.org/docs/stable/generated/torch.jit.ScriptModule.html) is the TorchScript module with an embedded NEFF (Neuron Executable File Format) compiled by the neuron(x) compiler.

### forward

`forward(input_ids: Tensor, pixel_values: Tensor, attention_mask: Tensor, **kwargs)`

The `NeuronCLIPModel` forward method overrides the `__call__` special method. It accepts only the inputs traced during the compilation step; any additional inputs provided during inference are ignored. To include extra inputs, recompile the model with those inputs specified.

Parameters:

- **input_ids** (`torch.Tensor` of shape `(batch_size, sequence_length)`) — Indices of input sequence tokens in the vocabulary. Indices can be obtained using [`AutoTokenizer`](https://huggingface.co/docs/transformers/autoclass_tutorial#autotokenizer). See [`PreTrainedTokenizer.encode`](https://huggingface.co/docs/transformers/main_classes/tokenizer#transformers.PreTrainedTokenizerBase.encode) and [`PreTrainedTokenizer.__call__`](https://huggingface.co/docs/transformers/main_classes/tokenizer#transformers.PreTrainedTokenizerBase.__call__) for details. [What are input IDs?](https://huggingface.co/docs/transformers/glossary#input-ids)
- **attention_mask** (`torch.Tensor | None` of shape `(batch_size, sequence_length)`) — Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`: 1 for tokens that are **not masked**, 0 for tokens that are **masked**. [What are attention masks?](https://huggingface.co/docs/transformers/glossary#attention-mask)
- **pixel_values** (`torch.Tensor | None` of shape `(batch_size, num_channels, height, width)`) — Pixel values corresponding to the images in the current batch. Pixel values can be obtained from encoded images using [`AutoImageProcessor`](https://huggingface.co/docs/transformers/en/model_doc/auto#transformers.AutoImageProcessor).

Example:

```python
import requests
from PIL import Image
from transformers import AutoProcessor
from optimum.neuron import NeuronCLIPModel

processor = AutoProcessor.from_pretrained("optimum/clip-vit-base-patch32-neuronx")
model = NeuronCLIPModel.from_pretrained("optimum/clip-vit-base-patch32-neuronx")

url = "http://images.cocodataset.org/val2017/000000039769.jpg"
image = Image.open(requests.get(url, stream=True).raw)
inputs = processor(text=["a photo of a cat", "a photo of a dog"], images=image, return_tensors="pt", padding=True)

outputs = model(**inputs)
logits_per_image = outputs.logits_per_image  # this is the image-text similarity score
probs = logits_per_image.softmax(dim=1)      # label probabilities
```

## NeuronCLIPForImageClassification

`class optimum.neuron.NeuronCLIPForImageClassification(model: ScriptModule, config: PretrainedConfig, model_save_dir=None, model_file_name=None, preprocessors=None, neuron_config=None, **kwargs)`

CLIP vision encoder with an image classification head on top (a linear layer on top of the pooled final hidden states of the patch tokens), e.g. for ImageNet.

This model inherits from `~neuron.modeling.NeuronTracedModel`. Check the superclass documentation for the generic methods the library implements for all its models (such as downloading or saving).

Parameters:

- **config** (`transformers.PretrainedConfig`) — [PretrainedConfig](https://huggingface.co/docs/transformers/main_classes/configuration#transformers.PretrainedConfig) is the model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the `optimum.neuron.modeling.NeuronTracedModel.from_pretrained` method to load the model weights.
- **model** (`torch.jit._script.ScriptModule`) — [torch.jit._script.ScriptModule](https://pytorch.org/docs/stable/generated/torch.jit.ScriptModule.html) is the TorchScript module with an embedded NEFF (Neuron Executable File Format) compiled by the neuron(x) compiler.

### forward

`forward(pixel_values: Tensor, **kwargs)`

The `NeuronCLIPForImageClassification` forward method overrides the `__call__` special method. It accepts only the inputs traced during the compilation step; any additional inputs provided during inference are ignored. To include extra inputs, recompile the model with those inputs specified.

Parameters:

- **pixel_values** (`torch.Tensor | None` of shape `(batch_size, num_channels, height, width)`, defaults to `None`) — Pixel values corresponding to the images in the current batch. Pixel values can be obtained from encoded images using [`AutoImageProcessor`](https://huggingface.co/docs/transformers/en/model_doc/auto#transformers.AutoImageProcessor).

Example:

```python
import requests
from PIL import Image
from optimum.neuron import NeuronCLIPForImageClassification
from transformers import AutoImageProcessor

url = "http://images.cocodataset.org/val2017/000000039769.jpg"
image = Image.open(requests.get(url, stream=True).raw)

preprocessor = AutoImageProcessor.from_pretrained("optimum/clip-vit-base-patch32-image-classification-neuronx")
model = NeuronCLIPForImageClassification.from_pretrained("optimum/clip-vit-base-patch32-image-classification-neuronx")

inputs = preprocessor(images=image, return_tensors="pt")

outputs = model(**inputs)
logits = outputs.logits
predicted_label = logits.argmax(-1).item()
```
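As a follow-up to the classification example above, the integer class index can be mapped back to a human-readable label through the model configuration. This is a minimal sketch, assuming the exported checkpoint's config retains the `id2label` mapping from the original image-classification checkpoint:

```python
# Continuation of the example above (assumes model.config carries an
# id2label mapping inherited from the original checkpoint).
label = model.config.id2label[predicted_label]
print(f"Predicted class: {label}")
```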
