# YOLOS

## Overview

The YOLOS model was proposed in [You Only Look at One Sequence: Rethinking Transformer in Vision through Object Detection](https://arxiv.org/abs/2106.00666) by Yuxin Fang, Bencheng Liao, Xinggang Wang, Jiemin Fang, Jiyang Qi, Rui Wu, Jianwei Niu, and Wenyu Liu.
YOLOS proposes to leverage the plain [Vision Transformer (ViT)](vit) for object detection, inspired by DETR. It turns out that a base-sized encoder-only Transformer can also achieve 42 AP on COCO, similar to DETR and to much more complex frameworks such as Faster R-CNN.

## Export to Neuron

To deploy 🤗 [Transformers](https://huggingface.co/docs/transformers/index) models on Neuron devices, you first need to compile the models and export them to a serialized format for inference. Below are two approaches to compiling the model; choose the one that best suits your needs. Here we take `object-detection` as an example:

### Option 1: CLI

You can export the model using the Optimum command-line interface as follows:

```bash
optimum-cli export neuron --model hustvl/yolos-tiny --task object-detection --batch_size 1 yolos_object_detection_neuronx/
```

<Tip>

Execute `optimum-cli export neuron --help` to display all command line options and their descriptions.

</Tip>

### Option 2: Python API
```python
from optimum.neuron import NeuronModelForObjectDetection
from transformers import AutoImageProcessor


preprocessor = AutoImageProcessor.from_pretrained("hustvl/yolos-tiny")
neuron_model = NeuronModelForObjectDetection.from_pretrained("hustvl/yolos-tiny", export=True, batch_size=1)

neuron_model.save_pretrained("yolos_object_detection_neuronx")
neuron_model.push_to_hub(
    "yolos_object_detection_neuronx", repository_id="optimum/yolos-tiny-neuronx-bs1"  # Replace with your HF Hub repo id
)
```
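Once exported and saved, the compiled model can be loaded back for inference without recompiling. Below is a minimal sketch (not part of the original snippet), assuming the local export directory produced above; the image processor is reloaded from the original checkpoint:

```python
from optimum.neuron import NeuronModelForObjectDetection
from transformers import AutoImageProcessor

# Reload the compiled model from the local export directory created above
# (loading from the pushed Hub repo id would work the same way).
neuron_model = NeuronModelForObjectDetection.from_pretrained("yolos_object_detection_neuronx")
# Reload the image processor from the original checkpoint.
preprocessor = AutoImageProcessor.from_pretrained("hustvl/yolos-tiny")
```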
## NeuronYolosForObjectDetection

`class optimum.neuron.NeuronYolosForObjectDetection(model: ScriptModule, config: PretrainedConfig, model_save_dir: str | Path | TemporaryDirectory | None = None, model_file_name: str | None = None, preprocessors: list | None = None, neuron_config: NeuronDefaultConfig | None = None, **kwargs)` — [source](https://github.com/huggingface/optimum-neuron/blob/v0.3.0/optimum/neuron/models/inference/yolos/modeling_yolos.py#L37)

Neuron model with an object detection head on top, for tasks such as COCO detection.

This model inherits from `~neuron.modeling.NeuronTracedModel`. Check the superclass documentation for the generic methods the library implements for all its models (such as downloading or saving).

**Parameters:**

- **config** (`transformers.PretrainedConfig`) — [PretrainedConfig](https://huggingface.co/docs/transformers/main_classes/configuration#transformers.PretrainedConfig) is the model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the `optimum.neuron.modeling.NeuronTracedModel.from_pretrained` method to load the model weights.
- **model** (`torch.jit._script.ScriptModule`) — [torch.jit._script.ScriptModule](https://pytorch.org/docs/stable/generated/torch.jit.ScriptModule.html) is the TorchScript module with the embedded NEFF (Neuron Executable File Format) compiled by the neuron(x) compiler.

### forward

`forward(pixel_values: Tensor, **kwargs)` — [source](https://github.com/huggingface/optimum-neuron/blob/v0.3.0/optimum/neuron/models/inference/yolos/modeling_yolos.py#L53)

The `NeuronYolosForObjectDetection` forward method overrides the `__call__` special method. It accepts only the inputs traced during the compilation step; any additional inputs provided during inference will be ignored. To include extra inputs, recompile the model with those inputs specified.

**Parameters:**

- **pixel_values** (`torch.Tensor | None` of shape `(batch_size, num_channels, height, width)`, defaults to `None`) — Pixel values corresponding to the images in the current batch. Pixel values can be obtained from encoded images using [`AutoImageProcessor`](https://huggingface.co/docs/transformers/en/model_doc/auto#transformers.AutoImageProcessor).

Example:

```python
>>> import requests
>>> import torch
>>> from PIL import Image
>>> from optimum.neuron import NeuronYolosForObjectDetection
>>> from transformers import AutoImageProcessor

>>> url = "http://images.cocodataset.org/val2017/000000039769.jpg"
>>> image = Image.open(requests.get(url, stream=True).raw)

>>> preprocessor = AutoImageProcessor.from_pretrained("optimum/yolos-tiny-neuronx-bs1")
>>> model = NeuronYolosForObjectDetection.from_pretrained("optimum/yolos-tiny-neuronx-bs1")

>>> inputs = preprocessor(images=image, return_tensors="pt")
>>> outputs = model(**inputs)

>>> target_sizes = torch.tensor([image.size[::-1]])
>>> results = preprocessor.post_process_object_detection(outputs, threshold=0.9, target_sizes=target_sizes)[0]
```
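The `results` dictionary returned by `post_process_object_detection` follows the standard 🤗 Transformers object-detection output format, with `scores`, `labels`, and `boxes` tensors. As a short follow-up sketch (not part of the original example), the detections can be printed using the label names stored in the model config:

```python
>>> for score, label, box in zip(results["scores"], results["labels"], results["boxes"]):
...     box = [round(coord, 2) for coord in box.tolist()]
...     # id2label maps class indices to names and comes from the model's PretrainedConfig
...     print(f"{model.config.id2label[label.item()]}: {round(score.item(), 3)} at {box}")
```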