Buckets:
| - sections: | |
| - local: index | |
| title: 🤗 Optimum | |
| - local: installation | |
| title: Installation | |
| - local: quicktour | |
| title: Quick tour | |
| - local: notebooks | |
| title: Notebooks | |
| - sections: | |
| - local: concept_guides/quantization | |
| title: Quantization | |
| title: Conceptual guides | |
| title: Overview | |
| - sections: | |
| - local: onnxruntime/overview | |
| title: Overview | |
| - local: onnxruntime/quickstart | |
| title: Quick tour | |
| - isExpanded: false | |
| sections: | |
| - local: onnxruntime/usage_guides/pipelines | |
| title: Inference pipelines | |
| - local: onnxruntime/usage_guides/models | |
| title: Models for inference | |
| - local: onnxruntime/usage_guides/optimization | |
| title: How to apply graph optimization | |
| - local: onnxruntime/usage_guides/quantization | |
| title: How to apply dynamic and static quantization | |
| - local: onnxruntime/usage_guides/trainer | |
| title: How to accelerate training | |
| - local: onnxruntime/usage_guides/gpu | |
| title: Accelerated inference on NVIDIA GPUs | |
| title: How-to guides | |
| - isExpanded: false | |
| sections: | |
| - local: onnxruntime/concept_guides/onnx | |
| title: ONNX 🤗 ONNX Runtime | |
| title: Conceptual guides | |
| - isExpanded: false | |
| sections: | |
| - local: onnxruntime/package_reference/modeling_ort | |
| title: ONNX Runtime Models | |
| - local: onnxruntime/package_reference/configuration | |
| title: Configuration | |
| - local: onnxruntime/package_reference/optimization | |
| title: Optimization | |
| - local: onnxruntime/package_reference/quantization | |
| title: Quantization | |
| - local: onnxruntime/package_reference/trainer | |
| title: Trainer | |
| title: Reference | |
| title: ONNX Runtime | |
| - sections: | |
| - local: exporters/overview | |
| title: Overview | |
| - local: exporters/task_manager | |
| title: The TasksManager | |
| - sections: | |
| - local: exporters/onnx/overview | |
| title: Overview | |
| - sections: | |
| - local: exporters/onnx/usage_guides/export_a_model | |
| title: Export a model to ONNX | |
| - local: exporters/onnx/usage_guides/contribute | |
| title: Add support for exporting an architecture to ONNX | |
| title: How-to guides | |
| - isExpanded: false | |
| sections: | |
| - local: exporters/onnx/package_reference/configuration | |
| title: ONNX configurations | |
| - local: exporters/onnx/package_reference/export | |
| title: Export functions | |
| title: Reference | |
| title: ONNX | |
| - sections: | |
| - local: exporters/tflite/overview | |
| title: Overview | |
| - sections: | |
| - local: exporters/tflite/usage_guides/export_a_model | |
| title: Export a model to TFLite | |
| - local: exporters/tflite/usage_guides/contribute | |
| title: Add support for exporting an architecture to TFLite | |
| title: How-to guides | |
| - isExpanded: false | |
| sections: | |
| - local: exporters/tflite/package_reference/configuration | |
| title: TFLite configurations | |
| - local: exporters/tflite/package_reference/export | |
| title: Export functions | |
| title: Reference | |
| title: TFLite | |
| title: Exporters | |
| - sections: | |
| - local: torch_fx/overview | |
| title: Overview | |
| - isExpanded: false | |
| sections: | |
| - local: torch_fx/usage_guides/optimization | |
| title: Optimization | |
| title: How-to guides | |
| - isExpanded: false | |
| sections: | |
| - local: torch_fx/concept_guides/symbolic_tracer | |
| title: Symbolic tracer | |
| title: Conceptual guides | |
| - isExpanded: false | |
| sections: | |
| - local: torch_fx/package_reference/optimization | |
| title: Optimization | |
| title: Reference | |
| title: Torch FX | |
| - sections: | |
| - local: bettertransformer/overview | |
| title: Overview | |
| - isExpanded: false | |
| sections: | |
| - local: bettertransformer/tutorials/convert | |
| title: Convert Transformers models to use BetterTransformer | |
| - local: bettertransformer/tutorials/contribute | |
| title: How to add support for new architectures? | |
| title: Tutorials | |
| title: BetterTransformer | |
| - isExpanded: false | |
| sections: | |
| - local: graphcore/index | |
| title: 🤗 Optimum Graphcore | |
| - local: graphcore/quickstart | |
| title: Quickstart | |
| - local: graphcore/ipu_config | |
| title: IPU Configuration | |
| - local: graphcore/trainer | |
| title: IPU Trainer | |
| - local: graphcore/add_support_for_new_model | |
| title: Add support for an architecture | |
| title: Optimum Graphcore | |
| - isExpanded: false | |
| sections: | |
| - local: habana/index | |
| title: 🤗 Optimum Habana | |
| - local: habana/installation | |
| title: Installation | |
| - local: habana/quickstart | |
| title: Quickstart | |
| - sections: | |
| - local: habana/tutorials/overview | |
| title: Overview | |
| - local: habana/tutorials/single_hpu | |
| title: Single-HPU Training | |
| - local: habana/tutorials/distributed | |
| title: Distributed Training | |
| - local: habana/tutorials/stable_diffusion | |
| title: Stable Diffusion | |
| title: Tutorials | |
| - sections: | |
| - local: habana/usage_guides/overview | |
| title: Overview | |
| - local: habana/usage_guides/pretraining | |
| title: Pretraining Transformers | |
| - local: habana/usage_guides/accelerate_training | |
| title: Accelerating Training | |
| - local: habana/usage_guides/accelerate_inference | |
| title: Accelerating Inference | |
| - local: habana/usage_guides/deepspeed | |
| title: How to use DeepSpeed | |
| - local: habana/usage_guides/multi_node_training | |
| title: Multi-node Training | |
| title: How-To Guides | |
| - sections: | |
| - local: habana/concept_guides/hpu | |
| title: What are Habana's Gaudi and HPUs? | |
| title: Conceptual Guides | |
| - sections: | |
| - local: habana/package_reference/trainer | |
| title: Gaudi Trainer | |
| - local: habana/package_reference/gaudi_config | |
| title: Gaudi Configuration | |
| - local: habana/package_reference/stable_diffusion_pipeline | |
| title: Gaudi Stable Diffusion Pipeline | |
| - local: habana/package_reference/distributed_runner | |
| title: Distributed Runner | |
| title: Reference | |
| title: Optimum Habana | |
| - isExpanded: false | |
| sections: | |
| - local: intel/index | |
| title: 🤗 Optimum Intel | |
| - sections: | |
| - local: intel/optimization_inc | |
| title: Optimization | |
| - local: intel/distributed_training | |
| title: Distributed Training | |
| - local: intel/reference_inc | |
| title: Reference | |
| title: Neural Compressor | |
| - sections: | |
| - local: intel/inference | |
| title: Models for inference | |
| - local: intel/optimization_ov | |
| title: Optimization | |
| - local: intel/reference_ov | |
| title: Reference | |
| title: OpenVINO | |
| title: Optimum Intel | |
| - isExpanded: false | |
| sections: | |
| - local: utils/dummy_input_generators | |
| title: Dummy input generators | |
| - local: utils/normalized_config | |
| title: Normalized configurations | |
| title: Utilities | |
Xet Storage Details
- Size:
- 6.61 kB
- Xet hash:
- ebda8fcdab27299c1e60615905a1787c32c70752a389b2014562e366955c6a83
·
Xet efficiently stores files, intelligently splitting them into unique chunks and accelerating uploads and downloads. More info.