8000 pytorch/README.md at master · ShowLang/pytorch · GitHub
[go: up one dir, main page]
More Web Proxy on the site http://driver.im/
Skip to content
{"payload":{"allShortcutsEnabled":false,"fileTree":{"":{"items":[{"name":"cmake","path":"cmake","contentType":"directory"},{"name":"docs","path":"docs","contentType":"directory"},{"name":"test","path":"test","contentType":"directory"},{"name":"tools","path":"tools","contentType":"directory"},{"name":"torch","path":"torch","contentType":"directory"},{"name":".gitignore","path":".gitignore","contentType":"file"},{"name":".travis.yml","path":".travis.yml","contentType":"file"},{"name":"Dockerfile","path":"Dockerfile","contentType":"file"},{"name":"LICENSE","path":"LICENSE","contentType":"file"},{"name":"README.md","path":"README.md","contentType":"file"},{"name":"requirements.txt","path":"requirements.txt","contentType":"file"},{"name":"setup.py","path":"setup.py","contentType":"file"},{"name":"tox.ini","path":"tox.ini","contentType":"file"}],"totalCount":13}},"fileTreeProcessingTime":7.498601,"foldersToFetch":[],"incompleteFileTree":false,"repo":{"id":82630000,"defaultBranch":"master","name":"pytorch","ownerLogin":"ShowLang","currentUserCanPush":false,"isFork":true,"isEmpty":false,"createdAt":"2017-02-21T03:06:55.000Z","ownerAvatar":"https://avatars.githubusercontent.com/u/19920962?v=4","public":true,"private":false,"isOrgOwned":false},"codeLineWrapEnabled":false,"symbolsExpanded":false,"treeExpanded":true,"refInfo":{"name":"master","listCacheKey":"v0:1613865732.488919","canEdit":false,"refType":"branch","currentOid":"01bd43037d141932e7c2394afe15e51931dde0c2"},"path":"README.md","currentUser":null,"blob":{"rawLines":null,"stylingDirectives":null,"colorizedLines":null,"csv":null,"csvError":null,"dependabotInfo":{"showConfigurationBanner":false,"configFilePath":null,"networkDependabotPath":"/ShowLang/pytorch/network/updates","dismissConfigurationNoticePath":"/settings/dismiss-notice/dependabot_configuration_notice","configurationNoticeDismissed":null},"displayName":"README.md","displayUrl":"https://github.com/ShowLang/pytorch/blob/master/README.md?raw=true","headerInfo":{"blobSize":"11.2 KB","deleteTooltip":"You must be signed in to make or propose changes","editTooltip":"You must be signed in to make or propose changes","ghDesktopPath":"https://desktop.github.com","isGitLfs":false,"onBranch":true,"shortPath":"52de9cd","siteNavLoginPath":"/login?return_to=https%3A%2F%2Fgithub.com%2FShowLang%2Fpytorch%2Fblob%2Fmaster%2FREADME.md","isCSV":false,"isRichtext":true,"toc":[{"level":2,"text":"More about PyTorch","anchor":"more-about-pytorch","htmlText":"More about PyTorch"},{"level":3,"text":"A GPU-ready Tensor library","anchor":"a-gpu-ready-tensor-library","htmlText":"A GPU-ready Tensor library"},{"level":3,"text":"Dynamic Neural Networks: Tape based Autograd","anchor":"dynamic-neural-networks-tape-based-autograd","htmlText":"Dynamic Neural Networks: Tape based Autograd"},{"level":3,"text":"Python first","anchor":"python-first","htmlText":"Python first"},{"level":3,"text":"Imperative experiences","anchor":"imperative-experiences","htmlText":"Imperative experiences"},{"level":3,"text":"Fast and Lean","anchor":"fast-and-lean","htmlText":"Fast and Lean"},{"level":3,"text":"Extensions without pain","anchor":"extensions-without-pain","htmlText":"Extensions without pain"},{"level":2,"text":"Installation","anchor":"installation","htmlText":"Installation"},{"level":3,"text":"Binaries","anchor":"binaries","htmlText":"Binaries"},{"level":3,"text":"From source","anchor":"from-source","htmlText":"From source"},{"level":4,"text":"Install optional dependencies","anchor":"install-optional-dependencies","htmlText":"Install optional dependencies"},{"level":4,"text":"Install PyTorch","anchor":"install-pytorch","htmlText":"Install PyTorch"},{"level":3,"text":"Docker image","anchor":"docker-image","htmlText":"Docker image"},{"level":2,"text":"Getting Started","anchor":"getting-started","htmlText":"Getting Started"},{"level":2,"text":"Communication","anchor":"communication","htmlText":"Communication"},{"level":2,"text":"Releases and Contributing","anchor":"releases-and-contributing","htmlText":"Releases and Contributing"},{"level":2,"text":"The Team","anchor":"the-team","htmlText":"The Team"}],"lineInfo":{"truncatedLoc":"231","truncatedSloc":"165"},"mode":"file"},"image":false,"isCodeownersFile":null,"isPlain":false,"isValidLegacyIssueTemplate":false,"issueTemplate":null,"discussionTemplate":null,"language":"Markdown","languageID":222,"large":false,"planSupportInfo":{"repoIsFork":null,"repoOwnedByCurrentUser":null,"requestFullPath":"/ShowLang/pytorch/blob/master/README.md","showFreeOrgGatedFeatureMessage":null,"showPlanSupportBanner":null,"upgradeDataAttributes":null,"upgradePath":null},"publishBannersInfo":{"dismissActionNoticePath":"/settings/dismiss-notice/publish_action_from_dockerfile","releasePath":"/ShowLang/pytorch/releases/new?marketplace=true","showPublishActionBanner":false},"rawBlobUrl":"https://github.com/ShowLang/pytorch/raw/refs/heads/master/README.md","renderImageOrRaw":false,"richText":"\u003carticle class=\"markdown-body entry-content container-lg\" itemprop=\"text\"\u003e\u003cp align=\"center\" dir=\"auto\"\u003e\u003ca target=\"_blank\" rel=\"noopener noreferrer\" href=\"/ShowLang/pytorch/blob/master/docs/source/_static/img/pytorch-logo-dark.png\"\u003e\u003cimg width=\"40%\" src=\"/ShowLang/pytorch/raw/master/docs/source/_static/img/pytorch-logo-dark.png\" style=\"max-width: 100%;\"\u003e\u003c/a\u003e\u003c/p\u003e\n\u003chr\u003e\n\u003cp dir=\"auto\"\u003ePyTorch is a python package that provides two high-level features:\u003c/p\u003e\n\u003cul dir=\"auto\"\u003e\n\u003cli\u003eTensor computation (like numpy) with strong GPU acceleration\u003c/li\u003e\n\u003cli\u003eDeep Neural Networks built on a tape-based autograd system\u003c/li\u003e\n\u003c/ul\u003e\n\u003cp dir=\"auto\"\u003eYou can reuse your favorite python packages such as numpy, scipy and Cython to extend PyTorch when needed.\u003c/p\u003e\n\u003cp dir=\"auto\"\u003eWe are in an early-release Beta. Expect some adventures and rough edges.\u003c/p\u003e\n\u003cul dir=\"auto\"\u003e\n\u003cli\u003e\u003ca href=\"#more-about-pytorch\"\u003eMore About PyTorch\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"#installation\"\u003eInstallation\u003c/a\u003e\n\u003cul dir=\"auto\"\u003e\n\u003cli\u003e\u003ca href=\"#binaries\"\u003eBinaries\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"#from-source\"\u003eFrom source\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"#docker-image\"\u003eDocker image\u003c/a\u003e\u003c/li\u003e\n\u003c/ul\u003e\n\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"#getting-started\"\u003eGetting Started\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"#communication\"\u003eCommunication\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"#releases-and-contributing\"\u003eReleases and Contributing\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"#the-team\"\u003eThe Team\u003c/a\u003e\u003c/li\u003e\n\u003c/ul\u003e\n\u003cmarkdown-accessiblity-table\u003e\u003ctable\u003e\n\u003cthead\u003e\n\u003ctr\u003e\n\u003cth\u003eSystem\u003c/th\u003e\n\u003cth\u003ePython\u003c/th\u003e\n\u003cth\u003eStatus\u003c/th\u003e\n\u003c/tr\u003e\n\u003c/thead\u003e\n\u003ctbody\u003e\n\u003ctr\u003e\n\u003ctd\u003eLinux CPU\u003c/td\u003e\n\u003ctd\u003e2.7.8, 2.7, 3.5, nightly\u003c/td\u003e\n\u003ctd\u003e\u003ca href=\"https://travis-ci.org/pytorch/pytorch\" rel=\"nofollow\"\u003e\u003cimg src=\"https://camo.githubusercontent.com/20ac8b300ba51085300ee69540af0ef68359993cef655215b6d6f3da37b70afb/68747470733a2f2f7472617669732d63692e6f72672f7079746f7263682f7079746f7263682e7376673f6272616e63683d6d6173746572\" alt=\"Build Status\" data-canonical-src=\"https://travis-ci.org/pytorch/pytorch.svg?branch=master\" style=\"max-width: 100%;\"\u003e\u003c/a\u003e\u003c/td\u003e\n\u003c/tr\u003e\n\u003ctr\u003e\n\u003ctd\u003eLinux GPU\u003c/td\u003e\n\u003ctd\u003e2.7\u003c/td\u003e\n\u003ctd\u003e\u003ca href=\"https://build.pytorch.org/job/pytorch-master-py2\" rel=\"nofollow\"\u003e\u003cimg src=\"https://camo.githubusercontent.com/b023d6a41bf9c36e1b2e59f7222c7c21906cffd57d65c8db8ae24db059b8b62b/687474703a2f2f6275696c642e7079746f7263682e6f72673a383038302f6275696c645374617475732f69636f6e3f6a6f623d7079746f7263682d6d61737465722d707932\" alt=\"Build Status\" data-canonical-src=\"http://build.pytorch.org:8080/buildStatus/icon?job=pytorch-master-py2\" style=\"max-width: 100%;\"\u003e\u003c/a\u003e\u003c/td\u003e\n\u003c/tr\u003e\n\u003ctr\u003e\n\u003ctd\u003eLinux GPU\u003c/td\u003e\n\u003ctd\u003e3.5\u003c/td\u003e\n\u003ctd\u003e\u003ca href=\"https://build.pytorch.org/job/pytorch-master-py3\" rel=\"nofollow\"\u003e\u003cimg src=\"https://camo.githubusercontent.com/17f0696e0c3b90a40a98cc588445e0e8dd36f19e6ffa0330e35a7a529ff616ff/687474703a2f2f6275696c642e7079746f7263682e6f72673a383038302f6275696c645374617475732f69636f6e3f6a6f623d7079746f7263682d6d61737465722d707933\" alt=\"Build Status\" data-canonical-src=\"http://build.pytorch.org:8080/buildStatus/icon?job=pytorch-master-py3\" style=\"max-width: 100%;\"\u003e\u003c/a\u003e\u003c/td\u003e\n\u003c/tr\u003e\n\u003c/tbody\u003e\n\u003c/table\u003e\u003c/markdown-accessiblity-table\u003e\n\u003cdiv class=\"markdown-heading\" dir=\"auto\"\u003e\u003ch2 tabindex=\"-1\" class=\"heading-element\" dir=\"auto\"\u003eMore about PyTorch\u003c/h2\u003e\u003ca id=\"user-content-more-about-pytorch\" class=\"anchor\" aria-label=\"Permalink: More about PyTorch\" href=\"#more-about-pytorch\"\u003e\u003csvg class=\"octicon octicon-link\" viewBox=\"0 0 16 16\" version=\"1.1\" width=\"16\" height=\"16\" aria-hidden=\"true\"\u003e\u003cpath d=\"m7.775 3.275 1.25-1.25a3.5 3.5 0 1 1 4.95 4.95l-2.5 2.5a3.5 3.5 0 0 1-4.95 0 .751.751 0 0 1 .018-1.042.751.751 0 0 1 1.042-.018 1.998 1.998 0 0 0 2.83 0l2.5-2.5a2.002 2.002 0 0 0-2.83-2.83l-1.25 1.25a.751.751 0 0 1-1.042-.018.751.751 0 0 1-.018-1.042Zm-4.69 9.64a1.998 1.998 0 0 0 2.83 0l1.25-1.25a.751.751 0 0 1 1.042.018.751.751 0 0 1 .018 1.042l-1.25 1.25a3.5 3.5 0 1 1-4.95-4.95l2.5-2.5a3.5 3.5 0 0 1 4.95 0 .751.751 0 0 1-.018 1.042.751.751 0 0 1-1.042.018 1.998 1.998 0 0 0-2.83 0l-2.5 2.5a1.998 1.998 0 0 0 0 2.83Z\"\u003e\u003c/path\u003e\u003c/svg\u003e\u003c/a\u003e\u003c/div\u003e\n\u003cp dir=\"auto\"\u003eAt a granular level, PyTorch is a library that consists of the following components:\u003c/p\u003e\n\u003cmarkdown-accessiblity-table\u003e\u003ctable\u003e\n\u003cthead\u003e\n\u003ctr\u003e\n\u003cth\u003e_\u003c/th\u003e\n\u003cth\u003e_\u003c/th\u003e\n\u003c/tr\u003e\n\u003c/thead\u003e\n\u003ctbody\u003e\n\u003ctr\u003e\n\u003ctd\u003etorch\u003c/td\u003e\n\u003ctd\u003ea Tensor library like NumPy, with strong GPU support\u003c/td\u003e\n\u003c/tr\u003e\n\u003ctr\u003e\n\u003ctd\u003etorch.autograd\u003c/td\u003e\n\u003ctd\u003ea tape based automatic differentiation library that supports all differentiable Tensor operations in torch\u003c/td\u003e\n\u003c/tr\u003e\n\u003ctr\u003e\n\u003ctd\u003etorch.nn\u003c/td\u003e\n\u003ctd\u003ea neural networks library deeply integrated with autograd designed for maximum flexibility\u003c/td\u003e\n\u003c/tr\u003e\n\u003ctr\u003e\n\u003ctd\u003etorch.optim\u003c/td\u003e\n\u003ctd\u003ean optimization package to be used with torch.nn with standard optimization methods such as SGD, RMSProp, LBFGS, Adam etc.\u003c/td\u003e\n\u003c/tr\u003e\n\u003ctr\u003e\n\u003ctd\u003etorch.multiprocessing\u003c/td\u003e\n\u003ctd\u003epython multiprocessing, but with magical memory sharing of torch Tensors across processes. Useful for data loading and hogwild training.\u003c/td\u003e\n\u003c/tr\u003e\n\u003ctr\u003e\n\u003ctd\u003etorch.utils\u003c/td\u003e\n\u003ctd\u003eDataLoader, Trainer and other utility functions for convenience\u003c/td\u003e\n\u003c/tr\u003e\n\u003ctr\u003e\n\u003ctd\u003etorch.legacy(.nn/.optim)\u003c/td\u003e\n\u003ctd\u003elegacy code that has been ported over from torch for backward compatibility reasons\u003c/td\u003e\n\u003c/tr\u003e\n\u003c/tbody\u003e\n\u003c/table\u003e\u003c/markdown-accessiblity-table\u003e\n\u003cp dir=\"auto\"\u003eUsually one uses PyTorch either as:\u003c/p\u003e\n\u003cul dir=\"auto\"\u003e\n\u003cli\u003eA replacement for numpy to use the power of GPUs.\u003c/li\u003e\n\u003cli\u003ea deep learning research platform that provides maximum flexibility and speed\u003c/li\u003e\n\u003c/ul\u003e\n\u003cp dir=\"auto\"\u003eElaborating further:\u003c/p\u003e\n\u003cdiv class=\"markdown-heading\" dir=\"auto\"\u003e\u003ch3 tabindex=\"-1\" class=\"heading-element\" dir=\"auto\"\u003eA GPU-ready Tensor library\u003c/h3\u003e\u003ca id=\"user-content-a-gpu-ready-tensor-library\" class=\"anchor\" aria-label=\"Permalink: A GPU-ready Tensor library\" href=\"#a-gpu-ready-tensor-library\"\u003e\u003csvg class=\"octicon octicon-link\" viewBox=\"0 0 16 16\" version=\"1.1\" width=\"16\" height=\"16\" aria-hidden=\"true\"\u003e\u003cpath d=\"m7.775 3.275 1.25-1.25a3.5 3.5 0 1 1 4.95 4.95l-2.5 2.5a3.5 3.5 0 0 1-4.95 0 .751.751 0 0 1 .018-1.042.751.751 0 0 1 1.042-.018 1.998 1.998 0 0 0 2.83 0l2.5-2.5a2.002 2.002 0 0 0-2.83-2.83l-1.25 1.25a.751.751 0 0 1-1.042-.018.751.751 0 0 1-.018-1.042Zm-4.69 9.64a1.998 1.998 0 0 0 2.83 0l1.25-1.25a.751.751 0 0 1 1.042.018.751.751 0 0 1 .018 1.042l-1.25 1.25a3.5 3.5 0 1 1-4.95-4.95l2.5-2.5a3.5 3.5 0 0 1 4.95 0 .751.751 0 0 1-.018 1.042.751.751 0 0 1-1.042.018 1.998 1.998 0 0 0-2.83 0l-2.5 2.5a1.998 1.998 0 0 0 0 2.83Z\"\u003e\u003c/path\u003e\u003c/svg\u003e\u003c/a\u003e\u003c/div\u003e\n\u003cp dir=\"auto\"\u003eIf you use numpy, then you have used Tensors (a.k.a ndarray).\u003c/p\u003e\n\u003cp align=\"center\" dir=\"auto\"\u003e\u003ca target=\"_blank\" rel=\"noopener noreferrer\" href=\"/ShowLang/pytorch/blob/master/docs/source/_static/img/tensor_illustration.png\"\u003e\u003cimg width=\"30%\" src=\"/ShowLang/pytorch/raw/master/docs/source/_static/img/tensor_illustration.png\" style=\"max-width: 100%;\"\u003e\u003c/a\u003e\u003c/p\u003e\n\u003cp dir=\"auto\"\u003ePyTorch provides Tensors that can live either on the CPU or the GPU, and accelerate\ncompute by a huge amount.\u003c/p\u003e\n\u003cp dir=\"auto\"\u003eWe provide a wide variety of tensor routines to accelerate and fit your scientific computation needs\nsuch as slicing, indexing, math operations, linear algebra, reductions.\nAnd they are fast!\u003c/p\u003e\n\u003cdiv class=\"markdown-heading\" dir=\"auto\"\u003e\u003ch3 tabindex=\"-1\" class=\"heading-element\" dir=\"auto\"\u003eDynamic Neural Networks: Tape based Autograd\u003c/h3\u003e\u003ca id=\"user-content-dynamic-neural-networks-tape-based-autograd\" class=\"anchor\" aria-label=\"Permalink: Dynamic Neural Networks: Tape based Autograd\" href=\"#dynamic-neural-networks-tape-based-autograd\"\u003e\u003csvg class=\"octicon octicon-link\" viewBox=\"0 0 16 16\" version=\"1.1\" width=\"16\" height=\"16\" aria-hidden=\"true\"\u003e\u003cpath d=\"m7.775 3.275 1.25-1.25a3.5 3.5 0 1 1 4.95 4.95l-2.5 2.5a3.5 3.5 0 0 1-4.95 0 .751.751 0 0 1 .018-1.042.751.751 0 0 1 1.042-.018 1.998 1.998 0 0 0 2.83 0l2.5-2.5a2.002 2.002 0 0 0-2.83-2.83l-1.25 1.25a.751.751 0 0 1-1.042-.018.751.751 0 0 1-.018-1.042Zm-4.69 9.64a1.998 1.998 0 0 0 2.83 0l1.25-1.25a.751.751 0 0 1 1.042.018.751.751 0 0 1 .018 1.042l-1.25 1.25a3.5 3.5 0 1 1-4.95-4.95l2.5-2.5a3.5 3.5 0 0 1 4.95 0 .751.751 0 0 1-.018 1.042.751.751 0 0 1-1.042.018 1.998 1.998 0 0 0-2.83 0l-2.5 2.5a1.998 1.998 0 0 0 0 2.83Z\"\u003e\u003c/path\u003e\u003c/svg\u003e\u003c/a\u003e\u003c/div\u003e\n\u003cp dir=\"auto\"\u003ePyTorch has a unique way of building neural networks: using and replaying a tape recorder.\u003c/p\u003e\n\u003cp dir=\"auto\"\u003eMost frameworks such as \u003ccode\u003eTensorFlow\u003c/code\u003e, \u003ccode\u003eTheano\u003c/code\u003e, \u003ccode\u003eCaffe\u003c/code\u003e and \u003ccode\u003eCNTK\u003c/code\u003e have a static view of the world.\nOne has to build a neural network, and reuse the same structure again and again.\nChanging the way the network behaves means that one has to start from scratch.\u003c/p\u003e\n\u003cp dir=\"auto\"\u003eWith PyTorch, we use a technique called Reverse-mode auto-differentiation, which allows you to\nchange the way your network behaves arbitrarily with zero lag or overhead. Our inspiration comes\nfrom several research papers on this topic, as well as current and past work such as\n\u003ca href=\"https://github.com/twitter/torch-autograd\"\u003eautograd\u003c/a\u003e,\n\u003ca href=\"https://github.com/HIPS/autograd\"\u003eautograd\u003c/a\u003e,\n\u003ca href=\"http://chainer.org\" rel=\"nofollow\"\u003eChainer\u003c/a\u003e, etc.\u003c/p\u003e\n\u003cp dir=\"auto\"\u003eWhile this technique is not unique to PyTorch, it's one of the fastest implementations of it to date.\nYou get the best of speed and flexibility for your crazy research.\u003c/p\u003e\n\u003cp align=\"center\" dir=\"auto\"\u003e\u003ca target=\"_blank\" rel=\"noopener noreferrer\" href=\"/ShowLang/pytorch/blob/master/docs/source/_static/img/dynamic_graph.gif\"\u003e\u003cimg width=\"80%\" src=\"/ShowLang/pytorch/raw/master/docs/source/_static/img/dynamic_graph.gif\" data-animated-image=\"\" style=\"max-width: 100%;\"\u003e\u003c/a\u003e\u003c/p\u003e\n\u003cdiv class=\"markdown-heading\" dir=\"auto\"\u003e\u003ch3 tabindex=\"-1\" class=\"heading-element\" dir=\"auto\"\u003ePython first\u003c/h3\u003e\u003ca id=\"user-content-python-first\" class=\"anchor\" aria-label=\"Permalink: Python first\" href=\"#python-first\"\u003e\u003csvg class=\"octicon octicon-link\" viewBox=\"0 0 16 16\" version=\"1.1\" width=\"16\" height=\"16\" aria-hidden=\"true\"\u003e\u003cpath d=\"m7.775 3.275 1.25-1.25a3.5 3.5 0 1 1 4.95 4.95l-2.5 2.5a3.5 3.5 0 0 1-4.95 0 .751.751 0 0 1 .018-1.042.751.751 0 0 1 1.042-.018 1.998 1.998 0 0 0 2.83 0l2.5-2.5a2.002 2.002 0 0 0-2.83-2.83l-1.25 1.25a.751.751 0 0 1-1.042-.018.751.751 0 0 1-.018-1.042Zm-4.69 9.64a1.998 1.998 0 0 0 2.83 0l1.25-1.25a.751.751 0 0 1 1.042.018.751.751 0 0 1 .018 1.042l-1.25 1.25a3.5 3.5 0 1 1-4.95-4.95l2.5-2.5a3.5 3.5 0 0 1 4.95 0 .751.751 0 0 1-.018 1.042.751.751 0 0 1-1.042.018 1.998 1.998 0 0 0-2.83 0l-2.5 2.5a1.998 1.998 0 0 0 0 2.83Z\"\u003e\u003c/path\u003e\u003c/svg\u003e\u003c/a\u003e\u003c/div\u003e\n\u003cp dir=\"auto\"\u003ePyTorch is not a Python binding into a monolothic C++ framework.\nIt is built to be deeply integrated into Python.\nYou can use it naturally like you would use numpy / scipy / scikit-learn etc.\nYou can write your new neural network layers in Python itself, using your favorite libraries\nand use packages such as Cython and Numba.\nOur goal is to not reinvent the wheel where appropriate.\u003c/p\u003e\n\u003cdiv class=\"markdown-heading\" dir=\"auto\"\u003e\u003ch3 tabindex=\"-1\" class=\"heading-element\" dir=\"auto\"\u003eImperative experiences\u003c/h3\u003e\u003ca id=\"user-content-imperative-experiences\" class=\"anchor\" aria-label=\"Permalink: Imperative experiences\" href=\"#imperative-experiences\"\u003e\u003csvg class=\"octicon octicon-link\" viewBox=\"0 0 16 16\" version=\"1.1\" width=\"16\" height=\"16\" aria-hidden=\"true\"\u003e\u003cpath d=\"m7.775 3.275 1.25-1.25a3.5 3.5 0 1 1 4.95 4.95l-2.5 2.5a3.5 3.5 0 0 1-4.95 0 .751.751 0 0 1 .018-1.042.751.751 0 0 1 1.042-.018 1.998 1.998 0 0 0 2.83 0l2.5-2.5a2.002 2.002 0 0 0-2.83-2.83l-1.25 1.25a.751.751 0 0 1-1.042-.018.751.751 0 0 1-.018-1.042Zm-4.69 9.64a1.998 1.998 0 0 0 2.83 0l1.25-1.25a.751.751 0 0 1 1.042.018.751.751 0 0 1 .018 1.042l-1.25 1.25a3.5 3.5 0 1 1-4.95-4.95l2.5-2.5a3.5 3.5 0 0 1 4.95 0 .751.751 0 0 1-.018 1.042.751.751 0 0 1-1.042.018 1.998 1.998 0 0 0-2.83 0l-2.5 2.5a1.998 1.998 0 0 0 0 2.83Z\"\u003e\u003c/path\u003e\u003c/svg\u003e\u003c/a\u003e\u003c/div\u003e\n\u003cp dir=\"auto\"\u003ePyTorch is designed to be intuitive, linear in thought and easy to use.\nWhen you execute a line of code, it gets executed. There isn't an asynchronous view of the world.\nWhen you drop into a debugger, or receive error messages and stack traces, understanding them is straight-forward.\nThe stack-trace points to exactly where your code was defined.\nWe hope you never spend hours debugging your code because of bad stack traces or asynchronous and opaque execution engines.\u003c/p\u003e\n\u003cdiv class=\"markdown-heading\" dir=\"auto\"\u003e\u003ch3 tabindex=\"-1\" class=\"heading-element\" dir=\"auto\"\u003eFast and Lean\u003c/h3\u003e\u003ca id=\"user-content-fast-and-lean\" class=\"anchor\" aria-label=\"Permalink: Fast and Lean\" href=\"#fast-and-lean\"\u003e\u003csvg class=\"octicon octicon-link\" viewBox=\"0 0 16 16\" version=\"1.1\" width=\"16\" height=\"16\" aria-hidden=\"true\"\u003e\u003cpath d=\"m7.775 3.275 1.25-1.25a3.5 3.5 0 1 1 4.95 4.95l-2.5 2.5a3.5 3.5 0 0 1-4.95 0 .751.751 0 0 1 .018-1.042.751.751 0 0 1 1.042-.018 1.998 1.998 0 0 0 2.83 0l2.5-2.5a2.002 2.002 0 0 0-2.83-2.83l-1.25 1.25a.751.751 0 0 1-1.042-.018.751.751 0 0 1-.018-1.042Zm-4.69 9.64a1.998 1.998 0 0 0 2.83 0l1.25-1.25a.751.751 0 0 1 1.042.018.751.751 0 0 1 .018 1.042l-1.25 1.25a3.5 3.5 0 1 1-4.95-4.95l2.5-2.5a3.5 3.5 0 0 1 4.95 0 .751.751 0 0 1-.018 1.042.751.751 0 0 1-1.042.018 1.998 1.998 0 0 0-2.83 0l-2.5 2.5a1.998 1.998 0 0 0 0 2.83Z\"\u003e\u003c/path\u003e\u003c/svg\u003e\u003c/a\u003e\u003c/div\u003e\n\u003cp dir=\"auto\"\u003ePyTorch has minimal framework overhead. We integrate acceleration libraries\nsuch as Intel MKL and NVIDIA (CuDNN, NCCL) to maximize speed.\nAt the core, its CPU and GPU Tensor and Neural Network backends\n(TH, THC, THNN, THCUNN) are written as independent libraries with a C99 API.\u003cbr\u003e\nThey are mature and have been tested for years.\u003c/p\u003e\n\u003cp dir=\"auto\"\u003eHence, PyTorch is quite fast -- whether you run small or large neural networks.\u003c/p\u003e\n\u003cp dir=\"auto\"\u003eThe memory usage in PyTorch is extremely efficient compared to Torch or some of the alternatives.\nWe've written custom memory allocators for the GPU to make sure that\nyour deep learning models are maximally memory efficient.\nThis enables you to train bigger deep learning models than before.\u003c/p\u003e\n\u003cdiv class=\"markdown-heading\" dir=\"auto\"\u003e\u003ch3 tabindex=\"-1\" class=\"heading-element\" dir=\"auto\"\u003eExtensions without pain\u003c/h3\u003e\u003ca id=\"user-content-extensions-without-pain\" class=\"anchor\" aria-label=\"Permalink: Extensions without pain\" href=\"#extensions-without-pain\"\u003e\u003csvg class=\"octicon octicon-link\" viewBox=\"0 0 16 16\" version=\"1.1\" width=\"16\" height=\"16\" aria-hidden=\"true\"\u003e\u003cpath d=\"m7.775 3.275 1.25-1.25a3.5 3.5 0 1 1 4.95 4.95l-2.5 2.5a3.5 3.5 0 0 1-4.95 0 .751.751 0 0 1 .018-1.042.751.751 0 0 1 1.042-.018 1.998 1.998 0 0 0 2.83 0l2.5-2.5a2.002 2.002 0 0 0-2.83-2.83l-1.25 1.25a.751.751 0 0 1-1.042-.018.751.751 0 0 1-.018-1.042Zm-4.69 9.64a1.998 1.998 0 0 0 2.83 0l1.25-1.25a.751.751 0 0 1 1.042.018.751.751 0 0 1 .018 1.042l-1.25 1.25a3.5 3.5 0 1 1-4.95-4.95l2.5-2.5a3.5 3.5 0 0 1 4.95 0 .751.751 0 0 1-.018 1.042.751.751 0 0 1-1.042.018 1.998 1.998 0 0 0-2.83 0l-2.5 2.5a1.998 1.998 0 0 0 0 2.83Z\"\u003e\u003c/path\u003e\u003c/svg\u003e\u003c/a\u003e\u003c/div\u003e\n\u003cp dir=\"auto\"\u003eWriting new neural network modules, or interfacing with PyTorch's Tensor API was designed to be straight-forward\nand with minimal abstractions.\u003c/p\u003e\n\u003cp dir=\"auto\"\u003eYou can write new neural network layers in Python using the torch API\n\u003ca href=\"https://github.com/pytorch/tutorials/blob/master/Creating%20extensions%20using%20numpy%20and%20scipy.ipynb\"\u003eor your favorite numpy based libraries such as SciPy\u003c/a\u003e.\u003c/p\u003e\n\u003cp dir=\"auto\"\u003eIf you want to write your layers in C/C++, we provide an extension API based on\n\u003ca href=\"http://cffi.readthedocs.io/en/latest/\" rel=\"nofollow\"\u003ecffi\u003c/a\u003e that is efficient and with minimal boilerplate.\u003cbr\u003e\nThere is no wrapper code that needs to be written. \u003ca href=\"https://github.com/pytorch/extension-ffi\"\u003eYou can see an example here\u003c/a\u003e.\u003c/p\u003e\n\u003cdiv class=\"markdown-heading\" dir=\"auto\"\u003e\u003ch2 tabindex=\"-1\" class=\"heading-element\" dir=\"auto\"\u003eInstallation\u003c/h2\u003e\u003ca id=\"user-content-installation\" class=\"anchor\" aria-label=\"Permalink: Installation\" href=\"#installation\"\u003e\u003csvg class=\"octicon octicon-link\" viewBox=\"0 0 16 16\" version=\"1.1\" width=\"16\" height=\"16\" aria-hidden=\"true\"\u003e\u003cpath d=\"m7.775 3.275 1.25-1.25a3.5 3.5 0 1 1 4.95 4.95l-2.5 2.5a3.5 3.5 0 0 1-4.95 0 .751.751 0 0 1 .018-1.042.751.751 0 0 1 1.042-.018 1.998 1.998 0 0 0 2.83 0l2.5-2.5a2.002 2.002 0 0 0-2.83-2.83l-1.25 1.25a.751.751 0 0 1-1.042-.018.751.751 0 0 1-.018-1.042Zm-4.69 9.64a1.998 1.998 0 0 0 2.83 0l1.25-1.25a.751.751 0 0 1 1.042.018.751.751 0 0 1 .018 1.042l-1.25 1.25a3.5 3.5 0 1 1-4.95-4.95l2.5-2.5a3.5 3.5 0 0 1 4.95 0 .751.751 0 0 1-.018 1.042.751.751 0 0 1-1.042.018 1.998 1.998 0 0 0-2.83 0l-2.5 2.5a1.998 1.998 0 0 0 0 2.83Z\"\u003e\u003c/path\u003e\u003c/svg\u003e\u003c/a\u003e\u003c/div\u003e\n\u003cdiv class=\"markdown-heading\" dir=\"auto\"\u003e\u003ch3 tabindex=\"-1\" class=\"heading-element\" dir=\"auto\"\u003eBinaries\u003c/h3\u003e\u003ca id=\"user-content-binaries\" class=\"anchor\" aria-label=\"Permalink: Binaries\" href=\"#binaries\"\u003e\u003csvg class=\"octicon octicon-link\" viewBox=\"0 0 16 16\" version=\"1.1\" width=\"16\" height=\"16\" aria-hidden=\"true\"\u003e\u003cpath d=\"m7.775 3.275 1.25-1.25a3.5 3.5 0 1 1 4.95 4.95l-2.5 2.5a3.5 3.5 0 0 1-4.95 0 .751.751 0 0 1 .018-1.042.751.751 0 0 1 1.042-.018 1.998 1.998 0 0 0 2.83 0l2.5-2.5a2.002 2.002 0 0 0-2.83-2.83l-1.25 1.25a.751.751 0 0 1-1.042-.018.751.751 0 0 1-.018-1.042Zm-4.69 9.64a1.998 1.998 0 0 0 2.83 0l1.25-1.25a.751.751 0 0 1 1.042.018.751.751 0 0 1 .018 1.042l-1.25 1.25a3.5 3.5 0 1 1-4.95-4.95l2.5-2.5a3.5 3.5 0 0 1 4.95 0 .751.751 0 0 1-.018 1.042.751.751 0 0 1-1.042.018 1.998 1.998 0 0 0-2.83 0l-2.5 2.5a1.998 1.998 0 0 0 0 2.83Z\"\u003e\u003c/path\u003e\u003c/svg\u003e\u003c/a\u003e\u003c/div\u003e\n\u003cul dir=\"auto\"\u003e\n\u003cli\u003eAnaconda\u003c/li\u003e\n\u003c/ul\u003e\n\u003cdiv class=\"highlight highlight-source-shell notranslate position-relative overflow-auto\" dir=\"auto\" data-snippet-clipboard-copy-content=\"conda install pytorch torchvision -c soumith\"\u003e\u003cpre\u003econda install pytorch torchvision -c soumith\u003c/pre\u003e\u003c/div\u003e\n\u003cdiv class=\"markdown-heading\" dir=\"auto\"\u003e\u003ch3 tabindex=\"-1\" class=\"heading-element\" dir=\"auto\"\u003eFrom source\u003c/h3\u003e\u003ca id=\"user-content-from-source\" class=\"anchor\" aria-label=\"Permalink: From source\" href=\"#from-source\"\u003e\u003csvg class=\"octicon octicon-link\" viewBox=\"0 0 16 16\" version=\"1.1\" width=\"16\" height=\"16\" aria-hidden=\"true\"\u003e\u003cpath d=\"m7.775 3.275 1.25-1.25a3.5 3.5 0 1 1 4.95 4.95l-2.5 2.5a3.5 3.5 0 0 1-4.95 0 .751.751 0 0 1 .018-1.042.751.751 0 0 1 1.042-.018 1.998 1.998 0 0 0 2.83 0l2.5-2.5a2.002 2.002 0 0 0-2.83-2.83l-1.25 1.25a.751.751 0 0 1-1.042-.018.751.751 0 0 1-.018-1.042Zm-4.69 9.64a1.998 1.998 0 0 0 2.83 0l1.25-1.25a.751.751 0 0 1 1.042.018.751.751 0 0 1 .018 1.042l-1.25 1.25a3.5 3.5 0 1 1-4.95-4.95l2.5-2.5a3.5 3.5 0 0 1 4.95 0 .751.751 0 0 1-.018 1.042.751.751 0 0 1-1.042.018 1.998 1.998 0 0 0-2.83 0l-2.5 2.5a1.998 1.998 0 0 0 0 2.83Z\"\u003e\u003c/path\u003e\u003c/svg\u003e\u003c/a\u003e\u003c/div\u003e\n\u003cp dir=\"auto\"\u003eIf you are installing from source, we highly recommend installing an \u003ca href=\"https://www.continuum.io/downloads\" rel=\"nofollow\"\u003eAnaconda\u003c/a\u003e environment.\nYou will get a high-quality BLAS library (MKL) and you get a controlled compiler version regardless of your Linux distro.\u003c/p\u003e\n\u003cp dir=\"auto\"\u003eOnce you have \u003ca href=\"https://www.continuum.io/downloads\" rel=\"nofollow\"\u003eanaconda\u003c/a\u003e installed, here are the instructions.\u003c/p\u003e\n\u003cp dir=\"auto\"\u003eIf you want to compile with CUDA support, install\u003c/p\u003e\n\u003cul dir=\"auto\"\u003e\n\u003cli\u003e\u003ca href=\"https://developer.nvidia.com/cuda-downloads\" rel=\"nofollow\"\u003eNVIDIA CUDA\u003c/a\u003e 7.5 or above\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"https://developer.nvidia.com/cudnn\" rel=\"nofollow\"\u003eNVIDIA CuDNN\u003c/a\u003e v5.x\u003c/li\u003e\n\u003c/ul\u003e\n\u003cp dir=\"auto\"\u003eIf you want to disable CUDA support, export environment variable \u003ccode\u003eNO_CUDA=1\u003c/code\u003e.\u003c/p\u003e\n\u003cdiv class=\"markdown-heading\" dir=\"auto\"\u003e\u003ch4 tabindex=\"-1\" class=\"heading-element\" dir=\"auto\"\u003eInstall optional dependencies\u003c/h4\u003e\u003ca id=\"user-content-install-optional-dependencies\" class=\"anchor\" aria-label=\"Permalink: Install optional dependencies\" href=\"#install-optional-dependencies\"\u003e\u003csvg class=\"octicon octicon-link\" viewBox=\"0 0 16 16\" version=\"1.1\" width=\"16\" height=\"16\" aria-hidden=\"true\"\u003e\u003cpath d=\"m7.775 3.275 1.25-1.25a3.5 3.5 0 1 1 4.95 4.95l-2.5 2.5a3.5 3.5 0 0 1-4.95 0 .751.751 0 0 1 .018-1.042.751.751 0 0 1 1.042-.018 1.998 1.998 0 0 0 2.83 0l2.5-2.5a2.002 2.002 0 0 0-2.83-2.83l-1.25 1.25a.751.751 0 0 1-1.042-.018.751.751 0 0 1-.018-1.042Zm-4.69 9.64a1.998 1.998 0 0 0 2.83 0l1.25-1.25a.751.751 0 0 1 1.042.018.751.751 0 0 1 .018 1.042l-1.25 1.25a3.5 3.5 0 1 1-4.95-4.95l2.5-2.5a3.5 3.5 0 0 1 4.95 0 .751.751 0 0 1-.018 1.042.751.751 0 0 1-1.042.018 1.998 1.998 0 0 0-2.83 0l-2.5 2.5a1.998 1.998 0 0 0 0 2.83Z\"\u003e\u003c/path\u003e\u003c/svg\u003e\u003c/a\u003e\u003c/div\u003e\n\u003cp dir=\"auto\"\u003eOn Linux\u003c/p\u003e\n\u003cdiv class=\"highlight highlight-source-shell notranslate position-relative overflow-auto\" dir=\"auto\" data-snippet-clipboard-copy-content=\"export CMAKE_PREFIX_PATH=[anaconda root directory]\n\n# Install basic dependencies\nconda install numpy mkl setuptools cmake gcc cffi\n\n# Add LAPACK support for the GPU\nconda install -c soumith magma-cuda75 # or magma-cuda80 if CUDA 8.0\"\u003e\u003cpre\u003e\u003cspan class=\"pl-k\"\u003eexport\u003c/span\u003e CMAKE_PREFIX_PATH=[anaconda root directory]\n\n\u003cspan class=\"pl-c\"\u003e\u003cspan class=\"pl-c\"\u003e#\u003c/span\u003e Install basic dependencies\u003c/span\u003e\nconda install numpy mkl setuptools cmake gcc cffi\n\n\u003cspan class=\"pl-c\"\u003e\u003cspan class=\"pl-c\"\u003e#\u003c/span\u003e Add LAPACK support for the GPU\u003c/span\u003e\nconda install -c soumith magma-cuda75 \u003cspan class=\"pl-c\"\u003e\u003cspan class=\"pl-c\"\u003e#\u003c/span\u003e or magma-cuda80 if CUDA 8.0\u003c/span\u003e\u003c/pre\u003e\u003c/div\u003e\n\u003cp dir=\"auto\"\u003eOn OSX\u003c/p\u003e\n\u003cdiv class=\"highlight highlight-source-shell notranslate position-relative overflow-auto\" dir=\"auto\" data-snippet-clipboard-copy-content=\"export CMAKE_PREFIX_PATH=[anaconda root directory]\nconda install numpy setuptools cmake cffi\"\u003e\u003cpre\u003e\u003cspan class=\"pl-k\"\u003eexport\u003c/span\u003e CMAKE_PREFIX_PATH=[anaconda root directory]\nconda install numpy setuptools cmake cffi\u003c/pre\u003e\u003c/div\u003e\n\u003cdiv class=\"markdown-heading\" dir=\"auto\"\u003e\u003ch4 tabindex=\"-1\" class=\"heading-element\" dir=\"auto\"\u003eInstall PyTorch\u003c/h4\u003e\u003ca id=\"user-content-install-pytorch\" class=\"anchor\" aria-label=\"Permalink: Install PyTorch\" href=\"#install-pytorch\"\u003e\u003csvg class=\"octicon octicon-link\" viewBox=\"0 0 16 16\" version=\"1.1\" width=\"16\" height=\"16\" aria-hidden=\"true\"\u003e\u003cpath d=\"m7.775 3.275 1.25-1.25a3.5 3.5 0 1 1 4.95 4.95l-2.5 2.5a3.5 3.5 0 0 1-4.95 0 .751.751 0 0 1 .018-1.042.751.751 0 0 1 1.042-.018 1.998 1.998 0 0 0 2.83 0l2.5-2.5a2.002 2.002 0 0 0-2.83-2.83l-1.25 1.25a.751.751 0 0 1-1.042-.018.751.751 0 0 1-.018-1.042Zm-4.69 9.64a1.998 1.998 0 0 0 2.83 0l1.25-1.25a.751.751 0 0 1 1.042.018.751.751 0 0 1 .018 1.042l-1.25 1.25a3.5 3.5 0 1 1-4.95-4.95l2.5-2.5a3.5 3.5 0 0 1 4.95 0 .751.751 0 0 1-.018 1.042.751.751 0 0 1-1.042.018 1.998 1.998 0 0 0-2.83 0l-2.5 2.5a1.998 1.998 0 0 0 0 2.83Z\"\u003e\u003c/path\u003e\u003c/svg\u003e\u003c/a\u003e\u003c/div\u003e\n\u003cdiv class=\"highlight highlight-source-shell notranslate position-relative overflow-auto\" dir=\"auto\" data-snippet-clipboard-copy-content=\"export MACOSX_DEPLOYMENT_TARGET=10.9 # if OSX\npip install -r requirements.txt\npython setup.py install\"\u003e\u003cpre\u003e\u003cspan class=\"pl-k\"\u003eexport\u003c/span\u003e MACOSX_DEPLOYMENT_TARGET=10.9 \u003cspan class=\"pl-c\"\u003e\u 7540 003cspan class=\"pl-c\"\u003e#\u003c/span\u003e if OSX\u003c/span\u003e\npip install -r requirements.txt\npython setup.py install\u003c/pre\u003e\u003c/div\u003e\n\u003cdiv class=\"markdown-heading\" dir=\"auto\"\u003e\u003ch3 tabindex=\"-1\" class=\"heading-element\" dir=\"auto\"\u003eDocker image\u003c/h3\u003e\u003ca id=\"user-content-docker-image\" class=\"anchor\" aria-label=\"Permalink: Docker image\" href=\"#docker-image\"\u003e\u003csvg class=\"octicon octicon-link\" viewBox=\"0 0 16 16\" version=\"1.1\" width=\"16\" height=\"16\" aria-hidden=\"true\"\u003e\u003cpath d=\"m7.775 3.275 1.25-1.25a3.5 3.5 0 1 1 4.95 4.95l-2.5 2.5a3.5 3.5 0 0 1-4.95 0 .751.751 0 0 1 .018-1.042.751.751 0 0 1 1.042-.018 1.998 1.998 0 0 0 2.83 0l2.5-2.5a2.002 2.002 0 0 0-2.83-2.83l-1.25 1.25a.751.751 0 0 1-1.042-.018.751.751 0 0 1-.018-1.042Zm-4.69 9.64a1.998 1.998 0 0 0 2.83 0l1.25-1.25a.751.751 0 0 1 1.042.018.751.751 0 0 1 .018 1.042l-1.25 1.25a3.5 3.5 0 1 1-4.95-4.95l2.5-2.5a3.5 3.5 0 0 1 4.95 0 .751.751 0 0 1-.018 1.042.751.751 0 0 1-1.042.018 1.998 1.998 0 0 0-2.83 0l-2.5 2.5a1.998 1.998 0 0 0 0 2.83Z\"\u003e\u003c/path\u003e\u003c/svg\u003e\u003c/a\u003e\u003c/div\u003e\n\u003cp dir=\"auto\"\u003eDockerfiles are supplied to build images with cuda support and cudnn v5 and cudnn v6 RC. Build them as usual\u003c/p\u003e\n\u003cdiv class=\"snippet-clipboard-content notranslate position-relative overflow-auto\" data-snippet-clipboard-copy-content=\"docker build . -t pytorch-cudnnv5 \"\u003e\u003cpre class=\"notranslate\"\u003e\u003ccode\u003edocker build . -t pytorch-cudnnv5 \n\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\n\u003cp dir=\"auto\"\u003eor\u003c/p\u003e\n\u003cdiv class=\"snippet-clipboard-content notranslate position-relative overflow-auto\" data-snippet-clipboard-copy-content=\"docker build . -t pytorch-cudnnv6 -f tools/docker/Dockerfile-v6\"\u003e\u003cpre class=\"notranslate\"\u003e\u003ccode\u003edocker build . -t pytorch-cudnnv6 -f tools/docker/Dockerfile-v6\n\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\n\u003cp dir=\"auto\"\u003eand run them with nvidia-docker:\u003c/p\u003e\n\u003cdiv class=\"snippet-clipboard-content notranslate position-relative overflow-auto\" data-snippet-clipboard-copy-content=\"nvidia-docker run --rm -ti --ipc=host pytorch-cudnnv5\"\u003e\u003cpre class=\"notranslate\"\u003e\u003ccode\u003envidia-docker run --rm -ti --ipc=host pytorch-cudnnv5\n\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\n\u003cp dir=\"auto\"\u003ePlease note that pytorch uses shared memory to share data between processes, so if torch multiprocessing is used (e.g.\nfor multithreaded data loaders) the default shared memory segment size that container runs with is not enough, and you\nshould increase shared memory size either with --ipc=host or --shm-size command line options to nvidia-docker run.\u003c/p\u003e\n\u003cdiv class=\"markdown-heading\" dir=\"auto\"\u003e\u003ch2 tabindex=\"-1\" class=\"heading-element\" dir=\"auto\"\u003eGetting Started\u003c/h2\u003e\u003ca id=\"user-content-getting-started\" class=\"anchor\" aria-label=\"Permalink: Getting Started\" href=\"#getting-started\"\u003e\u003csvg class=\"octicon octicon-link\" viewBox=\"0 0 16 16\" version=\"1.1\" width=\"16\" height=\"16\" aria-hidden=\"true\"\u003e\u003cpath d=\"m7.775 3.275 1.25-1.25a3.5 3.5 0 1 1 4.95 4.95l-2.5 2.5a3.5 3.5 0 0 1-4.95 0 .751.751 0 0 1 .018-1.042.751.751 0 0 1 1.042-.018 1.998 1.998 0 0 0 2.83 0l2.5-2.5a2.002 2.002 0 0 0-2.83-2.83l-1.25 1.25a.751.751 0 0 1-1.042-.018.751.751 0 0 1-.018-1.042Zm-4.69 9.64a1.998 1.998 0 0 0 2.83 0l1.25-1.25a.751.751 0 0 1 1.042.018.751.751 0 0 1 .018 1.042l-1.25 1.25a3.5 3.5 0 1 1-4.95-4.95l2.5-2.5a3.5 3.5 0 0 1 4.95 0 .751.751 0 0 1-.018 1.042.751.751 0 0 1-1.042.018 1.998 1.998 0 0 0-2.83 0l-2.5 2.5a1.998 1.998 0 0 0 0 2.83Z\"\u003e\u003c/path\u003e\u003c/svg\u003e\u003c/a\u003e\u003c/div\u003e\n\u003cp dir=\"auto\"\u003eThree pointers to get you started:\u003c/p\u003e\n\u003cul dir=\"auto\"\u003e\n\u003cli\u003e\u003ca href=\"https://github.com/pytorch/tutorials\"\u003eTutorials: notebooks to get you started with understanding and using PyTorch\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"https://github.com/pytorch/examples\"\u003eExamples: easy to understand pytorch code across all domains\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003eThe API Reference: \u003ca href=\"http://pytorch.org/docs/\" rel=\"nofollow\"\u003ehttp://pytorch.org/docs/\u003c/a\u003e\u003c/li\u003e\n\u003c/ul\u003e\n\u003cdiv class=\"markdown-heading\" dir=\"auto\"\u003e\u003ch2 tabindex=\"-1\" class=\"heading-element\" dir=\"auto\"\u003eCommunication\u003c/h2\u003e\u003ca id=\"user-content-communication\" class=\"anchor\" aria-label=\"Permalink: Communication\" href=\"#communication\"\u003e\u003csvg class=\"octicon octicon-link\" viewBox=\"0 0 16 16\" version=\"1.1\" width=\"16\" height=\"16\" aria-hidden=\"true\"\u003e\u003cpath d=\"m7.775 3.275 1.25-1.25a3.5 3.5 0 1 1 4.95 4.95l-2.5 2.5a3.5 3.5 0 0 1-4.95 0 .751.751 0 0 1 .018-1.042.751.751 0 0 1 1.042-.018 1.998 1.998 0 0 0 2.83 0l2.5-2.5a2.002 2.002 0 0 0-2.83-2.83l-1.25 1.25a.751.751 0 0 1-1.042-.018.751.751 0 0 1-.018-1.042Zm-4.69 9.64a1.998 1.998 0 0 0 2.83 0l1.25-1.25a.751.751 0 0 1 1.042.018.751.751 0 0 1 .018 1.042l-1.25 1.25a3.5 3.5 0 1 1-4.95-4.95l2.5-2.5a3.5 3.5 0 0 1 4.95 0 .751.751 0 0 1-.018 1.042.751.751 0 0 1-1.042.018 1.998 1.998 0 0 0-2.83 0l-2.5 2.5a1.998 1.998 0 0 0 0 2.83Z\"\u003e\u003c/path\u003e\u003c/svg\u003e\u003c/a\u003e\u003c/div\u003e\n\u003cul dir=\"auto\"\u003e\n\u003cli\u003eforums: discuss implementations, research, etc. \u003ca href=\"http://discuss.pytorch.org\" rel=\"nofollow\"\u003ehttp://discuss.pytorch.org\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003egithub issues: bug reports, feature requests, install issues, RFCs, thoughts, etc.\u003c/li\u003e\n\u003cli\u003eslack: general chat, online discussions, collaboration etc. \u003ca href=\"https://pytorch.slack.com/\" rel=\"nofollow\"\u003ehttps://pytorch.slack.com/\u003c/a\u003e . If you need a slack invite, ping us at \u003ca href=\"mailto:soumith@pytorch.org\"\u003esoumith@pytorch.org\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003enewsletter: no-noise, one-way email newsletter with important announcements about pytorch. You can sign-up here: \u003ca href=\"http://eepurl.com/cbG0rv\" rel=\"nofollow\"\u003ehttp://eepurl.com/cbG0rv\u003c/a\u003e\u003c/li\u003e\n\u003c/ul\u003e\n\u003cdiv class=\"markdown-heading\" dir=\"auto\"\u003e\u003ch2 tabindex=\"-1\" class=\"heading-element\" dir=\"auto\"\u003eReleases and Contributing\u003c/h2\u003e\u003ca id=\"user-content-releases-and-contributing\" class=\"anchor\" aria-label=\"Permalink: Releases and Contributing\" href=\"#releases-and-contributing\"\u003e\u003csvg class=\"octicon octicon-link\" viewBox=\"0 0 16 16\" version=\"1.1\" width=\"16\" height=\"16\" aria-hidden=\"true\"\u003e\u003cpath d=\"m7.775 3.275 1.25-1.25a3.5 3.5 0 1 1 4.95 4.95l-2.5 2.5a3.5 3.5 0 0 1-4.95 0 .751.751 0 0 1 .018-1.042.751.751 0 0 1 1.042-.018 1.998 1.998 0 0 0 2.83 0l2.5-2.5a2.002 2.002 0 0 0-2.83-2.83l-1.25 1.25a.751.751 0 0 1-1.042-.018.751.751 0 0 1-.018-1.042Zm-4.69 9.64a1.998 1.998 0 0 0 2.83 0l1.25-1.25a.751.751 0 0 1 1.042.018.751.751 0 0 1 .018 1.042l-1.25 1.25a3.5 3.5 0 1 1-4.95-4.95l2.5-2.5a3.5 3.5 0 0 1 4.95 0 .751.751 0 0 1-.018 1.042.751.751 0 0 1-1.042.018 1.998 1.998 0 0 0-2.83 0l-2.5 2.5a1.998 1.998 0 0 0 0 2.83Z\"\u003e\u003c/path\u003e\u003c/svg\u003e\u003c/a\u003e\u003c/div\u003e\n\u003cp dir=\"auto\"\u003ePyTorch has a 90 day release cycle (major releases).\nIt's current state is Beta (v0.1.6), we expect no obvious bugs. Please let us know if you encounter a bug by \u003ca href=\"https://github.com/pytorch/pytorch/issues\"\u003efiling an issue\u003c/a\u003e.\u003c/p\u003e\n\u003cp dir=\"auto\"\u003eWe appreciate all contributions. If you are planning to contribute back bug-fixes, please do so without any further discussion.\u003c/p\u003e\n\u003cp dir=\"auto\"\u003eIf you plan to contribute new features, utility functions or extensions to the core, please first open an issue and discuss the feature with us.\nSending a PR without discussion might end up resulting in a rejected PR, because we might be taking the core in a different direction than you might be aware of.\u003c/p\u003e\n\u003cp dir=\"auto\"\u003e\u003cstrong\u003eFor the next release cycle, these are the 3 big features we are planning to add:\u003c/strong\u003e\u003c/p\u003e\n\u003col dir=\"auto\"\u003e\n\u003cli\u003e\u003ca href=\"https://github.com/pytorch/pytorch/issues/241\" data-hovercard-type=\"issue\" data-hovercard-url=\"/pytorch/pytorch/issues/241/hovercard\"\u003eDistributed PyTorch\u003c/a\u003e (a draft implementation is present in this \u003ca href=\"https://github.com/apaszke/pytorch-dist\"\u003ebranch\u003c/a\u003e )\u003c/li\u003e\n\u003cli\u003eBackward of Backward - Backpropagating through the optimization process itself. Some past and recent papers such as\n\u003ca href=\"http://yann.lecun.com/exdb/publis/pdf/drucker-lecun-91.pdf\" rel=\"nofollow\"\u003eDouble Backprop\u003c/a\u003e and \u003ca href=\"https://arxiv.org/abs/1611.02163\" rel=\"nofollow\"\u003eUnrolled GANs\u003c/a\u003e need this.\u003c/li\u003e\n\u003cli\u003eLazy Execution Engine for autograd - This will enable us to optionally introduce caching and JIT compilers to optimize autograd code.\u003c/li\u003e\n\u003c/ol\u003e\n\u003cdiv class=\"markdown-heading\" dir=\"auto\"\u003e\u003ch2 tabindex=\"-1\" class=\"heading-element\" dir=\"auto\"\u003eThe Team\u003c/h2\u003e\u003ca id=\"user-content-the-team\" class=\"anchor\" aria-label=\"Permalink: The Team\" href=\"#the-team\"\u003e\u003csvg class=\"octicon octicon-link\" viewBox=\"0 0 16 16\" version=\"1.1\" width=\"16\" height=\"16\" aria-hidden=\"true\"\u003e\u003cpath d=\"m7.775 3.275 1.25-1.25a3.5 3.5 0 1 1 4.95 4.95l-2.5 2.5a3.5 3.5 0 0 1-4.95 0 .751.751 0 0 1 .018-1.042.751.751 0 0 1 1.042-.018 1.998 1.998 0 0 0 2.83 0l2.5-2.5a2.002 2.002 0 0 0-2.83-2.83l-1.25 1.25a.751.751 0 0 1-1.042-.018.751.751 0 0 1-.018-1.042Zm-4.69 9.64a1.998 1.998 0 0 0 2.83 0l1.25-1.25a.751.751 0 0 1 1.042.018.751.751 0 0 1 .018 1.042l-1.25 1.25a3.5 3.5 0 1 1-4.95-4.95l2.5-2.5a3.5 3.5 0 0 1 4.95 0 .751.751 0 0 1-.018 1.042.751.751 0 0 1-1.042.018 1.998 1.998 0 0 0-2.83 0l-2.5 2.5a1.998 1.998 0 0 0 0 2.83Z\"\u003e\u003c/path\u003e\u003c/svg\u003e\u003c/a\u003e\u003c/div\u003e\n\u003cp dir=\"auto\"\u003ePyTorch is a community driven project with several skillful engineers and researchers contributing to it.\u003c/p\u003e\n\u003cp dir=\"auto\"\u003ePyTorch is currently maintained by \u003ca href=\"https://apaszke.github.io/\" rel=\"nofollow\"\u003eAdam Paszke\u003c/a\u003e, \u003ca href=\"https://github.com/colesbury\"\u003eSam Gross\u003c/a\u003e and \u003ca href=\"http://soumith.ch\" rel=\"nofollow\"\u003eSoumith Chintala\u003c/a\u003e with major contributions coming from 10s of talented individuals in various forms and means. A non-exhaustive but growing list needs to mention: Sergey Zagoruyko, Adam Lerer, Francisco Massa, Andreas Kopf, James Bradbury, Zeming Lin, Yuandong Tian, Guillaume Lample, Marat Dukhan, Natalia Gimelshein.\u003c/p\u003e\n\u003cp dir=\"auto\"\u003eNote: this project is unrelated to \u003ca href=\"https://github.com/hughperkins/pytorch\"\u003ehughperkins/pytorch\u003c/a\u003e with the same name. Hugh is a valuable contributor in the Torch community and has helped with many things Torch and PyTorch.\u003c/p\u003e\n\u003c/article\u003e","renderedFileInfo":null,"shortPath":null,"symbolsEnabled":true,"tabSize":8,"topBannersInfo":{"overridingGlobalFundingFile":false,"globalPreferredFundingPath":null,"showInvalidCitationWarning":false,"citationHelpUrl":"https://docs.github.com/github/creating-cloning-and-archiving-repositories/creating-a-repository-on-github/about-citation-files","actionsOnboardingTip":null},"truncated":false,"viewable":true,"workflowRedirectUrl":null,"symbols":{"timed_out":false,"not_analyzed":false,"symbols":[{"name":"More about PyTorch","kind":"section_2","ident_start":1406,"ident_end":1424,"extent_start":1403,"extent_end":6563,"fully_qualified_name":"More about PyTorch","ident_utf16":{"start":{"line_number":28,"utf16_col":3},"end":{"line_number":28,"utf16_col":21}},"extent_utf16":{"start":{"line_number":28,"utf16_col":0},"end":{"line_number":127,"utf16_col":0}}},{"name":"A GPU-ready Tensor library","kind":"section_3","ident_start":2654,"ident_end":2680,"extent_start":2650,"extent_end":3144,"fully_qualified_name":"A GPU-ready Tensor library","ident_utf16":{"start":{"line_number":49,"utf16_col":4},"end":{"line_number":49,"utf16_col":30}},"extent_utf16":{"start":{"line_number":49,"utf16_col":0},"end":{"line_number":62,"utf16_col":0}}},{"name":"Dynamic Neural Networks: Tape based Autograd","kind":"section_3","ident_start":3148,"ident_end":3192,"extent_start":3144,"extent_end":4221,"fully_qualified_name":"Dynamic Neural Networks: Tape based Autograd","ident_utf16":{"start":{"line_number":62,"utf16_col":4},"end":{"line_number":62,"utf16_col":48}},"extent_utf16":{"start":{"line_number":62,"utf16_col":0},"end":{"line_number":82,"utf16_col":0}}},{"name":"Python first","kind":"section_3","ident_start":4225,"ident_end":4237,"extent_start":4221,"extent_end":4625,"fully_qualified_name":"Python first","ident_utf16":{"start":{"line_number":82,"utf16_col":4},"end":{"line_number":82,"utf16_col":16}},"extent_utf16":{"start":{"line_number":82,"utf16_col":0},"end":{"line_number":91,"utf16_col":0}}},{"name":"Imperative experiences","kind":"section_3","ident_start":4629,"ident_end":4651,"extent_start":4625,"extent_end":5126,"fully_qualified_name":"Imperative experiences","ident_utf16":{"start":{"line_number":91,"utf16_col":4},"end":{"line_number":91,"utf16_col":26}},"extent_utf16":{"start":{"line_number":91,"utf16_col":0},"end":{"line_number":99,"utf16_col":0}}},{"name":"Fast and Lean","kind":"section_3","ident_start":5130,"ident_end":5143,"extent_start":5126,"extent_end":5852,"fully_qualified_name":"Fast and Lean","ident_utf16":{"start":{"line_number":99,"utf16_col":4},"end":{"line_number":99,"utf16_col":17}},"extent_utf16":{"start":{"line_number":99,"utf16_col":0},"end":{"line_number":114,"utf16_col":0}}},{"name":"Extensions without pain","kind":"section_3","ident_start":5856,"ident_end":5879,"extent_start":5852,"extent_end":6563,"fully_qualified_name":"Extensions without pain","ident_utf16":{"start":{"line_number":114,"utf16_col":4},"end":{"line_number":114,"utf16_col":27}},"extent_utf16":{"start":{"line_number":114,"utf16_col":0},"end":{"line_number":127,"utf16_col":0}}},{"name":"Installation","kind":"section_2","ident_start":6566,"ident_end":6578,"extent_start":6563,"extent_end":8530,"fully_qualified_name":"Installation","ident_utf16":{"start":{"line_number":127,"utf16_col":3},"end":{"line_number":127,"utf16_col":15}},"extent_utf16":{"start":{"line_number":127,"utf16_col":0},"end":{"line_number":193,"utf16_col":0}}},{"name":"Binaries","kind":"section_3","ident_start":6584,"ident_end":6592,"extent_start":6580,"extent_end":6662,"fully_qualified_name":"Binaries","ident_utf16":{"start":{"line_number":129,"utf16_col":4},"end":{"line_number":129,"utf16_col":12}},"extent_utf16":{"start":{"line_number":129,"utf16_col":0},"end":{"line_number":135,"utf16_col":0}}},{"name":"From source","kind":"section_3","ident_start":6666,"ident_end":6677,"extent_start":6662,"extent_end":7833,"fully_qualified_name":"From source","ident_utf16":{"start":{"line_number":135,"utf16_col":4},"end":{"line_number":135,"utf16_col":15}},"extent_utf16":{"start":{"line_number":135,"utf16_col":0},"end":{"line_number":174,"utf16_col":0}}},{"name":"Install optional dependencies","kind":"section_4","ident_start":7298,"ident_end":7327,"extent_start":7293,"extent_end":7697,"fully_qualified_name":"Install optional dependencies","ident_utf16":{"start":{"line_number":148,"utf16_col":5},"end":{"line_number":148,"utf16_col":34}},"extent_utf16":{"start":{"line_number":148,"utf16_col":0},"end":{"line_number":167,"utf16_col":0}}},{"name":"Install PyTorch","kind":"section_4","ident_start":7702,"ident_end":7717,"extent_start":7697,"extent_end":7833,"fully_qualified_name":"Install PyTorch","ident_utf16":{"start":{"line_number":167,"utf16_col":5},"end":{"line_number":167,"utf16_col":20}},"extent_utf16":{"start":{"line_number":167,"utf16_col":0},"end":{"line_number":174,"utf16_col":0}}},{"name":"Docker image","kind":"section_3","ident_start":7837,"ident_end":7849,"extent_start":7833,"extent_end":8530,"fully_qualified_name":"Docker image","ident_utf16":{"start":{"line_number":174,"utf16_col":4},"end":{"line_number":174,"utf16_col":16}},"extent_utf16":{"start":{"line_number":174,"utf16_col":0},"end":{"line_number":193,"utf16_col":0}}},{"name":"Getting Started","kind":"section_2","ident_start":8533,"ident_end":8548,"extent_start":8530,"extent_end":8881,"fully_qualified_name":"Getting Started","ident_utf16":{"start":{"line_number":193,"utf16_col":3},"end":{"line_number":193,"utf16_col":18}},"extent_utf16":{"start":{"line_number":193,"utf16_col":0},"end":{"line_number":200,"utf16_col":0}}},{"name":"Communication","kind":"section_2","ident_start":8884,"ident_end":8897,"extent_start":8881,"extent_end":9351,"fully_qualified_name":"Communication","ident_utf16":{"start":{"line_number":200,"utf16_col":3},"end":{"line_number":200,"utf16_col":16}},"extent_utf16":{"start":{"line_number":200,"utf16_col":0},"end":{"line_number":206,"utf16_col":0}}},{"name":"Releases and Contributing","kind":"section_2","ident_start":9354,"ident_end":9379,"extent_start":9351,"extent_end":10701,"fully_qualified_name":"Releases and Contributing","ident_utf16":{"start":{"line_number":206,"utf16_col":3},"end":{"line_number":206,"utf16_col":28}},"extent_utf16":{"start":{"line_number":206,"utf16_col":0},"end":{"line_number":224,"utf16_col":0}}},{"name":"The Team","kind":"section_2","ident_start":10704,"ident_end":10712,"extent_start":10701,"extent_end":11507,"fully_qualified_name":"The Team","ident_utf16":{"start":{"line_number":224,"utf16_col":3},"end":{"line_number":224,"utf16_col":11}},"extent_utf16":{"start":{"line_number":224,"utf16_col":0},"end":{"line_number":231,"utf16_col":0}}}]}},"copilotInfo":null,"copilotAccessAllowed":false,"modelsAccessAllowed":false,"modelsRepoIntegrationEnabled":false,"csrf_tokens":{"/ShowLang/pytorch/branches":{"post":"mrtXEfTL8uETd0Xv78oCl9g2ScLMtKkC9oliUmia4ZQp8OLE25io6ctB9SLnjLuQf0sMo1F8gth7oHYkC2aZig"},"/repos/preferences":{"post":"dqx4o_3-Jic13mUfkvMTp_kpY_b99Iqsz_ks0iO1ymG3qXCcX8N9dA5m7OLoQUIleoGmSzVPukwKgDB4Qx4T6g"}}},"title":"pytorch/README.md at master · ShowLang/pytorch","appPayload":{"helpUrl":"https://docs.github.com","findFileWorkerPath":"/assets-cdn/worker/find-file-worker-7d7eb7c71814.js","findInFileWorkerPath":"/assets-cdn/worker/find-in-file-worker-1ae9fa256942.js","githubDevUrl":null,"enabled_features":{"code_nav_ui_events":false,"react_blob_overlay":false,"accessible_code_button":true,"github_models_repo_integration":false}}}
0