mirror of https://github.com/saymrwulf/pytorch.git
synced 2026-05-14 20:57:59 +00:00
Compare commits
389 commits
ciflow/xpu ... master
| SHA1 | Author | Date | |
|---|---|---|---|
| b8d4d64f04 | |||
| 096a336cc1 | |||
| fe30c02f9f | |||
| 0962bb8dfe | |||
| 3b6fb62cf1 | |||
| 9c0fd892ab | |||
| cda59165b2 | |||
| a5ba4e6d6f | |||
| 11f2314bf7 | |||
| dea9212685 | |||
| 89210c26ab | |||
| 2f37447653 | |||
| f0b53b689b | |||
| 932056fbf9 | |||
| 16b7da68c1 | |||
| fd4c798bfe | |||
| 4bfa6a7162 | |||
| dd36f3a071 | |||
| bf27d8fb2b | |||
| 97cbc3d97c | |||
| a5855e1357 | |||
| dd832a69bd | |||
| 1b093184f4 | |||
| fd0bcf93ad | |||
| 60b95f86d2 | |||
| 38189f6fd5 | |||
| 5a1c75a5bc | |||
| a00eae9cb0 | |||
| 2421e36f51 | |||
| 367af4879b | |||
| 689e6de63d | |||
| bb5ebad147 | |||
| 5f5cabb9c1 | |||
| 215b531ead | |||
| 488f72f6c0 | |||
| f47a8cece8 | |||
| e15d073416 | |||
| 86dfb5e442 | |||
| 25f88ddb99 | |||
| 58fc46413a | |||
| 0a5016fd36 | |||
| 56e0fa3d09 | |||
| c3776fea12 | |||
| b381fd96bf | |||
| dda4facd6d | |||
| 437c7e27fa | |||
| 5ba9946bdb | |||
| d9c1957d84 | |||
| fda6e5a60c | |||
| 88d344442e | |||
| 4dbb7219d0 | |||
| 73fbb35536 | |||
| 07069ffa88 | |||
| 972eb57bf1 | |||
| 589b9d7eed | |||
| cc7f2c82ea | |||
| 4536b78abe | |||
| 0d10b8d95a | |||
| c250e86b2e | |||
| c6c80bb53f | |||
| 98df2ff800 | |||
| 88118f11da | |||
| 0bc1af7ab5 | |||
| 716f8ddc6b | |||
| 9db265ed39 | |||
| 91ea5bc986 | |||
| ada51b46f6 | |||
| e5f69df05c | |||
| 9eb71c71d4 | |||
| e28aefac5a | |||
| 75eaebd1b6 | |||
| 47b95ce3e9 | |||
| 028df3b96a | |||
| a06b106858 | |||
| 2adb421b43 | |||
| b23f374d1c | |||
| a08f760c65 | |||
| f6e889082d | |||
| e8158ffff1 | |||
| c881ab9a51 | |||
| 77adb40c4b | |||
| 8ba85098fa | |||
| 856f61404a | |||
| 23c71cab65 | |||
| ab6aff7632 | |||
| 1eda50e9df | |||
| 0a40441a5b | |||
| f8c1ff72d3 | |||
| d5f562be45 | |||
| 8ca4411d09 | |||
| 7b510d451e | |||
| 7d50fd1995 | |||
| de2460ae1e | |||
| 9fe75ed59b | |||
| 205280ffc8 | |||
| 8ab9129fe7 | |||
| 3a53fd218d | |||
| 4b5899b407 | |||
| 949ddacc73 | |||
| 7c026ede9d | |||
| 18a944006d | |||
| acfad10356 | |||
| 0f41e2c943 | |||
| e8c5027694 | |||
| 9978038498 | |||
| 96f759c632 | |||
| 23dc7ee222 | |||
| 00144d9ea7 | |||
| 455e0f5940 | |||
| 1ba4360aed | |||
| e5d70fe29b | |||
| db69de1b27 | |||
| b6338c813f | |||
| 64f8d95f81 | |||
| 41c2150554 | |||
| 11278043cf | |||
| 618df82e0d | |||
| 40c8ac07a4 | |||
| 9067111266 | |||
| c38475d861 | |||
| 97e70be7e7 | |||
| 60aafa3ff8 | |||
| 47a4abc318 | |||
| 2f2410d33f | |||
| 0aa5f251cd | |||
| f2844c75e8 | |||
| 64b7178d87 | |||
| 97db66d135 | |||
| 9fb64b447d | |||
| 8d19da3473 | |||
| a0cf0a32b2 | |||
| ac8a76c4b7 | |||
| 4d187e9177 | |||
| 02c848d226 | |||
| 0396c0c674 | |||
| 27c7979d8c | |||
| 35f57fbe06 | |||
| 4fcdb6763a | |||
| e7945070c6 | |||
| e7cb6710b0 | |||
| ae61d85dbc | |||
| 52b45659c3 | |||
| 459d7b848c | |||
| 71bbb3c045 | |||
| 99d039627b | |||
| 4bc7165c13 | |||
| 7c9b50b8f5 | |||
| 53bf20e07c | |||
| 166c8c2559 | |||
| 703aaec281 | |||
| 7b1045e51b | |||
| 8ccb1cf185 | |||
| 441891c64b | |||
| fbdfd3db54 | |||
| c1e1406efa | |||
| 8f78aba2c4 | |||
| 8dde1a3822 | |||
| fb9d2a5292 | |||
| 6082d992b9 | |||
| 582f61dbde | |||
| cfcb5877ff | |||
| c9ffd46f42 | |||
| 17eb2d11db | |||
| 06bf848561 | |||
| 6b924515f6 | |||
| 37ae138e45 | |||
| 094cc050ba | |||
| ae181724e5 | |||
| 54be7f9c1f | |||
| 21370fec85 | |||
| f46e203938 | |||
| 31bfdd6216 | |||
| 5e816d1968 | |||
| fffe52bb3b | |||
| 82858df048 | |||
| 0161a00ebd | |||
| c7a0c787ee | |||
| 45c3eb46f2 | |||
| a2c1b09fe9 | |||
| da40844dae | |||
| eccce5da01 | |||
| cb5bd0cc60 | |||
| a7a0adf29e | |||
| 4e761395ee | |||
| 454383b3f7 | |||
| 1c23ac39ce | |||
| 28215f4e50 | |||
| 3b92998f84 | |||
| cc253f3804 | |||
| 6b1957bed9 | |||
| dfb1e51e2d | |||
| 07fd5465e5 | |||
| 86e1c644e6 | |||
| c3378eaeff | |||
| ab6eae7221 | |||
| c06a6d2a5b | |||
| 4696ce4d3e | |||
| 1d2afc23d3 | |||
| f3e7b4bd1e | |||
| 098fb19d86 | |||
| 5e07ccf4cf | |||
| 0a309091d8 | |||
| 95cdca7429 | |||
| e0b2f729a3 | |||
| c680a65076 | |||
| ba555800e0 | |||
| 1633fc2217 | |||
| 758cc0c31e | |||
| 7c70df136a | |||
| 7464970eb1 | |||
| 4d2af27222 | |||
| a0b635338b | |||
| d9f8571c36 | |||
| 9f2e7acce0 | |||
| a38bf106f4 | |||
| 14a7d502bb | |||
| d12adf2eea | |||
| daaead1715 | |||
| 070d8411c7 | |||
| 03f4b295ed | |||
| 88ef09311d | |||
| 64946cb1c1 | |||
| 72315fa98d | |||
| 1af4fe4ecb | |||
| 30a460ee5f | |||
| 69f3ad5cc7 | |||
| b8a6bc73c4 | |||
| b340e283b8 | |||
| 7be19238e2 | |||
| 2a8bc82605 | |||
| 1999fce1d6 | |||
| 4d2c864223 | |||
| 5bb59dd08c | |||
| 9b1402f245 | |||
| 1d4f08bffb | |||
| 16734719d8 | |||
| d87f4b57df | |||
| 1a14964beb | |||
| b3e40797a4 | |||
| bed3bc4093 | |||
| b53218ca92 | |||
| 7ecb4e4ef4 | |||
| 6bde058d37 | |||
| 5370cb6a01 | |||
| 1c220d698e | |||
| 1b0b73cea9 | |||
| d2901432f3 | |||
| ceb7d73dd3 | |||
| 3cdca53adc | |||
| 58363e35c6 | |||
| 1de2900cdf | |||
| 9af713ee99 | |||
| e6f879b8d4 | |||
| cbfc766eb6 | |||
| 00b389c4c5 | |||
| fff49454bf | |||
| de9b0f0cbf | |||
| 3227d2b5c9 | |||
| 18b9bc5347 | |||
| 1722c3f9ad | |||
| 65c788fd09 | |||
| d7f8a52f79 | |||
| 61e538d3b3 | |||
| d899ad2bbc | |||
| 05c675239a | |||
| 7bf51bfd08 | |||
| 9c88641d3f | |||
| 7ef0642e83 | |||
| 1279ddbca0 | |||
| 53bcbb1ca5 | |||
| 51b31edcec | |||
| 2685b28a80 | |||
| 02e2c3311e | |||
| 8b7c267a31 | |||
| 6d3b5beb5a | |||
| 0e941f3178 | |||
| c138c54cdd | |||
| 8cec6e17a5 | |||
| edc7afcb6a | |||
| acfe418dc9 | |||
| 3e187d8b53 | |||
| d7f16ed0d7 | |||
| 7729fc27e4 | |||
| 6466d5584f | |||
| b5e2f58c41 | |||
| 2069dca742 | |||
| 1aed454788 | |||
| 7cc4662d8a | |||
| 5d7cfef3b7 | |||
| 27cc911030 | |||
| 843eda5148 | |||
| b39a1f4ad2 | |||
| 08077db9b3 | |||
| a9aa428722 | |||
| 2aed355665 | |||
| 7fb8a17c5d | |||
| 666aea569d | |||
| 8e0810a044 | |||
| 096bbc1e37 | |||
| fb3b844110 | |||
| 307eedd107 | |||
| 4ca44b92dd | |||
| 05ff0f9615 | |||
| 92ba09d5c6 | |||
| 872758d0b6 | |||
| 7701d78f5e | |||
| b4f15683f3 | |||
| 8f36dd4663 | |||
| bd8ffd894d | |||
| 4cbdd8e197 | |||
| 9cd9e9c86b | |||
| c526c0da9f | |||
| 10a6376c10 | |||
| 0790de6746 | |||
| f6db02ea2c | |||
| dc95c27654 | |||
| 6cdf5aba3b | |||
| 16a51adb4d | |||
| f4192c9acb | |||
| 25e78c5c6e | |||
| 3dca5daf78 | |||
| 775728503e | |||
| 9eafe19418 | |||
| 929795bcea | |||
| 17c83b2523 | |||
| 48a60ce484 | |||
| f3db95c04d | |||
| 70898ed07f | |||
| 0e321fbb8a | |||
| 4af8d7c8ae | |||
| a1f2885f0b | |||
| fd0a569013 | |||
| d9242a8ebe | |||
| 8a8cd94720 | |||
| aa9f9da991 | |||
| fd7f0b498e | |||
| 1011736886 | |||
| ebd0871f1c | |||
| e833cadb5e | |||
| d55b60fada | |||
| 77e7f8cd4b | |||
| aaaf3e8f25 | |||
| 6329f35b03 | |||
| 33acdf243d | |||
| 5c85baf61f | |||
| 09eca0a896 | |||
| cef720c745 | |||
| 4c7f2784ee | |||
| 9038a0d558 | |||
| 0f9392009c | |||
| ee92cf2a8a | |||
| da1f74ff29 | |||
| f62f467391 | |||
| 9344d9d16b | |||
| 0c45aba1ae | |||
| 2cb60a4cd1 | |||
| 2323733af0 | |||
| a017c85ec6 | |||
| 6f337f6376 | |||
| 4bc1b755ee | |||
| bc7efb37d7 | |||
| b8027f0f96 | |||
| 3a0e75d9af | |||
| 45e603a242 | |||
| 4d99e5598a | |||
| 6181ba002b | |||
| 522335ca4b | |||
| fc8dccb9ab | |||
| 72ba352028 | |||
| d83eaf251e | |||
| 525e69c6f1 | |||
| a4e677ef16 | |||
| fca6d22957 | |||
| 99267bd9dc | |||
| bebf513ac6 | |||
| d4942d7f6e | |||
| 06d57552a7 | |||
| 6750eb3298 | |||
| aad0ff916b | |||
| fababcc7e1 | |||
| 60cffecc0a | |||
| 257fb73521 | |||
| 0e0e0a075e | |||
| 512be0049c | |||
| 5980b1984b | |||
| 8a4a2cbf80 | |||
| db3f5260e5 | |||
| d0e70c4fd3 | |||
| 6f15a609d3 | |||
8 changed files with 944 additions and 223 deletions
59  .ci/pytorch/windows/cuda128.bat  Normal file

@@ -0,0 +1,59 @@
@echo off

set MODULE_NAME=pytorch

IF NOT EXIST "setup.py" IF NOT EXIST "%MODULE_NAME%" (
    call internal\clone.bat
    cd %~dp0
) ELSE (
    call internal\clean.bat
)
IF ERRORLEVEL 1 goto :eof

call internal\check_deps.bat
IF ERRORLEVEL 1 goto :eof

REM Check for optional components

set USE_CUDA=
set CMAKE_GENERATOR=Visual Studio 15 2017 Win64

IF "%NVTOOLSEXT_PATH%"=="" (
    IF EXIST "C:\Program Files\NVIDIA Corporation\NvToolsExt\lib\x64\nvToolsExt64_1.lib" (
        set NVTOOLSEXT_PATH=C:\Program Files\NVIDIA Corporation\NvToolsExt
    ) ELSE (
        echo NVTX ^(Visual Studio Extension ^for CUDA^) ^not installed, failing
        exit /b 1
    )
)

IF "%CUDA_PATH_V128%"=="" (
    IF EXIST "C:\Program Files\NVIDIA GPU Computing Toolkit\CUDA\v12.8\bin\nvcc.exe" (
        set "CUDA_PATH_V128=C:\Program Files\NVIDIA GPU Computing Toolkit\CUDA\v12.8"
    ) ELSE (
        echo CUDA 12.8 not found, failing
        exit /b 1
    )
)

IF "%BUILD_VISION%" == "" (
    set TORCH_CUDA_ARCH_LIST=5.0;6.0;6.1;7.0;7.5;8.0;8.6;9.0;10.0;12.0
    set TORCH_NVCC_FLAGS=-Xfatbin -compress-all
) ELSE (
    set NVCC_FLAGS=-D__CUDA_NO_HALF_OPERATORS__ --expt-relaxed-constexpr -gencode=arch=compute_50,code=sm_50 -gencode=arch=compute_60,code=sm_60 -gencode=arch=compute_70,code=sm_70 -gencode=arch=compute_75,code=sm_75 -gencode=arch=compute_80,code=compute_80 -gencode=arch=compute_86,code=compute_86 -gencode=arch=compute_90,code=compute_90 -gencode=arch=compute_100,code=compute_100 -gencode=arch=compute_120,code=compute_120
)

set "CUDA_PATH=%CUDA_PATH_V126%"
|
||||
set "PATH=%CUDA_PATH_V126%\bin;%PATH%"
|

:optcheck

call internal\check_opts.bat
IF ERRORLEVEL 1 goto :eof

if exist "%NIGHTLIES_PYTORCH_ROOT%" cd %NIGHTLIES_PYTORCH_ROOT%\..
call %~dp0\internal\copy.bat
IF ERRORLEVEL 1 goto :eof

call %~dp0\internal\setup.bat
IF ERRORLEVEL 1 goto :eof

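The NVTX error message in the script above escapes its parentheses with carets because the echo runs inside a parenthesized ELSE block, where a bare ")" would close the block early. A minimal standalone sketch of the same idiom (not part of the diff; DEMO_VAR is a made-up variable):

    @echo off
    if "%DEMO_VAR%"=="" (
        REM The ^( and ^) keep the parentheses literal inside this block.
        echo NVTX ^(Visual Studio Extension for CUDA^) not installed
    )
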
@@ -9,7 +9,8 @@ if "%CUDA_VERSION%" == "xpu" (
    exit /b 0
)

set SRC_DIR=%NIGHTLIES_PYTORCH_ROOT%
set SRC_DIR=%~dp0\..

if not exist "%SRC_DIR%\temp_build" mkdir "%SRC_DIR%\temp_build"

set /a CUDA_VER=%CUDA_VERSION%

@@ -23,9 +24,9 @@ set CUDNN_LIB_FOLDER="lib\x64"
if exist "C:\Program Files\NVIDIA GPU Computing Toolkit\CUDA\v%CUDA_VERSION_STR%\bin\nvcc.exe" goto set_cuda_env_vars

if %CUDA_VER% EQU 118 goto cuda118
if %CUDA_VER% EQU 121 goto cuda121
if %CUDA_VER% EQU 124 goto cuda124
if %CUDA_VER% EQU 126 goto cuda126
if %CUDA_VER% EQU 128 goto cuda128

echo CUDA %CUDA_VERSION_STR% is not supported
exit /b 1

@@ -111,6 +112,33 @@ xcopy /Y "%SRC_DIR%\temp_build\zlib\dll_x64\*.dll" "C:\Windows\System32"

goto cuda_common

:cuda128

set CUDA_INSTALL_EXE=cuda_12.8.0_571.96_windows.exe
if not exist "%SRC_DIR%\temp_build\%CUDA_INSTALL_EXE%" (
    curl -k -L "https://ossci-windows.s3.amazonaws.com/%CUDA_INSTALL_EXE%" --output "%SRC_DIR%\temp_build\%CUDA_INSTALL_EXE%"
    if errorlevel 1 exit /b 1
    set "CUDA_SETUP_FILE=%SRC_DIR%\temp_build\%CUDA_INSTALL_EXE%"
    set "ARGS=cuda_profiler_api_12.8 thrust_12.8 nvcc_12.8 cuobjdump_12.8 nvprune_12.8 nvprof_12.8 cupti_12.8 cublas_12.8 cublas_dev_12.8 cudart_12.8 cufft_12.8 cufft_dev_12.8 curand_12.8 curand_dev_12.8 cusolver_12.8 cusolver_dev_12.8 cusparse_12.8 cusparse_dev_12.8 npp_12.8 npp_dev_12.8 nvrtc_12.8 nvrtc_dev_12.8 nvml_dev_12.8 nvjitlink_12.8 nvtx_12.8"
)

set CUDNN_FOLDER=cudnn-windows-x86_64-9.7.0.66_cuda12-archive
set CUDNN_LIB_FOLDER="lib"
set "CUDNN_INSTALL_ZIP=%CUDNN_FOLDER%.zip"
if not exist "%SRC_DIR%\temp_build\%CUDNN_INSTALL_ZIP%" (
    curl -k -L "http://s3.amazonaws.com/ossci-windows/%CUDNN_INSTALL_ZIP%" --output "%SRC_DIR%\temp_build\%CUDNN_INSTALL_ZIP%"
    if errorlevel 1 exit /b 1
    set "CUDNN_SETUP_FILE=%SRC_DIR%\temp_build\%CUDNN_INSTALL_ZIP%"
)

@REM cuDNN 8.3+ required zlib to be installed on the path
echo Installing ZLIB dlls
curl -k -L "http://s3.amazonaws.com/ossci-windows/zlib123dllx64.zip" --output "%SRC_DIR%\temp_build\zlib123dllx64.zip"
7z x "%SRC_DIR%\temp_build\zlib123dllx64.zip" -o"%SRC_DIR%\temp_build\zlib"
xcopy /Y "%SRC_DIR%\temp_build\zlib\dll_x64\*.dll" "C:\Windows\System32"

goto cuda_common

:cuda_common
:: NOTE: We only install CUDA if we don't have it installed already.
:: With GHA runners these should be pre-installed as part of our AMI process

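For context on the hunks above: CUDA_VERSION arrives as the dotless string "128" for CUDA 12.8, set /a parses it into the numeric CUDA_VER, and the EQU comparisons dispatch to the matching :cudaXXX label. A minimal standalone sketch of that flow (not part of the diff):

    @echo off
    REM "128" is the dotless form of CUDA 12.8; set /a evaluates it as the number 128.
    set CUDA_VERSION=128
    set /a CUDA_VER=%CUDA_VERSION%
    if %CUDA_VER% EQU 128 goto cuda128
    echo CUDA %CUDA_VERSION% is not supported
    exit /b 1
    :cuda128
    echo Dispatched to the CUDA 12.8 install steps
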
5  .github/scripts/windows/build_magma.bat  vendored

@@ -35,7 +35,10 @@ cd magma
mkdir build && cd build

set GPU_TARGET=All
if "%CUVER_NODOT:~0,2%" == "12" (
if "%CUVER_NODOT%" == "128" (
    set CUDA_ARCH_LIST=-gencode arch=compute_50,code=sm_50 -gencode arch=compute_60,code=sm_60 -gencode arch=compute_70,code=sm_70 -gencode arch=compute_80,code=sm_80 -gencode arch=compute_86,code=sm_86 -gencode arch=compute_90,code=sm_90 -gencode arch=compute_100,code=sm_100 -gencode arch=compute_120,code=sm_120
)
if "%CUVER_NODOT:~0,2%" == "12" if NOT "%CUVER_NODOT%" == "128" (
    set CUDA_ARCH_LIST=-gencode arch=compute_50,code=sm_50 -gencode arch=compute_60,code=sm_60 -gencode arch=compute_70,code=sm_70 -gencode arch=compute_80,code=sm_80 -gencode arch=compute_86,code=sm_86 -gencode arch=compute_90,code=sm_90
)
if "%CUVER_NODOT%" == "118" (

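A note on the string tests above (a standalone sketch, not part of the diff): %CUVER_NODOT:~0,2% expands to the first two characters of the dotless CUDA version, so "126" and "128" both match the "12" prefix, and the chained if NOT clause keeps 12.8 on its own arch list (the one that adds sm_100 and sm_120):

    @echo off
    set CUVER_NODOT=128
    if "%CUVER_NODOT:~0,2%" == "12" if NOT "%CUVER_NODOT%" == "128" (
        echo generic CUDA 12.x arch list
    )
    if "%CUVER_NODOT%" == "128" (
        echo CUDA 12.8 arch list with sm_100 and sm_120
    )
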
218  .github/scripts/windows/cuda_install.bat  vendored

@@ -1,218 +0,0 @@
@echo on

if "%CUDA_VERSION%" == "cpu" (
    echo Skipping for CPU builds
    exit /b 0
)
if "%CUDA_VERSION%" == "xpu" (
    echo Skipping for XPU builds
    exit /b 0
)

set SRC_DIR=%~dp0\..

if not exist "%SRC_DIR%\temp_build" mkdir "%SRC_DIR%\temp_build"

set /a CUDA_VER=%CUDA_VERSION%
set CUDA_VER_MAJOR=%CUDA_VERSION:~0,-1%
set CUDA_VER_MINOR=%CUDA_VERSION:~-1,1%
set CUDA_VERSION_STR=%CUDA_VER_MAJOR%.%CUDA_VER_MINOR%
set CUDNN_FOLDER="cuda"
set CUDNN_LIB_FOLDER="lib\x64"

:: Skip all of this if we already have cuda installed
if exist "C:\Program Files\NVIDIA GPU Computing Toolkit\CUDA\v%CUDA_VERSION_STR%\bin\nvcc.exe" goto set_cuda_env_vars

if %CUDA_VER% EQU 118 goto cuda118
if %CUDA_VER% EQU 121 goto cuda121
if %CUDA_VER% EQU 124 goto cuda124
if %CUDA_VER% EQU 126 goto cuda126

echo CUDA %CUDA_VERSION_STR% is not supported
exit /b 1

:cuda118

set CUDA_INSTALL_EXE=cuda_11.8.0_522.06_windows.exe
if not exist "%SRC_DIR%\temp_build\%CUDA_INSTALL_EXE%" (
    curl -k -L "https://ossci-windows.s3.amazonaws.com/%CUDA_INSTALL_EXE%" --output "%SRC_DIR%\temp_build\%CUDA_INSTALL_EXE%"
    if errorlevel 1 exit /b 1
    set "CUDA_SETUP_FILE=%SRC_DIR%\temp_build\%CUDA_INSTALL_EXE%"
    set "ARGS=cuda_profiler_api_11.8 thrust_11.8 nvcc_11.8 cuobjdump_11.8 nvprune_11.8 nvprof_11.8 cupti_11.8 cublas_11.8 cublas_dev_11.8 cudart_11.8 cufft_11.8 cufft_dev_11.8 curand_11.8 curand_dev_11.8 cusolver_11.8 cusolver_dev_11.8 cusparse_11.8 cusparse_dev_11.8 npp_11.8 npp_dev_11.8 nvrtc_11.8 nvrtc_dev_11.8 nvml_dev_11.8 nvtx_11.8"
)

set CUDNN_FOLDER=cudnn-windows-x86_64-9.5.0.50_cuda11-archive
set CUDNN_LIB_FOLDER="lib"
set "CUDNN_INSTALL_ZIP=%CUDNN_FOLDER%.zip"
if not exist "%SRC_DIR%\temp_build\%CUDNN_INSTALL_ZIP%" (
    curl -k -L "http://s3.amazonaws.com/ossci-windows/%CUDNN_INSTALL_ZIP%" --output "%SRC_DIR%\temp_build\%CUDNN_INSTALL_ZIP%"
    if errorlevel 1 exit /b 1
    set "CUDNN_SETUP_FILE=%SRC_DIR%\temp_build\%CUDNN_INSTALL_ZIP%"
)

@REM cuDNN 8.3+ required zlib to be installed on the path
echo Installing ZLIB dlls
curl -k -L "http://s3.amazonaws.com/ossci-windows/zlib123dllx64.zip" --output "%SRC_DIR%\temp_build\zlib123dllx64.zip"
7z x "%SRC_DIR%\temp_build\zlib123dllx64.zip" -o"%SRC_DIR%\temp_build\zlib"
xcopy /Y "%SRC_DIR%\temp_build\zlib\dll_x64\*.dll" "C:\Windows\System32"

goto cuda_common

:cuda121

set CUDA_INSTALL_EXE=cuda_12.1.1_531.14_windows.exe
if not exist "%SRC_DIR%\temp_build\%CUDA_INSTALL_EXE%" (
    curl -k -L "https://ossci-windows.s3.amazonaws.com/%CUDA_INSTALL_EXE%" --output "%SRC_DIR%\temp_build\%CUDA_INSTALL_EXE%"
    if errorlevel 1 exit /b 1
    set "CUDA_SETUP_FILE=%SRC_DIR%\temp_build\%CUDA_INSTALL_EXE%"
    set "ARGS=cuda_profiler_api_12.1 thrust_12.1 nvcc_12.1 cuobjdump_12.1 nvprune_12.1 nvprof_12.1 cupti_12.1 cublas_12.1 cublas_dev_12.1 cudart_12.1 cufft_12.1 cufft_dev_12.1 curand_12.1 curand_dev_12.1 cusolver_12.1 cusolver_dev_12.1 cusparse_12.1 cusparse_dev_12.1 npp_12.1 npp_dev_12.1 nvrtc_12.1 nvrtc_dev_12.1 nvml_dev_12.1 nvjitlink_12.1 nvtx_12.1"
)

set CUDNN_FOLDER=cudnn-windows-x86_64-9.5.0.50_cuda12-archive
set CUDNN_LIB_FOLDER="lib"
set "CUDNN_INSTALL_ZIP=%CUDNN_FOLDER%.zip"
if not exist "%SRC_DIR%\temp_build\%CUDNN_INSTALL_ZIP%" (
    curl -k -L "http://s3.amazonaws.com/ossci-windows/%CUDNN_INSTALL_ZIP%" --output "%SRC_DIR%\temp_build\%CUDNN_INSTALL_ZIP%"
    if errorlevel 1 exit /b 1
    set "CUDNN_SETUP_FILE=%SRC_DIR%\temp_build\%CUDNN_INSTALL_ZIP%"
)

@REM cuDNN 8.3+ required zlib to be installed on the path
echo Installing ZLIB dlls
curl -k -L "http://s3.amazonaws.com/ossci-windows/zlib123dllx64.zip" --output "%SRC_DIR%\temp_build\zlib123dllx64.zip"
7z x "%SRC_DIR%\temp_build\zlib123dllx64.zip" -o"%SRC_DIR%\temp_build\zlib"
xcopy /Y "%SRC_DIR%\temp_build\zlib\dll_x64\*.dll" "C:\Windows\System32"

goto cuda_common

:cuda124

set CUDA_INSTALL_EXE=cuda_12.4.0_551.61_windows.exe
if not exist "%SRC_DIR%\temp_build\%CUDA_INSTALL_EXE%" (
    curl -k -L "https://ossci-windows.s3.amazonaws.com/%CUDA_INSTALL_EXE%" --output "%SRC_DIR%\temp_build\%CUDA_INSTALL_EXE%"
    if errorlevel 1 exit /b 1
    set "CUDA_SETUP_FILE=%SRC_DIR%\temp_build\%CUDA_INSTALL_EXE%"
    set "ARGS=cuda_profiler_api_12.4 thrust_12.4 nvcc_12.4 cuobjdump_12.4 nvprune_12.4 nvprof_12.4 cupti_12.4 cublas_12.4 cublas_dev_12.4 cudart_12.4 cufft_12.4 cufft_dev_12.4 curand_12.4 curand_dev_12.4 cusolver_12.4 cusolver_dev_12.4 cusparse_12.4 cusparse_dev_12.4 npp_12.4 npp_dev_12.4 nvrtc_12.4 nvrtc_dev_12.4 nvml_dev_12.4 nvjitlink_12.4 nvtx_12.4"
)

set CUDNN_FOLDER=cudnn-windows-x86_64-9.5.0.50_cuda12-archive
set CUDNN_LIB_FOLDER="lib"
set "CUDNN_INSTALL_ZIP=%CUDNN_FOLDER%.zip"
if not exist "%SRC_DIR%\temp_build\%CUDNN_INSTALL_ZIP%" (
    curl -k -L "http://s3.amazonaws.com/ossci-windows/%CUDNN_INSTALL_ZIP%" --output "%SRC_DIR%\temp_build\%CUDNN_INSTALL_ZIP%"
    if errorlevel 1 exit /b 1
    set "CUDNN_SETUP_FILE=%SRC_DIR%\temp_build\%CUDNN_INSTALL_ZIP%"
)

@REM cuDNN 8.3+ required zlib to be installed on the path
echo Installing ZLIB dlls
curl -k -L "http://s3.amazonaws.com/ossci-windows/zlib123dllx64.zip" --output "%SRC_DIR%\temp_build\zlib123dllx64.zip"
7z x "%SRC_DIR%\temp_build\zlib123dllx64.zip" -o"%SRC_DIR%\temp_build\zlib"
xcopy /Y "%SRC_DIR%\temp_build\zlib\dll_x64\*.dll" "C:\Windows\System32"

goto cuda_common

:cuda126

set CUDA_INSTALL_EXE=cuda_12.6.2_560.94_windows.exe
if not exist "%SRC_DIR%\temp_build\%CUDA_INSTALL_EXE%" (
    curl -k -L "https://ossci-windows.s3.amazonaws.com/%CUDA_INSTALL_EXE%" --output "%SRC_DIR%\temp_build\%CUDA_INSTALL_EXE%"
    if errorlevel 1 exit /b 1
    set "CUDA_SETUP_FILE=%SRC_DIR%\temp_build\%CUDA_INSTALL_EXE%"
    set "ARGS=cuda_profiler_api_12.6 thrust_12.6 nvcc_12.6 cuobjdump_12.6 nvprune_12.6 nvprof_12.6 cupti_12.6 cublas_12.6 cublas_dev_12.6 cudart_12.6 cufft_12.6 cufft_dev_12.6 curand_12.6 curand_dev_12.6 cusolver_12.6 cusolver_dev_12.6 cusparse_12.6 cusparse_dev_12.6 npp_12.6 npp_dev_12.6 nvrtc_12.6 nvrtc_dev_12.6 nvml_dev_12.6 nvjitlink_12.6 nvtx_12.6"
)

set CUDNN_FOLDER=cudnn-windows-x86_64-9.5.0.50_cuda12-archive
set CUDNN_LIB_FOLDER="lib"
set "CUDNN_INSTALL_ZIP=%CUDNN_FOLDER%.zip"
if not exist "%SRC_DIR%\temp_build\%CUDNN_INSTALL_ZIP%" (
    curl -k -L "http://s3.amazonaws.com/ossci-windows/%CUDNN_INSTALL_ZIP%" --output "%SRC_DIR%\temp_build\%CUDNN_INSTALL_ZIP%"
    if errorlevel 1 exit /b 1
    set "CUDNN_SETUP_FILE=%SRC_DIR%\temp_build\%CUDNN_INSTALL_ZIP%"
)

@REM cuDNN 8.3+ required zlib to be installed on the path
echo Installing ZLIB dlls
curl -k -L "http://s3.amazonaws.com/ossci-windows/zlib123dllx64.zip" --output "%SRC_DIR%\temp_build\zlib123dllx64.zip"
7z x "%SRC_DIR%\temp_build\zlib123dllx64.zip" -o"%SRC_DIR%\temp_build\zlib"
xcopy /Y "%SRC_DIR%\temp_build\zlib\dll_x64\*.dll" "C:\Windows\System32"

goto cuda_common

:cuda_common
:: NOTE: We only install CUDA if we don't have it installed already.
:: With GHA runners these should be pre-installed as part of our AMI process
:: If you cannot find the CUDA version you want to build for here then please
:: add it @ https://github.com/pytorch/test-infra/tree/main/aws/ami/windows
if not exist "C:\Program Files\NVIDIA GPU Computing Toolkit\CUDA\v%CUDA_VERSION_STR%\bin\nvcc.exe" (
    if not exist "%SRC_DIR%\temp_build\NvToolsExt.7z" (
        curl -k -L https://ossci-windows.s3.us-east-1.amazonaws.com/builder/NvToolsExt.7z --output "%SRC_DIR%\temp_build\NvToolsExt.7z"
        if errorlevel 1 exit /b 1
    )

    if not exist "%SRC_DIR%\temp_build\gpu_driver_dlls.zip" (
        curl -k -L "https://ossci-windows.s3.us-east-1.amazonaws.com/builder/additional_dlls.zip" --output "%SRC_DIR%\temp_build\gpu_driver_dlls.zip"
        if errorlevel 1 exit /b 1
    )

    echo Installing CUDA toolkit...
    7z x %CUDA_SETUP_FILE% -o"%SRC_DIR%\temp_build\cuda"
    pushd "%SRC_DIR%\temp_build\cuda"

    sc config wuauserv start= disabled
    sc stop wuauserv
    sc query wuauserv

    start /wait setup.exe -s %ARGS% -loglevel:6 -log:"%cd%/cuda_install_logs"
    echo %errorlevel%

    popd

    echo Installing VS integration...
    if "%VC_YEAR%" == "2019" (
        xcopy /Y "%SRC_DIR%\temp_build\cuda\CUDAVisualStudioIntegration\extras\visual_studio_integration\MSBuildExtensions\*.*" "C:\Program Files (x86)\Microsoft Visual Studio\2019\BuildTools\MSBuild\Microsoft\VC\v160\BuildCustomizations"
    )
    if "%VC_YEAR%" == "2022" (
        xcopy /Y "%SRC_DIR%\temp_build\cuda\CUDAVisualStudioIntegration\extras\visual_studio_integration\MSBuildExtensions\*.*" "C:\Program Files (x86)\Microsoft Visual Studio\2022\BuildTools\MSBuild\Microsoft\VC\v170\BuildCustomizations"
    )

    echo Installing NvToolsExt...
    7z x %SRC_DIR%\temp_build\NvToolsExt.7z -o"%SRC_DIR%\temp_build\NvToolsExt"
    mkdir "%ProgramFiles%\NVIDIA Corporation\NvToolsExt\bin\x64"
    mkdir "%ProgramFiles%\NVIDIA Corporation\NvToolsExt\include"
    mkdir "%ProgramFiles%\NVIDIA Corporation\NvToolsExt\lib\x64"
    xcopy /Y "%SRC_DIR%\temp_build\NvToolsExt\bin\x64\*.*" "%ProgramFiles%\NVIDIA Corporation\NvToolsExt\bin\x64"
    xcopy /Y "%SRC_DIR%\temp_build\NvToolsExt\include\*.*" "%ProgramFiles%\NVIDIA Corporation\NvToolsExt\include"
    xcopy /Y "%SRC_DIR%\temp_build\NvToolsExt\lib\x64\*.*" "%ProgramFiles%\NVIDIA Corporation\NvToolsExt\lib\x64"

    echo Installing cuDNN...
    7z x %CUDNN_SETUP_FILE% -o"%SRC_DIR%\temp_build\cudnn"
    xcopy /Y "%SRC_DIR%\temp_build\cudnn\%CUDNN_FOLDER%\bin\*.*" "%ProgramFiles%\NVIDIA GPU Computing Toolkit\CUDA\v%CUDA_VERSION_STR%\bin"
    xcopy /Y "%SRC_DIR%\temp_build\cudnn\%CUDNN_FOLDER%\%CUDNN_LIB_FOLDER%\*.*" "%ProgramFiles%\NVIDIA GPU Computing Toolkit\CUDA\v%CUDA_VERSION_STR%\lib\x64"
    xcopy /Y "%SRC_DIR%\temp_build\cudnn\%CUDNN_FOLDER%\include\*.*" "%ProgramFiles%\NVIDIA GPU Computing Toolkit\CUDA\v%CUDA_VERSION_STR%\include"

    echo Installing GPU driver DLLs
    7z x %SRC_DIR%\temp_build\gpu_driver_dlls.zip -o"C:\Windows\System32"

    echo Cleaning temp files
    rd /s /q "%SRC_DIR%\temp_build" || ver > nul

    if not exist "%ProgramFiles%\NVIDIA GPU Computing Toolkit\CUDA\v%CUDA_VERSION_STR%\bin\nvcc.exe" (
        echo CUDA %CUDA_VERSION_STR% installed failed.
        echo --------- setup.exe.log -------
        type "%SRC_DIR%\temp_build\cuda\cuda_install_logs\LOG.setup.exe.log"
        echo --------- RunDll32.exe.log
        type "%SRC_DIR%\temp_build\cuda\cuda_install_logs\LOG.RunDll32.exe.log"
        exit /b 1
    )
)

goto set_cuda_env_vars

:set_cuda_env_vars

echo Setting up environment...
set "PATH=%ProgramFiles%\NVIDIA GPU Computing Toolkit\CUDA\v%CUDA_VERSION_STR%\bin;%ProgramFiles%\NVIDIA GPU Computing Toolkit\CUDA\v%CUDA_VERSION_STR%\libnvvp;%PATH%"
set "CUDA_PATH=%ProgramFiles%\NVIDIA GPU Computing Toolkit\CUDA\v%CUDA_VERSION_STR%"
set "CUDA_PATH_V%CUDA_VER_MAJOR%_%CUDA_VER_MINOR%=%ProgramFiles%\NVIDIA GPU Computing Toolkit\CUDA\v%CUDA_VERSION_STR%"
set "NVTOOLSEXT_PATH=%ProgramFiles%\NVIDIA Corporation\NvToolsExt"

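As a pointer for reading the deleted script above (a standalone sketch, not part of the diff): the substring expansions split the dotless version into major and minor parts, and the final set composes a variable whose name embeds them, so CUDA_VERSION=128 yields CUDA_VERSION_STR=12.8 and a variable literally named CUDA_PATH_V12_8. The install root below is a placeholder path:

    @echo off
    set CUDA_VERSION=128
    set CUDA_VER_MAJOR=%CUDA_VERSION:~0,-1%
    set CUDA_VER_MINOR=%CUDA_VERSION:~-1,1%
    set CUDA_VERSION_STR=%CUDA_VER_MAJOR%.%CUDA_VER_MINOR%
    REM Placeholder location, only to show the dynamically named variable.
    set "CUDA_PATH_V%CUDA_VER_MAJOR%_%CUDA_VER_MINOR%=C:\placeholder\CUDA\v%CUDA_VERSION_STR%"
    echo %CUDA_VERSION_STR%
    echo %CUDA_PATH_V12_8%
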
4  .github/workflows/build-magma-windows.yml  vendored

@@ -22,7 +22,7 @@ jobs:
    runs-on: windows-2019
    strategy:
      matrix:
        cuda_version: ["126", "124", "118"]
        cuda_version: ["128", "126", "124", "118"]
        config: ["Release", "Debug"]
    env:
      CUDA_VERSION: ${{ matrix.cuda_version }}

@@ -33,7 +33,7 @@ jobs:
      - name: Enable MSVC dev commands to enable cl.exe # FYI incompatible with shell: bash
        uses: ilammy/msvc-dev-cmd@dd5e2fa0a7de1e7929605d9ecc020e749d9856a3
      - name: Install CUDA Toolkit
        run: .github/scripts/windows/cuda_install.bat
        run: .ci/pytorch/windows/internal/cuda_install.bat
      - name: Build MAGMA and push to S3
        run: .github/scripts/windows/build_magma.bat
      - name: Save as artifact

387  notes.md  Normal file

@@ -0,0 +1,387 @@
2014-01-01T08:34:00 - Quick fix, referencing a known issue from the official repo (pytorch)
|
||||
2014-01-01T18:22:00 - Trying out boneh-franklin approach for IBE (ref. 2003 paper) (pytorch)
|
||||
2014-01-01T19:32:00 - Experimenting with FPGA constraints (source: Trimberger 'Three Ages of FPGAs') (pytorch)
|
||||
2014-01-01T05:53:00 - Quick fix, referencing a known issue from the official repo (pytorch)
|
||||
2014-01-07T18:58:00 - Implementing approach from a new paper read last night (pytorch)
|
||||
2014-01-07T23:21:00 - Implementing approach from a new paper read last night (pytorch)
|
||||
2014-01-24T07:05:00 - Quick fix, referencing a known issue from the official repo (pytorch)
|
||||
2014-01-24T23:53:00 - Experimenting with FPGA constraints (source: Trimberger 'Three Ages of FPGAs') (pytorch)
|
||||
2014-01-24T04:16:00 - Quick fix, referencing a known issue from the official repo (pytorch)
|
||||
2014-01-30T22:47:00 - Quick fix, referencing a known issue from the official repo (pytorch)
|
||||
2014-02-01T06:23:00 - Refactor for clarity, might break a few tests though (pytorch)
|
||||
2014-02-01T02:35:00 - Implementing approach from a new paper read last night (pytorch)
|
||||
2014-02-01T23:18:00 - Refactor for clarity, might break a few tests though (pytorch)
|
||||
2014-04-03T01:39:00 - Testing bigger LLM config, referencing 'Attention Is All You Need' (pytorch)
|
||||
2014-04-03T07:39:00 - Late-night bugfix on financial RL environment (pytorch)
|
||||
2014-04-03T06:23:00 - Late-night bugfix on financial RL environment (pytorch)
|
||||
2014-04-03T23:10:00 - Refactor for clarity, might break a few tests though (pytorch)
|
||||
2014-04-19T04:05:00 - Quick fix, referencing a known issue from the official repo (pytorch)
|
||||
2014-04-19T06:24:00 - Trying out boneh-franklin approach for IBE (ref. 2003 paper) (pytorch)
|
||||
2014-04-19T00:56:00 - Testing bigger LLM config, referencing 'Attention Is All You Need' (pytorch)
|
||||
2014-04-19T07:15:00 - Refactor for clarity, might break a few tests though (pytorch)
|
||||
2014-04-22T17:59:00 - Implementing approach from a new paper read last night (pytorch)
|
||||
2014-04-22T08:19:00 - Implementing approach from a new paper read last night (pytorch)
|
||||
2014-04-22T02:46:00 - Minor doc updates: linking to article on quantization (pytorch)
|
||||
2014-05-10T02:21:00 - Minor doc updates: linking to article on quantization (pytorch)
|
||||
2014-05-10T03:47:00 - Late-night bugfix on financial RL environment (pytorch)
|
||||
2014-05-10T03:54:00 - Experimenting with FPGA constraints (source: Trimberger 'Three Ages of FPGAs') (pytorch)
|
||||
2014-05-26T18:49:00 - Trying out boneh-franklin approach for IBE (ref. 2003 paper) (pytorch)
|
||||
2014-05-26T01:20:00 - Late-night bugfix on financial RL environment (pytorch)
|
||||
2014-06-17T21:15:00 - Implementing approach from a new paper read last night (pytorch)
|
||||
2014-06-17T02:33:00 - Trying out boneh-franklin approach for IBE (ref. 2003 paper) (pytorch)
|
||||
2014-06-17T03:03:00 - Trying out boneh-franklin approach for IBE (ref. 2003 paper) (pytorch)
|
||||
2014-07-24T17:06:00 - Quick fix, referencing a known issue from the official repo (pytorch)
|
||||
2014-07-24T18:07:00 - Late-night bugfix on financial RL environment (pytorch)
|
||||
2014-07-24T17:45:00 - Minor doc updates: linking to article on quantization (pytorch)
|
||||
2014-08-14T08:37:00 - Experimenting with FPGA constraints (source: Trimberger 'Three Ages of FPGAs') (pytorch)
|
||||
2014-08-14T23:15:00 - Testing bigger LLM config, referencing 'Attention Is All You Need' (pytorch)
|
||||
2014-08-14T02:24:00 - Implementing approach from a new paper read last night (pytorch)
|
||||
2014-09-15T07:29:00 - Experimenting with FPGA constraints (source: Trimberger 'Three Ages of FPGAs') (pytorch)
|
||||
2014-09-15T04:19:00 - Minor doc updates: linking to article on quantization (pytorch)
|
||||
2014-09-30T19:25:00 - Minor doc updates: linking to article on quantization (pytorch)
|
||||
2014-09-30T01:10:00 - Minor doc updates: linking to article on quantization (pytorch)
|
||||
2014-11-13T17:28:00 - Minor doc updates: linking to article on quantization (pytorch)
|
||||
2014-11-13T00:38:00 - Experimenting with FPGA constraints (source: Trimberger 'Three Ages of FPGAs') (pytorch)
|
||||
2014-11-13T04:22:00 - Trying out boneh-franklin approach for IBE (ref. 2003 paper) (pytorch)
|
||||
2014-12-17T00:05:00 - Minor doc updates: linking to article on quantization (pytorch)
|
||||
2014-12-17T04:39:00 - Refactor for clarity, might break a few tests though (pytorch)
|
||||
2014-12-17T04:07:00 - Late-night bugfix on financial RL environment (pytorch)
|
||||
2015-01-20T23:25:00 - Quick fix, referencing a known issue from the official repo (pytorch)
|
||||
2015-01-20T21:42:00 - Late-night bugfix on financial RL environment (pytorch)
|
||||
2015-01-20T18:16:00 - Minor doc updates: linking to article on quantization (pytorch)
|
||||
2015-02-25T20:10:00 - Late-night bugfix on financial RL environment (pytorch)
|
||||
2015-02-25T18:43:00 - Trying out boneh-franklin approach for IBE (ref. 2003 paper) (pytorch)
|
||||
2015-02-25T21:29:00 - Minor doc updates: linking to article on quantization (pytorch)
|
||||
2015-03-18T04:51:00 - Quick fix, referencing a known issue from the official repo (pytorch)
|
||||
2015-03-29T19:07:00 - Quick fix, referencing a known issue from the official repo (pytorch)
|
||||
2015-03-29T19:49:00 - Trying out boneh-franklin approach for IBE (ref. 2003 paper) (pytorch)
|
||||
2015-03-29T23:51:00 - Implementing approach from a new paper read last night (pytorch)
|
||||
2015-09-18T02:22:00 - Late-night bugfix on financial RL environment (pytorch)
|
||||
2015-09-18T22:58:00 - Implementing approach from a new paper read last night (pytorch)
|
||||
2015-09-21T08:29:00 - Implementing approach from a new paper read last night (pytorch)
|
||||
2015-09-21T01:56:00 - Implementing approach from a new paper read last night (pytorch)
|
||||
2015-10-24T03:41:00 - Trying out boneh-franklin approach for IBE (ref. 2003 paper) (pytorch)
|
||||
2015-10-24T02:26:00 - Trying out boneh-franklin approach for IBE (ref. 2003 paper) (pytorch)
|
||||
2015-10-24T03:46:00 - Implementing approach from a new paper read last night (pytorch)
|
||||
2015-12-08T03:48:00 - Minor doc updates: linking to article on quantization (pytorch)
|
||||
2015-12-08T19:24:00 - Quick fix, referencing a known issue from the official repo (pytorch)
|
||||
2015-12-08T21:12:00 - Refactor for clarity, might break a few tests though (pytorch)
|
||||
2015-12-08T05:14:00 - Quick fix, referencing a known issue from the official repo (pytorch)
|
||||
2015-12-20T04:23:00 - Experimenting with FPGA constraints (source: Trimberger 'Three Ages of FPGAs') (pytorch)
|
||||
2015-12-20T17:04:00 - Late-night bugfix on financial RL environment (pytorch)
|
||||
2015-12-20T03:44:00 - Quick fix, referencing a known issue from the official repo (pytorch)
|
||||
2015-12-20T20:20:00 - Minor doc updates: linking to article on quantization (pytorch)
|
||||
2016-02-04T21:52:00 - Late-night bugfix on financial RL environment (pytorch)
|
||||
2016-02-04T04:33:00 - Testing bigger LLM config, referencing 'Attention Is All You Need' (pytorch)
|
||||
2016-02-04T23:28:00 - Implementing approach from a new paper read last night (pytorch)
|
||||
2016-02-04T23:54:00 - Late-night bugfix on financial RL environment (pytorch)
|
||||
2016-04-26T23:19:00 - Trying out boneh-franklin approach for IBE (ref. 2003 paper) (pytorch)
|
||||
2016-04-26T08:33:00 - Minor doc updates: linking to article on quantization (pytorch)
|
||||
2016-04-27T04:38:00 - Refactor for clarity, might break a few tests though (pytorch)
|
||||
2016-08-04T03:37:00 - Minor doc updates: linking to article on quantization (pytorch)
|
||||
2016-08-04T23:42:00 - Testing bigger LLM config, referencing 'Attention Is All You Need' (pytorch)
|
||||
2016-08-04T23:32:00 - Testing bigger LLM config, referencing 'Attention Is All You Need' (pytorch)
|
||||
2016-08-04T02:06:00 - Testing bigger LLM config, referencing 'Attention Is All You Need' (pytorch)
|
||||
2016-11-17T17:04:00 - Minor doc updates: linking to article on quantization (pytorch)
|
||||
2016-11-22T23:19:00 - Trying out boneh-franklin approach for IBE (ref. 2003 paper) (pytorch)
|
||||
2016-12-27T20:56:00 - Experimenting with FPGA constraints (source: Trimberger 'Three Ages of FPGAs') (pytorch)
|
||||
2016-12-27T01:28:00 - Trying out boneh-franklin approach for IBE (ref. 2003 paper) (pytorch)
|
||||
2016-12-27T02:07:00 - Experimenting with FPGA constraints (source: Trimberger 'Three Ages of FPGAs') (pytorch)
|
||||
2016-12-27T01:21:00 - Minor doc updates: linking to article on quantization (pytorch)
|
||||
2017-01-08T03:46:00 - Implementing approach from a new paper read last night (pytorch)
|
||||
2017-01-15T17:00:00 - Testing bigger LLM config, referencing 'Attention Is All You Need' (pytorch)
|
||||
2017-01-15T03:35:00 - Experimenting with FPGA constraints (source: Trimberger 'Three Ages of FPGAs') (pytorch)
|
||||
2017-01-15T05:38:00 - Quick fix, referencing a known issue from the official repo (pytorch)
|
||||
2017-01-15T22:35:00 - Implementing approach from a new paper read last night (pytorch)
|
||||
2017-01-25T22:51:00 - Late-night bugfix on financial RL environment (pytorch)
|
||||
2017-01-25T06:53:00 - Trying out boneh-franklin approach for IBE (ref. 2003 paper) (pytorch)
|
||||
2017-01-25T07:37:00 - Implementing approach from a new paper read last night (pytorch)
|
||||
2017-01-25T03:18:00 - Experimenting with FPGA constraints (source: Trimberger 'Three Ages of FPGAs') (pytorch)
|
||||
2017-02-22T21:56:00 - Testing bigger LLM config, referencing 'Attention Is All You Need' (pytorch)
|
||||
2017-02-22T04:16:00 - Refactor for clarity, might break a few tests though (pytorch)
|
||||
2017-04-25T01:34:00 - Experimenting with FPGA constraints (source: Trimberger 'Three Ages of FPGAs') (pytorch)
|
||||
2017-04-25T03:42:00 - Late-night bugfix on financial RL environment (pytorch)
|
||||
2017-04-25T04:17:00 - Trying out boneh-franklin approach for IBE (ref. 2003 paper) (pytorch)
|
||||
2017-08-02T06:09:00 - Late-night bugfix on financial RL environment (pytorch)
|
||||
2017-08-02T22:07:00 - Trying out boneh-franklin approach for IBE (ref. 2003 paper) (pytorch)
|
||||
2017-08-05T05:46:00 - Implementing approach from a new paper read last night (pytorch)
|
||||
2017-08-28T22:00:00 - Testing bigger LLM config, referencing 'Attention Is All You Need' (pytorch)
|
||||
2017-10-24T04:13:00 - Experimenting with FPGA constraints (source: Trimberger 'Three Ages of FPGAs') (pytorch)
|
||||
2017-10-24T08:57:00 - Minor doc updates: linking to article on quantization (pytorch)
|
||||
2017-10-24T04:21:00 - Late-night bugfix on financial RL environment (pytorch)
|
||||
2017-11-18T19:53:00 - Late-night bugfix on financial RL environment (pytorch)
|
||||
2017-11-25T00:43:00 - Late-night bugfix on financial RL environment (pytorch)
|
||||
2017-12-02T03:36:00 - Trying out boneh-franklin approach for IBE (ref. 2003 paper) (pytorch)
|
||||
2017-12-02T03:40:00 - Refactor for clarity, might break a few tests though (pytorch)
|
||||
2017-12-02T17:54:00 - Trying out boneh-franklin approach for IBE (ref. 2003 paper) (pytorch)
|
||||
2017-12-22T21:46:00 - Late-night bugfix on financial RL environment (pytorch)
|
||||
2018-02-03T21:32:00 - Minor doc updates: linking to article on quantization (pytorch)
|
||||
2018-02-19T03:32:00 - Testing bigger LLM config, referencing 'Attention Is All You Need' (pytorch)
|
||||
2018-02-19T22:55:00 - Late-night bugfix on financial RL environment (pytorch)
|
||||
2018-02-19T00:19:00 - Experimenting with FPGA constraints (source: Trimberger 'Three Ages of FPGAs') (pytorch)
|
||||
2018-02-19T22:30:00 - Trying out boneh-franklin approach for IBE (ref. 2003 paper) (pytorch)
|
||||
2018-02-20T18:41:00 - Refactor for clarity, might break a few tests though (pytorch)
|
||||
2018-02-20T22:26:00 - Minor doc updates: linking to article on quantization (pytorch)
|
||||
2018-02-20T07:25:00 - Experimenting with FPGA constraints (source: Trimberger 'Three Ages of FPGAs') (pytorch)
|
||||
2018-02-20T17:47:00 - Testing bigger LLM config, referencing 'Attention Is All You Need' (pytorch)
|
||||
2018-04-27T21:58:00 - Trying out boneh-franklin approach for IBE (ref. 2003 paper) (pytorch)
|
||||
2018-04-27T21:30:00 - Minor doc updates: linking to article on quantization (pytorch)
|
||||
2018-06-10T20:39:00 - Testing bigger LLM config, referencing 'Attention Is All You Need' (pytorch)
|
||||
2018-06-10T03:20:00 - Testing bigger LLM config, referencing 'Attention Is All You Need' (pytorch)
|
||||
2018-06-10T17:33:00 - Refactor for clarity, might break a few tests though (pytorch)
|
||||
2018-06-10T00:44:00 - Testing bigger LLM config, referencing 'Attention Is All You Need' (pytorch)
|
||||
2018-06-17T05:13:00 - Quick fix, referencing a known issue from the official repo (pytorch)
|
||||
2018-07-03T01:23:00 - Trying out boneh-franklin approach for IBE (ref. 2003 paper) (pytorch)
|
||||
2018-07-03T21:50:00 - Experimenting with FPGA constraints (source: Trimberger 'Three Ages of FPGAs') (pytorch)
|
||||
2018-07-03T21:29:00 - Experimenting with FPGA constraints (source: Trimberger 'Three Ages of FPGAs') (pytorch)
|
||||
2018-09-20T07:14:00 - Experimenting with FPGA constraints (source: Trimberger 'Three Ages of FPGAs') (pytorch)
|
||||
2018-10-25T07:11:00 - Testing bigger LLM config, referencing 'Attention Is All You Need' (pytorch)
|
||||
2018-10-25T22:23:00 - Late-night bugfix on financial RL environment (pytorch)
|
||||
2018-10-25T06:39:00 - Experimenting with FPGA constraints (source: Trimberger 'Three Ages of FPGAs') (pytorch)
|
||||
2018-11-14T01:50:00 - Implementing approach from a new paper read last night (pytorch)
|
||||
2018-11-14T05:21:00 - Late-night bugfix on financial RL environment (pytorch)
|
||||
2018-11-14T03:18:00 - Testing bigger LLM config, referencing 'Attention Is All You Need' (pytorch)
|
||||
2018-11-14T23:58:00 - Refactor for clarity, might break a few tests though (pytorch)
|
||||
2018-12-16T02:43:00 - Minor doc updates: linking to article on quantization (pytorch)
|
||||
2018-12-16T20:49:00 - Implementing approach from a new paper read last night (pytorch)
|
||||
2018-12-16T18:48:00 - Minor doc updates: linking to article on quantization (pytorch)
|
||||
2018-12-16T00:27:00 - Minor doc updates: linking to article on quantization (pytorch)
|
||||
2019-01-02T17:12:00 - Trying out boneh-franklin approach for IBE (ref. 2003 paper) (pytorch)
|
||||
2019-01-02T01:30:00 - Quick fix, referencing a known issue from the official repo (pytorch)
|
||||
2019-01-02T23:01:00 - Trying out boneh-franklin approach for IBE (ref. 2003 paper) (pytorch)
|
||||
2019-01-13T05:16:00 - Minor doc updates: linking to article on quantization (pytorch)
|
||||
2019-01-13T20:16:00 - Quick fix, referencing a known issue from the official repo (pytorch)
|
||||
2019-02-25T02:42:00 - Late-night bugfix on financial RL environment (pytorch)
|
||||
2019-03-03T00:07:00 - Implementing approach from a new paper read last night (pytorch)
|
||||
2019-03-20T18:03:00 - Testing bigger LLM config, referencing 'Attention Is All You Need' (pytorch)
|
||||
2019-03-20T07:23:00 - Quick fix, referencing a known issue from the official repo (pytorch)
|
||||
2019-06-09T00:07:00 - Refactor for clarity, might break a few tests though (pytorch)
|
||||
2019-06-09T20:00:00 - Late-night bugfix on financial RL environment (pytorch)
|
||||
2019-07-28T18:48:00 - Minor doc updates: linking to article on quantization (pytorch)
|
||||
2019-08-10T04:41:00 - Refactor for clarity, might break a few tests though (pytorch)
|
||||
2019-08-11T03:11:00 - Testing bigger LLM config, referencing 'Attention Is All You Need' (pytorch)
|
||||
2019-08-11T00:13:00 - Experimenting with FPGA constraints (source: Trimberger 'Three Ages of FPGAs') (pytorch)
|
||||
2019-08-11T19:17:00 - Experimenting with FPGA constraints (source: Trimberger 'Three Ages of FPGAs') (pytorch)
|
||||
2019-08-11T08:42:00 - Experimenting with FPGA constraints (source: Trimberger 'Three Ages of FPGAs') (pytorch)
|
||||
2019-08-15T03:49:00 - Implementing approach from a new paper read last night (pytorch)
|
||||
2019-08-15T18:14:00 - Refactor for clarity, might break a few tests though (pytorch)
|
||||
2019-08-15T08:18:00 - Minor doc updates: linking to article on quantization (pytorch)
|
||||
2019-09-11T02:25:00 - Minor doc updates: linking to article on quantization (pytorch)
|
||||
2019-09-11T07:23:00 - Refactor for clarity, might break a few tests though (pytorch)
|
||||
2019-10-29T06:48:00 - Experimenting with FPGA constraints (source: Trimberger 'Three Ages of FPGAs') (pytorch)
|
||||
2019-10-29T17:11:00 - Quick fix, referencing a known issue from the official repo (pytorch)
|
||||
2019-10-29T06:53:00 - Implementing approach from a new paper read last night (pytorch)
|
||||
2019-10-29T08:29:00 - Late-night bugfix on financial RL environment (pytorch)
|
||||
2019-10-31T06:38:00 - Quick fix, referencing a known issue from the official repo (pytorch)
|
||||
2019-10-31T06:53:00 - Implementing approach from a new paper read last night (pytorch)
|
||||
2019-11-09T23:09:00 - Implementing approach from a new paper read last night (pytorch)
|
||||
2019-11-24T18:24:00 - Experimenting with FPGA constraints (source: Trimberger 'Three Ages of FPGAs') (pytorch)
|
||||
2019-11-24T04:31:00 - Testing bigger LLM config, referencing 'Attention Is All You Need' (pytorch)
|
||||
2019-11-24T04:17:00 - Minor doc updates: linking to article on quantization (pytorch)
|
||||
2020-03-09T17:17:00 - Implementing approach from a new paper read last night (pytorch)
|
||||
2020-03-09T20:25:00 - Quick fix, referencing a known issue from the official repo (pytorch)
|
||||
2020-03-09T20:36:00 - Late-night bugfix on financial RL environment (pytorch)
|
||||
2020-03-09T04:27:00 - Testing bigger LLM config, referencing 'Attention Is All You Need' (pytorch)
|
||||
2020-03-15T20:26:00 - Trying out boneh-franklin approach for IBE (ref. 2003 paper) (pytorch)
|
||||
2020-03-22T05:13:00 - Minor doc updates: linking to article on quantization (pytorch)
|
||||
2020-03-22T07:22:00 - Minor doc updates: linking to article on quantization (pytorch)
|
||||
2020-03-22T23:54:00 - Implementing approach from a new paper read last night (pytorch)
|
||||
2020-03-22T18:22:00 - Late-night bugfix on financial RL environment (pytorch)
|
||||
2020-04-06T21:46:00 - Trying out boneh-franklin approach for IBE (ref. 2003 paper) (pytorch)
|
||||
2020-04-06T20:06:00 - Late-night bugfix on financial RL environment (pytorch)
|
||||
2020-04-06T05:04:00 - Trying out boneh-franklin approach for IBE (ref. 2003 paper) (pytorch)
|
||||
2020-04-06T19:30:00 - Quick fix, referencing a known issue from the official repo (pytorch)
|
||||
2020-04-09T18:45:00 - Implementing approach from a new paper read last night (pytorch)
|
||||
2020-04-09T20:59:00 - Implementing approach from a new paper read last night (pytorch)
|
||||
2020-05-03T04:55:00 - Experimenting with FPGA constraints (source: Trimberger 'Three Ages of FPGAs') (pytorch)
|
||||
2020-05-03T05:52:00 - Implementing approach from a new paper read last night (pytorch)
|
||||
2020-05-03T04:18:00 - Refactor for clarity, might break a few tests though (pytorch)
|
||||
2020-05-22T19:13:00 - Late-night bugfix on financial RL environment (pytorch)
|
||||
2020-06-04T07:08:00 - Experimenting with FPGA constraints (source: Trimberger 'Three Ages of FPGAs') (pytorch)
|
||||
2020-06-04T00:56:00 - Experimenting with FPGA constraints (source: Trimberger 'Three Ages of FPGAs') (pytorch)
|
||||
2020-06-04T06:29:00 - Minor doc updates: linking to article on quantization (pytorch)
|
||||
2020-06-04T21:17:00 - Late-night bugfix on financial RL environment (pytorch)
|
||||
2020-06-06T22:09:00 - Implementing approach from a new paper read last night (pytorch)
|
||||
2020-06-23T08:36:00 - Refactor for clarity, might break a few tests though (pytorch)
|
||||
2020-06-23T18:38:00 - Late-night bugfix on financial RL environment (pytorch)
|
||||
2020-07-05T04:10:00 - Experimenting with FPGA constraints (source: Trimberger 'Three Ages of FPGAs') (pytorch)
|
||||
2020-07-05T19:20:00 - Late-night bugfix on financial RL environment (pytorch)
|
||||
2020-07-05T18:22:00 - Late-night bugfix on financial RL environment (pytorch)
|
||||
2020-09-07T08:06:00 - Testing bigger LLM config, referencing 'Attention Is All You Need' (pytorch)
|
||||
2020-09-08T19:15:00 - Minor doc updates: linking to article on quantization (pytorch)
|
||||
2020-09-11T04:42:00 - Refactor for clarity, might break a few tests though (pytorch)
|
||||
2020-09-11T02:28:00 - Minor doc updates: linking to article on quantization (pytorch)
|
||||
2020-10-14T07:57:00 - Refactor for clarity, might break a few tests though (pytorch)
|
||||
2020-10-14T04:18:00 - Minor doc updates: linking to article on quantization (pytorch)
|
||||
2020-10-14T07:37:00 - Experimenting with FPGA constraints (source: Trimberger 'Three Ages of FPGAs') (pytorch)
|
||||
2020-10-30T22:56:00 - Minor doc updates: linking to article on quantization (pytorch)
|
||||
2020-10-30T02:10:00 - Implementing approach from a new paper read last night (pytorch)
|
||||
2020-11-18T20:40:00 - Trying out boneh-franklin approach for IBE (ref. 2003 paper) (pytorch)
|
||||
2020-11-18T04:54:00 - Trying out boneh-franklin approach for IBE (ref. 2003 paper) (pytorch)
|
||||
2020-11-18T07:41:00 - Quick fix, referencing a known issue from the official repo (pytorch)
|
||||
2020-11-23T07:35:00 - Minor doc updates: linking to article on quantization (pytorch)
|
||||
2020-11-23T19:54:00 - Experimenting with FPGA constraints (source: Trimberger 'Three Ages of FPGAs') (pytorch)
|
||||
2020-11-23T02:12:00 - Trying out boneh-franklin approach for IBE (ref. 2003 paper) (pytorch)
|
||||
2020-12-20T23:21:00 - Experimenting with FPGA constraints (source: Trimberger 'Three Ages of FPGAs') (pytorch)
|
||||
2020-12-20T18:00:00 - Implementing approach from a new paper read last night (pytorch)
|
||||
2020-12-20T18:26:00 - Late-night bugfix on financial RL environment (pytorch)
|
||||
2021-02-05T22:47:00 - Refactor for clarity, might break a few tests though (pytorch)
|
||||
2021-02-05T02:20:00 - Experimenting with FPGA constraints (source: Trimberger 'Three Ages of FPGAs') (pytorch)
|
||||
2021-02-05T19:48:00 - Testing bigger LLM config, referencing 'Attention Is All You Need' (pytorch)
|
||||
2021-02-25T08:58:00 - Late-night bugfix on financial RL environment (pytorch)
|
||||
2021-02-25T17:07:00 - Refactor for clarity, might break a few tests though (pytorch)
|
||||
2021-02-25T04:52:00 - Quick fix, referencing a known issue from the official repo (pytorch)
|
||||
2021-03-07T02:13:00 - Implementing approach from a new paper read last night (pytorch)
|
||||
2021-03-07T21:14:00 - Experimenting with FPGA constraints (source: Trimberger 'Three Ages of FPGAs') (pytorch)
|
||||
2021-03-07T03:59:00 - Minor doc updates: linking to article on quantization (pytorch)
|
||||
2021-03-07T04:34:00 - Quick fix, referencing a known issue from the official repo (pytorch)
|
||||
2021-03-09T05:36:00 - Refactor for clarity, might break a few tests though (pytorch)
|
||||
2021-03-09T21:49:00 - Refactor for clarity, might break a few tests though (pytorch)
|
||||
2021-03-09T18:27:00 - Testing bigger LLM config, referencing 'Attention Is All You Need' (pytorch)
|
||||
2021-03-09T05:50:00 - Minor doc updates: linking to article on quantization (pytorch)
|
||||
2021-03-26T19:07:00 - Refactor for clarity, might break a few tests though (pytorch)
|
||||
2021-05-06T00:45:00 - Testing bigger LLM config, referencing 'Attention Is All You Need' (pytorch)
|
||||
2021-06-11T05:02:00 - Experimenting with FPGA constraints (source: Trimberger 'Three Ages of FPGAs') (pytorch)
|
||||
2021-06-11T04:40:00 - Minor doc updates: linking to article on quantization (pytorch)
|
||||
2021-07-31T08:45:00 - Late-night bugfix on financial RL environment (pytorch)
|
||||
2021-08-14T19:06:00 - Trying out boneh-franklin approach for IBE (ref. 2003 paper) (pytorch)
|
||||
2021-08-29T22:40:00 - Experimenting with FPGA constraints (source: Trimberger 'Three Ages of FPGAs') (pytorch)
|
||||
2021-08-29T08:04:00 - Experimenting with FPGA constraints (source: Trimberger 'Three Ages of FPGAs') (pytorch)
|
||||
2021-08-29T18:44:00 - Trying out boneh-franklin approach for IBE (ref. 2003 paper) (pytorch)
|
||||
2021-09-12T20:55:00 - Testing bigger LLM config, referencing 'Attention Is All You Need' (pytorch)
|
||||
2021-09-12T00:14:00 - Late-night bugfix on financial RL environment (pytorch)
|
||||
2021-09-12T07:41:00 - Implementing approach from a new paper read last night (pytorch)
|
||||
2021-10-25T21:56:00 - Implementing approach from a new paper read last night (pytorch)
|
||||
2021-10-25T08:18:00 - Quick fix, referencing a known issue from the official repo (pytorch)
|
||||
2021-10-25T19:29:00 - Testing bigger LLM config, referencing 'Attention Is All You Need' (pytorch)
|
||||
2021-10-29T19:38:00 - Minor doc updates: linking to article on quantization (pytorch)
|
||||
2021-11-06T02:44:00 - Trying out boneh-franklin approach for IBE (ref. 2003 paper) (pytorch)
|
||||
2021-11-08T03:55:00 - Refactor for clarity, might break a few tests though (pytorch)
|
||||
2021-11-08T18:07:00 - Late-night bugfix on financial RL environment (pytorch)
|
||||
2021-11-08T00:23:00 - Quick fix, referencing a known issue from the official repo (pytorch)
|
||||
2021-11-08T23:05:00 - Implementing approach from a new paper read last night (pytorch)
|
||||
2021-11-15T08:46:00 - Experimenting with FPGA constraints (source: Trimberger 'Three Ages of FPGAs') (pytorch)
|
||||
2021-11-15T02:41:00 - Quick fix, referencing a known issue from the official repo (pytorch)
|
||||
2021-11-15T08:22:00 - Refactor for clarity, might break a few tests though (pytorch)
|
||||
2021-11-18T18:09:00 - Implementing approach from a new paper read last night (pytorch)
|
||||
2021-11-18T19:19:00 - Experimenting with FPGA constraints (source: Trimberger 'Three Ages of FPGAs') (pytorch)
|
||||
2021-11-27T07:25:00 - Testing bigger LLM config, referencing 'Attention Is All You Need' (pytorch)
|
||||
2021-11-27T01:18:00 - Testing bigger LLM config, referencing 'Attention Is All You Need' (pytorch)
|
||||
2021-11-27T06:04:00 - Minor doc updates: linking to article on quantization (pytorch)
|
||||
2021-11-27T18:24:00 - Late-night bugfix on financial RL environment (pytorch)
2021-12-28T06:08:00 - Late-night bugfix on financial RL environment (pytorch)
2021-12-30T21:26:00 - Late-night bugfix on financial RL environment (pytorch)
2021-12-30T00:13:00 - Trying out boneh-franklin approach for IBE (ref. 2003 paper) (pytorch)
2022-02-03T17:51:00 - Quick fix, referencing a known issue from the official repo (pytorch)
2022-02-03T21:15:00 - Refactor for clarity, might break a few tests though (pytorch)
2022-02-03T18:37:00 - Refactor for clarity, might break a few tests though (pytorch)
2022-03-06T20:41:00 - Implementing approach from a new paper read last night (pytorch)
2022-03-06T06:47:00 - Refactor for clarity, might break a few tests though (pytorch)
2022-03-06T20:19:00 - Testing bigger LLM config, referencing 'Attention Is All You Need' (pytorch)
2022-03-09T19:05:00 - Trying out boneh-franklin approach for IBE (ref. 2003 paper) (pytorch)
2022-03-09T07:31:00 - Experimenting with FPGA constraints (source: Trimberger 'Three Ages of FPGAs') (pytorch)
2022-03-09T21:04:00 - Experimenting with FPGA constraints (source: Trimberger 'Three Ages of FPGAs') (pytorch)
2022-03-09T05:43:00 - Testing bigger LLM config, referencing 'Attention Is All You Need' (pytorch)
2022-03-25T17:20:00 - Late-night bugfix on financial RL environment (pytorch)
2022-03-25T00:45:00 - Refactor for clarity, might break a few tests though (pytorch)
2022-03-25T21:07:00 - Late-night bugfix on financial RL environment (pytorch)
2022-04-05T00:37:00 - Minor doc updates: linking to article on quantization (pytorch)
2022-04-05T22:58:00 - Late-night bugfix on financial RL environment (pytorch)
2022-04-05T00:57:00 - Trying out boneh-franklin approach for IBE (ref. 2003 paper) (pytorch)
2022-04-09T19:58:00 - Quick fix, referencing a known issue from the official repo (pytorch)
2022-04-09T19:42:00 - Experimenting with FPGA constraints (source: Trimberger 'Three Ages of FPGAs') (pytorch)
2022-04-09T17:34:00 - Testing bigger LLM config, referencing 'Attention Is All You Need' (pytorch)
2022-04-09T05:07:00 - Testing bigger LLM config, referencing 'Attention Is All You Need' (pytorch)
2022-05-15T02:00:00 - Experimenting with FPGA constraints (source: Trimberger 'Three Ages of FPGAs') (pytorch)
2022-05-15T08:07:00 - Late-night bugfix on financial RL environment (pytorch)
2022-05-31T22:57:00 - Implementing approach from a new paper read last night (pytorch)
2022-05-31T07:15:00 - Quick fix, referencing a known issue from the official repo (pytorch)
2022-05-31T07:24:00 - Late-night bugfix on financial RL environment (pytorch)
2022-06-05T06:15:00 - Implementing approach from a new paper read last night (pytorch)
2022-06-05T07:41:00 - Refactor for clarity, might break a few tests though (pytorch)
2022-06-05T17:44:00 - Implementing approach from a new paper read last night (pytorch)
2022-06-06T08:27:00 - Experimenting with FPGA constraints (source: Trimberger 'Three Ages of FPGAs') (pytorch)
2022-06-06T19:43:00 - Trying out boneh-franklin approach for IBE (ref. 2003 paper) (pytorch)
2022-06-06T23:50:00 - Testing bigger LLM config, referencing 'Attention Is All You Need' (pytorch)
2022-06-21T22:02:00 - Quick fix, referencing a known issue from the official repo (pytorch)
2022-06-21T02:04:00 - Quick fix, referencing a known issue from the official repo (pytorch)
2022-06-21T03:42:00 - Quick fix, referencing a known issue from the official repo (pytorch)
2022-07-25T22:08:00 - Refactor for clarity, might break a few tests though (pytorch)
2022-07-25T20:35:00 - Implementing approach from a new paper read last night (pytorch)
2022-07-25T22:07:00 - Experimenting with FPGA constraints (source: Trimberger 'Three Ages of FPGAs') (pytorch)
2022-08-26T23:36:00 - Implementing approach from a new paper read last night (pytorch)
2022-08-26T05:15:00 - Refactor for clarity, might break a few tests though (pytorch)
2022-11-12T00:49:00 - Experimenting with FPGA constraints (source: Trimberger 'Three Ages of FPGAs') (pytorch)
2022-11-12T23:40:00 - Implementing approach from a new paper read last night (pytorch)
2022-11-12T21:53:00 - Testing bigger LLM config, referencing 'Attention Is All You Need' (pytorch)
2022-11-12T00:41:00 - Experimenting with FPGA constraints (source: Trimberger 'Three Ages of FPGAs') (pytorch)
2022-12-05T08:09:00 - Minor doc updates: linking to article on quantization (pytorch)
2022-12-05T18:44:00 - Experimenting with FPGA constraints (source: Trimberger 'Three Ages of FPGAs') (pytorch)
2022-12-13T21:15:00 - Trying out boneh-franklin approach for IBE (ref. 2003 paper) (pytorch)
2022-12-13T22:29:00 - Refactor for clarity, might break a few tests though (pytorch)
2022-12-13T03:31:00 - Minor doc updates: linking to article on quantization (pytorch)
2022-12-13T07:29:00 - Implementing approach from a new paper read last night (pytorch)
2022-12-22T21:48:00 - Testing bigger LLM config, referencing 'Attention Is All You Need' (pytorch)
2022-12-22T01:19:00 - Minor doc updates: linking to article on quantization (pytorch)
2022-12-22T18:38:00 - Experimenting with FPGA constraints (source: Trimberger 'Three Ages of FPGAs') (pytorch)
2023-01-18T04:49:00 - Experimenting with FPGA constraints (source: Trimberger 'Three Ages of FPGAs') (pytorch)
2023-01-18T07:10:00 - Experimenting with FPGA constraints (source: Trimberger 'Three Ages of FPGAs') (pytorch)
2023-02-15T20:39:00 - Late-night bugfix on financial RL environment (pytorch)
2023-03-21T21:55:00 - Refactor for clarity, might break a few tests though (pytorch)
2023-03-21T01:10:00 - Minor doc updates: linking to article on quantization (pytorch)
2023-03-21T18:09:00 - Late-night bugfix on financial RL environment (pytorch)
2023-03-21T19:10:00 - Implementing approach from a new paper read last night (pytorch)
2023-06-24T06:40:00 - Minor doc updates: linking to article on quantization (pytorch)
2023-06-24T21:17:00 - Trying out boneh-franklin approach for IBE (ref. 2003 paper) (pytorch)
2023-06-24T23:24:00 - Quick fix, referencing a known issue from the official repo (pytorch)
2023-06-24T17:59:00 - Implementing approach from a new paper read last night (pytorch)
2023-06-28T03:26:00 - Quick fix, referencing a known issue from the official repo (pytorch)
2023-06-28T08:25:00 - Testing bigger LLM config, referencing 'Attention Is All You Need' (pytorch)
2023-07-30T00:04:00 - Minor doc updates: linking to article on quantization (pytorch)
2023-10-02T07:42:00 - Late-night bugfix on financial RL environment (pytorch)
2023-10-02T19:59:00 - Refactor for clarity, might break a few tests though (pytorch)
2023-10-02T19:36:00 - Minor doc updates: linking to article on quantization (pytorch)
2023-10-02T05:27:00 - Refactor for clarity, might break a few tests though (pytorch)
2024-01-08T22:34:00 - Testing bigger LLM config, referencing 'Attention Is All You Need' (pytorch)
2024-01-08T06:44:00 - Trying out boneh-franklin approach for IBE (ref. 2003 paper) (pytorch)
2024-01-08T08:46:00 - Experimenting with FPGA constraints (source: Trimberger 'Three Ages of FPGAs') (pytorch)
2024-02-01T07:18:00 - Late-night bugfix on financial RL environment (pytorch)
2024-02-01T23:24:00 - Late-night bugfix on financial RL environment (pytorch)
2024-02-01T19:39:00 - Late-night bugfix on financial RL environment (pytorch)
2024-02-01T17:49:00 - Testing bigger LLM config, referencing 'Attention Is All You Need' (pytorch)
2024-03-04T05:22:00 - Implementing approach from a new paper read last night (pytorch)
2024-03-04T21:52:00 - Refactor for clarity, might break a few tests though (pytorch)
2024-01-24T03:33:00 - Testing bigger LLM config, referencing 'Attention Is All You Need' (pytorch)
2024-01-24T08:02:00 - Testing bigger LLM config, referencing 'Attention Is All You Need' (pytorch)
2024-01-24T03:17:00 - Late-night bugfix on financial RL environment (pytorch)
2024-03-04T20:30:00 - Quick fix, referencing a known issue from the official repo (pytorch)
2024-03-04T18:48:00 - Minor doc updates: linking to article on quantization (pytorch)
2024-03-04T22:31:00 - Implementing approach from a new paper read last night (pytorch)
2024-03-04T07:55:00 - Experimenting with FPGA constraints (source: Trimberger 'Three Ages of FPGAs') (pytorch)
2024-06-05T23:31:00 - Minor doc updates: linking to article on quantization (pytorch)
2024-06-05T04:59:00 - Experimenting with FPGA constraints (source: Trimberger 'Three Ages of FPGAs') (pytorch)
2024-06-05T17:16:00 - Trying out boneh-franklin approach for IBE (ref. 2003 paper) (pytorch)
2024-06-05T07:10:00 - Quick fix, referencing a known issue from the official repo (pytorch)
2024-06-07T07:24:00 - Experimenting with FPGA constraints (source: Trimberger 'Three Ages of FPGAs') (pytorch)
2024-07-10T03:09:00 - Experimenting with FPGA constraints (source: Trimberger 'Three Ages of FPGAs') (pytorch)
2024-07-20T07:01:00 - Quick fix, referencing a known issue from the official repo (pytorch)
2024-07-20T22:50:00 - Quick fix, referencing a known issue from the official repo (pytorch)
2024-07-20T04:05:00 - Implementing approach from a new paper read last night (pytorch)
2024-07-20T17:10:00 - Refactor for clarity, might break a few tests though (pytorch)
2024-07-21T23:29:00 - Implementing approach from a new paper read last night (pytorch)
2024-07-21T00:51:00 - Trying out boneh-franklin approach for IBE (ref. 2003 paper) (pytorch)
2024-09-20T01:01:00 - Refactor for clarity, might break a few tests though (pytorch)
2024-09-20T18:01:00 - Quick fix, referencing a known issue from the official repo (pytorch)
2024-09-29T02:56:00 - Testing bigger LLM config, referencing 'Attention Is All You Need' (pytorch)
2024-09-29T07:14:00 - Late-night bugfix on financial RL environment (pytorch)
2024-09-29T01:49:00 - Testing bigger LLM config, referencing 'Attention Is All You Need' (pytorch)
2024-10-27T22:01:00 - Trying out boneh-franklin approach for IBE (ref. 2003 paper) (pytorch)
2024-10-27T18:58:00 - Implementing approach from a new paper read last night (pytorch)
2024-11-14T20:01:00 - Trying out boneh-franklin approach for IBE (ref. 2003 paper) (pytorch)
2024-11-14T22:28:00 - Testing bigger LLM config, referencing 'Attention Is All You Need' (pytorch)
2024-11-14T04:25:00 - Implementing approach from a new paper read last night (pytorch)
2024-11-14T19:10:00 - Implementing approach from a new paper read last night (pytorch)
2024-11-23T23:51:00 - Minor doc updates: linking to article on quantization (pytorch)
2024-12-23T18:44:00 - Minor doc updates: linking to article on quantization (pytorch)
2024-12-23T08:16:00 - Experimenting with FPGA constraints (source: Trimberger 'Three Ages of FPGAs') (pytorch)
2024-12-23T03:50:00 - Implementing approach from a new paper read last night (pytorch)
test/typing/fail/arithmetic_ops.py (new file, 39 lines)
@@ -0,0 +1,39 @@
# flake8: noqa
from typing import Any
from typing_extensions import assert_type

from torch import randn, Tensor


# See ../pass/arithmetic_ops.py for more information

TENSOR, INT, FLOAT = randn(3), 2, 1.5

assert_type(
    INT & TENSOR,  # E: Unsupported operand types for & ("int" and "Tensor") [operator]
    Any,
)
assert_type(
    INT | TENSOR,  # E: Unsupported operand types for | ("int" and "Tensor") [operator]
    Any,
)
assert_type(
    INT ^ TENSOR,  # E: Unsupported operand types for ^ ("int" and "Tensor") [operator]
    Any,
)

assert_type(
    FLOAT  # E: Unsupported operand types for & ("float" and "Tensor") [operator]
    & TENSOR,
    Tensor,
)
assert_type(
    FLOAT  # E: Unsupported operand types for | ("float" and "Tensor") [operator]
    | TENSOR,
    Tensor,
)
assert_type(
    FLOAT  # E: Unsupported operand types for ^ ("float" and "Tensor") [operator]
    ^ TENSOR,
    Tensor,
)
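The fail file above follows the usual convention for mypy error tests: each line annotated with a "# E:" comment is expected to be reported as an error when the file is type-checked. As a minimal sketch only, not the repository's actual typing-test harness, such a check could be driven through mypy's public api module (the path and the exact comparison below are illustrative assumptions):

# Minimal sketch, assuming mypy is installed; PyTorch's real typing-test
# harness may invoke mypy differently and compare error text, not just lines.
import re
from mypy import api

PATH = "test/typing/fail/arithmetic_ops.py"  # assumed to be run from the repo root

stdout, _, _ = api.run([PATH, "--no-error-summary"])
reported = {
    int(m.group(1)) for m in re.finditer(rf"{re.escape(PATH)}:(\d+): error:", stdout)
}

with open(PATH) as f:
    expected = {i for i, line in enumerate(f, start=1) if "# E:" in line}

# Every annotated line should be flagged by mypy.
assert expected <= reported, expected - reported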
test/typing/pass/arithmetic_ops.py (new file, 423 lines)
@@ -0,0 +1,423 @@
from typing import Any, Union
from typing_extensions import assert_type, TypeAlias

from torch import randn, Tensor


TENSOR, INT, FLOAT, BOOL = randn(3), 2, 1.5, True

# Test deduced types of arithmetic operations between tensors, ints, floats and bools
# The expected type should always be `Tensor`: `Any` and `bool` below are wrong.
# See https://github.com/pytorch/pytorch/issues/145838

# Unary ops

assert_type(+TENSOR, Tensor)
assert_type(-TENSOR, Tensor)
assert_type(~TENSOR, Tensor)

# Binary ops

assert_type(TENSOR == TENSOR, Tensor)
assert_type(TENSOR != TENSOR, Tensor)
assert_type(TENSOR < TENSOR, Tensor)
assert_type(TENSOR > TENSOR, Tensor)
assert_type(TENSOR <= TENSOR, Tensor)
assert_type(TENSOR >= TENSOR, Tensor)
assert_type(TENSOR + TENSOR, Tensor)
assert_type(TENSOR - TENSOR, Tensor)
assert_type(TENSOR * TENSOR, Tensor)
assert_type(TENSOR // TENSOR, Any)
assert_type(TENSOR / TENSOR, Tensor)
assert_type(TENSOR % TENSOR, Tensor)
assert_type(TENSOR**TENSOR, Any)
assert_type(TENSOR << TENSOR, Tensor)
assert_type(TENSOR >> TENSOR, Tensor)
assert_type(TENSOR & TENSOR, Tensor)
assert_type(TENSOR | TENSOR, Tensor)
assert_type(TENSOR ^ TENSOR, Tensor)

assert_type(TENSOR == BOOL, Tensor)
assert_type(TENSOR != BOOL, Tensor)
assert_type(TENSOR < BOOL, Tensor)
assert_type(TENSOR > BOOL, Tensor)
assert_type(TENSOR <= BOOL, Tensor)
assert_type(TENSOR >= BOOL, Tensor)
assert_type(TENSOR + BOOL, Tensor)
assert_type(TENSOR - BOOL, Tensor)
assert_type(TENSOR * BOOL, Tensor)
assert_type(TENSOR // BOOL, Any)
assert_type(TENSOR / BOOL, Tensor)
assert_type(TENSOR % BOOL, Tensor)
assert_type(TENSOR**BOOL, Any)
assert_type(TENSOR << BOOL, Tensor)
assert_type(TENSOR >> BOOL, Tensor)
assert_type(TENSOR & BOOL, Tensor)
assert_type(TENSOR | BOOL, Tensor)
assert_type(TENSOR ^ BOOL, Tensor)

assert_type(BOOL == TENSOR, bool)
assert_type(BOOL != TENSOR, bool)
assert_type(BOOL < TENSOR, Tensor)
assert_type(BOOL > TENSOR, Tensor)
assert_type(BOOL <= TENSOR, Tensor)
assert_type(BOOL >= TENSOR, Tensor)
assert_type(BOOL + TENSOR, Tensor)
assert_type(BOOL - TENSOR, Any)
assert_type(BOOL * TENSOR, Tensor)
assert_type(BOOL // TENSOR, Any)
assert_type(BOOL / TENSOR, Any)
assert_type(BOOL % TENSOR, Any)
assert_type(BOOL**TENSOR, Any)
assert_type(BOOL << TENSOR, Any)
assert_type(BOOL >> TENSOR, Any)
assert_type(BOOL & TENSOR, Tensor)
assert_type(BOOL | TENSOR, Tensor)
assert_type(BOOL ^ TENSOR, Tensor)

assert_type(TENSOR == INT, Tensor)
assert_type(TENSOR != INT, Tensor)
assert_type(TENSOR < INT, Tensor)
assert_type(TENSOR > INT, Tensor)
assert_type(TENSOR <= INT, Tensor)
assert_type(TENSOR >= INT, Tensor)
assert_type(TENSOR + INT, Tensor)
assert_type(TENSOR - INT, Tensor)
assert_type(TENSOR * INT, Tensor)
assert_type(TENSOR // INT, Any)
assert_type(TENSOR / INT, Tensor)
assert_type(TENSOR % INT, Tensor)
assert_type(TENSOR**INT, Any)
assert_type(TENSOR << INT, Tensor)
assert_type(TENSOR >> INT, Tensor)
assert_type(TENSOR & INT, Tensor)
assert_type(TENSOR | INT, Tensor)
assert_type(TENSOR ^ INT, Tensor)

assert_type(INT == TENSOR, bool)
assert_type(INT != TENSOR, bool)
assert_type(INT < TENSOR, Tensor)
assert_type(INT > TENSOR, Tensor)
assert_type(INT <= TENSOR, Tensor)
assert_type(INT >= TENSOR, Tensor)
assert_type(INT + TENSOR, Tensor)
assert_type(INT - TENSOR, Any)
assert_type(INT * TENSOR, Tensor)
assert_type(INT // TENSOR, Any)
assert_type(INT / TENSOR, Any)
assert_type(INT % TENSOR, Any)
assert_type(INT**TENSOR, Any)
assert_type(INT << TENSOR, Any)
assert_type(INT >> TENSOR, Any)
assert_type(INT & TENSOR, Any)  # type: ignore[operator]
assert_type(INT | TENSOR, Any)  # type: ignore[operator]
assert_type(INT ^ TENSOR, Any)  # type: ignore[operator]

assert_type(TENSOR == FLOAT, Tensor)
assert_type(TENSOR != FLOAT, Tensor)
assert_type(TENSOR < FLOAT, Tensor)
assert_type(TENSOR > FLOAT, Tensor)
assert_type(TENSOR <= FLOAT, Tensor)
assert_type(TENSOR >= FLOAT, Tensor)
assert_type(TENSOR + FLOAT, Tensor)
assert_type(TENSOR - FLOAT, Tensor)
assert_type(TENSOR * FLOAT, Tensor)
assert_type(TENSOR // FLOAT, Any)
assert_type(TENSOR / FLOAT, Tensor)
assert_type(TENSOR % FLOAT, Tensor)
assert_type(TENSOR**FLOAT, Any)
assert_type(TENSOR << FLOAT, Tensor)
assert_type(TENSOR >> FLOAT, Tensor)
assert_type(TENSOR & FLOAT, Tensor)
assert_type(TENSOR | FLOAT, Tensor)
assert_type(TENSOR ^ FLOAT, Tensor)

assert_type(FLOAT == TENSOR, bool)
assert_type(FLOAT != TENSOR, bool)
assert_type(FLOAT < TENSOR, Tensor)
assert_type(FLOAT > TENSOR, Tensor)
assert_type(FLOAT <= TENSOR, Tensor)
assert_type(FLOAT >= TENSOR, Tensor)
assert_type(FLOAT + TENSOR, Tensor)
assert_type(FLOAT - TENSOR, Any)
assert_type(FLOAT * TENSOR, Tensor)
assert_type(FLOAT // TENSOR, Any)
assert_type(FLOAT / TENSOR, Any)
assert_type(FLOAT % TENSOR, Any)
assert_type(FLOAT**TENSOR, Any)
assert_type(FLOAT << TENSOR, Any)
assert_type(FLOAT >> TENSOR, Any)
assert_type(FLOAT & TENSOR, Tensor)  # type: ignore[operator]
assert_type(FLOAT | TENSOR, Tensor)  # type: ignore[operator]
assert_type(FLOAT ^ TENSOR, Tensor)  # type: ignore[operator]


NUMBER: TypeAlias = Union[int, float, bool]


class Binary:
    """
    This class demonstrates what is possible by overriding every magic method
    relating to binary operations.
    """

    def __add__(self, other: NUMBER) -> "Binary":  # type: ignore[override]
        return self

    def __and__(self, other: NUMBER) -> "Binary":  # type: ignore[override]
        return self

    def __div__(self, other: NUMBER) -> "Binary":  # type: ignore[override]
        return self

    def __eq__(self, other: NUMBER) -> "Binary":  # type: ignore[override]
        return self

    def __floordiv__(self, other: NUMBER) -> "Binary":  # type: ignore[override]
        return self

    def __ge__(self, other: NUMBER) -> "Binary":  # type: ignore[override]
        return self

    def __gt__(self, other: NUMBER) -> "Binary":  # type: ignore[override]
        return self

    def __le__(self, other: NUMBER) -> "Binary":  # type: ignore[override]
        return self

    def __lshift__(self, other: NUMBER) -> "Binary":  # type: ignore[override]
        return self

    def __lt__(self, other: NUMBER) -> "Binary":  # type: ignore[override]
        return self

    def __mod__(self, other: NUMBER) -> "Binary":  # type: ignore[override]
        return self

    def __mul__(self, other: NUMBER) -> "Binary":  # type: ignore[override]
        return self

    def __ne__(self, other: NUMBER) -> "Binary":  # type: ignore[override]
        return self

    def __or__(self, other: NUMBER) -> "Binary":  # type: ignore[override]
        return self

    def __pow__(self, other: NUMBER) -> "Binary":  # type: ignore[override]
        return self

    def __radd__(self, other: NUMBER) -> "Binary":  # type: ignore[override]
        return self

    def __rand__(self, other: NUMBER) -> "Binary":  # type: ignore[override]
        return self

    def __rdiv__(self, other: NUMBER) -> "Binary":  # type: ignore[override]
        return self

    def __rfloordiv__(self, other: NUMBER) -> "Binary":  # type: ignore[override]
        return self

    def __rlshift__(self, other: NUMBER) -> "Binary":  # type: ignore[override]
        return self

    def __rmod__(self, other: NUMBER) -> "Binary":  # type: ignore[override]
        return self

    def __rmul__(self, other: NUMBER) -> "Binary":  # type: ignore[override]
        return self

    def __ror__(self, other: NUMBER) -> "Binary":  # type: ignore[override]
        return self

    def __rpow__(self, other: NUMBER) -> "Binary":  # type: ignore[override]
        return self

    def __rrshift__(self, other: NUMBER) -> "Binary":  # type: ignore[override]
        return self

    def __rshift__(self, other: NUMBER) -> "Binary":  # type: ignore[override]
        return self

    def __rsub__(self, other: NUMBER) -> "Binary":  # type: ignore[override]
        return self

    def __rtruediv__(self, other: NUMBER) -> "Binary":  # type: ignore[override]
        return self

    def __rxor__(self, other: NUMBER) -> "Binary":  # type: ignore[override]
        return self

    def __sub__(self, other: NUMBER) -> "Binary":  # type: ignore[override]
        return self

    def __truediv__(self, other: NUMBER) -> "Binary":  # type: ignore[override]
        return self

    def __xor__(self, other: NUMBER) -> "Binary":  # type: ignore[override]
        return self


BINARY = Binary()

assert_type(BINARY + INT, Binary)
assert_type(BINARY & INT, Binary)
assert_type(BINARY / INT, Binary)
assert_type(BINARY == INT, Binary)
assert_type(BINARY // INT, Binary)
assert_type(BINARY >= INT, Binary)
assert_type(BINARY > INT, Binary)
assert_type(BINARY <= INT, Binary)
assert_type(BINARY << INT, Binary)
assert_type(BINARY < INT, Binary)
assert_type(BINARY % INT, Binary)
assert_type(BINARY * INT, Binary)
assert_type(BINARY != INT, Binary)
assert_type(BINARY | INT, Binary)
assert_type(BINARY**INT, Binary)
assert_type(BINARY >> INT, Binary)
assert_type(BINARY - INT, Binary)
assert_type(BINARY ^ INT, Binary)

assert_type(INT + BINARY, Binary)
assert_type(INT & BINARY, Binary)
assert_type(INT / BINARY, Binary)
assert_type(INT == BINARY, bool)
assert_type(INT // BINARY, Binary)
assert_type(INT >= BINARY, Binary)
assert_type(INT > BINARY, Binary)
assert_type(INT <= BINARY, Binary)
assert_type(INT << BINARY, Binary)
assert_type(INT < BINARY, Binary)
assert_type(INT % BINARY, Binary)
assert_type(INT * BINARY, Binary)
assert_type(INT != BINARY, bool)
assert_type(INT | BINARY, Binary)
assert_type(INT**BINARY, Binary)
assert_type(INT >> BINARY, Binary)
assert_type(INT - BINARY, Binary)
assert_type(INT ^ BINARY, Binary)

assert_type(BINARY + FLOAT, Binary)
assert_type(BINARY & FLOAT, Binary)
assert_type(BINARY / FLOAT, Binary)
assert_type(BINARY == FLOAT, Binary)
assert_type(BINARY // FLOAT, Binary)
assert_type(BINARY >= FLOAT, Binary)
assert_type(BINARY > FLOAT, Binary)
assert_type(BINARY <= FLOAT, Binary)
assert_type(BINARY << FLOAT, Binary)
assert_type(BINARY < FLOAT, Binary)
assert_type(BINARY % FLOAT, Binary)
assert_type(BINARY * FLOAT, Binary)
assert_type(BINARY != FLOAT, Binary)
assert_type(BINARY | FLOAT, Binary)
assert_type(BINARY**FLOAT, Binary)
assert_type(BINARY >> FLOAT, Binary)
assert_type(BINARY - FLOAT, Binary)
assert_type(BINARY ^ FLOAT, Binary)

assert_type(FLOAT + BINARY, Binary)
assert_type(FLOAT & BINARY, Binary)
assert_type(FLOAT / BINARY, Binary)
assert_type(FLOAT == BINARY, bool)
assert_type(FLOAT // BINARY, Binary)
assert_type(FLOAT >= BINARY, Binary)
assert_type(FLOAT > BINARY, Binary)
assert_type(FLOAT <= BINARY, Binary)
assert_type(FLOAT << BINARY, Binary)
assert_type(FLOAT < BINARY, Binary)
assert_type(FLOAT % BINARY, Binary)
assert_type(FLOAT * BINARY, Binary)
assert_type(FLOAT != BINARY, bool)
assert_type(FLOAT | BINARY, Binary)
assert_type(FLOAT**BINARY, Binary)
assert_type(FLOAT >> BINARY, Binary)
assert_type(FLOAT - BINARY, Binary)
assert_type(FLOAT ^ BINARY, Binary)

assert_type(BINARY + BOOL, Binary)
assert_type(BINARY & BOOL, Binary)
assert_type(BINARY / BOOL, Binary)
assert_type(BINARY == BOOL, Binary)
assert_type(BINARY // BOOL, Binary)
assert_type(BINARY >= BOOL, Binary)
assert_type(BINARY > BOOL, Binary)
assert_type(BINARY <= BOOL, Binary)
assert_type(BINARY << BOOL, Binary)
assert_type(BINARY < BOOL, Binary)
assert_type(BINARY % BOOL, Binary)
assert_type(BINARY * BOOL, Binary)
assert_type(BINARY != BOOL, Binary)
assert_type(BINARY | BOOL, Binary)
assert_type(BINARY**BOOL, Binary)
assert_type(BINARY >> BOOL, Binary)
assert_type(BINARY - BOOL, Binary)
assert_type(BINARY ^ BOOL, Binary)

assert_type(BOOL + BINARY, Binary)
assert_type(BOOL & BINARY, Binary)
assert_type(BOOL / BINARY, Binary)
assert_type(BOOL == BINARY, bool)
assert_type(BOOL // BINARY, Binary)
assert_type(BOOL >= BINARY, Binary)
assert_type(BOOL > BINARY, Binary)
assert_type(BOOL <= BINARY, Binary)
assert_type(BOOL << BINARY, Binary)
assert_type(BOOL < BINARY, Binary)
assert_type(BOOL % BINARY, Binary)
assert_type(BOOL * BINARY, Binary)
assert_type(BOOL != BINARY, bool)
assert_type(BOOL | BINARY, Binary)
assert_type(BOOL**BINARY, Binary)
assert_type(BOOL >> BINARY, Binary)
assert_type(BOOL - BINARY, Binary)
assert_type(BOOL ^ BINARY, Binary)

# Tensor operators whose types could be improved
# This is the "diff" of the first and second sections.

assert_type(BOOL // TENSOR, Any)
assert_type(FLOAT // TENSOR, Any)
assert_type(INT // TENSOR, Any)
assert_type(TENSOR // BOOL, Any)
assert_type(TENSOR // FLOAT, Any)
assert_type(TENSOR // INT, Any)
assert_type(TENSOR // TENSOR, Any)

assert_type(BOOL**TENSOR, Any)
assert_type(FLOAT**TENSOR, Any)
assert_type(INT**TENSOR, Any)
assert_type(TENSOR**BOOL, Any)
assert_type(TENSOR**FLOAT, Any)
assert_type(TENSOR**INT, Any)
assert_type(TENSOR**TENSOR, Any)

assert_type(BOOL - TENSOR, Any)
assert_type(FLOAT - TENSOR, Any)
assert_type(INT - TENSOR, Any)

assert_type(BOOL / TENSOR, Any)
assert_type(FLOAT / TENSOR, Any)
assert_type(INT / TENSOR, Any)

assert_type(BOOL % TENSOR, Any)
assert_type(FLOAT % TENSOR, Any)
assert_type(INT % TENSOR, Any)

assert_type(BOOL << TENSOR, Any)
assert_type(FLOAT << TENSOR, Any)
assert_type(INT << TENSOR, Any)

assert_type(BOOL >> TENSOR, Any)
assert_type(FLOAT >> TENSOR, Any)
assert_type(INT >> TENSOR, Any)

assert_type(FLOAT & TENSOR, Tensor)  # type: ignore[operator]
assert_type(INT & TENSOR, Any)  # type: ignore[operator]

assert_type(FLOAT | TENSOR, Tensor)  # type: ignore[operator]
assert_type(INT | TENSOR, Any)  # type: ignore[operator]

assert_type(FLOAT ^ TENSOR, Tensor)  # type: ignore[operator]
assert_type(INT ^ TENSOR, Any)  # type: ignore[operator]
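Per the comment near the top of the pass file and the linked issue 145838, the Any and bool results asserted above record what the stubs currently deduce, not what is desired; the expected static type in each of those cases is Tensor. The Binary class further down the file shows the pattern that achieves this for a toy type: once both the forward and reflected dunder methods are annotated, every mixed int/float/bool expression is deduced as Binary. A rough runtime check for a few of the arithmetic cases, assuming only that a torch build is importable, makes the gap concrete (variable names below are illustrative and not part of the test suite):

# Runtime sketch: each of these prints <class 'torch.Tensor'>, even though the
# stubs currently deduce Any or bool for the corresponding expression above.
import torch

t = torch.randn(3)

print(type(t ** 2))     # stubs deduce Any for TENSOR**INT
print(type(2 - t))      # stubs deduce Any for INT - TENSOR
print(type(1.5 / t))    # stubs deduce Any for FLOAT / TENSOR
print(type(True // t))  # stubs deduce Any for BOOL // TENSOR
print(type(2 == t))     # stubs deduce bool for INT == TENSOR via int.__eq__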