Skip to content

Commit 12a58cf

Browse files
authored
Fix run_tutorials code (#1552)
* Fix run_tutorials code Summary: Last script actually has some errors but didn't error out, this PR added the logic for the CI job to show error when some job fails and also fixed remaining code Test Plan: CI Reviewers: Subscribers: Tasks: Tags: * checking status code * script * more logs * tensor parallel check * change tp file check condition * deps * testing failing * update loop * try again * try again * done * restore * remove extra print
1 parent d57704c commit 12a58cf

File tree

5 files changed

+37
-12
lines changed

5 files changed

+37
-12
lines changed

.github/workflows/run_tutorials.yml

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -30,4 +30,4 @@ jobs:
3030
${CONDA_RUN} pip install -r dev-requirements.txt
3131
${CONDA_RUN} pip install .
3232
cd tutorials
33-
${CONDA_RUN} sh run_all.sh
33+
${CONDA_RUN} bash run_all.sh

dev-requirements.txt

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -21,6 +21,7 @@ lm_eval
2121
diskcache
2222
pycocotools
2323
tqdm
24+
importlib_metadata
2425

2526
# Custom CUDA Extensions
2627
ninja

tutorials/developer_api_guide/my_dtype_tensor_subclass.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -16,7 +16,7 @@
1616
Layout,
1717
PlainLayout,
1818
)
19-
from torchao.quantization.quant_primitives import (
19+
from torchao.quantization import (
2020
MappingType,
2121
choose_qparams_affine,
2222
dequantize_affine,

tutorials/developer_api_guide/my_trainable_tensor_subclass.py

Lines changed: 11 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -15,7 +15,11 @@
1515
from torch.utils._python_dispatch import return_and_correct_aliasing
1616

1717
from torchao.dtypes.utils import Layout, PlainLayout
18-
from torchao.quantization.quant_primitives import MappingType, choose_qparams_affine
18+
from torchao.quantization import (
19+
MappingType,
20+
choose_qparams_affine,
21+
quantize_affine,
22+
)
1923

2024
aten = torch.ops.aten
2125

@@ -40,10 +44,12 @@ def _quantize(
4044
Convert from a floating point tensor (fp32/fp16/bf16) to the desired dtype.
4145
"""
4246
mapping_type = MappingType.SYMMETRIC
43-
block_size = input_float.shape
44-
dtype = torch.int16
45-
scale, _ = choose_qparams_affine(input_float, mapping_type, block_size, dtype)
46-
int_data = (input_float / scale).to(torch.int8)
47+
block_size = (1, input_float.shape[-1])
48+
dtype = torch.int8
49+
scale, zero_point = choose_qparams_affine(
50+
input_float, mapping_type, block_size, dtype
51+
)
52+
int_data = quantize_affine(input_float, block_size, scale, zero_point, dtype)
4753
tensor_impl_ctr = cls.get_tensor_impl_constructor(type(_layout))
4854
return tensor_impl_ctr(int_data, scale, _layout)
4955

tutorials/run_all.sh

Lines changed: 23 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -1,19 +1,37 @@
11
#!/bin/bash
2-
find . -type d | while read dir; do
2+
FAILED=0
3+
for dir in $(find . -type d); do
34
if [ -f "$dir/run.sh" ]; then
45
echo "Running: $dir/run.sh"
5-
pushd "$dir"
6+
CURRENT_DIR=$(pwd)
7+
cd "$dir"
68
bash run.sh
7-
popd
9+
cd "$CURRENT_DIR"
810
else
9-
find "$dir" -maxdepth 1 -name "*.py" | while read file; do
10-
if [[ "$file" == *"tensor_parallel"* ]]; then
11+
for file in $(find "$dir" -maxdepth 1 -name "*.py"); do
12+
filename=$(basename "$file")
13+
if echo "$filename" | grep -q "tensor_parallel"; then
1114
echo "Running: torchrun --standalone --nnodes=1 --nproc-per-node=1 $file"
1215
torchrun --standalone --nnodes=1 --nproc-per-node=4 "$file"
16+
STATUS=$?
1317
else
1418
echo "Running: python $file"
1519
python "$file"
20+
STATUS=$?
21+
fi
22+
23+
if [ $STATUS -ne 0 ]; then
24+
FAILED=1
25+
echo "Test failed: $file"
1626
fi
1727
done
1828
fi
1929
done
30+
31+
if [ "$FAILED" -eq 1 ]; then
32+
echo "One or more tests failed"
33+
exit 1
34+
else
35+
echo "All tests passed"
36+
exit 0
37+
fi

0 commit comments

Comments
 (0)