Merged
4 changes: 3 additions & 1 deletion .gitignore
@@ -20,4 +20,6 @@ dist
# for pip install -e . --config-settings editable_mode=strict
build/

*.secret
*.secret

temp.dat
3 changes: 3 additions & 0 deletions notebook/notes.txt
@@ -17,6 +17,9 @@ to run tests:
to add new tests:
add anydice code in `.\test\autoouts\fetch_in.py`
`python .\test\autoouts\fetch.py --fetch`
to profile code (and visualize) (add code in example_parse.py):
pip install snakeviz
python -m cProfile -o temp.dat .\src\parser\example_parse.py ; snakeviz .\temp.dat
to build package (new):
update __version__ in __init__.py
commit and push changes
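For reference, the profiling steps added to the notes above can also be driven from inside Python rather than the shell. A minimal sketch, assuming the example script path from the notes and that snakeviz is installed; the visualization step still runs via `snakeviz temp.dat` afterwards:

import cProfile
import pstats
import runpy

# illustrative sketch, not part of the PR diff
profiler = cProfile.Profile()
profiler.enable()
runpy.run_path('src/parser/example_parse.py', run_name='__main__')  # run the example script under the profiler
profiler.disable()
profiler.dump_stats('temp.dat')  # same output file the notes feed to snakeviz
pstats.Stats('temp.dat').sort_stats('cumulative').print_stats(10)  # quick text summary without snakeviz
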
2 changes: 1 addition & 1 deletion src/__init__.py
@@ -1,4 +1,4 @@
__version__ = '0.3.4.dev1'
__version__ = '0.3.4.dev2'

# core classes
from .randvar import RV
28 changes: 21 additions & 7 deletions src/parser/example_parse.py
@@ -6,19 +6,32 @@

trials = [
r'''
A: {2*1..3}
output(1dA)
A: {(3.0*2.0)/1..(4.0*3.0)/1}
output(1dA)

A: {1d4, "water", "fire"}
output(2dA)
\
output
(10+1d10+24+3d8+4d10+1d8 + 1d10+7+2d10)*5 +
(10+1d10+24+3d8+6d10+1d8 + 1d10+7+3d10)*10 +
(10+1d10+24+3d8+8d10+1d8 + 1d10+7+4d10)*9 +
(10+1d10+24+3d8+8d10+1d8 + 10+1d10+24+8d10 + 1d10+7+4d10)*1 +
(10+24)*5 + 41d8+15 + 6d8+6 + 2d6+5d6
\
output 0
'''
]
# flags = {'COMPILER_FLAG_NON_LOCAL_SCOPE': True, 'COMPILER_FLAG_OPERATOR_ON_INT': True}
flags = {}


def run_dicecode():
start = time.time()
from dice_calc import roll, output
a = roll('40d10')
b = roll('40d10')
c = a + b + a + b + a + b + a + b + a + b # type: ignore
output(c > 40*5.5*10)
output(len(c.vals))

print('Time taken to run dicecode:', f'{time.time() - start:.2f}s')

def setup_logger(filename):
logging.basicConfig(filename=filename, level=logging.DEBUG, filemode='w', format='%(asctime)s - %(levelname)s - %(message)s')
logging.getLogger().addHandler(logging.StreamHandler())
@@ -29,6 +42,7 @@ def setup_logger(filename):


def main(trials=trials):
run_dicecode()
for to_parse in trials:
try:
# print('Parsing:', to_parse)
72 changes: 64 additions & 8 deletions src/randvar.py
@@ -41,15 +41,32 @@ def __init__(self, vals: Iterable[float], probs: Iterable[int], truncate=None):
@staticmethod
def _sort_and_group(vals: Iterable[float], probs: Iterable[int], skip_zero_probs, normalize):
assert all(isinstance(p, int) and p >= 0 for p in probs), 'probs must be non-negative integers'
zipped = sorted(zip(vals, probs), reverse=True)
zipped = RV._get_zip(vals, probs)
# print('before', len(zipped))
newzipped = RV._get_new_zipped(zipped, skip_zero_probs)
# print('after', len(newzipped))
return RV._get_normalized(newzipped, normalize)

@staticmethod
def _get_zip(v, p):
return sorted(zip(v, p), reverse=True)

@staticmethod
def _get_new_zipped(zipped, skip_zero_probs):
newzipped: list[tuple[float, int]] = []
for i in range(len(zipped) - 1, -1, -1):
if skip_zero_probs and zipped[i][1] == 0:
continue
if i > 0 and zipped[i][0] == zipped[i - 1][0]: # add the two probs, go to next
for i in range(len(zipped) - 1, 0, -1):
if zipped[i][0] == zipped[i - 1][0]: # add the two probs, go to next
zipped[i - 1] = (zipped[i - 1][0], zipped[i - 1][1] + zipped[i][1])
else:
newzipped.append(zipped[i])
if len(zipped) > 0:
newzipped.append(zipped[0])
if skip_zero_probs:
newzipped = [v for v in newzipped if v[1] != 0]
return newzipped

@staticmethod
def _get_normalized(newzipped, normalize):
vals = tuple(v[0] for v in newzipped)
probs = tuple(v[1] for v in newzipped)
if normalize:
@@ -186,8 +203,7 @@ def _convolve(self, other: T_ifsr, operation: Callable[[float, float], float]):
other = other.sum()
if not isinstance(other, MetaRV):
return RV([operation(v, other) for v in self.vals], self.probs)
new_vals = tuple(operation(v1, v2) for v1 in self.vals for v2 in other.vals)
new_probs = tuple(p1 * p2 for p1 in self.probs for p2 in other.probs)
new_vals, new_probs = _rdict.fast_convolve((self.vals, self.probs), (other.vals, other.probs), operation)
res = RV(new_vals, new_probs)
res = _INTERNAL_PROB_LIMIT_VALS(res)
return res
@@ -384,11 +400,51 @@ def _sum_at(orig: T_S, locs: T_S):
return sum(orig[int(i)] for i in locs)


class _rdict:
def __init__(self):
self.d = {}

def __setitem__(self, key, value):
# in below comparisons, __setitem__ is called 6 million times
# without using _rdict | 8.35 s
# super().__setitem__(key, self.get(key, 0) + value) # slowest code, self is subclass of dict | 4.43 s
# self.d[key] = self.d.get(key, 0) + value # slow code | 3.15 s
# fastest code | 2.08 s
if key in self.d:
self.d[key] += value
else:
self.d[key] = value

def to_tuples(self):
sorted_items = sorted(self.d.items())
keys, values = zip(*sorted_items) if sorted_items else ((), ())
return keys, values

@staticmethod
def fast_convolve(items1: tuple[tuple, tuple], items2: tuple[tuple, tuple], operation: Callable[[float, float], float]):
if operation == operator.add:
return _rdict.__fast_convolve_op_add(items1, items2)
d = _rdict()
for k1, v1 in zip(*items1):
for k2, v2 in zip(*items2):
d[operation(k1, k2)] = v1 * v2
return d.to_tuples()

@staticmethod
def __fast_convolve_op_add(items1, items2):
"""Since 'add' is the most common operation, we can optimize it by not calling operation() every iter of the N^2 algorithm"""
d = _rdict()
for k1, v1 in zip(*items1):
for k2, v2 in zip(*items2):
d[k1 + k2] = v1 * v2
return d.to_tuples()


def _INTERNAL_PROB_LIMIT_VALS(rv: RV, sum_limit: float = 10e30):
sum_ = rv._get_sum_probs()
if sum_ <= sum_limit:
return rv
normalizing_const = int(10e10 * sum_ // sum_limit)
normalizing_const = int(10e10 * (sum_ // sum_limit))
logger.warning(f'WARNING reducing probabilities | sum limit {sum_limit}, sum{sum_:.1g}, NORMALIZING BY {normalizing_const:.1g} | from my calc, abs err <= {1 / (sum_ / normalizing_const - 1)}')
# napkin math for the error. int(x) = x - x_ϵ where x_ϵ∈[0,1) is for the rounding error. Don't quote me on this math, not 100% sure.
# P(x_i )=p_i/(∑p_i ) before normalization (p_i is an integer probability unbounded)
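For context on the randvar.py change above: `_rdict.fast_convolve` replaces the old pair of nested tuple comprehensions in `_convolve` with a dict that accumulates probabilities for repeated outcome values, so equal sums are merged during the N^2 loop instead of being grouped afterwards. A minimal standalone sketch of that accumulation idea (the function name and the example distributions below are illustrative, not taken from the PR):

def convolve_add(vals1, probs1, vals2, probs2):
    # accumulate P(v1 + v2), merging equal sums as they are produced
    acc = {}
    for v1, p1 in zip(vals1, probs1):
        for v2, p2 in zip(vals2, probs2):
            key = v1 + v2
            if key in acc:  # membership check: the fast path benchmarked in the _rdict comments
                acc[key] += p1 * p2
            else:
                acc[key] = p1 * p2
    items = sorted(acc.items())
    return tuple(k for k, _ in items), tuple(p for _, p in items)

# two fair d4s: the sums 2..8 come out already grouped and sorted
print(convolve_add((1, 2, 3, 4), (1, 1, 1, 1), (1, 2, 3, 4), (1, 1, 1, 1)))
# ((2, 3, 4, 5, 6, 7, 8), (1, 2, 3, 4, 3, 2, 1))
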
2 changes: 1 addition & 1 deletion test/autoouts/fetch.py
@@ -94,7 +94,7 @@ def main_fetch():
for (key, inp) in new_inps:
resp = get_anydice_resp(inp, C)
print('GOT', resp.text)
fetch_out['data'].append({'inp': inp, 'out': resp.text, 'key': key, 'time': datetime.now().isoformat()})
fetch_out['data'].append({'inp': inp, 'out': resp.text, 'key': key, 'time': datetime.now().isoformat(), 'i': len(fetch_out['data'])})
with open(cur_dir / 'fetch_out.json', 'w') as f:
json.dump(fetch_out, f, indent=2)
time.sleep(1 + random.random() * 5) # sleep for 1-6 seconds