# Left-to-right expression search: for each input line, test whether the target
# value is reachable by combining the operands with '*', '+' or '||' (digit concat).
from itertools import product
def tokenize(vals: list[int], ops: list[str]) -> list[str|int]:
|
|
tok = []
|
|
for item in zip(vals, ops):
|
|
tok.extend(item)
|
|
tok.append(vals[-1])
|
|
return tok
def compute(tokens: list[str|int]) -> int:
|
|
# Case where we have 1 || 2 ==> 12
|
|
while len(tokens) >= 3:
|
|
# Pop off the first 3 items
|
|
left, op, right = tokens[:3]
|
|
tokens = tokens[3:] if len(tokens) > 3 else []
|
|
# Now we do some computation and prepend the list with that result
|
|
if op == '*':
|
|
tokens.insert(0, left * right)
|
|
elif op == '+':
|
|
tokens.insert(0, left + right)
|
|
# the coalesce case
|
|
else:
|
|
tokens.insert(0, int(f'{left}{right}'))
|
|
return tokens[0]
with open('input.data') as file:
    # Each line looks like "190: 10 19" — drop the colon, keep the numbers.
    # cases[i][0] is the target value, the rest are the operands.
    cases = [
        [int(token) for token in raw.replace(':', '').split()]
        for raw in file
    ]
s = 0
for case in cases:
    answer, vals = case[0], case[1:]
    # Evaluate every operator assignment between the operands and collect
    # the distinct outcomes.
    outcomes = {
        compute(tokenize(vals, ops))
        for ops in product(['*', '+', '||'], repeat=len(vals) - 1)
    }
    # Count the target toward the total if any combination reaches it.
    if answer in outcomes:
        s += answer
print(s)