Day 1
Part 1
Example input
input_lines_example = list(map(int, """199
200
208
210
200
207
240
269
260
263""".split('\n')))
Competition input
with open('input-day-1.txt', 'r') as f:
input_lines = list(map(int, f.read().splitlines()))
num_increased = 0
for idx, line in enumerate(input_lines):
if idx == 0:
continue
if line > input_lines[idx-1]:
num_increased += 1
num_increased
Part 2
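# Sum each sliding window of three consecutive measurements, then count increases between consecutive windows as in Part 1.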
windows = []
for idx, line in enumerate(input_lines):
if idx == 0:
continue
if idx + 1 == len(input_lines):
continue
windows.append(input_lines[idx-1] + line + input_lines[idx+1])
num_increased = 0
for idx, line in enumerate(windows):
if idx == 0:
continue
if line > windows[idx-1]:
num_increased += 1
num_increased
Day 2
sample_input = """forward 5
down 5
forward 8
up 3
down 8
forward 2""".split('\n')
with open('input-day-2.txt', 'r') as f:
comp_input = list(map(str.strip, f.readlines()))
Part 1
horizontal = 0
depth = 0
inp = comp_input
for line in inp:
direction, step_str = line.split(' ')
step = int(step_str)
if direction == 'forward':
horizontal += step
elif direction == 'down':
depth += step
elif direction == 'up':
depth -= step
horizontal * depth
Part 2
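# 'down' and 'up' now adjust the aim; 'forward' advances horizontally and increases depth by aim * step.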
horizontal = 0
depth = 0
aim = 0
inp = comp_input
for line in inp:
direction, step_str = line.split(' ')
step = int(step_str)
if direction == 'forward':
horizontal += step
depth += aim * step
elif direction == 'down':
aim += step
elif direction == 'up':
aim -= step
horizontal * depth
Day 3
Part 1
sample_input = """00100
11110
10110
10111
10101
01111
00111
11100
10000
11001
00010
01010""".split('\n')
with open('input-day-3.txt', 'r') as f:
comp_input = list(map(str.strip, f.readlines()))
from pandas import DataFrame
inp = comp_input
input_df = DataFrame([list(s) for s in inp])
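# gamma is built from the most common bit of each column, epsilon from the least common (its bitwise complement).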
most_common_values = []
for column in input_df:
most_common = input_df[column].value_counts().idxmax()
most_common_values.append(most_common)
least_common_values = ['1' if n == '0' else '0' for n in most_common_values]
gamma = int(''.join(most_common_values), 2)
epsilon = int(''.join(least_common_values), 2)
gamma * epsilon
Part 2
inp = comp_input
inp_temp = inp.copy()
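# Bit-criteria filtering: the oxygen generator rating keeps the lines matching the most common bit per column (ties keep '1'); the CO2 scrubber rating below keeps the least common (ties keep '0').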
for column, _ in enumerate(inp_temp[0]):
zeros = [line for line in inp_temp if line[column] == '0']
ones = [line for line in inp_temp if line[column] == '1']
if len(zeros) > len(ones):
inp_temp = zeros
else:
inp_temp = ones
if len(inp_temp) == 1:
break
oxygen_generator = int(''.join(inp_temp), 2)
print("Oxygen generator rating:", oxygen_generator)
inp_temp = inp.copy()
for column, _ in enumerate(inp_temp[0]):
zeros = [line for line in inp_temp if line[column] == '0']
ones = [line for line in inp_temp if line[column] == '1']
if len(zeros) <= len(ones):
inp_temp = zeros
else:
inp_temp = ones
if len(inp_temp) == 1:
break
co2_scrubber = int(''.join(inp_temp), 2)
print("CO2 scrubber rating:", co2_scrubber)
print("Life support rating:", oxygen_generator * co2_scrubber)
Day 4
Part 1
sample_input = """7,4,9,5,11,17,23,2,0,14,21,24,10,16,13,6,15,25,12,22,18,20,8,19,3,26,1
22 13 17 11 0
8 2 23 4 24
21 9 14 16 7
6 10 3 18 5
1 12 20 15 19
3 15 0 2 22
9 18 13 17 5
19 8 7 25 23
20 11 10 24 4
14 21 16 12 6
14 21 17 24 4
10 16 15 9 19
18 8 23 26 20
22 11 13 6 5
2 0 12 3 7""".split("\n")
with open('input-day-4.txt', 'r') as f:
comp_input = f.read().splitlines()
Part 2
import re
import itertools
# inp = sample_input.copy()  # uncomment to run against the sample input instead
inp = comp_input.copy()
called_numbers = list(map(int, inp.pop(0).split(',')))
boards_raw = []
for line in inp:
if line == '':
current_board = []
boards_raw.append(current_board)
else:
line_split = filter(None, re.split(r'\s+', line))
current_board.append(list(map(lambda x: (int(x), False), line_split)))
boards = boards_raw
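# Each board cell is a (number, marked) tuple; one_round marks called numbers and evaluate_boards yields every board with a fully marked row or column.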
def one_round(called_number, boards):
for board in boards:
for row_idx, row in enumerate(board):
for column_idx, (number, marked) in enumerate(row):
if number == called_number:
board[row_idx][column_idx] = (number, True)
def evaluate_boards(boards):
for board_index, board in enumerate(boards):
for column_idx in range(len(board[0])):
if all(row[column_idx][1] for row in board):
# print("Winning by columns", board)
yield board_index, board
for row_idx, row in enumerate(board):
if all([marked for number, marked in row]):
# print("Winning by rows", board)
yield board_index, board
def board_score(winning_board, called_number):
return sum([number for sublist in winning_board for number, marked in sublist if not marked]) * called_number
winning_board_indexes = set()
all_done = False
for called_number in called_numbers:
if all_done:
break
one_round(called_number, boards)
for winning_board_index, winning_board in evaluate_boards(boards):
if len(winning_board_indexes) == 0:
print("First winning board score:", board_score(winning_board, called_number))
winning_board_indexes.add(winning_board_index)
if len(winning_board_indexes) == len(boards):
print("Last board to win score:", board_score(boards[winning_board_index], called_number))
all_done = True
break
Day 5
sample_input = """0,9 -> 5,9
8,0 -> 0,8
9,4 -> 3,4
2,2 -> 2,1
7,0 -> 7,4
6,4 -> 2,0
0,9 -> 2,9
3,4 -> 1,4
0,0 -> 8,8
5,5 -> 8,2""".split("\n")
with open('input-day-5.txt', 'r') as f:
comp_input = f.read().splitlines()
import math
from itertools import cycle
inp = comp_input.copy()
vectors = []
for line in inp:
start, end = line.split(' -> ')
start_x, start_y = list(map(int, start.split(',')))
end_x, end_y = list(map(int, end.split(',')))
vectors.append(((start_x, start_y), (end_x, end_y)))
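# Count how many lines cover each grid point; cycle() pads the shorter coordinate range so horizontal and vertical lines (where one range has length 1) work the same way as diagonals.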
def build_cover_diagram(vectors):
cover_diagram = {}
for ((start_x, start_y), (end_x, end_y)) in vectors:
sign_x = int(math.copysign(1, end_x - start_x))
range_x = range(start_x, end_x + sign_x, sign_x)
sign_y = int(math.copysign(1, end_y - start_y))
range_y = range(start_y, end_y + sign_y, sign_y)
points = list(zip(range_x, cycle(range_y)) if len(range_x) > len(range_y) else zip(cycle(range_x), range_y))
for x,y in points:
cover_diagram.setdefault((x,y), 0)
cover_diagram[(x,y)] += 1
return cover_diagram
vectors_filtered = [((start_x, start_y), (end_x, end_y)) for (start_x, start_y), (end_x, end_y) in vectors if start_x == end_x or start_y == end_y]
print("Overlaps of vertical and horizontal lines:",
len([value for _, value in build_cover_diagram(vectors_filtered).items() if value >= 2]))
print("Overlaps of all lines:",
len([value for value in build_cover_diagram(vectors).values() if value >= 2]))
Day 6
sample_input = """3,4,3,1,2"""
with open('input-day-6.txt', 'r') as f:
comp_input = f.read().strip()
inp = comp_input
ages = list(map(int, inp.split(",")))
fish_by_age = dict((age,0) for age in range(9))
for age in ages:
fish_by_age[age] += 1
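# Keep one counter per timer value (0-8) so a day of simulation costs constant work regardless of how many fish exist.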
def one_fish_day(fish_by_age):
new_fish_by_age = dict((age, 0) for age in range(9))
for age, count in fish_by_age.items():
if age == 0:
new_fish_by_age[6] += count
new_fish_by_age[8] += count
else:
new_fish_by_age[age-1] += count
return new_fish_by_age
for day in range(256):
fish_by_age = one_fish_day(fish_by_age)
if day == 79:
print("# of lanternfish after 80 days:", sum(fish_by_age.values()))
print("# of lanternfish after 256 days:", sum(fish_by_age.values()))
Day 7
sample_input = """16,1,2,0,4,2,7,1,2,14"""
with open('input-day-7.txt', 'r') as f:
comp_input = f.read().strip()
inp = comp_input
positions = list(map(int, inp.split(',')))
all_costs = []
all_costs_part_2 = []
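# Part 2 fuel for moving n steps costs 1 + 2 + ... + n = n * (n + 1) / 2, computed by partial_sum below.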
def partial_sum(n):
return int((n * (n+1)) / 2)
for desired_position in range(min(positions), max(positions)+1):
costs = 0
costs_part_2 = 0
for position in positions:
costs += abs(position - desired_position)
costs_part_2 += partial_sum(abs(position - desired_position))
all_costs.append(costs)
all_costs_part_2.append(costs_part_2)
print("Part one cost:", min(enumerate(all_costs), key=lambda v: v[1])[1])
print("Part two cost:", min(enumerate(all_costs_part_2), key=lambda v: v[1])[1])
Day 8
sample_input = """be cfbegad cbdgef fgaecd cgeb fdcge agebfd fecdb fabcd edb | fdgacbe cefdb cefbgd gcbe
edbfga begcd cbg gc gcadebf fbgde acbgfd abcde gfcbed gfec | fcgedb cgb dgebacf gc
fgaebd cg bdaec gdafb agbcfd gdcbef bgcad gfac gcb cdgabef | cg cg fdcagb cbg
fbegcd cbd adcefb dageb afcb bc aefdc ecdab fgdeca fcdbega | efabcd cedba gadfec cb
aecbfdg fbg gf bafeg dbefa fcge gcbea fcaegb dgceab fcbdga | gecf egdcabf bgf bfgea
fgeab ca afcebg bdacfeg cfaedg gcfdb baec bfadeg bafgc acf | gebdcfa ecba ca fadegcb
dbcfg fgd bdegcaf fgec aegbdf ecdfab fbedc dacgb gdcebf gf | cefg dcbef fcge gbcadfe
bdfegc cbegaf gecbf dfcage bdacg ed bedf ced adcbefg gebcd | ed bcgafe cdgba cbgef
egadfb cdbfeg cegd fecab cgb gbdefca cg fgcdab egfdb bfceg | gbdfcae bgc cg cgb
gcafb gcf dcaebfg ecagb gf abcdeg gaef cafbge fdbac fegbdc | fgae cfgab fg bagce""".split("\n")
with open('input-day-8.txt', 'r') as f:
comp_input = list(map(str.strip, f.readlines()))
inp = comp_input
parsed_lines = []
for line in inp:
signal_patterns, outputs = line.split(" | ")
signal_patterns = signal_patterns.split(' ')
outputs = outputs.split(' ')
parsed_lines.append((signal_patterns, outputs))
def find_by_length(l, length):
return filter(lambda s: len(s) == length, l)
seven_segment_mapping = {
''.join(sorted('abcefg')): 0,
''.join(sorted('cf')): 1,
''.join(sorted('acdeg')): 2,
''.join(sorted('acdfg')): 3,
''.join(sorted('bcdf')): 4,
''.join(sorted('abdfg')): 5,
''.join(sorted('abdefg')): 6,
''.join(sorted('acf')): 7,
''.join(sorted('abcdefg')): 8,
''.join(sorted('abcdfg')): 9
}
def decode_seven_segment(letters):
return seven_segment_mapping[''.join(sorted(letters))]
def print_sorted_dict(d):
for k in sorted(d.keys()):
print(k, ' => ', d[k])
num_filtered = 0
total_outputs = 0
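# Deduce the wiring per line: 'a' is in 7 but not in 1; 'f' is the segment of 1 that appears in all three 6-segment patterns; 'd' is the segment of 4 (minus c/f) that appears in all three 5-segment patterns; 'g' likewise among the remaining segments; 'c', 'b' and 'e' follow by elimination.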
for signal_patterns, outputs in parsed_lines:
num_filtered += len(list(filter(lambda x: len(x) in (2, 4, 3, 7), outputs)))
one = next(find_by_length(signal_patterns, 2), None)
seven = next(find_by_length(signal_patterns, 3), None)
four = next(find_by_length(signal_patterns, 4), None)
eight = next(find_by_length(signal_patterns, 7), None)
segment_mapping = {}
maybe_069 = list(find_by_length (signal_patterns, 6))
maybe_235 = list(find_by_length (signal_patterns, 5))
segment_mapping['a'] = (set(seven) - set(one)).pop()
segment_mapping['f'] = next(filter(lambda x: len(list(filter(lambda y: x in y, maybe_069))) == 3, list(one)))
segment_mapping['c'] = (set(one) - set(segment_mapping['f'])).pop()
segment_b_or_d = list(set(four) - set([segment_mapping[k] for k in ['c', 'f']]))
segment_mapping['d'] = next(filter(lambda x: len(list(filter(lambda y: x in y, maybe_235))) == 3, segment_b_or_d))
segment_mapping['b'] = (set(segment_b_or_d) - set(segment_mapping['d'])).pop()
segment_e_or_g = set(eight) - set(segment_mapping.values())
segment_mapping['g'] = next(filter(lambda x: len(list(filter(lambda y: x in y, maybe_235))) == 3, segment_e_or_g))
segment_mapping['e'] = (set(eight) - set(segment_mapping.values())).pop()
segment_mapping_reverse = dict([(v, k) for k, v in segment_mapping.items()])
output_numbers = []
for output in outputs:
output_translated = list(map(lambda x: segment_mapping_reverse[x], output))
output_number = decode_seven_segment(output_translated)
output_numbers.append(str(output_number))
total_outputs += int(''.join(output_numbers))
print("Part one:", num_filtered)
print("Part two:", total_outputs)
Day 9
sample_input = """2199943210
3987894921
9856789892
8767896789
9899965678""".split("\n")
with open('input-day-9.txt', 'r') as f:
comp_input = f.read().splitlines()
import numpy as np
from collections import deque
inp = comp_input
inp = list(map(list, inp))
def get_neighbors(matrix: np.matrix, row, column):
rows, columns = matrix.shape
neighbor_indices = []
# left
if column - 1 >= 0:
neighbor_indices.append((row, column - 1))
# right
if (column + 1) < columns:
neighbor_indices.append((row, column + 1))
# top
if row - 1 >= 0:
neighbor_indices.append((row - 1, column))
# bottom
if row + 1 < rows:
neighbor_indices.append((row + 1, column))
return [((r, c), matrix[r, c]) for (r, c) in neighbor_indices]
matrix = np.matrix(inp).astype(int)
lowest = []
for index, value in np.ndenumerate(matrix):
neighbors = get_neighbors(matrix, *index)
neighbor_values = list(map(lambda x: x[1], neighbors))
if value < min(neighbor_values):
lowest.append((index, value))
print("Part 1:", sum(map(lambda x: x[1]+1, lowest)))
basin_lengths = []
for index, value in lowest:
q = deque()
q.append(index)
basin = []
visited = set()
while len(q) > 0:
row, col = q.popleft()
if (row, col) in visited:
continue
visited.add((row, col))
v = matrix[row, col]
if v < 9:
basin.append(v)
neighbors = get_neighbors(matrix, row, col)
q.extend(list(map(lambda x: x[0], neighbors)))
basin_lengths.append(len(basin))
print("Part 2:", np.prod(sorted(basin_lengths, reverse=True)[0:3]))
Day 10
sample_input = """[({(<(())[]>[[{[]{<()<>>
[(()[<>])]({[<{<<[]>>(
{([(<{}[<>[]}>{[]{[(<()>
(((({<>}<{<{<>}{[]{[]{}
[[<[([]))<([[{}[[()]]]
[{[{({}]{}}([{[{{{}}([]
{<[[]]>}<{[{[{[]{()[[[]
[<(<(<(<{}))><([]([]()
<{([([[(<>()){}]>(<<{{
<{([{{}}[<[[[<>{}]]]>[]]""".split('\n')
with open('input-day-10.txt', 'r') as f:
comp_input = f.read().splitlines()
inp = comp_input
character_pairs = {
'(': ')',
'[': ']',
'{': '}',
'<': '>'
}
opening_characters = character_pairs.keys()
char_scores = {
')': 3,
']': 57,
'}': 1197,
'>': 25137
}
autocomplete_char_scores = {
')': 1,
']': 2,
'}': 3,
'>': 4
}
unexpected_chars = []
incomplete_lines = []
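# Push opening brackets onto a stack; a mismatched closing bracket marks the line as corrupted, a non-empty stack at the end marks it as incomplete.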
for line in inp:
stack = []
corrupted = False
for char in line:
if char in opening_characters:
stack.append(char)
else:
previous_char = stack.pop()
if character_pairs[previous_char] != char:
unexpected_chars.append(char)
corrupted = True
break
if not corrupted and len(stack) > 0:
incomplete_lines.append((line, stack))
print("Part 1:", sum(map(lambda char: char_scores[char], unexpected_chars)))
all_closing_chars = []
all_closing_scores = []
for line, stack in incomplete_lines:
closing_chars = []
closing_chars_score = 0
while not len(stack) == 0:
closing_char = character_pairs[stack.pop()]
closing_chars.append(closing_char)
closing_chars_score = 5 * closing_chars_score + autocomplete_char_scores[closing_char]
all_closing_chars.append(closing_chars)
all_closing_scores.append(closing_chars_score)
print("Part 2:", int(np.median(all_closing_scores)))
Day 11
sample_input = """5483143223
2745854711
5264556173
6141336146
6357385478
4167524645
2176841721
6882881134
4846848554
5283751526""".split("\n")
with open('input-day-11.txt', 'r') as f:
comp_input = f.read().splitlines()
import numpy as np
import itertools
inp = comp_input
inp = list(map(list, inp))
matrix = np.matrix(inp).astype(int)
def get_all_neighbors(matrix: np.matrix, row, column):
rows, columns = matrix.shape
left = max(0, column - 1)
right = min(columns - 1, column + 1)
top = max(0, row - 1)
bottom = min(rows - 1, row + 1)
row_indices = [r for r in range(top, bottom+1)]
column_indices = [c for c in range(left, right+1)]
indices = list(itertools.product(row_indices, column_indices))
return [((r, c), matrix[r, c]) for (r, c) in indices]
total_flashes = 0
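# One step: add 1 to every octopus, then repeatedly flash cells above 9 that have not flashed yet (bumping their neighbors), and finally reset all flashed cells to 0.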
for step in itertools.count(1):
matrix_increased = matrix + np.ones((10, 10)).astype(int)
flashed = set()
while True:
to_flash = np.transpose(np.where(matrix_increased > 9))
to_flash_filtered = list(filter(lambda x: (x[0], x[1]) not in flashed ,to_flash))
if len(to_flash_filtered) == 0:
break
for (row, col) in to_flash_filtered:
flashed.add((row, col))
neighbors = get_all_neighbors(matrix_increased, row, col)
for (n_row, n_col), n_value in neighbors:
matrix_increased[n_row, n_col] = n_value + 1
total_flashes += len(flashed)
if step == 100:
print("Part 1:", total_flashes)
matrix_increased[matrix_increased > 9] = 0
if np.all(matrix_increased == matrix_increased[0,0]):
print("Part 2:", step)
break
matrix = matrix_increased
Day 12
!pip install networkx
sample_input = """dc-end
HN-start
start-kj
dc-start
dc-HN
LN-dc
HN-end
kj-sa
kj-HN
kj-dc""".split('\n')
sample2_input = """fs-end
he-DX
fs-he
start-DX
pj-DX
end-zg
zg-sl
zg-pj
pj-he
RW-he
fs-DX
pj-RW
zg-RW
start-pj
he-WI
zg-he
pj-fs
start-RW""".split('\n')
with open('input-day-12.txt', 'r') as f:
comp_input = f.read().splitlines()
import networkx as nx
from collections import deque
G = nx.Graph()
inp = comp_input
for line in inp:
[start, end] = line.split('-', 2)
G.add_edge(start, end)
nx.draw(G, with_labels=True, node_size=1000)
paths = set()
to_visit = deque([('start', [], None)])
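# BFS over states of (current cave, path so far, small cave chosen to be visited twice); None means that choice is still open. Duplicate routes collapse in the paths set.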
while len(to_visit) > 0:
current, visited, small_for_twice = to_visit.popleft()
for n in G.neighbors(current):
if n == 'start':
continue
elif n == 'end':
path = ','.join(visited + [current, 'end'])
paths.add(path)
elif n.isupper() or not n in visited or (n == small_for_twice and visited.count(n) == 1):
if small_for_twice is None:
if n.islower():
to_visit.append((n, visited + [current], n))
to_visit.append((n, visited + [current], None))
else:
to_visit.append((n, visited + [current], small_for_twice))
print("Part 2:", len(paths))
Day 13
sample_input = """6,10
0,14
9,10
0,3
10,4
4,11
6,0
6,12
4,1
0,13
10,12
3,4
3,0
8,4
1,10
2,14
8,10
9,0
fold along y=7
fold along x=5""".split('\n')
with open('input-day-13.txt', 'r') as f:
comp_input = f.read().splitlines()
import numpy as np
inp = comp_input
sep_index = inp.index('')
coordinates = inp[:sep_index]
folds = inp[sep_index + 1:]
parsed_coordinates = []
for [x, y] in map(lambda x: x.split(',', 2), coordinates):
parsed_coordinates.append((int(x), int(y)))
max_x = max(map(lambda x: x[0], parsed_coordinates))
max_y = max(map(lambda x: x[1], parsed_coordinates))
image = np.full((max_x + 1, max_y + 1), False)
for x, y in parsed_coordinates:
image[x, y] = True
image = image.transpose()
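# Each fold ORs the top half with the mirrored bottom half; folds along x are handled by transposing before and after.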
for fold_count, fold in enumerate(folds):
equal_index = fold.index('=')
axis = fold[equal_index-1]
num = int(fold[equal_index+1:])
if axis == 'x':
image = image.transpose()
rows_total, col_total = image.shape
image_new = np.full((num, col_total), False)
for line_number in range(num):
image_new[line_number] = np.bitwise_or(image[line_number], image[rows_total-1 -line_number])
image = image_new
if axis == 'x':
image = image.transpose()
if fold_count == 0:
print("Part 1:", np.count_nonzero(image))
print("Part 2:")
print('\n'.join([''.join('##' if c else '..' for c in line) for line in image]))
Day 14
sample_input = """NNCB
CH -> B
HH -> N
CB -> H
NH -> C
HB -> C
HC -> B
HN -> C
NN -> C
BH -> H
NC -> B
NB -> B
BN -> B
BB -> N
BC -> B
CC -> N
CN -> C""".split('\n')
with open('input-day-14.txt', 'r') as f:
comp_input = f.read().splitlines()
import collections
inp = comp_input
sep_index = inp.index('')
template = inp[:sep_index][0]
replacement_lines = inp[sep_index + 1:]
replacements = {}
for line in replacement_lines:
orig, replacement = line.split(' -> ')
replacements[orig] = replacement
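# Naive string expansion per step; fine for the 10 steps of Part 1.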
for step in range(10):
template_new = template[0]
for a, b in zip(template, template[1:]):
insertion = replacements.get(''.join([a, b]), '')
if insertion == '':
print(a,b)
template_new += ''.join([insertion, b])
template = template_new
if step == 9:
c = collections.Counter(template)
_, most_count = c.most_common()[0]
_, least_count = c.most_common()[-1]
print("Part 1:", most_count - least_count)
Day 15
sample_input = """1163751742
1381373672
2136511328
3694931569
7463417111
1319128137
1359912421
3125421639
1293138521
2311944581""".split('\n')
with open('input-day-15.txt', 'r') as f:
comp_input = f.read().splitlines()
from functools import partial
inp = comp_input
inp = list(map(list, inp))
matrix = np.matrix(inp).astype(int)
def build_graph(matrix):
g = nx.Graph()
for (row, column), value in np.ndenumerate(matrix):
neighbors = get_neighbors(matrix, row, column)
for (n_row, n_column), weight in neighbors:
g.add_node((row, column), weight=value)
g.add_node((n_row, n_column), weight=weight)
g.add_edge((row, column), (n_row, n_column))
return g
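# Edge weight for Dijkstra: the 'weight' (risk) of the node being entered, i.e. the second node argument.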
def weight_func(graph, u, v, d):
return graph.nodes[v].get("weight")
def path_weight(path, matrix):
total_weight = 0
path_matrix = np.full_like(matrix, 0)
for node in path[1:]:
w = matrix[node]
total_weight += w
path_matrix[node] = 1
return total_weight
graph_small = build_graph(matrix)
path = nx.dijkstra_path(graph_small, (0,0), (matrix.shape[0] -1, matrix.shape[1]-1), weight=partial(weight_func, graph_small))
print("Part 1:", path_weight(path, matrix))
def increase_and_wrap(n, increase):
increased = n + increase
if increased == 9:
return increased
return increased % 9
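# Part 2: tile the map 5x5; each tile's risks increase by its tile offset, with values above 9 wrapping back around to 1.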
matrix_large = np.full(shape = (matrix.shape[0] * 5, matrix.shape[1] * 5), fill_value=0)
small_rows, small_columns = matrix.shape
for (row, column), value in np.ndenumerate(matrix):
matrix_large[row, column] = value
for i in range(1, 5):
new_value = increase_and_wrap(value, i)
matrix_large[row, column + (i * small_columns)] = new_value
# matrix_large[row + (i * small_rows), column] = new_value
for row_num in range(small_rows):
for col_num in range(matrix_large.shape[1]):
for i in range(1, 5):
old_value = matrix_large[row_num, col_num]
new_value = increase_and_wrap(old_value, i)
matrix_large[row_num + (i * small_rows), col_num] = new_value
graph_large = build_graph(matrix_large)
path = nx.dijkstra_path(graph_large, (0,0), (matrix_large.shape[0] -1, matrix_large.shape[1]-1), weight=partial(weight_func, graph_large))
print("Part 2:", path_weight(path, matrix_large))
Day 16
comp_input = """805311100469800804A3E488ACC0B10055D8009548874F65665AD42F60073E7338E7E5C538D820114AEA1A19927797976F8F43CD7354D66747B3005B401397C6CBA2FCEEE7AACDECC017938B3F802E000854488F70FC401F8BD09E199005B3600BCBFEEE12FFBB84FC8466B515E92B79B1003C797AEBAF53917E99FF2E953D0D284359CA0CB80193D12B3005B4017968D77EB224B46BBF591E7BEBD2FA00100622B4ED64773D0CF7816600B68020000874718E715C0010D8AF1E61CC946FB99FC2C20098275EBC0109FA14CAEDC20EB8033389531AAB14C72162492DE33AE0118012C05EEB801C0054F880102007A01192C040E100ED20035DA8018402BE20099A0020CB801AE0049801E800DD10021E4002DC7D30046C0160004323E42C8EA200DC5A87D06250C50015097FB2CFC93A101006F532EB600849634912799EF7BF609270D0802B59876F004246941091A5040402C9BD4DF654967BFDE4A6432769CED4EC3C4F04C000A895B8E98013246A6016CB3CCC94C9144A03CFAB9002033E7B24A24016DD802933AFAE48EAA3335A632013BC401D8850863A8803D1C61447A00042E3647B83F313674009E6533E158C3351F94C9902803D35C869865D564690103004E74CB001F39BEFFAAD37DFF558C012D005A5A9E851D25F76DD88A5F4BC600ACB6E1322B004E5FE1F2FF0E3005EC017969EB7AE4D1A53D07B918C0B1802F088B2C810326215CCBB6BC140C0149EE87780233E0D298C33B008C52763C9C94BF8DC886504E1ECD4E75C7E4EA00284180371362C44320043E2EC258F24008747785D10C001039F80644F201217401500043A2244B8D200085C3F8690BA78F08018394079A7A996D200806647A49E249C675C0802609D66B004658BA7F1562500366279CCBEB2600ACCA6D802C00085C658BD1DC401A8EB136100"""
import itertools
import numpy as np
def hex_to_bin(char):
return "{0:04b}".format(int(char, 16))
def hex_str_to_bin(hex_str):
return ''.join(map(hex_to_bin, hex_str))
def bin_to_int(chars):
return int(chars, 2)
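# Literal values come in 5-bit groups: a continuation bit (1 means more groups follow) plus 4 value bits.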
def parse_literal_value(chars):
bin_to_parse = ''
for i in itertools.count(0, 5):
bin_to_parse += chars[i+1:i+5]
if chars[i] == '0':
break
return (bin_to_int(bin_to_parse), i+5)
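# Recursively parse one packet, returning (sum of version numbers, evaluated value, number of bits consumed).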
def parse_packet(bin_str):
packet_version = bin_to_int(bin_str[0:3])
type_id = bin_to_int(bin_str[3:6])
if type_id == 4:
literal_value, length = parse_literal_value(bin_str[6:])
return (packet_version, literal_value, length + 6)
else:
length_type_id = bin_str[6]
offset = 0
subpacket_literals = []
sum_version_numbers = 0
if length_type_id == '0':
total_length = bin_to_int(bin_str[7:7+15])
subpackets_start_position = 7 + 15
while offset < total_length:
substr = bin_str[subpackets_start_position+offset:]
version_number, literal_value, length = parse_packet(substr)
subpacket_literals.append(literal_value)
offset += length
sum_version_numbers += version_number
else:
num_subpackets = bin_to_int(bin_str[7:7+11])
subpackets_start_position = 7 + 11
for _ in range(num_subpackets):
substr = bin_str[subpackets_start_position+offset:]
version_number, literal_value, length = parse_packet(substr)
subpacket_literals.append(literal_value)
offset += length
sum_version_numbers += version_number
if type_id == 0:
total_value = sum(subpacket_literals)
elif type_id == 1:
total_value = np.product(subpacket_literals)
elif type_id == 2:
total_value = min(subpacket_literals)
elif type_id == 3:
total_value = max(subpacket_literals)
elif type_id == 5:
total_value = 1 if subpacket_literals[0] > subpacket_literals[1] else 0
elif type_id == 6:
total_value = 1 if subpacket_literals[0] < subpacket_literals[1] else 0
elif type_id == 7:
total_value = 1 if subpacket_literals[0] == subpacket_literals[1] else 0
return(packet_version + sum_version_numbers, total_value, subpackets_start_position+offset)
# Literal packet
# parse_packet(hex_str_to_bin('D2FE28'))
# Subpacket length type 0
# parse_packet(hex_str_to_bin('38006F45291200'))
# Subpacket length type 1
# parse_packet(hex_str_to_bin('EE00D40C823060'))
# Examples for the operators:
# for s in ["C200B40A82", '04005AC33890','880086C3E88112', 'CE00C43D881120', 'D8005AC2A8F0', 'F600BC2D8F', '9C005AC2F8F0', '9C0141080250320F1802104A08']:
# print(s, '==', parse_packet(hex_str_to_bin(s))[1])
total_versions, total, _ = parse_packet(hex_str_to_bin(comp_input))
print("Part 1:", total_versions)
print("Part 2:", total)
Day 17
sample_input = "target area: x=20..30, y=-10..-5"
comp_input = "target area: x=144..178, y=-100..-76"
import itertools
inp = comp_input
inp = inp[len('target area: '):]
(x_from, x_to), (y_from, y_to) = map(lambda x: list(map(int, x.strip()[2:].split('..'))), inp.split(','))
target_area = (x_from, x_to), (y_from, y_to)
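# One simulation step: position += velocity, x velocity decays toward 0 (drag), y velocity decreases by 1 (gravity).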
def one_step(position_x, position_y, velocity_x, velocity_y):
new_position_x = position_x + velocity_x
new_position_y = position_y + velocity_y
if velocity_x == 0:
new_velocity_x = 0
elif velocity_x > 0:
new_velocity_x = velocity_x - 1
else:
new_velocity_x = velocity_x + 1
new_velocity_y = velocity_y - 1
return (new_position_x, new_position_y, new_velocity_x, new_velocity_y)
def in_target(position_x, position_y, target_area):
(x_from, x_to), (y_from, y_to) = target_area
return position_x in range(x_from, x_to + 1) and position_y in range(y_from, y_to + 1)
def too_far(position_x, position_y, target_area):
target_x, target_y = target_area
return position_x > max(*target_x) or position_y < min(*target_y)
correct_velocities = []
# Lazy brute force on y axis 🙈
r = itertools.product(range(1, x_to+1), range(-500, 500))
for initial_velocity_x, initial_velocity_y in r:
# print("Trying", initial_velocity_x, initial_velocity_y)
max_y = 0
position_x, position_y = 0, 0
velocity_x, velocity_y = initial_velocity_x, initial_velocity_y
for step in itertools.count(1):
(position_x, position_y, velocity_x, velocity_y) = one_step(position_x, position_y, velocity_x, velocity_y)
max_y = max(max_y, position_y)
# print(f"After step {step}", (position_x, position_y), (velocity_x, velocity_y))
if in_target(position_x, position_y, target_area):
# print("In target", (initial_velocity_x, initial_velocity_y), (position_x, position_y))
correct_velocities.append(((initial_velocity_x, initial_velocity_y), max_y))
break
if too_far(position_x, position_y, target_area):
# print("Too far", (initial_velocity_x, initial_velocity_y), (position_x, position_y))
break
print("Part 1:", sorted(correct_velocities, key=lambda x: x[1], reverse=True)[0][1])
print("Part 2", len(correct_velocities))