move old code from notebook
landoskape committed Apr 17, 2024
1 parent dc7df97 commit 957eaf6
Showing 2 changed files with 537 additions and 60 deletions.
97 changes: 37 additions & 60 deletions dominoes.ipynb
@@ -45,15 +45,13 @@
"outputs": [],
"source": [
"# TODO For refactoring\n",
"# check reward sequencer function\n",
"\n",
"# start working on TSP dataset\n",
"# start working on supervised learning child of parent dataset class (and make dominoeDataset an child of that also!)"
]
},
{
"cell_type": "code",
"execution_count": 3,
"execution_count": 1,
"id": "8404c54b",
"metadata": {},
"outputs": [],
@@ -70,80 +68,66 @@
},
{
"cell_type": "code",
"execution_count": 7,
"execution_count": 20,
"id": "94b81df6",
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"tensor([[3, 7, 5, 6, 8, 9, 9, 9, 9],\n",
" [6, 3, 4, 0, 8, 9, 9, 9, 9],\n",
" [8, 9, 9, 9, 9, 9, 9, 9, 9],\n",
" [0, 1, 6, 5, 8, 9, 9, 9, 9]])"
]
},
"execution_count": 7,
"metadata": {},
"output_type": "execute_result"
"name": "stdout",
"output_type": "stream",
"text": [
"tensor([[ 1., 0., 0., 0., 0., 0., 0., 0., 0.],\n",
" [ 1., 0., 0., 0., 0., 0., 0., 0., 0.],\n",
" [12., 10., 4., 10., 13., 1., 0., 0., 0.],\n",
" [ 8., 1., 0., 0., 0., 0., 0., 0., 0.]])\n"
]
}
],
"source": [
"highest_dominoe = 9\n",
"dataset = DominoeDataset(\"sequencer\", highest_dominoe, hand_size=8, return_target=True)\n",
"\n",
"batch = dataset.generate_batch(train=False, batch_size=4)\n",
"batch = dataset.generate_batch(train=False, batch_size=4, value_method=\"dominoe\")\n",
"dominoes = dataset.get_dominoe_set(train=False)\n",
"\n",
"ib = 0\n",
"hand = dominoes[batch[\"selection\"][ib]]\n",
"target_as_choice = batch[\"target\"].clone()\n",
"target_as_choice[target_as_choice==-1] = dataset.prms[\"hand_size\"] + 1\n",
"# reward = dataset._measurereward_sequencer(target_as_choice, batch)\n",
"target_as_choice"
]
},
{
"cell_type": "code",
"execution_count": 39,
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"torch.int64"
]
},
"execution_count": 39,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"dominoes.dtype"
"target_as_choice[target_as_choice==-1] = dataset.prms[\"hand_size\"]\n",
"\n",
"reward, direction = dataset._measurereward_sequencer(target_as_choice, batch, return_direction=True)\n",
"print(reward)"
]
},
{
"cell_type": "code",
"execution_count": 55,
"id": "b7e87b30",
"execution_count": 21,
"id": "80eea3a8",
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"False"
]
},
"execution_count": 55,
"metadata": {},
"output_type": "execute_result"
"name": "stdout",
"output_type": "stream",
"text": [
"tensor(3)\n",
"tensor([3., 8.]) forward value = 12.0\n",
"tensor([1., 8.]) reverse value = 10.0\n",
"tensor([1., 2.]) forward value = 4.0\n",
"tensor([2., 7.]) forward value = 10.0\n",
"tensor([5., 7.]) reverse value = 13.0\n",
"tensor([-1., -1.]) reverse value = -1.0\n",
"tensor([-1., -1.]) reverse value = -1.0\n",
"tensor([-1., -1.]) reverse value = -1.0\n",
"tensor([-1., -1.]) reverse value = -1.0\n"
]
}
],
"source": [
"ss = \"length\"\n",
"ib = 2\n",
"hand = dominoes[batch[\"selection\"][ib]]\n",
"hand_null = torch.cat([hand, -torch.ones((1, 2))], dim=0)\n",
"\n",
"not (ss == \"dominoe\" or ss == \"length\")"
"print(batch[\"available\"][ib])\n",
"for c, d in zip(target_as_choice[ib], direction[ib]):\n",
" print(hand_null[c], \"forward\" if d==0 else \"reverse\", f\"value = {torch.sum(hand_null[c])+1}\")"
]
},
{
@@ -167,13 +151,6 @@
"outputs": [],
"source": []
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": []
},
{
"cell_type": "code",
"execution_count": null,
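For readers skimming the diff, the updated cells can be read as one consolidated snippet. The sketch below is assembled from the added lines above and is not verbatim notebook content; it assumes torch and the repository's DominoeDataset class are imported in an earlier, unchanged cell, and it uses the generate_batch, get_dominoe_set, and _measurereward_sequencer calls exactly as they appear in the diff.

# Consolidated sketch of the updated cells (assumes torch and DominoeDataset are
# already imported earlier in the notebook).
highest_dominoe = 9
dataset = DominoeDataset("sequencer", highest_dominoe, hand_size=8, return_target=True)

# New in this commit: value_method="dominoe" when generating the evaluation batch.
batch = dataset.generate_batch(train=False, batch_size=4, value_method="dominoe")
dominoes = dataset.get_dominoe_set(train=False)

# Remap the null target token (-1) onto the padding index at the end of the hand.
target_as_choice = batch["target"].clone()
target_as_choice[target_as_choice == -1] = dataset.prms["hand_size"]

# Score the target sequence and also return the play direction of each dominoe.
reward, direction = dataset._measurereward_sequencer(target_as_choice, batch, return_direction=True)
print(reward)

# Inspect one hand: append a null dominoe so the padding index is valid, then
# walk the chosen sequence, printing each dominoe, its orientation, and its value.
ib = 2
hand = dominoes[batch["selection"][ib]]
hand_null = torch.cat([hand, -torch.ones((1, 2))], dim=0)

print(batch["available"][ib])
for c, d in zip(target_as_choice[ib], direction[ib]):
    print(hand_null[c], "forward" if d == 0 else "reverse", f"value = {torch.sum(hand_null[c]) + 1}")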

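As a quick arithmetic check on the printed loop output: each chosen dominoe is scored as its pip sum plus one, torch.sum(hand_null[c]) + 1, so the dominoe (3, 8) played forward gives 3 + 8 + 1 = 12, and the padded null dominoe (-1, -1) gives -1 - 1 + 1 = -1, matching the "forward value = 12.0" and "reverse value = -1.0" lines in the new cell's output.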
