diff --git a/Batch_Graph.py b/Batch_Graph.py
new file mode 100644
index 0000000..441cff2
--- /dev/null
+++ b/Batch_Graph.py
@@ -0,0 +1,251 @@
+from typing import List
+import torch
+from torch import Tensor
+from torch_sparse import SparseTensor, cat
+import torch_geometric
+from torch_geometric.data import Data
+
+
+class Batch(Data):
+    r"""A plain old python object modeling a batch of graphs as one big
+    (disconnected) graph. With :class:`torch_geometric.data.Data` being the
+    base class, all its methods can also be used here.
+    In addition, single graphs can be reconstructed via the assignment vector
+    :obj:`batch`, which maps each node to its respective graph identifier.
+    """
+    def __init__(self, batch=None, ptr=None, **kwargs):
+        super(Batch, self).__init__(**kwargs)
+
+        for key, item in kwargs.items():
+            if key == 'num_nodes':
+                self.__num_nodes__ = item
+            else:
+                self[key] = item
+
+        self.batch = batch
+        self.ptr = ptr
+        self.__data_class__ = Data
+        self.__slices__ = None
+        self.__cumsum__ = None
+        self.__cat_dims__ = None
+        self.__num_nodes_list__ = None
+        self.__num_graphs__ = None
+
+    @classmethod
+    def from_data_list(cls, data_list, follow_batch=[], exclude_keys=[]):
+        r"""Constructs a batch object from a python list holding
+        :class:`torch_geometric.data.Data` objects.
+        The assignment vector :obj:`batch` is created on the fly.
+        Additionally, creates assignment batch vectors for each key in
+        :obj:`follow_batch`.
+        Will exclude any keys given in :obj:`exclude_keys`."""
+
+        keys = list(set(data_list[0].keys) - set(exclude_keys))
+        assert 'batch' not in keys and 'ptr' not in keys
+
+        batch = cls()
+        for key in data_list[0].__dict__.keys():
+            if key[:2] != '__' and key[-2:] != '__':
+                batch[key] = None
+
+        batch.__num_graphs__ = len(data_list)
+        batch.__data_class__ = data_list[0].__class__
+        for key in keys + ['batch']:
+            batch[key] = []
+        batch['ptr'] = [0]
+
+        device = None
+        slices = {key: [0] for key in keys}
+        cumsum = {key: [0] for key in keys}
+        cat_dims = {}
+        num_nodes_list = []
+        for i, data in enumerate(data_list):
+            for key in keys:
+                item = data[key]
+
+                # Increase values by `cumsum` value.
+                cum = cumsum[key][-1]
+                if isinstance(item, Tensor) and item.dtype != torch.bool:
+                    if not isinstance(cum, int) or cum != 0:
+                        item = item + cum
+                elif isinstance(item, SparseTensor):
+                    value = item.storage.value()
+                    if value is not None and value.dtype != torch.bool:
+                        if not isinstance(cum, int) or cum != 0:
+                            value = value + cum
+                        item = item.set_value(value, layout='coo')
+                elif isinstance(item, (int, float)):
+                    item = item + cum
+
+                # Treat 0-dimensional tensors as 1-dimensional.
+                if isinstance(item, Tensor) and item.dim() == 0:
+                    item = item.unsqueeze(0)
+
+                batch[key].append(item)
+
+                # Gather the size of the `cat` dimension.
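+                # (Editor's note, illustrative: `__cat_dim__` reports the axis
+                # along which a key is concatenated -- node features `x` of
+                # shape [N, F] join along dim 0, while `edge_index` of shape
+                # [2, E] joins along its last dimension. The `cumsum` step
+                # above has already offset `edge_index` by the node count of
+                # the previously batched graphs, so indices stay consistent.)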
+ size = 1 + cat_dim = data.__cat_dim__(key, data[key]) + cat_dims[key] = cat_dim + if isinstance(item, Tensor): + size = item.size(cat_dim) + device = item.device + elif isinstance(item, SparseTensor): + size = torch.tensor(item.sizes())[torch.tensor(cat_dim)] + device = item.device() + + slices[key].append(size + slices[key][-1]) + inc = data.__inc__(key, item) + if isinstance(inc, (tuple, list)): + inc = torch.tensor(inc) + cumsum[key].append(inc + cumsum[key][-1]) + + if key in follow_batch: + if isinstance(size, Tensor): + for j, size in enumerate(size.tolist()): + tmp = f'{key}_{j}_batch' + batch[tmp] = [] if i == 0 else batch[tmp] + batch[tmp].append( + torch.full((size, ), i, dtype=torch.long, + device=device)) + else: + tmp = f'{key}_batch' + batch[tmp] = [] if i == 0 else batch[tmp] + batch[tmp].append( + torch.full((size, ), i, dtype=torch.long, + device=device)) + + if hasattr(data, '__num_nodes__'): + num_nodes_list.append(data.__num_nodes__) + else: + num_nodes_list.append(None) + + num_nodes = data.num_nodes + if num_nodes is not None: + item = torch.full((num_nodes, ), i, dtype=torch.long, + device=device) + batch.batch.append(item) + batch.ptr.append(batch.ptr[-1] + num_nodes) + + # Fix initial slice values: + for key in keys: + slices[key][0] = slices[key][1] - slices[key][1] + + batch.batch = None if len(batch.batch) == 0 else batch.batch + batch.ptr = None if len(batch.ptr) == 1 else batch.ptr + batch.__slices__ = slices + batch.__cumsum__ = cumsum + batch.__cat_dims__ = cat_dims + batch.__num_nodes_list__ = num_nodes_list + + ref_data = data_list[0] + for key in batch.keys: + items = batch[key] + item = items[0] + if isinstance(item, Tensor): + batch[key] = torch.cat(items, ref_data.__cat_dim__(key, item)) + elif isinstance(item, SparseTensor): + batch[key] = cat(items, ref_data.__cat_dim__(key, item)) + elif isinstance(item, (int, float)): + batch[key] = torch.tensor(items) + + if torch_geometric.is_debug_enabled(): + batch.debug() + + return batch.contiguous() + + + def get_example(self, idx: int) -> Data: + r"""Reconstructs the :class:`torch_geometric.data.Data` object at index + :obj:`idx` from the batch object. + The batch object must have been created via :meth:`from_data_list` in + order to be able to reconstruct the initial objects.""" + + if self.__slices__ is None: + raise RuntimeError( + ('Cannot reconstruct data list from batch because the batch ' + 'object was not created using `Batch.from_data_list()`.')) + + data = self.__data_class__() + + for key in self.__slices__.keys(): + item = self[key] + # Narrow the item based on the values in `__slices__`. 
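+            # (Editor's note, illustrative: with two graphs of 3 and 2 nodes,
+            # __slices__['x'] == [0, 3, 5], so graph 1's features are
+            # item.narrow(0, 3, 2); the matching __cumsum__ entry below then
+            # removes the node-index offset that from_data_list added.)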
+ if isinstance(item, Tensor): + dim = self.__cat_dims__[key] + start = self.__slices__[key][idx] + end = self.__slices__[key][idx + 1] + item = item.narrow(dim, start, end - start) + elif isinstance(item, SparseTensor): + for j, dim in enumerate(self.__cat_dims__[key]): + start = self.__slices__[key][idx][j].item() + end = self.__slices__[key][idx + 1][j].item() + item = item.narrow(dim, start, end - start) + else: + start = self.__slices__[key][idx] + end = self.__slices__[key][idx + 1] + item = item[start:end] + item = item[0] if len(item) == 1 else item + + # Decrease its value by `cumsum` value: + cum = self.__cumsum__[key][idx] + if isinstance(item, Tensor): + if not isinstance(cum, int) or cum != 0: + item = item - cum + elif isinstance(item, SparseTensor): + value = item.storage.value() + if value is not None and value.dtype != torch.bool: + if not isinstance(cum, int) or cum != 0: + value = value - cum + item = item.set_value(value, layout='coo') + elif isinstance(item, (int, float)): + item = item - cum + + data[key] = item + + if self.__num_nodes_list__[idx] is not None: + data.num_nodes = self.__num_nodes_list__[idx] + + return data + + + def index_select(self, idx: Tensor) -> List[Data]: + if isinstance(idx, slice): + idx = list(range(self.num_graphs)[idx]) + elif torch.is_tensor(idx): + if idx.dtype == torch.bool: + idx = idx.nonzero(as_tuple=False).view(-1) + idx = idx.tolist() + elif isinstance(idx, list) or isinstance(idx, tuple): + pass + else: + raise IndexError( + 'Only integers, slices (`:`), list, tuples, and long or bool ' + 'tensors are valid indices (got {}).'.format( + type(idx).__name__)) + + return [self.get_example(i) for i in idx] + + def __getitem__(self, idx): + if isinstance(idx, str): + return super(Batch, self).__getitem__(idx) + elif isinstance(idx, int): + return self.get_example(idx) + else: + return self.index_select(idx) + + + def to_data_list(self) -> List[Data]: + r"""Reconstructs the list of :class:`torch_geometric.data.Data` objects + from the batch object. 
+        The batch object must have been created via :meth:`from_data_list` in
+        order to be able to reconstruct the initial objects."""
+        return [self.get_example(i) for i in range(self.num_graphs)]
+
+    @property
+    def num_graphs(self) -> int:
+        """Returns the number of graphs in the batch."""
+        if self.__num_graphs__ is not None:
+            return self.__num_graphs__
+        # `ptr` holds one boundary per graph plus the leading zero, so the
+        # number of graphs is one *less* than its length, not one more.
+        return self.ptr.numel() - 1
diff --git a/DuelingNet.py b/DuelingNet.py
new file mode 100755
index 0000000..66dd6ae
--- /dev/null
+++ b/DuelingNet.py
@@ -0,0 +1,347 @@
+import numpy as np
+import torch as T
+import torch
+import os
+import torch.optim as optim
+from torch.nn import ReLU, GRU, Sequential, Linear
+import torch.nn.functional as F
+import torch.nn as nn
+from torch import sigmoid, softmax, relu, tanh
+import matplotlib.pyplot as plt
+from torch.autograd import Variable
+from collections import namedtuple, deque
+from torch_geometric.data import DataLoader
+from torch_geometric.utils import normalized_cut
+from torch_geometric.nn import (NNConv, GATConv, graclus, max_pool, max_pool_x,
+                                global_mean_pool, BatchNorm, InstanceNorm, GraphConv,
+                                GCNConv, TAGConv, SGConv, LEConv, TransformerConv, SplineConv,
+                                GMMConv, GatedGraphConv, ARMAConv, GENConv, DeepGCNLayer,
+                                LayerNorm)
+import random
+import scipy as sp
+import time
+import Batch_Graph as bg
+from itertools import islice
+
+from torch.nn import Sequential as Seq, Linear as Lin, ReLU
+from torch_scatter import scatter_mean
+#from torch_geometric.nn import MetaLayer
+
+'''
+class EdgeModel(torch.nn.Module):
+    def __init__(self, dim_in, dim, dim_out):
+        super(EdgeModel, self).__init__()
+        self.edge_mlp = Seq(Lin(dim_in, dim), ReLU(), Lin(dim, dim_out))
+
+    def forward(self, src, dest, edge_attr, u, batch):
+        # source, target: [E, F_x], where E is the number of edges.
+        # edge_attr: [E, F_e]
+        # u: [B, F_u], where B is the number of graphs.
+        # batch: [E] with max entry B - 1.
+        out = torch.cat([src, dest, edge_attr], 1)
+        return self.edge_mlp(out)
+
+class NodeModel(torch.nn.Module):
+    def __init__(self, dim_in1, dim_in2, dim, dim_out):
+        super(NodeModel, self).__init__()
+        self.node_mlp_1 = Seq(Lin(dim_in1, dim), ReLU(), Lin(dim, dim))
+        self.node_mlp_2 = Seq(Lin(dim_in2, dim), ReLU(), Lin(dim, dim_out))
+
+    def forward(self, x, edge_index, edge_attr, u, batch):
+        # x: [N, F_x], where N is the number of nodes.
+        # edge_index: [2, E] with max entry N - 1.
+        # edge_attr: [E, F_e]
+        # u: [B, F_u]
+        # batch: [N] with max entry B - 1.
+        row, col = edge_index
+        out = torch.cat([x[row], edge_attr], dim=1)
+        out = self.node_mlp_1(out)
+        out = scatter_mean(out, col, dim=0, dim_size=x.size(0))
+        out = torch.cat([x, out], dim=1)
+        return self.node_mlp_2(out)
+
+class GlobalModel(torch.nn.Module):
+    def __init__(self, dim):
+        super(GlobalModel, self).__init__()
+        self.global_mlp = Seq(Lin(dim, dim), ReLU(), Lin(dim, dim))
+
+    def forward(self, x, edge_index, edge_attr, u, batch):
+        # x: [N, F_x], where N is the number of nodes.
+        # edge_index: [2, E] with max entry N - 1.
+        # edge_attr: [E, F_e]
+        # u: [B, F_u]
+        # batch: [N] with max entry B - 1.
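+        # (Editor's note, illustrative: scatter_mean(x, batch, dim=0) pools
+        # node rows per graph -- e.g. batch = [0, 0, 1] averages rows 0-1 into
+        # graph 0's row and leaves row 2 as graph 1's row -- giving one pooled
+        # row per graph to concatenate with the global state u.)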
+ out = torch.cat([u, scatter_mean(x, batch, dim=0)], dim=1) + return self.global_mlp(out) + + + +class Net(torch.nn.Module): + + def __init__(self, dim, K, lr, num_nodes): + + super(Net, self).__init__() + + + + self.m1 = MetaLayer(EdgeModel(5, dim, dim), NodeModel(dim+2,dim+2, dim, dim))#, GlobalModel(dim)) + self.m2 = MetaLayer(EdgeModel(3*dim, dim, dim), NodeModel(2*dim,2*dim, dim, dim)) + # self.m3 = MetaLayer(EdgeModel(3*dim, dim, dim), NodeModel(2*dim,2*dim, dim, dim)) + # self.m4 = MetaLayer(EdgeModel(3*dim, dim, dim), NodeModel(2*dim,2*dim, dim, dim)) + # self.m5 = MetaLayer(EdgeModel(3*dim, dim, dim), NodeModel(2*dim,2*dim, dim, dim)) + # self.m6 = MetaLayer(EdgeModel(3*dim, dim, dim), NodeModel(2*dim,2*dim, dim, dim))#, GlobalModel(dim)) + # self.m7 = MetaLayer(EdgeModel(3*dim, dim, dim), NodeModel(2*dim,2*dim, dim, dim)) + # self.m8 = MetaLayer(EdgeModel(3*dim, dim, dim), NodeModel(2*dim,2*dim, dim, dim)) + # self.m9 = MetaLayer(EdgeModel(3*dim, dim, dim), NodeModel(2*dim,2*dim, dim, dim)) + self.m10 = MetaLayer(EdgeModel(3*dim, dim, 1), NodeModel(1+dim,2*dim, dim, 1)) + + + self.optimizer = optim.Adam(self.parameters(), lr = lr) + self.loss = nn.MSELoss() + self.device = T.device('cpu') + self.to(self.device) + + def forward(self, D): + + x = D.x + edge_index = D.edge_index + edge_attr = D.edge_attr + + x, edge_attr,_ = self.m1(x, edge_index, edge_attr) + x = relu(x) + edge_attr = relu(edge_attr) + + x, edge_attr,_ = self.m2(x, edge_index, edge_attr) + x = F.elu(x) + edge_attr = F.elu(edge_attr) + # x, edge_attr,_ = self.m3(x, edge_index, edge_attr) + # x = relu(x) + # edge_attr = relu(edge_attr) + # x, edge_attr,_ = self.m4(x, edge_index, edge_attr) + # x = F.elu(x) + # edge_attr = F.elu(edge_attr) + # x, edge_attr,_ = self.m5(x, edge_index, edge_attr) + # x = relu(x) + # edge_attr = relu(edge_attr) + # x, edge_attr,_ = self.m6(x, edge_index, edge_attr) + # x = F.elu(x) + # edge_attr = F.elu(edge_attr) + # x, edge_attr,_ = self.m7(x, edge_index, edge_attr) + # x = relu(x) + # edge_attr = relu(edge_attr) + # x, edge_attr,_ = self.m8(x, edge_index, edge_attr) + # x = F.elu(x) + # edge_attr = F.elu(edge_attr) + # x, edge_attr,_ = self.m9(x, edge_index, edge_attr) + # x = relu(x) + # edge_attr = relu(edge_attr) + x, edge_attr,_ = self.m10(x, edge_index, edge_attr) + + return x + +from typing import Optional, Tuple + +import torch +from torch import Tensor + + +class MetaLayer(torch.nn.Module): + r"""A meta layer for building any kind of graph network, inspired by the + `"Relational Inductive Biases, Deep Learning, and Graph Networks" + `_ paper. + + A graph network takes a graph as input and returns an updated graph as + output (with same connectivity). + The input graph has node features :obj:`x`, edge features :obj:`edge_attr` + as well as global-level features :obj:`u`. + The output graph has the same structure, but updated features. + + Edge features, node features as well as global features are updated by + calling the modules :obj:`edge_model`, :obj:`node_model` and + :obj:`global_model`, respectively. + + To allow for batch-wise graph processing, all callable functions take an + additional argument :obj:`batch`, which determines the assignment of + edges or nodes to their specific graphs. + + Args: + edge_model (Module, optional): A callable which updates a graph's edge + features based on its source and target node features, its current + edge features and its global features. 
(default: :obj:`None`) + node_model (Module, optional): A callable which updates a graph's node + features based on its current node features, its graph + connectivity, its edge features and its global features. + (default: :obj:`None`) + global_model (Module, optional): A callable which updates a graph's + global features based on its node features, its graph connectivity, + its edge features and its current global features. + + .. code-block:: python + + from torch.nn import Sequential as Seq, Linear as Lin, ReLU + from torch_scatter import scatter_mean + from torch_geometric.nn import MetaLayer + + class EdgeModel(torch.nn.Module): + def __init__(self): + super(EdgeModel, self).__init__() + self.edge_mlp = Seq(Lin(..., ...), ReLU(), Lin(..., ...)) + + def forward(self, src, dest, edge_attr, u, batch): + # source, target: [E, F_x], where E is the number of edges. + # edge_attr: [E, F_e] + # u: [B, F_u], where B is the number of graphs. + # batch: [E] with max entry B - 1. + out = torch.cat([src, dest, edge_attr, u[batch]], 1) + return self.edge_mlp(out) + + class NodeModel(torch.nn.Module): + def __init__(self): + super(NodeModel, self).__init__() + self.node_mlp_1 = Seq(Lin(..., ...), ReLU(), Lin(..., ...)) + self.node_mlp_2 = Seq(Lin(..., ...), ReLU(), Lin(..., ...)) + + def forward(self, x, edge_index, edge_attr, u, batch): + # x: [N, F_x], where N is the number of nodes. + # edge_index: [2, E] with max entry N - 1. + # edge_attr: [E, F_e] + # u: [B, F_u] + # batch: [N] with max entry B - 1. + row, col = edge_index + out = torch.cat([x[row], edge_attr], dim=1) + out = self.node_mlp_1(out) + out = scatter_mean(out, col, dim=0, dim_size=x.size(0)) + out = torch.cat([x, out, u[batch]], dim=1) + return self.node_mlp_2(out) + + class GlobalModel(torch.nn.Module): + def __init__(self): + super(GlobalModel, self).__init__() + self.global_mlp = Seq(Lin(..., ...), ReLU(), Lin(..., ...)) + + def forward(self, x, edge_index, edge_attr, u, batch): + # x: [N, F_x], where N is the number of nodes. + # edge_index: [2, E] with max entry N - 1. + # edge_attr: [E, F_e] + # u: [B, F_u] + # batch: [N] with max entry B - 1. 
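+                # (Editor's note: MetaLayer applies the updates in the order
+                # edge -> node -> global, so this model already sees the node
+                # and edge features refreshed earlier in the same call.)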
+ out = torch.cat([u, scatter_mean(x, batch, dim=0)], dim=1) + return self.global_mlp(out) + + op = MetaLayer(EdgeModel(), NodeModel(), GlobalModel()) + x, edge_attr, u = op(x, edge_index, edge_attr, u, batch) + """ + def __init__(self, edge_model=None, node_model=None, global_model=None): + super(MetaLayer, self).__init__() + self.edge_model = edge_model + self.node_model = node_model + self.global_model = global_model + + + self.reset_parameters() + + def reset_parameters(self): + for item in [self.node_model, self.edge_model, self.global_model]: + if hasattr(item, 'reset_parameters'): + item.reset_parameters() + + + def forward( + self, x, edge_index, edge_attr, u: Optional[Tensor] = None, + batch: Optional[Tensor] = None) -> Tuple[Tensor, Tensor, Tensor]: + """""" + # x = D.x + # edge_index = D.edge_index + # edge_attr = D.edge_attr + + # print (x.shape) + # print (edge_attr.shape) + # print (edge_index.shape) + row = edge_index[0] + col = edge_index[1] + + if self.edge_model is not None: + edge_attr = self.edge_model(x[row], x[col], edge_attr, u, + batch if batch is None else batch[row]) + + if self.node_model is not None: + x = self.node_model(x, edge_index, edge_attr, u, batch) + + if self.global_model is not None: + u = self.global_model(x, edge_index, edge_attr, u, batch) + + return x, edge_attr, u + + + def __repr__(self): + return ('{}(\n' + ' edge_model={},\n' + ' node_model={},\n' + ' global_model={}\n' + ')').format(self.__class__.__name__, self.edge_model, + self.node_model, self.global_model) + + +# x, edge_attr, u = op(x, edge_index, edge_attr, u, batch) + +''' + +class Net(T.nn.Module): + + def __init__(self, dim, K, lr):#, name,chkpt_dir): + + super(Net, self).__init__() + + + self.conv1 = TAGConv(2, dim, K = K) + self.conv2 = TAGConv(dim, dim, K = K) + self.conv_A = TAGConv(dim, 1, K = K) + self.conv_V = TAGConv(dim, 1, K=K) + + self.optimizer = optim.Adam(self.parameters(), lr = lr) + self.loss = nn.MSELoss() + self.device = T.device('cpu') + self.to(self.device) + + + def forward(self, D, b_states = None): + + + x, edge_index, edge_attr = D.x, D.edge_index, D.edge_attr + # x = self.lin0(x) #comment for TAG conv + # edge_attr = self.linattr(edge_attr) #comment for TAG conv + + edge_attr = edge_attr.flatten() #uncomment for TAG conv + data = relu(self.conv1(x, edge_index, edge_attr)) + data = relu(self.conv2(data, edge_index, edge_attr)) + #edge_attr = self.linattr(D.edge_attr) + # data_A = 0.5*tanh(self.conv_A(data, edge_index, edge_attr))-0.5 + data_A = self.conv_A(data, edge_index, edge_attr) + #data_A = self.agg_A(data_A) #comment for TAG conv + + data_V = self.conv_V(data, edge_index, edge_attr) + + #data_V = self.agg_V(data_V) #comment for TAG conv + + if b_states != None: + list_idxs = [torch.nonzero(b_states.batch == i).flatten().tolist() for i in range(len(b_states.ptr)-1)] + + V_s = torch.ones((b_states.x.shape[0],1)) + max_A_s = torch.ones((b_states.x.shape[0],1)) + + for i in range(len(b_states.ptr)-1): + + + V_s[list_idxs[i]] = data_V[list_idxs[i]].mean() + max_A_s[list_idxs[i]] = data_A[list_idxs[i]].max() + + data = V_s + data_A - max_A_s + + else: + data_V = data_V.mean() + data = data_V+data_A - data_A.max() + + return data, data_A + + diff --git a/Lloyd_Unstructured.py b/Lloyd_Unstructured.py new file mode 100755 index 0000000..fd054aa --- /dev/null +++ b/Lloyd_Unstructured.py @@ -0,0 +1,263 @@ +import numpy as np +import torch as T +import matplotlib.pyplot as plt +from torch.autograd import Variable +from torch_geometric.data import DataLoader +from 
torch_geometric.utils import normalized_cut +from torch_geometric.nn import (NNConv, graclus, max_pool, max_pool_x, global_mean_pool, + BatchNorm, InstanceNorm) +import random +import scipy as sp +from pyamg.gallery.diffusion import diffusion_stencil_2d +from pyamg.gallery import stencil_grid +from torch_geometric.data import Data +from pyamg.aggregation import lloyd_aggregation +from pyamg.gallery import poisson +from scipy.sparse import coo_matrix +import time +from torch_geometric.data import Data, DataLoader +from MG_Agent import Agent +from scipy.spatial import ConvexHull, convex_hull_plot_2d +import pygmsh +from Unstructured import MyMesh, grid, rand_Amesh_gen, rand_grid_gen, plot_cycle +import fem +from torch.utils.tensorboard import SummaryWriter +import sys +from Scott_greedy import greedy_coarsening +from scipy.spatial import Delaunay +import copy +from Cycle import make_coarse_grid + +def Coloring(graph, regions, list_neighbours): + + all_colors = [0.0,1.0,2.0,3.0,4.0,5.0,6.0,7.0,8.0,9.0,10.0] + colors = [[] for i in range(graph.x.shape[0])] + reg_color = [[] for i in range(len(regions))] + for i in range(len(regions)): + + reg = regions[i] + forbid = [] + + + for node in reg: + for neigh in list_neighbours[node]: + if len(colors[neigh]) != 0: + forbid.append(colors[neigh][0]) + forbid = list(set(forbid)) + #print (reg_list4nodes[neigh]) + + #print (forbid) + available = list(set(all_colors) - set(forbid)) + + for node in reg: + + colors[node].append(min(available)) + + reg_color[i].append(min(available)) + + return colors, reg_color + +def hop_neigh(K, region, list_neighbours): + + set_all = set([]) + + set_all = set_all.union(set(region)) + prev_set = copy.deepcopy(set_all) + this_hop = region + + for i in range(K): + + for node in this_hop: + + set_all = set_all.union(set(list_neighbours[node])) + + this_hop = list(set_all.difference(prev_set)) + prev_set = copy.deepcopy(set_all) + + return list(set_all) + +def Lloyd(given_grid, Test_greedy = True): + if given_grid == None: + grid_ = rand_grid_gen(None) + else: + grid_ = copy.deepcopy(given_grid) + grid_gr = copy.deepcopy(grid_) + AA = grid_.A + AA = sp.sparse.csr_matrix(AA) + num_nodes = grid_.num_nodes + num_C = int(num_nodes/30) + Ratio = 0.05#num_C/(num_nodes) + K = 7 + + Agg = lloyd_aggregation(AA,ratio=Ratio,maxiter=1000)[0] + + AA = sp.sparse.csr_matrix.toarray(Agg) + AA = T.from_numpy(abs(AA)) + num_C = AA.shape[1] + + + regions = [] + hop_regs = [] + for i in range(num_C): + + regions.append(T.nonzero(AA[:,i]).flatten().tolist()) + + + + list_neighbours = [[] for i in range(grid_.x.shape[0])] + + for i in range(grid_.edge_index.shape[1]): + + list_neighbours[grid_.edge_index[0,i].clone().tolist()].append(grid_.edge_index[1\ + ,i].clone().tolist()) + + + for i in range(len(regions)): + + hop_regs.append(hop_neigh(K, regions[i], list_neighbours)) + + + #colors, reg_color = Coloring(grid_, regions, list_neighbours) + mymsh = grid_.mesh + points = mymsh.V + #tri = Delaunay(points) + #plt.triplot(points[:,0], points[:,1], tri.simplices) + + color_code = {0.0:'b.', 1.0:'g.', 2.0:'r.', 3.0:'k.', 4.0: 'y.', 5.0:'c.', 6.0:'m.'} + + #for i in range(len(regions)): + + #plt.plot(points[regions[i],0], points[regions[i],1], color_code.get(reg_color[i][0])) + + + agent = Agent(dim = 32, K = 12, gamma = 1, epsilon = 1, \ + lr= 0.001, mem_size = 5000, batch_size = 64, net_type = 'TAGConv', \ + eps_min = 0.01 , eps_dec = 1.333/5000, replace=10) + + + + agent.q_eval.load_state_dict(T.load('Models/Dueling_batch_train_final.pth')) + 
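+    # (Editor's note: setting epsilon to 0 disables the epsilon-greedy
+    # exploration in Agent.choose_action, so the rollout below always picks
+    # the violating node the network scores highest.)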
agent.epsilon = 0 + ''' + num_Qhull_nodes = random.randint(15, 45) + points = np.random.rand(num_Qhull_nodes, 2) # 30 random points in 2-D + hull = ConvexHull(points) + + msh_sz = 0.1*random.random()+0.2 + + with pygmsh.geo.Geometry() as geom: + geom.add_polygon( + + hull.points[hull.vertices.tolist()].tolist() + , + mesh_size=msh_sz, + ) + mesh = geom.generate_mesh() + ''' + #mesh = T.load("mesh.pth") + done = False + ''' + grid_ = rand_grid_gen(mesh) + grid_gr = rand_grid_gen(mesh) + ''' + scores = np.zeros(len(regions)) + + for i in range(len(regions)): + + scores[i] = len(regions[i]) + t1 = time.time() + + while not done: + + max_idx = np.argmax(scores) + hreg = hop_regs[max_idx] + reg = regions[max_idx] + observation = grid_.subgrid(hreg) + is_viol = grid_.viol_nodes()[1] + hops = list(set(hreg).difference(set(reg))) + is_viol[hops] = 0 + sub_viols = T.nonzero(is_viol[hreg]).flatten().tolist() + if sub_viols == []: + scores[max_idx] = -1 + + else: + act = agent.choose_action(observation, sub_viols) + action = hreg[act] + # print ("ACTION", action) + # print ("VIOLS", grid_.viol_nodes()[0]) + # print (agent.q_eval.forward(grid_.data)) + grid_.coarsen_node(action) + + new_is_viol = grid_.viol_nodes()[1] + scores[max_idx] = T.sum(new_is_viol[reg]).item() + done = True if grid_.viol_nodes()[2] == 0 else False + + print ("RL result", sum(grid_.active)/grid_.num_nodes) + #grid_.plot() + t2 = time.time() + print ("#nodes",grid_.num_nodes, "TIME", t2-t1) + + + if Test_greedy: + + grid_gr = greedy_coarsening(grid_gr) + + return grid_, grid_gr + + + +def Multilevel_MG (given_grid, num_cycle, Plot=False, Test_greedy=False): + + rl_list = [] + rl_f_frac = [] + gr_list = [] + gr_f_frac = [] + + if given_grid == None: + given_grid = rand_grid_gen(None) + + crl = copy.deepcopy(given_grid) + + for i in range(num_cycle): + rl,_ = Lloyd(crl) + + rl_list.append(copy.deepcopy(rl)) + rl_f_frac.append(sum(rl_list[i].active)/rl_list[i].num_nodes) + crl = make_coarse_grid(rl) + + crl = copy.deepcopy(given_grid) + + if Test_greedy: + + for i in range(num_cycle): + + _,gr = Lloyd(crl) + gr_list.append(copy.deepcopy(gr)) + gr_f_frac.append(sum(gr_list[i].active)/gr_list[i].num_nodes) + crl = make_coarse_grid(gr) + + + if Plot: + + plot_cycle(rl_list) + title = 'RL '+ str(num_cycle-1)+' level coarsening, black edges = before coarsening,\n green edges = after cycle 1, (if 2 level coarsening, magenta edges = \n after cycle 2). F-fractions of cycles = ' + for i in range(num_cycle): + title += str(np.round(rl_f_frac[i],4)) + ', ' + + plt.title(title) + plt.show() + + if Test_greedy: + plot_cycle(gr_list) + title = 'Greedy '+ str(num_cycle-1)+' level coarsening, black edges = before coarsening,\n green edges = after cycle 1, (if 2 level coarsening, magenta edges = \n after cycle 2). 
F-fractions of cycles = ' + for i in range(num_cycle): + title += str(np.round(gr_f_frac[i],4)) + ', ' + + plt.title(title) + plt.show() + + return rl_list, gr_list + + + +#rl_list, gr_list = Multilevel_MG (None, 2, Plot=True,Test_greedy=True) diff --git a/MG_Agent.py b/MG_Agent.py new file mode 100644 index 0000000..5289cca --- /dev/null +++ b/MG_Agent.py @@ -0,0 +1,324 @@ +import numpy as np +import torch as T +import os +import torch.optim as optim +from torch.nn import ReLU, GRU, Sequential, Linear +import torch.nn.functional as F +import torch.nn as nn +from torch import sigmoid, softmax, relu, tanh +import matplotlib.pyplot as plt +from torch.autograd import Variable +from collections import namedtuple, deque +from torch_geometric.data import DataLoader +from torch_geometric.utils import normalized_cut +from torch_geometric.nn import (NNConv, GATConv, graclus, max_pool, max_pool_x, + global_mean_pool, BatchNorm, InstanceNorm, GraphConv, + GCNConv, TAGConv, SGConv, LEConv, TransformerConv, SplineConv, + GMMConv, GatedGraphConv, ARMAConv, GENConv, DeepGCNLayer, + LayerNorm) +import random +import scipy as sp +import time +import Batch_Graph as bg +from itertools import islice +import time +#from NeuralNet import Net +from DuelingNet import Net as Net_TAGConv +from DuelingNet2 import Net as Net_MPNN +from FCNet import Net as Net_FC + + +class ReplayBuffer(): + + def __init__(self, max_size): + + self.mem_size = max_size + self.replay_buffer = deque(maxlen=max_size) + + def store(self, state, list_viol, num_viol, action, reward, next_state\ + , next_list_viol, next_num_viol, node, mask): + + experience = namedtuple('Experience', ['state', 'list_viol', 'num_viol',\ + 'action', 'reward','next_state', 'next_list_viol', 'next_num_viol',\ + 'node','mask']) + + e = experience(state, list_viol, num_viol, action, reward, next_state\ + , next_list_viol, next_num_viol, node, mask) + + self.replay_buffer.append(e) + + def sample(self, batch_size): + + experiences = random.sample(self.replay_buffer, k=batch_size) + + states = [e.state for e in experiences if e is not None] + list_viols = [e.list_viol for e in experiences if e is not None] + num_viols = [e.num_viol for e in experiences if e is not None] + actions = [e.action for e in experiences if e is not None] + rewards = [e.reward for e in experiences if e is not None] + next_states = [e.next_state for e in experiences if e is not None] + next_list_viols = [e.next_list_viol for e in experiences if e is not None] + next_num_viols = [e.next_num_viol for e in experiences if e is not None] + nodes = [e.node for e in experiences if e is not None] + masks = [e.mask for e in experiences if e is not None] + + return (states, list_viols, num_viols, actions, rewards, \ + next_states, next_list_viols, next_num_viols, nodes, masks) + + +class Agent(): + + def __init__(self, dim, K, gamma, epsilon, lr, mem_size, batch_size, net_type, eps_min=0.01, + eps_dec=1e-4, replace=10): #, chkpt_dir='tmp/unstructured_gnn'): + + #self.num_nodes = num_nodes + self.gamma = gamma + self.epsilon = epsilon + self.lr = lr + self.dim = dim + self.loss = T.tensor([0]) + self.K = K + self.batch_size = batch_size + self.eps_min = eps_min + self.eps_dec = eps_dec + self.replace_targe_cnt = replace + #self.chkpt_dir = chkpt_dir + self.memory = ReplayBuffer(mem_size) + self.learn_step_counter = 0 + self.net_type = net_type + + if self.net_type == 'FC': + self.q_eval = Net_FC(self.dim, self.K, self.lr)#, self.num_nodes) #, name='unstructured_q_eval', + #chkpt_dir=self.chkpt_dir) + + 
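+            # (Editor's note: as in standard DQN, each branch builds two
+            # copies of the network -- q_eval is trained directly, while
+            # q_targ is only synced to it every `replace` learn() steps via
+            # replace_target_network(), stabilising the bootstrap targets.)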
self.q_targ = Net_FC(self.dim, self.K, self.lr)#, self.num_nodes) #, name='unstructured_q_targ', + #chkpt_dir=self.chkpt_dir) + + if self.net_type == 'MPNN': + self.q_eval = Net_MPNN(self.dim, self.K, self.lr)#, self.num_nodes) #, name='unstructured_q_eval', + #chkpt_dir=self.chkpt_dir) + + self.q_targ = Net_MPNN(self.dim, self.K, self.lr)#, self.num_nodes) #, name='unstructured_q_targ', + #chkpt_dir=self.chkpt_dir) + + if self.net_type == 'TAGConv': + self.q_eval = Net_TAGConv(self.dim, self.K, self.lr)#, self.num_nodes) #, name='unstructured_q_eval', + #chkpt_dir=self.chkpt_dir) + + self.q_targ = Net_TAGConv(self.dim, self.K, self.lr)#, self.num_nodes) #, name='unstructured_q_targ', + #chkpt_dir=self.chkpt_dir) + + + def choose_action(self, state, viol_nodes): + + + + if np.random.random()> self.epsilon: + + with T.no_grad(): + + advantage = self.q_eval.forward(state)[0] + action = viol_nodes[T.argmax(advantage[viol_nodes]).item()] + + else: + + action = np.random.choice(viol_nodes) + + return action + + def store_transition(self, state, list_viols, num_viol, \ + action, reward, next_state, next_list_viol, next_num_viol, node, mask): + + self.memory.store(state, list_viols, num_viol, \ + action, reward, next_state, next_list_viol, next_num_viol, node, mask) + + def replace_target_network(self): + + if self.learn_step_counter % self.replace_targe_cnt == 0: + self.q_targ.load_state_dict(self.q_eval.state_dict()) + + + def decrement_epsilon(self): + + self.epsilon = self.epsilon - self.eps_dec\ + if self.epsilon>self.eps_min else self.eps_min + + def save_models(self): + self.q_eval.save_checkpoint() + self.q_targ.save_checkpoint() + + def load_models(self): + self.q_eval.load_checkpoint() + self.q_targ.load_checkpoint() + + def learn(self): + + if len(self.memory.replay_buffer) < self.batch_size: + return + + # t1 = time.time() + self.q_eval.optimizer.zero_grad() + + + self.replace_target_network() + + states, list_viols, num_viols, actions, rewards, next_states,\ + next_list_viols, next_num_viols, nodes, masks = self.memory.sample(self.batch_size) + + loss = 0 + ''' + for i in range(self.batch_size): + + + Q_prime = self.q_targ.forward(next_states[i])[0].detach() + + Qmodd = self.q_eval.forward(next_states[i])[0].detach()#.squeeze(0) + + if len(next_list_viols[i]) != 0: + + argmax = np.array(next_list_viols[i][Qmodd[next_list_viols[i]].argmax(0)]) + + else: + + argmax = 0 + + argmax = T.tensor(argmax).unsqueeze(0) + + # print ("argmax", argmax) + Qprime = Q_prime.gather(0,T.tensor(argmax).unsqueeze(1).long()) + Qprime = Qprime.flatten() + + y = T.tensor(rewards[i]) + self.gamma*Qprime*T.tensor(masks[i]) + + Q1 = self.q_eval.forward(states[i])[0]#.squeeze(0) + Q = Q1.gather(0, T.tensor(actions[i]).unsqueeze(0).unsqueeze(0).long()) + Q = Q.squeeze(1) + + + loss += self.q_eval.loss(Q,y) + + # if len(next_list_viols[i]) == 0: + # print (masks[i]) + # print (loss) + # print (Q) + # print (y) + # print ("*********") + + + self.loss = loss + loss.backward() + self.q_eval.optimizer.step() + + + self.q_eval.optimizer.zero_grad() + + #self.decrement_epsilon() + + + + # if loss>5: + # print (list_viols[i]) + # print (next_list_viols[i]) + # print (actions[i]) + # print ("*********") + + + + ''' + b_states = bg.Batch.from_data_list(states) + b_next_states = bg.Batch.from_data_list(next_states) + + # t2 = time.time() + + Q_prime = self.q_targ.forward(b_next_states, b_next_states)[0].detach() + + # t3 = time.time() + + Qmodd = self.q_eval.forward(b_next_states, b_next_states)[0].detach()#.squeeze(0) + 
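+        # (Editor's note: this is the Double-DQN target -- the online network
+        # (Qmodd) picks the argmax violating node of each next state, the
+        # target network (Q_prime) evaluates that choice, and the loop below
+        # assembles y = r + gamma * Q_targ(s', argmax_a Q_eval(s', a)) * mask.)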
+ # t4 = time.time() + + Qmodd = Qmodd.flatten().tolist() + + Inputt = iter(Qmodd) + splited_Qmodd = [list(islice(Inputt, elem)) for elem in nodes] + + Qprime = T.zeros(self.batch_size) + Q = T.zeros(self.batch_size) + argmax = [] + + # t5 = time.time() + + Q1 = self.q_eval.forward(b_states, b_states)[0]#.squeeze(0) + + # t6 = time.time() + idx_in_batch = 0 + for i in range(self.batch_size): + + if i>0: + idx_in_batch += nodes[i-1] + actions = (np.array(actions) + nodes[i-1]).tolist() + + if (np.array(splited_Qmodd[i])[next_list_viols[i]]).tolist() != []: + + argmax = np.array(next_list_viols[i])[np.argmax(\ + np.array(splited_Qmodd[i])[next_list_viols[i]])] + + argmax = argmax+idx_in_batch + + Qprime[i] = Q_prime.gather(0, T.tensor(argmax).unsqueeze(0).unsqueeze(0).long()) + + else: + + argmax = 0 + + Q[i] = Q1.gather(0, T.tensor(actions[i]).unsqueeze(0).unsqueeze(0).long()) + #print (Q_prime.shape) + + + Qprime.flatten() + + + y = T.tensor(rewards) + self.gamma*Qprime*T.tensor(masks) + + + loss = self.q_eval.loss(Q,y) + # if loss>100: + + # print (Q) + # print (y) + + # kiri = T.argmin(Q) + # print(states[kiri].x) + # print(states[kiri].edge_attr) + + self.loss = loss + + # t7 = time.time() + + loss.backward() + + # t8 = time.time() + + self.q_eval.optimizer.step() + + # t9 = time.time() + + self.learn_step_counter += 1 + + + + # print ("T21", t2-t1) + # print ("T32", t3-t2) + # print ("T43", t4-t3) + # print ("T54", t5-t4) + # print ("T65", t6-t5) + # print ("T76", t7-t6) + # print ("T87", t8-t7) + # print ("T98", t9-t8) + + + + + + \ No newline at end of file diff --git a/Optim.py b/Optim.py new file mode 100755 index 0000000..7ad746e --- /dev/null +++ b/Optim.py @@ -0,0 +1,738 @@ +from Unstructured import MyMesh, rand_Amesh_gen, rand_grid_gen, grid +from pyamg.gallery.diffusion import diffusion_stencil_2d +from pyamg.gallery import stencil_grid +from numpy import sin, cos, pi +import matplotlib.pyplot as plt +from scipy.spatial import Delaunay +import scipy +import fem +import networkx as nx +import numpy as np +import scipy as sp +import pygmsh +import time +from scipy.spatial import ConvexHull, convex_hull_plot_2d +import random +import torch as T +from torch_geometric.data import Data +import Batch_Graph as bg +import copy +import networkx as nx +from networkx.drawing.nx_pylab import draw_networkx +from pyamg.gallery.diffusion import diffusion_stencil_2d +from pyamg.gallery import stencil_grid +import torch_geometric +from torch_geometric.data import Data +from pyamg.gallery import poisson +import matplotlib as mpl +import os +from MG_Agent import Agent +from scipy.sparse import csr_matrix, coo_matrix, isspmatrix_csr, isspmatrix_csc +from pyamg import amg_core +from pyamg.graph import lloyd_cluster +from Scott_greedy import greedy_coarsening + +import sys + +# list(list(G.edges(data=True))[1][-1].values()) + + +def from_scipy_sparse_matrix(A): + r"""Converts a scipy sparse matrix to edge indices and edge attributes. + + Args: + A (scipy.sparse): A sparse matrix. + """ + A = A.tocoo() + row = T.from_numpy(A.row).to(T.long) + col = T.from_numpy(A.col).to(T.long) + edge_index = T.stack([row, col], dim=0) + edge_weight = T.from_numpy(A.data) + return edge_index, edge_weight + + +def from_networkx(G): + r"""Converts a :obj:`networkx.Graph` or :obj:`networkx.DiGraph` to a + :class:`torch_geometric.data.Data` instance. + + Args: + G (networkx.Graph or networkx.DiGraph): A networkx graph. 
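+
+    Example (editor's illustrative sketch, for an attribute-free graph)::
+
+        >>> import networkx as nx
+        >>> data = from_networkx(nx.path_graph(3))
+        >>> data.edge_index.shape  # 2 undirected edges become 4 directed ones
+        torch.Size([2, 4])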
+ """ + + G = nx.convert_node_labels_to_integers(G) + G = G.to_directed() if not nx.is_directed(G) else G + edge_index = T.LongTensor(list(G.edges)).t().contiguous() + + data = {} + + for i, (_, feat_dict) in enumerate(G.nodes(data=True)): + for key, value in feat_dict.items(): + data[str(key)] = [value] if i == 0 else data[str(key)] + [value] + + for i, (_, _, feat_dict) in enumerate(G.edges(data=True)): + for key, value in feat_dict.items(): + data[str(key)] = [value] if i == 0 else data[str(key)] + [value] + + for key, item in data.items(): + try: + data[key] = T.tensor(item) + except ValueError: + pass + + data['edge_index'] = edge_index.view(2, -1) + data = torch_geometric.data.Data.from_dict(data) + data.num_nodes = G.number_of_nodes() + + return data + +''' + +class grid: + + def __init__(self, A, fine_nodes, coarse_nodes, mesh, Theta): + + self.A = A.tocsr() + self.fine_nodes = fine_nodes + self.coarse_nodes = coarse_nodes + self.num_nodes = mesh.nv + #self.edges = set_edge + self.mesh = mesh + active = np.ones(self.num_nodes) + active[self.coarse_nodes] = 0 + self.active = active + self.Theta = Theta + + self.G = nx.from_scipy_sparse_matrix(self.A, edge_attribute='weight', parallel_edges=False) + + + self.x = T.cat((T.from_numpy(self.active).unsqueeze(1), \ + T.from_numpy(self.active).unsqueeze(1)),dim=1).float() + + + edge_index, edge_attr = from_scipy_sparse_matrix(abs(self.A)) + + list_neighbours1 = [] + list_neighbours2 = [] + for node in range(self.num_nodes): + a = list(self.G.edges(node,data = True)) + l1 = [] + l2 = [] + for i in range(len(a)): + l1.append(a[i][1]) + l2.append(abs(np.array(list(a[i][-1].values())))[0]) + + list_neighbours1.append(l1) + list_neighbours2.append(l2) + + self.list_neighbours = [list_neighbours1, list_neighbours2] + + self.data = Data(x=self.x, edge_index=edge_index, edge_attr= edge_attr.float()) + self.violating_nodes = [i for i in range(self.num_nodes)] #self.viol_nodes()[0] + self.is_violating = np.array([1 for i in range(self.num_nodes)]) #self.viol_nodes()[1] + + + def subgrid(self, node_list): + + sub_x = self.x[node_list] + sub_data = from_networkx(self.G.subgraph(node_list)) + sub_data = Data(x=sub_x, edge_index=sub_data.edge_index, edge_attr= abs(sub_data.weight.float())) + + return sub_data + + + def node_hop_neigh(self, node, K): + + return list(nx.single_source_shortest_path(self.G, node, cutoff=K).keys()) + + + def is_viol(self, node): + + if self.active[node] == 0: + return False + + else: + + neigh_list = self.list_neighbours[0][node]#list(self.G.neighbors(node)) + actives = self.active[neigh_list] + aij = self.list_neighbours[1][node] + # aij = np.array([abs(np.array(list(self.G.get_edge_data(node,neigh).values())[0])) \ + # for neigh in neigh_list]) + aij = aij*actives + aij = aij.sum() + if abs(np.array(list(self.G.get_edge_data(node,node).values())[0]))< self.Theta*aij: + + return True + + else: + + return False + + + def viol_nodes(self): + + violatings = [] + isviol = [] + + for node in range(self.num_nodes): + + if self.active[node]!=0: + + neigh_list = self.list_neighbours[0][node] + + #neigh_list = list(self.G.neighbors(node)) + actives = self.active[neigh_list] + # aij = np.array([abs(np.array(list(self.G.get_edge_data(node,neigh).values())[0])) \ + # for neigh in neigh_list]) + aij = self.list_neighbours[1][node] + aij = aij*actives + aij = aij.sum() + + if abs(np.array(list(self.G.get_edge_data(node,node).values())[0]))< self.Theta*aij: + + isviol.append(1) + violatings.append(node) + + else: + + isviol.append(0) + + 
else: + + isviol.append(0) + + num_viol = len(violatings) + + return violatings, isviol, num_viol + + + def coarsen_node(self, node_a): + + #tkir1 = time.time() + newly_removed = [] + + #self.fine_nodes.remove(node_a) + self.coarse_nodes.append(node_a) + self.active[node_a] = 0 + #self.violating_nodes.remove(node_a) + self.is_violating[node_a] = 0 + newly_removed.append(node_a) + + for neigh in self.list_neighbours[0][node_a]:#self.G.neighbors(node_a): + if self.is_viol(neigh) == False and self.is_violating[neigh] == 1: + #self.violating_nodes.remove(neigh) + self.is_violating[neigh] = 0 + newly_removed.append(neigh) + + + + self.data.x[node_a, 0] = 0 + self.data.x[newly_removed, 1] = 0 + + return newly_removed + + def uncoarsen(self, node_a): + + self.fine_nodes.append(node_a) + #self.coarse_nodes.remove(node_a) + self.active[node_a] = 1 + #self.violating_nodes.remove(node_a) + #self.is_violating[node_a] = 0 + newly_added = [] + + if self.is_viol(node_a) == True and self.is_violating[node_a] == 0: + self.is_violating[node_a] = 1 + newly_added.append(node_a) + + + for neigh in self.list_neighbours[0][node_a]:#self.G.neighbors(node_a): + if self.is_viol(neigh) == True and self.is_violating[neigh] == 0: + + self.is_violating[neigh] = 1 + newly_added.append(neigh) + + + self.data.x[node_a, 0] = 1 + self.data.x[newly_added, 1] = 1 + + return newly_added + + def plot(self): + + G = nx.Graph() + mymsh = self.mesh + + points = mymsh.N + edges = mymsh.Edges + + pos_dict = {} + for i in range(mymsh.nv): + pos_dict[i] = [mymsh.X[i], mymsh.Y[i]] + + G.add_nodes_from(points) + G.add_edges_from(edges) + colors = [i for i in range(mymsh.nv)] + + for i in self.fine_nodes: + colors[i] = 'b' + for i in self.coarse_nodes: + colors[i] = 'r' + for i in self.viol_nodes()[0]: + colors[i] = 'g' + + draw_networkx(G, pos=pos_dict, with_labels=False, node_size=12, \ + node_color = colors, node_shape = 'o') +''' + + +def structured(n_row, n_col, Theta): + + num_nodes = int(n_row*n_col) + + X = np.array([[i/(n_col*n_row) for i in range(n_col)] for j in range(n_row)]).flatten() + Y = np.array([[j/(n_row*n_col) for i in range(n_col)] for j in range(n_row)]).flatten() + E = [] + V = [] + nv = num_nodes + N = [i for i in range(num_nodes)] + + epsilon = 1 + theta = 1 #param of A matrix + + sten = diffusion_stencil_2d(epsilon=epsilon,theta=theta,type='FD') + AA = stencil_grid(sten, (n_row, n_col), dtype=float, format='csr') + + nz_row = [] + nz_col = [] + t1 = time.time() + + for i in range(n_row): + for j in range(n_col): + + if i!=n_row-1: + if j!=n_col-1: + + nz_row.append(i*n_col+j) + nz_row.append(i*n_col+j) + nz_col.append(i*n_col+j+1) + nz_col.append(i*n_col+j+n_col) + else: + nz_row.append(i*n_col+j) + nz_col.append(i*n_col+j+n_col) + + if i == n_row-1: + if j!=n_col-1: + + nz_row.append(i*n_col+j) + nz_col.append(i*n_col+j+1) + + + nz_row = np.array(nz_row) + nz_col = np.array(nz_col) + + + # print ("t21", t2-t1) + e = np.concatenate((np.expand_dims(nz_row,axis=1), np.expand_dims(nz_col, axis=1)), axis=1) + Edges = list(tuple(map(tuple, e))) + num_edges = len(Edges) + g = rand_grid_gen(None) + + mesh = copy.deepcopy(g.mesh) + + mesh.X = X + mesh.Y = Y + mesh.E = E + mesh.V = V + mesh.nv = nv + mesh.ne = [] + mesh.N = N + mesh.Edges = Edges + mesh.num_edges = num_edges + + fine_nodes = [i for i in range(num_nodes)] + + grid_ = grid(AA,fine_nodes,[], mesh, Theta) + + + # print ("t21", t2-t1) + # print ("t32", t3-t2) + # print ("t43", t4-t3) + return grid_ + +def lloyd_aggregation(C, ratio=0.03, distance='unit', 
maxiter=10):
+    """Aggregate nodes using Lloyd Clustering.
+
+    Parameters
+    ----------
+    C : csr_matrix
+        strength of connection matrix
+    ratio : scalar
+        Fraction of the nodes which will be seeds.
+    distance : ['unit','abs','inv','same','min']
+        Distance assigned to each edge of the graph G used in Lloyd clustering
+
+        For each nonzero value C[i,j]:
+
+        =======  ===========================
+        'unit'   G[i,j] = 1
+        'abs'    G[i,j] = abs(C[i,j])
+        'inv'    G[i,j] = 1.0/abs(C[i,j])
+        'same'   G[i,j] = C[i,j]
+        'min'    G[i,j] = C[i,j] - min(C)
+        =======  ===========================
+
+    maxiter : int
+        Maximum number of iterations to perform
+
+    Returns
+    -------
+    AggOp : csr_matrix
+        aggregation operator which determines the sparsity pattern
+        of the tentative prolongator
+    seeds : array
+        array of Cpts, i.e., Cpts[i] = root node of aggregate i
+    col : array
+        cluster (aggregate) index assigned to each node
+
+    See Also
+    --------
+    amg_core.standard_aggregation
+
+    Examples
+    --------
+    >>> from scipy.sparse import csr_matrix
+    >>> from pyamg.gallery import poisson
+    >>> from pyamg.aggregation.aggregate import lloyd_aggregation
+    >>> A = poisson((4,), format='csr')   # 1D mesh with 4 vertices
+    >>> A.todense()
+    matrix([[ 2., -1.,  0.,  0.],
+            [-1.,  2., -1.,  0.],
+            [ 0., -1.,  2., -1.],
+            [ 0.,  0., -1.,  2.]])
+    >>> lloyd_aggregation(A)[0].todense() # one aggregate
+    matrix([[1],
+            [1],
+            [1],
+            [1]], dtype=int8)
+    >>> # more seeding for two aggregates
+    >>> Agg = lloyd_aggregation(A,ratio=0.5)[0].todense()
+
+    """
+    if ratio <= 0 or ratio > 1:
+        raise ValueError('ratio must be > 0.0 and <= 1.0')
+
+    if not (isspmatrix_csr(C) or isspmatrix_csc(C)):
+        raise TypeError('expected csr_matrix or csc_matrix')
+
+    # Compare strings by value with `==`; the original `is` comparisons relied
+    # on CPython string interning and are not reliable.
+    if distance == 'unit':
+        data = np.ones_like(C.data).astype(float)
+    elif distance == 'abs':
+        data = abs(C.data)
+    elif distance == 'inv':
+        data = 1.0/abs(C.data)
+    elif distance == 'same':
+        data = C.data
+    elif distance == 'min':
+        data = C.data - C.data.min()
+    else:
+        raise ValueError('unrecognized value distance=%s' % distance)
+
+    if C.dtype == complex:
+        data = np.real(data)
+
+    assert(data.min() >= 0)
+
+    G = C.__class__((data, C.indices, C.indptr), shape=C.shape)
+
+    num_seeds = int(min(max(ratio * G.shape[0], 1), G.shape[0]))
+
+    distances, clusters, seeds = lloyd_cluster(G, num_seeds, maxiter=maxiter)
+
+    row = (clusters >= 0).nonzero()[0]
+    col = clusters[row]
+    data = np.ones(len(row), dtype='int8')
+    AggOp = coo_matrix((data, (row, col)),
+                       shape=(G.shape[0], num_seeds)).tocsr()
+
+    return AggOp, seeds, col
+
+
+# t1 = time.time()
+# g = structured(60, 60, 0.56)
+# t2 = time.time()
+# dat = from_scipy_sparse_matrix(g.A)
+# t3 = time.time()
+# print (t2-t1)
+# print (t3-t2)
+
+
+sz_list = [100*(i+1) for i in range(1)]
+K = 4
+agent = Agent(dim = 32, K = K, gamma = 1, epsilon = 1, \
+              lr= 0.001, mem_size = 5000, net_type = 'TAGConv', batch_size = 64, \
+              eps_min = 0.01 , eps_dec = 1.333/5000, replace=10)
+
+agent.q_eval.load_state_dict(T.load('Models/MPNN/Dueling_MPNN900.pth'))
+#agent.q_eval.load_state_dict(T.load('Models/Dueling_batch_train_final.pth'))
+agent.epsilon = 0
+list_size = []
+list_time = []
+
+
+def Post_processing(num_iter, agent, grid_, K):
+
+    for _ in range(num_iter):
+
+        ffrac = sum(grid_.active)/grid_.num_nodes
+        copy_grid = copy.deepcopy(grid_)
+        center = np.random.randint(0,grid_.num_nodes)
+
+        region2 = grid_.node_hop_neigh(center, 2*K)
+        region = grid_.node_hop_neigh(center, K)
+
+        indices = []
+        newly_added = []
+        for node in region:
+
+            news = grid_.uncoarsen(node)
+            newly_added.append(news)
+            indices.append(region2.index(node))
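+        # (Editor's note: each pass un-coarsens a K-hop ball around a random
+        # center, then lets the agent re-coarsen inside the 2K-hop subgrid
+        # until no violating nodes remain; the `if ffrac > ...` rollback below
+        # keeps the change only when the F-fraction does not get worse.)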
+        done = False
+
+        while not done:
+
+            data = grid_.subgrid(region2)
+            Q, advantage = agent.q_eval.forward(data)
+
+            viols_idx = grid_.is_violating[region2].nonzero()[0].tolist()
+            viols = np.array(region2)[viols_idx].tolist()
+
+            if len(viols_idx) != 0:
+
+                node_max = viols[T.argmax(advantage[viols_idx])]
+
+                newly_ = grid_.coarsen_node(node_max)
+
+            done = True if len(viols_idx) == 0 else False
+
+        if ffrac > sum(grid_.active)/grid_.num_nodes:
+
+            grid_ = copy_grid
+
+    grid_.fine_nodes = grid_.active.nonzero()[0].tolist()#list(set(grid_.fine_nodes)-set(maxes))
+    # Coarse nodes are the *inactive* ones; `active == 1` would just duplicate
+    # the fine set, so the mask must be `active == 0`.
+    grid_.coarse_nodes = np.nonzero(grid_.active == 0)[0].tolist()
+    grid_.violating_nodes = grid_.is_violating.nonzero()[0].tolist()
+
+    ffrac = sum(grid_.active)/grid_.num_nodes
+
+    return grid_, ffrac
+
+
+def Linear_Coarsening_Lloyd(g_, agent, Greedy):
+
+    grid_ = copy.deepcopy(g_)
+    grid_ = grid(grid_.A, grid_.fine_nodes, grid_.coarse_nodes, grid_.mesh, grid_.Theta)
+
+    if not Greedy:
+
+        observation = grid_.data
+
+        with T.no_grad():
+            Q, advantage = agent.q_eval.forward(observation)
+
+        adv_tensor = copy.deepcopy(advantage)
+
+        done = False
+
+        _,_,index_agg = lloyd_aggregation(grid_.A,ratio=0.033,maxiter=1000)
+
+        list_agg = []
+        num_aggs = index_agg.max()+1
+        for i in range(num_aggs):
+
+            list_agg.append(np.nonzero(index_agg==i)[0].tolist())
+
+        while not done:
+
+            viols = grid_.violating_nodes
+
+            for idx in range(num_aggs):
+
+                aggreg = np.array(list_agg[idx])
+                viols = aggreg[grid_.is_violating[aggreg.tolist()].nonzero()[0]].tolist()
+
+                if len(viols) != 0:
+                    node_max = viols[T.argmax(adv_tensor[viols])]
+
+                    _ = grid_.coarsen_node(node_max)
+
+            observation = grid_.data
+            # grid_.active[maxes] = 0
+            # grid_.is_violating[newly_removed] = 0
+            grid_.fine_nodes = grid_.active.nonzero()[0].tolist()#list(set(grid_.fine_nodes)-set(maxes))
+            grid_.violating_nodes = grid_.is_violating.nonzero()[0].tolist()
+
+            Q, adv_tensor = agent.q_eval.forward(observation)
+
+            done = True if len(grid_.violating_nodes) == 0 else False
+
+    else:
+
+        grid_ = greedy_coarsening(grid_)
+
+    return grid_
+
+
+# for sz in sz_list:
+
+#     print (sz)
+
+#     #grid_ = structured(sz,sz,0.56)
+#     grid_ = T.load('Test_Graphs/Auto_generated/graph_28')
+#     grid_ = grid(grid_.A, grid_.fine_nodes, grid_.coarse_nodes, grid_.mesh, grid_.Theta)
+#     t1 = time.time()
+#     observation = grid_.data
+#     with T.no_grad():
+#         Q, advantage = agent.q_eval.forward(observation)

+#     adv_tensor = copy.deepcopy(advantage) #get all of the advantage values

+#     ##get k-hop neighbours of every violating node
+#     t12_13 = 0
+#     t13_14 = 0
+#     t14_15 = 0
+#     t15_16 = 0
+#     t16_17 = 0
+#     t01_02 = 0
+#     t_updata_values = 0
+#     done = False
+#     count = 0

+#     t_initial_cal2 = time.time()
+#     t_agg0 = time.time()
+#     _,_,index_agg = lloyd_aggregation(grid_.A,ratio=0.03,maxiter=100)

+#     list_agg = []
+#     num_aggs = index_agg.max()+1
+#     for i in range(num_aggs):

+#         list_agg.append(np.nonzero(index_agg==i)[0].tolist())

+#     t_agg1 = time.time()
+#     while not done:

+#         count += 1
+#         viols = grid_.violating_nodes
+#         newly_removed = []
+#         maxes = []
+#         '''
+#         while len(viols) !=0:

+#             t12 = time.time()
+#             node_max = viols[T.argmax(adv_tensor[viols])]
+#             t13 = time.time()
+#             maxes.append(node_max)

+#             newly = grid_.coarsen_node(node_max)
+#             t14 = time.time()

+#             for ijk in newly:
+#                 newly_removed.append(ijk)

+#             t15 = time.time()
+#             k2_hop = grid_.node_hop_neigh(node_max, 2*K)

+#             t16 = time.time()
+#             viols = list(set(viols)-set(k2_hop))
+#             #viols = (grid_.is_violating).nonzero()[0]
+#             t17 =
time.time() + + +# t12_13 += t13-t12 +# t13_14 += t14-t13 +# t14_15 += t15-t14 +# t15_16 += t16-t15 +# t16_17 += t17-t16 +# ''' + +# for idx in range(num_aggs): + +# t12 = time.time() +# aggreg = np.array(list_agg [idx]) +# viols = aggreg[grid_.is_violating[aggreg.tolist()].nonzero()[0]].tolist() + +# if len(viols) != 0: +# node_max = viols[T.argmax(adv_tensor[viols])] + +# t13 = time.time() +# maxes.append(node_max) + +# newly = grid_.coarsen_node(node_max) +# t14 = time.time() + +# ''' +# for ijk in newly: +# newly_removed.append(ijk) +# ''' +# t15 = time.time() + + + +# t12_13 += t13-t12 +# t13_14 += t14-t13 +# t14_15 += t15-t14 + + +# t01 = time.time() +# observation = grid_.data +# # grid_.active[maxes] = 0 +# # grid_.is_violating[newly_removed] = 0 +# grid_.fine_nodes = grid_.active.nonzero()[0].tolist()#list(set(grid_.fine_nodes)-set(maxes)) +# grid_.violating_nodes = grid_.is_violating.nonzero()[0].tolist() +# t02 = time.time() +# #list(set(grid_.violating_nodes)-set(newly_removed)) +# t01_02 += t02-t01 + +# t_updata_values0 = time.time() +# Q, adv_tensor = agent.q_eval.forward(observation) +# t_updata_values1 = time.time() +# t_updata_values += t_updata_values1-t_updata_values0 + +# # print ("ACTION", action) +# # print ("VIOLS", grid_.viol_nodes()[0]) +# # print (agent.q_eval.forward(grid_.data)) + + +# done = True if len(grid_.violating_nodes) == 0 else False + +# t2 = time.time() + +# print ("F-fraction = ", sum(grid_.active)/grid_.num_nodes) +# list_size.append(sz**2) +# list_time.append(t2-t1) + +# print (t2-t1) +# print ("count = ", count) +# print ("time per size = ", (t2-t1)/sz**2) +# print("t12_13 per size = ", (t12_13)/sz**2) +# print("t13_14 per size = ", (t13_14)/sz**2) +# print("t14_15 per size = ", (t14_15)/sz**2) +# print("t15_16 per size = ", (t15_16)/sz**2) +# print("t16_17 per size = ", (t16_17)/sz**2) +# print("t01_02 per size = ", (t01_02)/sz**2) +# print ("init_cal = ", (t_initial_cal2-t1)/sz**2) +# print ("agg time per size = ", (t_agg1-t_agg0)/sz**2) +# print ("update time = ", (t_updata_values)/sz**2) +# print ("sum time = ", ((t12_13)+(t13_14)+(t14_15)+(t15_16)+(t16_17)+(t16_17)+\ +# (t01_02)+(t_initial_cal2-t1)+(t_agg1-t_agg0)+\ +# (t_updata_values))/sz**2) + +# kir + + diff --git a/Solve_MG.py b/Solve_MG.py new file mode 100755 index 0000000..a5e59f5 --- /dev/null +++ b/Solve_MG.py @@ -0,0 +1,633 @@ +import numpy as np +import matplotlib.pyplot as plt +from matplotlib.animation import FuncAnimation, PillowWriter +import random +import torch +import scipy +import scipy.sparse as sparse +import scipy.linalg as la +import scipy.sparse.linalg as sla +from Lloyd_Unstructured import Lloyd +import copy +from Unstructured import MyMesh, grid, rand_Amesh_gen, rand_grid_gen, \ + plot_cycle, structured + +from Cycle import make_coarse_grid +import matplotlib.pyplot as plt +import sys +from MG_Agent import Agent +from Optim import Linear_Coarsening_Lloyd +import time +from Unstructured import MyMesh + + +class To_MyMesh: + def __init__(self, tri): + + self.X = tri.points[:,0:1].flatten() + self.Y = tri.points[:,1:2].flatten() + self.E = tri.simplices + self.V = tri.points[:,0:2] + self.nv = tri.points[:,0:2].shape[0] + self.ne = len(tri.simplices) + + e01 = self.E[:,[0,1]] + e02 = self.E[:,[0,2]] + e12 = self.E[:,[1,2]] + + e01 = tuple(map(tuple, e01)) + e02 = tuple(map(tuple, e02)) + e12 = tuple(map(tuple, e12)) + + e = list(set(e01).union(set(e02)).union(set(e12))) + self.N = [i for i in range(self.X.shape[0])] + self.Edges = e + self.num_edges = len(e) + + +## 
Relaxation +def relax_wjacobif(B,f,u,Minv): + Dii = Minv.diagonal() + r = f - B.dot(u) + u = u + r*Dii + return u + +def is_pos_def(x): + + out = np.linalg.eigvals(x) + print (out.min()) + return out + +##### V cycle without recursion +#### -uxx-uyy = 0 +def nr_vcycle_amgr(tot_num_loop, n_cut, min_dominance, level, no_prerelax, no_postrelax, grid_, precalc,\ + name_, Greedy=False): + ## Calculate epsilon and sigma + #min_dominance = 0.56 + grid_list = [] + eps = (2.0 - 2.0*min_dominance) / (2.0*min_dominance - 1.0) + sigma = 2.0 / (2.0 + eps) + #level = 2 + lvlm1 = level - 1 + A = level * [None] + fpts = lvlm1 * [None] + cpts = lvlm1 * [None] + fcpts_perm = lvlm1 * [None] + B = lvlm1 * [None] + lenB = lvlm1 * [None] + lenfpts = lvlm1 * [None] + lencpts = lvlm1 * [None] + #sqrt_lenB = lvlm1 * [None] + Bff = lvlm1 * [None] + theta_inv = lvlm1 * [None] + Dff = lvlm1 * [None] + Dffinv = lvlm1 * [None] + Minv = lvlm1 * [None] + DinvBfc = lvlm1 * [None] + P = lvlm1 * [None] + v = level * [None] + f = level * [None] + x = lvlm1 * [None] + b = lvlm1 * [None] + count_nonzeros_A = level * [None] + count_grid_points = level * [None] + + #A0 = np.load('A0.npy', allow_pickle=True).tolist() + A0 = grid_.A + count_nonzeros_A[0] = A0.count_nonzero() + count_grid_points[0] = A0.shape[0] + #no_prerelax = 1 + #no_postrelax = 1 + two_norm_old = 1.0 + ### Finest Grid + ## form A, and run CGsel to have the F-pts and C-pts + A[0] = A0 + + #A[0] = np.load('A0.npy', allow_pickle=True).tolist() + ## form all of the required matrices of different levels + + agent = Agent(dim = 32, K = 12, gamma = 1, epsilon = 1, \ + lr= 0.001, mem_size = 5000, net_type = 'TAGConv', batch_size = 64, \ + eps_min = 0.01 , eps_dec = 1.333/5000, replace=10) + + agent.q_eval.load_state_dict(torch.load('Models/Dueling_batch_train_final.pth')) + + for i in range(lvlm1): + + if Greedy: + if precalc== None: + + grid_ = Linear_Coarsening_Lloyd(grid_, agent, True) + else: + + grid_ = torch.load(name_) + + else: + + if precalc == None: + + grid_ = Linear_Coarsening_Lloyd(grid_, agent, False) + + else: + + grid_ = torch.load(name_) + + + grid_list.append(grid_) + + # grid_.plot() + # plt.show() + + # fpts[i] = np.load('Fpts'+str(i)+'.npy', allow_pickle=True).tolist() + # cpts[i] = np.load('Cpts'+str(i)+'.npy', allow_pickle=True).tolist() + + fpts[i] = grid_.fine_nodes + cpts[i] = grid_.coarse_nodes + + ## form B, separating the F- and C-points. 
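+        ## (Editor's note: B is A symmetrically permuted so that F-points come
+        ## first, i.e. in block form
+        ##     B = [ A_ff  A_fc ]
+        ##         [ A_cf  A_cc ]
+        ## which lets the reduction-based pieces below (Dff, Minv, P) index
+        ## the F-F block contiguously.)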
+ B[i] = sparse.csr_matrix(A[i].shape) + #lenB[i] = sa.N + lenB[i] = B[i].shape[0] + #sqrt_lenB[i] = sa.sqrt_N + #sqrt_lenB[i] = int(np.sqrt(lenB[i])) + lenfpts[i] = len(fpts[i]) + lencpts[i] = len(cpts[i]) + + Fl = [j for j in range(lenfpts[i])] + Cl = [j+lenfpts[i] for j in range(lencpts[i])] + + + fcpts_perm[i] = np.zeros(A[i].shape[0], dtype='int64') + + fcpts_perm[i][0:lenfpts[i]] = fpts[i] + fcpts_perm[i][lenfpts[i]:] = cpts[i] + + Bff[i] = A[i][np.ix_(fpts[i], fpts[i])] + + #Ali Commented# + #B[i] = A[i][fcpts_perm[i][:, None], fcpts_perm[i]] + ############### + + # B[i][:,0:lenfpts[i]][0:lenfpts[i],:] = A[i][:,fpts[i]][fpts[i],:] + # B[i][:,0:lenfpts[i]][ lenfpts[i]:,:] = A[i][:,fpts[i]][cpts[i],:] + # B[i][:,lenfpts[i]:][ 0:lenfpts[i],:] = A[i][:,cpts[i]][fpts[i],:] + # B[i][:,lenfpts[i]:][ lenfpts[i]:,:] = A[i][:,cpts[i]][cpts[i],:] + + + Bff[i] = A[i][np.ix_(fpts[i], fpts[i])] + B[i][np.ix_(Fl, Fl)] = Bff[i] + B[i][np.ix_(Cl, Fl)] = A[i][np.ix_(cpts[i], fpts[i])] + B[i][np.ix_(Fl, Cl)] = A[i][np.ix_(fpts[i], cpts[i])] + B[i][np.ix_(Cl, Cl)] = A[i][np.ix_(cpts[i], cpts[i])] + ## Calculate theta-i and Dff + theta_inv[i] = np.sum(np.abs(Bff[i]), axis=0) / Bff[i].diagonal() + Dff[i] = np.multiply(2.0-theta_inv[i], Bff[i].diagonal()) # (2.0-theta_inv)*Bff.diagonal() + print (min(np.array(Dff[0]).flatten())) + Dffinv[i] = 1.0 / Dff[i] + + #Dffinv[i] = np.array(1.0 / Dff[i])[0] + + ## Minv from reduction based AMG (REL) + Minv[i] = sparse.csr_matrix(A[i].shape) + Minv[i][np.ix_(Fl,Fl)]= sigma*Dffinv[i] + + ## Form P, the interpolation matrix + + ##Ali Commented## + # DinvBfc[i] = -B[i][0:lenfpts[i], lenfpts[i]:] + + ################# + + ##Ali Added## + DinvBfc[i] = -B[i][np.ix_(Fl, Cl)] + + + DinvBfc[i] = sparse.spdiags(Dffinv[i],0,lenfpts[i],lenfpts[i], format='csr').dot(DinvBfc[i]) + + P[i] = sparse.csr_matrix((lenB[i],lencpts[i])) + P[i][0:lenfpts[i], :] = DinvBfc[i] + P[i][lenfpts[i]:lenB[i], :] = sparse.identity(lencpts[i], format='csr') + ## form v, x, and b + v[i] = np.zeros(lenB[i]) + if i > 0: + x[i] = np.zeros(lenB[i]) + b[i] = np.zeros(lenB[i]) + + ## form A2h + A[i+1] = (P[i].transpose()).dot(B[i].dot(P[i])) + + #count_nonzeros_A[i+1] = A[i+1].count_nonzero() + #count_grid_points[i+1] = A[i+1].shape[0] + + + + + if abs(np.linalg.eigvals(A[-1].toarray()).min())<1e-6: + + A[-1][-1] = sparse.csr_matrix(A[-1][-1].toarray()*0+1) + + # for i in range(len(B)): + + # if abs(np.linalg.eigvals(B[i].toarray()).min())<1e-6: + + # B[i][-1] = sparse.csr_matrix(B[i][-1].toarray()*0+1) + # print("KKHHHHAAAIII", abs(np.linalg.eigvals(B[i].toarray()).min())) + + + ## Exact solution + xexact = np.zeros(lenB[0]) + + ## x, f, b for the finest grid + random_state = np.random.RandomState(seed=5) + x[0] = random_state.normal(size=lenB[0]) + + #x[0] = np.random.normal(size=lenB[0]) + #x[0] = np.zeros(lenB[0]) + f[0] = np.zeros(lenB[0]) + b[0][0:lenfpts[0]] = f[0][fpts[0]] + b[0][lenfpts[0]:lenB[0]] = f[0][cpts[0]] + + + list_v = [] + list_x = [] + mean_v = [] + #1000 for 5617, 22241; 800 for 1433; for dom2: all 1000 + + A_inv = sla.inv(A[-1]) + + + two_norm_n_list = [] + convfacts = [0] + for no_loop in range(tot_num_loop+1): + ## go from finest to coarsest + + + + list_x.append(x[0]) + for i in range(lvlm1): + for j in range(no_prerelax): + + x[i] = relax_wjacobif(B[i],b[i],x[i],Minv[i]) + + + f[i+1] = (P[i].transpose()).dot(b[i] - B[i].dot(x[i])) + + + if i <= lvlm1-2: + + b[i+1][0:lenfpts[i+1]] = f[i+1][fpts[i+1]] + b[i+1][lenfpts[i+1]:lenB[i+1]] = f[i+1][cpts[i+1]] + + ## solve for the 
coarsest + v[-1] = (A_inv).dot(f[-1]) + + ## go from coarsest to finest + for i in range(lvlm1-1, -1, -1): + + x[i] = x[i] + P[i].dot(v[i+1]) + + + for j in range(no_postrelax): + + x[i] = relax_wjacobif(B[i],b[i],x[i],Minv[i]) + + v[i][fpts[i]] = x[i][0:lenfpts[i]] + v[i][cpts[i]] = x[i][lenfpts[i]:lenB[i]] + + + for v_indx in range(len(v)): + + v[v_indx] = v[v_indx] - v[v_indx].mean() + + #inf_norm = np.linalg.norm(xexact - v, np.inf) + two_norm_n = np.linalg.norm(xexact - v[0]+v[0].mean(), 2) + ratio_norm = two_norm_n / two_norm_old + two_norm_old = two_norm_n + #print('ratio of norms:', ratio_norm) + list_v.append(copy.deepcopy(v[0]-v[0].mean())) + mean_v.append((v[0]-v[0].mean()).mean()) + + two_norm_n_list.append(two_norm_n) + + if len(two_norm_n_list)>=15: + + factor = (two_norm_n_list[-1]/two_norm_n_list[-10])**(1/10) + if abs(factor - convfacts[-1])<0.01 and abs(factor - convfacts[-2])<0.01\ + and abs(factor - convfacts[-3])<0.01: + + Grid_Complexity = 0 + Operator_Complexity = 0 + for i in range(len(A)): + + Operator_Complexity += np.nonzero(A[i])[0].shape[0] + Grid_Complexity += A[i].shape[0] + + Grid_Complexity = Grid_Complexity/A[0].shape[0] + Operator_Complexity = Operator_Complexity/np.nonzero(A[0])[0].shape[0] + print ("factor = ", factor, "num iter", no_loop) + + plt.plot(np.arange(0,v[0].shape[0]), v[0]) + plt.title('solution on the fine grid after convergence factor stops changing, solving for Ax = 0') + plt.ylim([-2, 2]) + plt.xlabel('nodes') + plt.ylabel('nodes values') + plt.show() + + plt.plot(np.arange(0,v[1].shape[0]), v[1]) + plt.title('solution on the coarse grid after convergence factor stops changing, solving for Ax = 0') + plt.ylim([-2, 2]) + plt.xlabel('nodes') + plt.ylabel('nodes values') + plt.show() + + + # kir + return factor, convfacts, Grid_Complexity, Operator_Complexity + + if no_loop == tot_num_loop: + + Grid_Complexity = 0 + Operator_Complexity = 0 + for i in range(len(A)): + + Operator_Complexity += np.nonzero(A[i])[0].shape[0] + Grid_Complexity += A[i].shape[0] + + Grid_Complexity = Grid_Complexity/A[0].shape[0] + Operator_Complexity = Operator_Complexity/np.nonzero(A[0])[0].shape[0] + print ("factor = ", factor, "num iter", no_loop) + + plt.plot(np.arange(0,v[0].shape[0]), v[0]) + plt.title('solution on the fine grid after convergence factor stops changing, solving for Ax = 0') + plt.ylim([-2, 2]) + plt.xlabel('nodes') + plt.ylabel('nodes values') + plt.show() + + plt.plot(np.arange(0,v[1].shape[0]), v[1]) + plt.title('solution on the coarse grid after convergence factor stops changing, solving for Ax = 0') + plt.ylim([-2, 2]) + plt.xlabel('nodes') + plt.ylabel('nodes values') + plt.show() + + + # kir + return factor, convfacts, Grid_Complexity, Operator_Complexity + + convfacts.append(factor) + + if no_loop == 0: + firstv = copy.deepcopy(v[0]-v[0].mean()) + + + for ij in range(len(v)): + + v[ij] = v[ij] - v[ij].mean() + + #print('convergence factor using consecutive norms:', ratio_norm) + + conv_fac_ini_fin = (two_norm_n_list[-1]/two_norm_n_list[-10])**(1/10) + lastv = v[0]-v[0].mean() + + # plt.plot(np.arange(0, 1, 1/xexact.shape[0]), lastv) + # plt.ylim([-2, 2]) + # plt.show() + + # plt.plot(np.arange(0,len(mean_v)), mean_v) + # plt.title(Greedy) + # plt.ylim([-2, 2]) + # plt.show() + + print('convergence factor using init and final:', conv_fac_ini_fin) + Grid_Complexity = 0 + Operator_Complexity = 0 + for i in range(len(A)): + + Operator_Complexity += np.nonzero(A[i])[0].shape[0] + Grid_Complexity += A[i].shape[0] + + Grid_Complexity = 
Grid_Complexity/A[0].shape[0] + Operator_Complexity = Operator_Complexity/np.nonzero(A[0])[0].shape[0] + + # return firstv, lastv, list_v, list_x + return conv_fac_ini_fin, convfacts, Grid_Complexity, Operator_Complexity + + +list_RL_CRate = [] +list_GR_CRate = [] +list_GR_Gcompx = [] +list_RL_Gcompx = [] +list_GR_Gcompx = [] +list_RL_Ocompx = [] +list_GR_Ocompx = [] + + +dict_rate_complexity = torch.load('rate_and_complx.pth') + +# dict_rate_complexity["Convex"] = {} + + + + +#dict_rate_complexity["Graded_mesh"] = {} +# l = [2,8,10,11] +# for i in l: +# print ("convex, number ",i) +# precalc = i + + +# grid_ = torch.load('Test_Graphs/Hand_crafted/Grids/Graph_'+str(precalc)) +# grid_.A = -grid_.A +# torch.save(grid_, 'Test_Graphs/Hand_crafted/Grids/Graph_'+str(precalc)) + +# rl = torch.load('Test_Graphs/Hand_crafted/Grids/rl_1L_'+str(precalc)) +# rl.A = -rl.A +# torch.save(rl, 'Test_Graphs/Hand_crafted/Grids/rl_1L_'+str(precalc)) + +# gr = torch.load('Test_Graphs/Hand_crafted/Grids/gr_1L_'+str(precalc)) +# gr.A = -gr.A +# torch.save(gr, 'Test_Graphs/Hand_crafted/Grids/gr_1L_'+str(precalc)) + +''' +for i in range(1,13): + print ("Graded_mesh, number ",i) + precalc = i + n_loop = 100 + n_cut = 10 + + grid_ = torch.load('Test_Graphs/Smoothly/Graph'+str(precalc)) + grid_gr = copy.deepcopy(grid_) + + num_nodes = grid_.num_nodes + + rl_cr, rl_cr_list, RL_Gcompx, RL_Ocompx = nr_vcycle_amgr(n_loop, n_cut, 0.56, 2, 2, 2, grid_, \ + precalc, \ + 'Test_Graphs/Smoothly/RL_1L_'+str(precalc), Greedy=False) + + + gr_cr, gr_cr_list, GR_Gcompx, GR_Ocompx = nr_vcycle_amgr(n_loop, n_cut,0.56, 2, 2, 2, grid_gr, \ + precalc, \ + 'Test_Graphs/Smoothly/GR_1L_'+str(precalc), Greedy=True) + + dict_rate_complexity['Graded_mesh'][i] = {'num_nodes':num_nodes, 'rl_cr': rl_cr, 'gr_cr': gr_cr, \ + 'RL_Gcompx': RL_Gcompx, 'GR_Gcompx':GR_Gcompx, + 'RL_Ocompx':RL_Ocompx, 'GR_Ocompx':GR_Ocompx} + + torch.save(dict_rate_complexity, 'rate_and_complx.pth') + + + +dict_rate_complexity["Wide_valence"] = {} +for i in range(1,13): + + print ("Wide_valence, number ",i) + precalc = i + n_loop = 100 + n_cut = 10 + + grid_ = torch.load('Test_Graphs/test_STD/Graph'+str(precalc)) + grid_gr = copy.deepcopy(grid_) + + num_nodes = grid_.num_nodes + + rl_cr, rl_cr_list, RL_Gcompx, RL_Ocompx = nr_vcycle_amgr(n_loop, n_cut, 0.56, 2, 2, 2, grid_, \ + precalc, \ + 'Test_Graphs/test_STD/RL_1L_'+str(precalc), Greedy=False) + + gr_cr, gr_cr_list, GR_Gcompx, GR_Ocompx = nr_vcycle_amgr(n_loop, n_cut,0.56, 2, 2, 2, grid_gr, \ + precalc, \ + 'Test_Graphs/test_STD/GR_1L_'+str(precalc), Greedy=True) + + dict_rate_complexity['Wide_valence'][i] = {'num_nodes':num_nodes, 'rl_cr': rl_cr, 'gr_cr': gr_cr, \ + 'RL_Gcompx': RL_Gcompx, 'GR_Gcompx':GR_Gcompx, + 'RL_Ocompx':RL_Ocompx, 'GR_Ocompx':GR_Ocompx} + + torch.save(dict_rate_complexity, 'rate_and_complx.pth') + + ''' +#dict_rate_complexity["Structured"] = {} +for i in range(1,11): + + print ("Structured, number ",i) + precalc = i + n_loop = 100 + n_cut = 10 + + grid_ = torch.load('Test_Graphs/Structured/Graph'+str(precalc)) + grid_gr = copy.deepcopy(grid_) + + num_nodes = grid_.num_nodes + + rl_cr, rl_cr_list, RL_Gcompx, RL_Ocompx = nr_vcycle_amgr(n_loop, n_cut, 0.56, 2, 2, 2, grid_, \ + precalc, \ + 'Test_Graphs/Structured/RL_1L_'+str(precalc), Greedy=False) + + gr_cr, gr_cr_list, GR_Gcompx, GR_Ocompx = nr_vcycle_amgr(n_loop, n_cut,0.56, 2, 2, 2, grid_gr, \ + precalc, \ + 'Test_Graphs/Structured/GR_1L_'+str(precalc), Greedy=True) + + dict_rate_complexity['Structured'][i] = {'num_nodes':num_nodes, 'rl_cr': 
rl_cr, 'gr_cr': gr_cr, \ + 'RL_Gcompx': RL_Gcompx, 'GR_Gcompx':GR_Gcompx, + 'RL_Ocompx':RL_Ocompx, 'GR_Ocompx':GR_Ocompx} + + torch.save(dict_rate_complexity, 'rate_and_complx.pth') + + +''' + +dict_rate_complexity["Aspect_Ratio"] = {} + + +for i in range(1,13): + + print ("Aspect_Ratio, number ",i) + precalc = i + n_loop = 100 + n_cut = 10 + + grid_ = torch.load('Test_Graphs/Aspect_Ratio/Graph_'+str(precalc)) + grid_gr = copy.deepcopy(grid_) + + num_nodes = grid_.num_nodes + + rl_cr, rl_cr_list, RL_Gcompx, RL_Ocompx = nr_vcycle_amgr(n_loop, n_cut, 0.56, 2, 2, 2, grid_, \ + precalc, \ + 'Test_Graphs/Aspect_Ratio/rl_1L_'+str(precalc), Greedy=False) + + gr_cr, gr_cr_list, GR_Gcompx, GR_Ocompx = nr_vcycle_amgr(n_loop, n_cut,0.56, 2, 2, 2, grid_gr, \ + precalc, \ + 'Test_Graphs/Aspect_Ratio/gr_1L_'+str(precalc), Greedy=True) + + dict_rate_complexity['Aspect_Ratio'][i] = {'num_nodes':num_nodes, 'rl_cr': rl_cr, 'gr_cr': gr_cr, \ + 'RL_Gcompx': RL_Gcompx, 'GR_Gcompx':GR_Gcompx, + 'RL_Ocompx':RL_Ocompx, 'GR_Ocompx':GR_Ocompx} + + torch.save(dict_rate_complexity, 'rate_and_complx.pth') + + + +dict_rate_complexity["Convex"] = {} + + +for i in range(1,13): + i = 11 + print ("Convex, number ",i) + precalc = i + n_loop = 100 + n_cut = 10 + + grid_ = torch.load('Test_Graphs/Hand_crafted/Grids/Graph_'+str(precalc)) + grid_gr = copy.deepcopy(grid_) + + num_nodes = grid_.num_nodes + + rl_cr, rl_cr_list, RL_Gcompx, RL_Ocompx = nr_vcycle_amgr(n_loop, n_cut, 0.56, 2, 2, 2, grid_, \ + precalc, \ + 'Test_Graphs/Hand_crafted/Grids/rl_1L_'+str(precalc), Greedy=False) + + + gr_cr, gr_cr_list, GR_Gcompx, GR_Ocompx = nr_vcycle_amgr(n_loop, n_cut,0.56, 2, 2, 2, grid_gr, \ + precalc, \ + 'Test_Graphs/Hand_crafted/Grids/gr_1L_'+str(precalc), Greedy=True) + + dict_rate_complexity['Convex'][i] = {'num_nodes':num_nodes, 'rl_cr': rl_cr, 'gr_cr': gr_cr, \ + 'RL_Gcompx': RL_Gcompx, 'GR_Gcompx':GR_Gcompx, + 'RL_Ocompx':RL_Ocompx, 'GR_Ocompx':GR_Ocompx} + + torch.save(dict_rate_complexity, 'rate_and_complx.pth') + + +dict_rate_complexity["Different_size"] = {} + +for j in range(45): + i = j+1 + if i!=35 and i!=36: + + print ("Different_size, number ",i) + precalc = i + n_loop = 100 + n_cut = 10 + + grid_ = torch.load('Test_Graphs/Auto_generated/graph_'+str(precalc)) + + num_nodes = grid_.num_nodes + + grid_gr = copy.deepcopy(grid_) + + rl_cr, rl_cr_list, RL_Gcompx, RL_Ocompx = nr_vcycle_amgr(n_loop, n_cut, 0.56, 2, 2, 2, grid_, \ + precalc, \ + 'Test_Graphs/Auto_generated/rl_1L_'+str(precalc), Greedy=False) + + gr_cr, gr_cr_list, GR_Gcompx, GR_Ocompx = nr_vcycle_amgr(n_loop, n_cut,0.56, 2, 2, 2, grid_gr, \ + precalc, \ + 'Test_Graphs/Auto_generated/gr_1L_'+str(precalc), Greedy=True) + + dict_rate_complexity['Different_size'][i] = {'num_nodes':num_nodes, 'rl_cr': rl_cr, 'gr_cr': gr_cr, \ + 'RL_Gcompx': RL_Gcompx, 'GR_Gcompx':GR_Gcompx, + 'RL_Ocompx':RL_Ocompx, 'GR_Ocompx':GR_Ocompx} + + torch.save(dict_rate_complexity, 'rate_and_complx.pth') + +''' + diff --git a/Unstructured.py b/Unstructured.py new file mode 100644 index 0000000..b3d282b --- /dev/null +++ b/Unstructured.py @@ -0,0 +1,1022 @@ +import numpy as np +from numpy import sin, cos, pi +import matplotlib.pyplot as plt +from scipy.spatial import Delaunay +import scipy +import fem +import scipy as sp +import pygmsh +import time +from scipy.spatial import ConvexHull, convex_hull_plot_2d +import random +import torch as T +import torch_geometric +import Batch_Graph as bg +import copy +import networkx as nx +from networkx.drawing.nx_pylab import draw_networkx +from 
pyamg.gallery.diffusion import diffusion_stencil_2d +from pyamg.gallery import stencil_grid +from torch_geometric.data import Data +from pyamg.aggregation import lloyd_aggregation +from pyamg.gallery import poisson +import matplotlib as mpl +import os +from scipy.sparse import csr_matrix, coo_matrix, isspmatrix_csr, isspmatrix_csc +from pyamg import amg_core +from pyamg.graph import lloyd_cluster +import sys +from MG_Agent import Agent + +mpl.rcParams['figure.dpi'] = 300 + + +class MyMesh: + def __init__(self, mesh): + + self.X = mesh.points[:,0:1].flatten() + self.Y = mesh.points[:,1:2].flatten() + self.E = mesh.cells[1].data + self.V = mesh.points[:,0:2] + self.nv = mesh.points[:,0:2].shape[0] + self.ne = len(mesh.cells[1].data) + + e01 = self.E[:,[0,1]] + e02 = self.E[:,[0,2]] + e12 = self.E[:,[1,2]] + + e01 = tuple(map(tuple, e01)) + e02 = tuple(map(tuple, e02)) + e12 = tuple(map(tuple, e12)) + + e = list(set(e01).union(set(e02)).union(set(e12))) + self.N = [i for i in range(self.X.shape[0])] + self.Edges = e + self.num_edges = len(e) + + +def structured(n_row, n_col, Theta): + + num_nodes = int(n_row*n_col) + + X = np.array([[i/(n_col*n_row) for i in range(n_col)] for j in range(n_row)]).flatten() + Y = np.array([[j/(n_row*n_col) for i in range(n_col)] for j in range(n_row)]).flatten() + E = [] + V = [] + nv = num_nodes + N = [i for i in range(num_nodes)] + + epsilon = 1 + theta = 1 #param of A matrix + + sten = diffusion_stencil_2d(epsilon=epsilon,theta=theta,type='FD') + AA = stencil_grid(sten, (n_row, n_col), dtype=float, format='csr') + A = AA.toarray() + + nz_row = np.nonzero(A)[0] + nz_col = np.nonzero(A)[1] + e = np.concatenate((np.expand_dims(nz_row,axis=1), np.expand_dims(nz_col, axis=1)), axis=1) + Edges = list(tuple(map(tuple, e))) + num_edges = len(Edges) + g = rand_grid_gen(None) + + mesh = copy.deepcopy(g.mesh) + + mesh.X = X + mesh.Y = Y + mesh.E = E + mesh.V = V + mesh.nv = nv + mesh.ne = [] + mesh.N = N + mesh.Edges = Edges + mesh.num_edges = num_edges + + fine_nodes = [i for i in range(num_nodes)] + + grid_ = grid(AA,fine_nodes,[], mesh, Theta) + + return grid_ + +# class grid: + +# def __init__(self, A, fine_nodes, coarse_nodes, mesh, Theta): + +# self.A = A.tocsr() +# self.fine_nodes = fine_nodes +# self.coarse_nodes = coarse_nodes +# self.num_nodes = mesh.nv +# #self.edges = set_edge +# self.mesh = mesh +# active = np.ones(self.num_nodes) +# active[self.coarse_nodes] = 0 +# self.active = active +# self.Theta = Theta +# AA = abs(self.A) +# self.AA = AA + +# colsz = self.num_nodes + +# A_flat = self.AA.reshape(self.num_nodes**2, 1).tocsr() +# edges_list = A_flat.nonzero()[0] +# edge_row = edges_list//colsz +# edge_row = edge_row.tolist() +# edge_col = edges_list% colsz +# edge_col = edge_col.tolist() + +# edge_attr = T.tensor(A_flat.data) +# edge_attr = edge_attr.unsqueeze(1).float() +# edge_index = T.tensor([edge_row, edge_col],dtype=T.long) +# self.edge_index = edge_index +# self.edge_attr = edge_attr + + +# self.x = T.cat((T.from_numpy(self.active).unsqueeze(1), \ +# T.from_numpy(self.active).unsqueeze(1)),dim=1).float() + + + +# list_neighbours = [[] for i in range(self.x.shape[0])] +# for i in range(self.edge_index.shape[1]): + +# list_neighbours[self.edge_index[0,i].clone().tolist()].append(self.edge_index[1\ +# ,i].clone().tolist()) +# self.list_neighbours = list_neighbours + +# deg_l = [] +# for l in self.list_neighbours: +# deg_l.append(len(l)) +# deg_arr = np.array(deg_l) + +# self.deg_std = deg_arr.std() +# self.deg_mean = deg_arr.mean() + +# self.data = 
Data(x=self.x, edge_index=edge_index, edge_attr= edge_attr) + +# self.violating_nodes = self.viol_nodes()[0] +# self.is_violating = self.viol_nodes()[1] + +# def subgrid(self, node_list): + +# sub_x = self.x[node_list] +# ''' +# n = len(node_list) +# sub_AA = np.zeros((n,n)) +# for i in range(300): +# for j in range(300): +# if i(self.Theta*self.AA[node,:]*np.expand_dims(self.active,1))[0,0]: +# return False +# else: +# return True + +# def viol_nodes(self): + +# active_nodes = self.active +# AA = self.AA +# I = scipy.sparse.csr_matrix(np.identity(self.num_nodes)) +# active_aij = AA.multiply(active_nodes).tocsr() +# sum_aij = active_aij.sum(1) +# B = AA.multiply(I) +# Diag = B.diagonal() +# vals = np.sign(Diag-self.Theta*np.array(sum_aij).flatten()) +# viols = np.nonzero(vals == -1)[0].tolist() + +# ###newly added### + +# # viols2 = [] +# # for node in viols: +# # viols2.append(node) +# # for neighbors in self.list_neighbours[node]: +# # viols2.append(neighbors) +# # viols = viols2 + +# ###end### + +# violatings = list(set(viols).difference(self.coarse_nodes)) +# isviol = T.zeros(self.num_nodes) +# isviol[violatings] = 1 +# num_viol = len(violatings) + +# return violatings, isviol, num_viol + + +# def coarsen_node(self, node_a): + + +# self.fine_nodes.remove(node_a) +# self.coarse_nodes.append(node_a) +# self.active[node_a] = 0 +# self.violating_nodes.remove(node_a) +# self.is_violating[node_a] = 0 + +# for neigh in self.list_neighbours[node_a]: +# if self.is_viol(neigh) == False and neigh in self.violating_nodes: +# self.violating_nodes.remove(neigh) +# self.is_violating[neigh] = 0 + +# self.x = T.cat((T.from_numpy(self.active).unsqueeze(1), \ +# self.is_violating.unsqueeze(1).double()),dim=1).float() + +# self.data = Data(x=self.x, edge_index=self.edge_index, edge_attr=self.edge_attr) + +# def plot(self): + +# G = nx.Graph() +# mymsh = self.mesh + +# points = mymsh.N +# edges = mymsh.Edges + +# pos_dict = {} +# for i in range(mymsh.nv): +# pos_dict[i] = [mymsh.X[i], mymsh.Y[i]] + +# G.add_nodes_from(points) +# G.add_edges_from(edges) +# colors = [i for i in range(mymsh.nv)] + +# for i in self.fine_nodes: +# colors[i] = 'b' +# for i in self.coarse_nodes: +# colors[i] = 'r' +# for i in self.viol_nodes()[0]: +# colors[i] = 'g' + +# draw_networkx(G, pos=pos_dict, with_labels=False, node_size=6, \ +# node_color = colors, node_shape = 'o') +# ''' +# plt.plot(points[self.fine_nodes,0], points[self.fine_nodes,1], 'b.',\ +# points[self.coarse_nodes,0], points[self.coarse_nodes,1], 'r.',\ +# points[self.viol_nodes()[0],0], points[self.viol_nodes()[0],1], 'g.') +# ''' + + +def from_scipy_sparse_matrix(A): + r"""Converts a scipy sparse matrix to edge indices and edge attributes. + + Args: + A (scipy.sparse): A sparse matrix. + """ + A = A.tocoo() + row = T.from_numpy(A.row).to(T.long) + col = T.from_numpy(A.col).to(T.long) + edge_index = T.stack([row, col], dim=0) + edge_weight = T.from_numpy(A.data) + return edge_index, edge_weight + + +def from_networkx(G): + r"""Converts a :obj:`networkx.Graph` or :obj:`networkx.DiGraph` to a + :class:`torch_geometric.data.Data` instance. + + Args: + G (networkx.Graph or networkx.DiGraph): A networkx graph. 
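+
+    Example (illustrative; any small graph works, ``path_graph`` is just a
+    convenient input):
+
+        >>> import networkx as nx
+        >>> data = from_networkx(nx.path_graph(3))
+        >>> data.edge_index.shape
+        torch.Size([2, 4])
+        >>> data.num_nodes
+        3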
+ """ + + G = nx.convert_node_labels_to_integers(G) + G = G.to_directed() if not nx.is_directed(G) else G + edge_index = T.LongTensor(list(G.edges)).t().contiguous() + + data = {} + + for i, (_, feat_dict) in enumerate(G.nodes(data=True)): + for key, value in feat_dict.items(): + data[str(key)] = [value] if i == 0 else data[str(key)] + [value] + + for i, (_, _, feat_dict) in enumerate(G.edges(data=True)): + for key, value in feat_dict.items(): + data[str(key)] = [value] if i == 0 else data[str(key)] + [value] + + for key, item in data.items(): + try: + data[key] = T.tensor(item) + except ValueError: + pass + + data['edge_index'] = edge_index.view(2, -1) + data = torch_geometric.data.Data.from_dict(data) + data.num_nodes = G.number_of_nodes() + + return data + + + +class grid: + + def __init__(self, A, fine_nodes, coarse_nodes, mesh, Theta): + + self.A = A.tocsr() + self.fine_nodes = fine_nodes + self.coarse_nodes = coarse_nodes + self.num_nodes = mesh.nv + #self.edges = set_edge + self.mesh = mesh + active = np.ones(self.num_nodes) + active[self.coarse_nodes] = 0 + self.active = active + self.Theta = Theta + + self.G = nx.from_scipy_sparse_matrix(self.A, edge_attribute='weight', parallel_edges=False) + + + self.x = T.cat((T.from_numpy(self.active).unsqueeze(1), \ + T.from_numpy(self.active).unsqueeze(1)),dim=1).float() + + + edge_index, edge_attr = from_scipy_sparse_matrix(abs(self.A)) + + list_neighbours1 = [] + list_neighbours2 = [] + for node in range(self.num_nodes): + a = list(self.G.edges(node,data = True)) + l1 = [] + l2 = [] + for i in range(len(a)): + l1.append(a[i][1]) + l2.append(abs(np.array(list(a[i][-1].values())))[0]) + + list_neighbours1.append(l1) + list_neighbours2.append(l2) + + self.list_neighbours = [list_neighbours1, list_neighbours2] + + self.data = Data(x=self.x, edge_index=edge_index, edge_attr= edge_attr.float()) + self.violating_nodes = [i for i in range(self.num_nodes)] #self.viol_nodes()[0] + self.is_violating = np.array([1 for i in range(self.num_nodes)]) #self.viol_nodes()[1] + + + def subgrid(self, node_list): + + sub_x = self.x[node_list] + sub_data = from_networkx(self.G.subgraph(node_list)) + sub_data = Data(x=sub_x, edge_index=sub_data.edge_index, edge_attr= abs(sub_data.weight.float())) + + return sub_data + + + def node_hop_neigh(self, node, K): + + return list(nx.single_source_shortest_path(self.G, node, cutoff=K).keys()) + + + def is_viol(self, node): + + if self.active[node] == 0: + return False + + else: + + neigh_list = self.list_neighbours[0][node]#list(self.G.neighbors(node)) + actives = self.active[neigh_list] + aij = self.list_neighbours[1][node] + # aij = np.array([abs(np.array(list(self.G.get_edge_data(node,neigh).values())[0])) \ + # for neigh in neigh_list]) + aij = aij*actives + aij = aij.sum() + if abs(np.array(list(self.G.get_edge_data(node,node).values())[0]))< self.Theta*aij: + + return True + + else: + + return False + + + def viol_nodes(self): + + violatings = [] + isviol = [] + + for node in range(self.num_nodes): + + if self.active[node]!=0: + + neigh_list = self.list_neighbours[0][node] + + #neigh_list = list(self.G.neighbors(node)) + actives = self.active[neigh_list] + # aij = np.array([abs(np.array(list(self.G.get_edge_data(node,neigh).values())[0])) \ + # for neigh in neigh_list]) + aij = self.list_neighbours[1][node] + aij = aij*actives + aij = aij.sum() + + if self.G.get_edge_data(node,node) != None: + + if abs(np.array(list(self.G.get_edge_data(node,node).values())[0]))< self.Theta*aij: + + isviol.append(1) + 
violatings.append(node) + + else: + + isviol.append(0) + + else: + + isviol.append(0) + + num_viol = len(violatings) + + return violatings, isviol, num_viol + + + def coarsen_node(self, node_a): + + #tkir1 = time.time() + newly_removed = [] + #self.fine_nodes.remove(node_a) + self.coarse_nodes.append(node_a) + self.active[node_a] = 0 + #self.violating_nodes.remove(node_a) + self.is_violating[node_a] = 0 + newly_removed.append(node_a) + + for neigh in self.list_neighbours[0][node_a]:#self.G.neighbors(node_a): + if self.is_viol(neigh) == False and self.is_violating[neigh] == 1: + #self.violating_nodes.remove(neigh) + self.is_violating[neigh] = 0 + newly_removed.append(neigh) + + + + + self.data.x[node_a, 0] = 0 + self.data.x[newly_removed, 1] = 0 + + return newly_removed + + def uncoarsen(self, node_a): + + self.fine_nodes.append(node_a) + #self.coarse_nodes.remove(node_a) + self.active[node_a] = 1 + #self.violating_nodes.remove(node_a) + #self.is_violating[node_a] = 0 + newly_added = [] + + if self.is_viol(node_a) == True and self.is_violating[node_a] == 0: + self.is_violating[node_a] = 1 + newly_added.append(node_a) + + + for neigh in self.list_neighbours[0][node_a]:#self.G.neighbors(node_a): + if self.is_viol(neigh) == True and self.is_violating[neigh] == 0: + + self.is_violating[neigh] = 1 + newly_added.append(neigh) + + + self.data.x[node_a, 0] = 1 + self.data.x[newly_added, 1] = 1 + + return newly_added + + + def plot(self, size, w): + + G = nx.Graph() + mymsh = self.mesh + + points = mymsh.N + edges = mymsh.Edges + + pos_dict = {} + for i in range(mymsh.nv): + pos_dict[i] = [mymsh.X[i], mymsh.Y[i]] + + G.add_nodes_from(points) + G.add_edges_from(edges) + colors = [i for i in range(mymsh.nv)] + + for i in self.fine_nodes: + colors[i] = 'b' + for i in self.coarse_nodes: + colors[i] = 'r' + for i in self.viol_nodes()[0]: + colors[i] = 'g' + + draw_networkx(G, pos=pos_dict, with_labels=False, node_size=size, \ + node_color = colors, node_shape = 'o', width = w) + + plt.axis('equal') + + +def structured(n_row, n_col, Theta): + + num_nodes = int(n_row*n_col) + + X = np.array([[i/(n_col*n_row) for i in range(n_col)] for j in range(n_row)]).flatten() + Y = np.array([[j/(n_row*n_col) for i in range(n_col)] for j in range(n_row)]).flatten() + E = [] + V = [] + nv = num_nodes + N = [i for i in range(num_nodes)] + + epsilon = 1 + theta = 1 #param of A matrix + + sten = diffusion_stencil_2d(epsilon=epsilon,theta=theta,type='FD') + AA = stencil_grid(sten, (n_row, n_col), dtype=float, format='csr') + + nz_row = [] + nz_col = [] + t1 = time.time() + + for i in range(n_row): + for j in range(n_col): + + if i!=n_row-1: + if j!=n_col-1: + + nz_row.append(i*n_col+j) + nz_row.append(i*n_col+j) + nz_col.append(i*n_col+j+1) + nz_col.append(i*n_col+j+n_col) + else: + nz_row.append(i*n_col+j) + nz_col.append(i*n_col+j+n_col) + + if i == n_row-1: + if j!=n_col-1: + + nz_row.append(i*n_col+j) + nz_col.append(i*n_col+j+1) + + + nz_row = np.array(nz_row) + nz_col = np.array(nz_col) + + + # print ("t21", t2-t1) + e = np.concatenate((np.expand_dims(nz_row,axis=1), np.expand_dims(nz_col, axis=1)), axis=1) + Edges = list(tuple(map(tuple, e))) + num_edges = len(Edges) + g = rand_grid_gen(None) + + mesh = copy.deepcopy(g.mesh) + + mesh.X = X + mesh.Y = Y + mesh.E = E + mesh.V = V + mesh.nv = nv + mesh.ne = [] + mesh.N = N + mesh.Edges = Edges + mesh.num_edges = num_edges + + fine_nodes = [i for i in range(num_nodes)] + + grid_ = grid(AA,fine_nodes,[], mesh, Theta) + + + # print ("t21", t2-t1) + # print ("t32", t3-t2) + 
# print ("t43", t4-t3) + return grid_ + +def lloyd_aggregation(C, ratio=0.03, distance='unit', maxiter=10): + """Aggregate nodes using Lloyd Clustering. + + Parameters + ---------- + C : csr_matrix + strength of connection matrix + ratio : scalar + Fraction of the nodes which will be seeds. + distance : ['unit','abs','inv',None] + Distance assigned to each edge of the graph G used in Lloyd clustering + + For each nonzero value C[i,j]: + + ======= =========================== + 'unit' G[i,j] = 1 + 'abs' G[i,j] = abs(C[i,j]) + 'inv' G[i,j] = 1.0/abs(C[i,j]) + 'same' G[i,j] = C[i,j] + 'sub' G[i,j] = C[i,j] - min(C) + ======= =========================== + + maxiter : int + Maximum number of iterations to perform + + Returns + ------- + AggOp : csr_matrix + aggregation operator which determines the sparsity pattern + of the tentative prolongator + seeds : array + array of Cpts, i.e., Cpts[i] = root node of aggregate i + + See Also + -------- + amg_core.standard_aggregation + + Examples + -------- + >>> from scipy.sparse import csr_matrix + >>> from pyamg.gallery import poisson + >>> from pyamg.aggregation.aggregate import lloyd_aggregation + >>> A = poisson((4,), format='csr') # 1D mesh with 4 vertices + >>> A.todense() + matrix([[ 2., -1., 0., 0.], + [-1., 2., -1., 0.], + [ 0., -1., 2., -1.], + [ 0., 0., -1., 2.]]) + >>> lloyd_aggregation(A)[0].todense() # one aggregate + matrix([[1], + [1], + [1], + [1]], dtype=int8) + >>> # more seeding for two aggregates + >>> Agg = lloyd_aggregation(A,ratio=0.5)[0].todense() + + """ + if ratio <= 0 or ratio > 1: + raise ValueError('ratio must be > 0.0 and <= 1.0') + + if not (isspmatrix_csr(C) or isspmatrix_csc(C)): + raise TypeError('expected csr_matrix or csc_matrix') + + if distance == 'unit': + data = np.ones_like(C.data).astype(float) + elif distance == 'abs': + data = abs(C.data) + elif distance == 'inv': + data = 1.0/abs(C.data) + elif distance is 'same': + data = C.data + elif distance is 'min': + data = C.data - C.data.min() + else: + raise ValueError('unrecognized value distance=%s' % distance) + + if C.dtype == complex: + data = np.real(data) + + assert(data.min() >= 0) + + G = C.__class__((data, C.indices, C.indptr), shape=C.shape) + + num_seeds = int(min(max(ratio * G.shape[0], 1), G.shape[0])) + + distances, clusters, seeds = lloyd_cluster(G, num_seeds, maxiter=maxiter) + + row = (clusters >= 0).nonzero()[0] + col = clusters[row] + data = np.ones(len(row), dtype='int8') + AggOp = coo_matrix((data, (row, col)), + shape=(G.shape[0], num_seeds)).tocsr() + + return AggOp, seeds, col + + + +def set_edge_from_msh(msh): + + edges = msh.E + array_of_tuples = map(tuple, edges[:,[1,2]]) + t12 = tuple(array_of_tuples) + array_of_tuples = map(tuple, edges[:,[0,2]]) + t02 = tuple(array_of_tuples) + array_of_tuples = map(tuple, edges[:,[0,1]]) + t01 = tuple(array_of_tuples) + + set_edge = set(t01).union(set(t02)).union(set(t12)) + + return set_edge + + +# def rand_Amesh_gen(mesh): + +# num_Qhull_nodes = random.randint(45, 90) +# points = np.random.rand(num_Qhull_nodes, 2) # 30 random points in 2-D +# hull = ConvexHull(points) + +# msh_sz = 0.15*random.random()+0.25 + +# if mesh == None: + + +# with pygmsh.geo.Geometry() as geom: + +# poly = geom.add_polygon( + +# hull.points[hull.vertices.tolist()].tolist() + +# , +# mesh_size=msh_sz, +# ) +# mesh = geom.generate_mesh() + +# ''' +# geom.set_mesh_size_callback( +# lambda dim, tag, x, y, z: 6.0e-2 + 2.0e-1 * (x ** 2 + y ** 2) +# ) + +# field0 = geom.add_boundary_layer( +# edges_list=[poly.curves[0], 
poly.curves[2]], +# lcmin=0.05, +# lcmax=0.2, +# distmin=0.0, +# distmax=0.2, +# ) +# field1 = geom.add_boundary_layer( +# nodes_list=[poly.points[8], poly.points[2]], +# lcmin=0.05, +# lcmax=0.2, +# distmin=0.1, +# distmax=0.4, +# ) +# geom.set_background_mesh([field0, field1], operator="Min") + +# mesh = geom.generate_mesh() + + +# ''' +# ''' +# with pygmsh.geo.Geometry() as geom: +# geom.add_polygon( +# [ +# [-1.0, -1.0], +# [+1.0, -1.0], +# [+1.0, +1.0], +# [-1.0, +1.0], +# ] +# ) +# geom.set_mesh_size_callback( +# lambda dim, tag, x, y, z: 6.0e-2 + 2.0e-1 * (x ** 2 + y ** 2) +# ) + +# mesh = geom.generate_mesh() + +# ''' +# ''' +# with pygmsh.geo.Geometry() as geom: +# poly = geom.add_polygon( +# [ + + +# [0.0, 0.0], +# [0.7, 0.5], +# [1.0, 0.0], + + + +# ], +# mesh_size=0.1 +# , +# ) + +# field0 = geom.add_boundary_layer( +# edges_list=[poly.curves[0], poly.curves[1], poly.curves[2]], +# lcmin=0.01, +# lcmax=0.1, +# distmin=0.15, +# distmax=0.01, +# ) +# field1 = geom.add_boundary_layer( +# nodes_list=[poly.points[2]], +# lcmin=0.05, +# lcmax=0.2, +# distmin=0.1, +# distmax=0.4, +# ) +# geom.set_background_mesh([field0], operator="Min") + +# mesh = geom.generate_mesh() +# ''' + + +# mymsh = MyMesh(mesh) +# # points = mymsh.V +# # tri = Delaunay(points) +# # plt.triplot(points[:,0], points[:,1], tri.simplices) +# # plt.plot(points[:,0], points[:,1], 'o') + +# A,b = fem.gradgradform(mymsh, kappa=None, f=None, degree=1) +# return A, mymsh + + +def func1(x,y,p): + + x_f = int(np.floor(p.shape[0]*x)) + y_f = int(np.floor(p.shape[1]*y)) + + return p[x_f, y_f] + +def rand_Amesh_gen(mesh): + + num_Qhull_nodes = random.randint(45, 90) + points = np.random.rand(num_Qhull_nodes, 2) # 30 random points in 2-D + hull = ConvexHull(points) + + msh_sz = 0.5 #0.1*random.random()+0.1 + + + with pygmsh.geo.Geometry() as geom: + poly = geom.add_polygon( + + hull.points[hull.vertices.tolist()].tolist() + + , + mesh_size=msh_sz, + ) + + prob = np.random.random() + if prob>5: + + min_ = 0.005+0.01*np.random.random() + min_sz = 0.1#/(min_**0.1) + p = min_ + min_sz*np.random.random((500,500)) + geom.set_mesh_size_callback( + #lambda dim, tag, x, y, z: func(x, y, points,min_dist, thresh, min_sz) + lambda dim, tag, x, y, z: func1(x, y, p) + ) + + #geom.set_background_mesh([field0, field1], operator="Min") + + mesh = geom.generate_mesh() + + + + mymsh = MyMesh(mesh) + # points = mymsh.V + # tri = Delaunay(points) + # plt.triplot(points[:,0], points[:,1], tri.simplices) + # plt.plot(points[:,0], points[:,1], 'o') + + A,b = fem.gradgradform(mymsh, kappa=None, f=None, degree=1) + + return A, mymsh + + +#T.save(mesh, "mesh.pth") +#mesh = T.load("mesh.pth") + + + +def rand_grid_gen(mesh): + + A, mymsh = rand_Amesh_gen(mesh) + fine_nodes = [i for i in range(A.shape[0])] + + #set_of_edge = set_edge_from_msh(mymsh) + rand_grid = grid(A,fine_nodes,[],mymsh,0.56) + + return rand_grid + + + +def plot_cycle(list_grid, base_node_size, edge_width, scale): + + #shapes = ['o', '^', 's'] + node_color_list = ['k', 'k', 'orange', 'yellow' ] + for index in range(len(list_grid)): + + node_color = node_color_list[index] + + grid_ = copy.deepcopy(list_grid[index]) + G = nx.Graph() + mymsh = grid_.mesh + + points = mymsh.N + + if index == 0: + edges = mymsh.Edges + else: + edges = [] + + pos_dict = {} + for i in range(mymsh.nv): + pos_dict[i] = [mymsh.X[i], mymsh.Y[i]] + + G.add_nodes_from(points) + G.add_edges_from(edges) + colors = [i for i in range(mymsh.nv)] + + for i in grid_.fine_nodes: + colors[i] = node_color + for i in 
grid_.coarse_nodes: + colors[i] = node_color + for i in grid_.viol_nodes()[0]: + colors[i] = 'g' + + + + draw_networkx(G, pos=pos_dict, with_labels=False, node_size = base_node_size*(1*index+scale*index**2), \ + node_color = colors, edge_color = 'k', \ + node_shape = 'o', width = edge_width) + plt.axis('equal') + + + + +import gmsh +import torch +import meshio +#mesh = meshio.read('Test_Graphs/Hand_crafted/Geometry/Graph1.msh') + +class gmsh2MyMesh: + def __init__(self, mesh): + + + diff = set([i for i in range(mesh.points[:,0:2].shape[0])]) - \ + set(mesh.cells[-1].data.flatten().tolist()) + + mesh.points = np.delete(mesh.points, list(diff), axis=0) + arr_diff = np.array(list(diff)) + for i in range(len(mesh.cells[-1].data)): + + for j in range(3): + + shift = mesh.cells[-1].data[i,j]>arr_diff + shift = np.sum(shift) + mesh.cells[-1].data[i,j] = mesh.cells[-1].data[i,j] - shift + + self.X = mesh.points[:,0:1].flatten() + self.Y = mesh.points[:,1:2].flatten() + self.E = mesh.cells[-1].data + self.V = mesh.points[:,0:2] + self.nv = mesh.points[:,0:2].shape[0] + self.ne = len(mesh.cells[-1].data) + + e01 = self.E[:,[0,1]] + e02 = self.E[:,[0,2]] + e12 = self.E[:,[1,2]] + + e01 = tuple(map(tuple, e01)) + e02 = tuple(map(tuple, e02)) + e12 = tuple(map(tuple, e12)) + + e = list(set(e01).union(set(e02)).union(set(e12))) + self.N = [i for i in range(self.X.shape[0])] + self.Edges = e + self.num_edges = len(e) + + +def hand_grid(mesh): + + msh = gmsh2MyMesh(mesh) + + A,b = fem.gradgradform(msh, kappa=None, f=None, degree=1) + + fine_nodes = [i for i in range(A.shape[0])] + + #set_of_edge = set_edge_from_msh(mymsh) + rand_grid = grid(A,fine_nodes,[],msh,0.56) + + return rand_grid + + + + + + + + + + \ No newline at end of file diff --git a/fem.py b/fem.py new file mode 100644 index 0000000..766a322 --- /dev/null +++ b/fem.py @@ -0,0 +1,898 @@ +"""Poisson problem with finite elements +""" +import numpy as np +import scipy.sparse as sparse + + +def check_mesh(V, E): + """Check the ccw orientation of each simplex in the mesh + """ + E01 = np.vstack((V[E[:, 1], 0] - V[E[:, 0], 0], + V[E[:, 1], 1] - V[E[:, 0], 1], + np.zeros(E.shape[0]))).T + E12 = np.vstack((V[E[:, 2], 0] - V[E[:, 1], 0], + V[E[:, 2], 1] - V[E[:, 1], 1], + np.zeros(E.shape[0]))).T + orientation = np.all(np.cross(E01, E12)[:, 2] > 0) + + return orientation + + +def generate_quadratic(V, E, return_edges=False): + """Generate a quadratic element list by adding midpoints to each edge + + Parameters + ---------- + V : ndarray + nv x 2 list of coordinates + + E : ndarray + ne x 3 list of vertices + + return_edges : bool + indicate whether list of the refined edges is returned + + Returns + ------- + V2 : ndarray + nv2 x 2 list of coordinates + + E2 : ndarray + ne2 x 6 list of vertices + + Edges : ndarray + ned x 2 list of edges where the midpoint is generated + + Notes + ----- + - midpoints are introduced and globally numbered at the end of the vertex list + - the element list includes the new list beteen v0-v1, v1-v2, and v2-v0 + + Examples + -------- + >>> import numpy as np + >>> V = np.array([[0,0], [1,0], [0,1], [1,1]]) + >>> E = np.array([[0,1,2], [2,3,1]]) + >>> import fem + >>> V2, E2 = fem.generate_quadratic(V, E) + array([[0. , 0. ], + [1. , 0. ], + [0. , 1. ], + [1. , 1. ], + [0.5, 0. ], + [0.5, 0.5], + [0. , 0.5], + [0.5, 1. ], + [1. 
, 0.5]]) + array([[0, 1, 2, 4, 5, 6], + [2, 3, 1, 7, 8, 5]]) + """ + + if not isinstance(V, np.ndarray) or not isinstance(E, np.ndarray): + raise ValueError('V and E must be ndarray') + + if V.shape[1] != 2 or E.shape[1] != 3: + raise ValueError('V should be nv x 2 and E should be ne x 3') + + ne = E.shape[0] + + # make a vertext-to-vertex graph + ID = np.kron(np.arange(0, ne), np.ones((3,), dtype=int)) + G = sparse.coo_matrix((np.ones((ne*3,), dtype=int), (E.ravel(), ID,))) + V2V = G * G.T + + # from the vertex graph, get the edges and create new midpoints + V2Vmid = sparse.tril(V2V, -1) + Edges = np.vstack((V2Vmid.row, V2Vmid.col)).T + Vmid = (V[Edges[:, 0], :] + V[Edges[:, 1], :]) / 2 + V = np.vstack((V, Vmid)) + + # enumerate the new midpoints for the edges + # V2Vmid[i,j] will have the new number of the midpoint between i and j + maxindex = E.max() + 1 + newID = maxindex + np.arange(Edges.shape[0]) + V2Vmid.data = newID + V2Vmid = V2Vmid + V2Vmid.T + + # from the midpoints, extend E + E = np.hstack((E, np.zeros((E.shape[0], 3), dtype=int))) + E[:, 3] = V2Vmid[E[:, 0], E[:, 1]] + E[:, 4] = V2Vmid[E[:, 1], E[:, 2]] + E[:, 5] = V2Vmid[E[:, 2], E[:, 0]] + + if return_edges: + return V, E, Edges + + return V, E + + +def diameter(V, E): + """Compute the diameter of a mesh + + Parameters + ---------- + V : ndarray + nv x 2 list of coordinates + + E : ndarray + ne x 3 list of vertices + + Returns + ------- + h : float + maximum diameter of a circumcircle over all elements + longest edge + + Examples + -------- + >>> import numpy as np + >>> dx = 1 + >>> V = np.array([[0,0], [dx,0], [0,dx], [dx,dx]]) + >>> E = np.array([[0,1,2], [2,3,1]]) + >>> h = diameter(V, E) + >>> print(h) + 1.4142135623730951 + + """ + if not isinstance(V, np.ndarray) or not isinstance(E, np.ndarray): + raise ValueError('V and E must be ndarray') + + if V.shape[1] != 2 or E.shape[1] != 3: + raise ValueError('V should be nv x 2 and E should be ne x 3') + + h = 0 + I = [0, 1, 2, 0] + for e in E: + hs = np.sqrt(np.diff(V[e[I], 0])**2 + np.diff(V[e[I], 1])**2) + h = max(h, hs.max()) + return h + + +def refine2dtri(V, E, marked_elements=None): + r"""Refine a triangular mesh + + Parameters + ---------- + V : ndarray + nv x 2 list of coordinates + + E : ndarray + ne x 3 list of vertices + + marked_elements : array + list of marked elements for refinement. None means uniform. 
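+        With None, every element is refined (uniform quad-section, as in
+        the sketch under Notes).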
+ + Returns + ------- + Vref : ndarray + nv x 2 list of coordinates + + Eref : ndarray + ne x 3 list of vertices + + Notes + ----- + - Peforms quad-section in the following where n0, n1, and n2 are + the original vertices + + n2 + / | + / | + / | + n5-------n4 + / \ /| + / \ / | + / \ / | + n0 --------n3-- n1 + """ + Nel = E.shape[0] + Nv = V.shape[0] + + if marked_elements is None: + marked_elements = np.arange(0, Nel) + + marked_elements = np.ravel(marked_elements) + + # construct vertex to vertex graph + col = E.ravel() + row = np.kron(np.arange(0, Nel), [1, 1, 1]) + data = np.ones((Nel*3,)) + V2V = sparse.coo_matrix((data, (row, col)), shape=(Nel, Nv)) + V2V = V2V.T * V2V + + # compute interior edges list + V2V.data = np.ones(V2V.data.shape) + V2Vupper = sparse.triu(V2V, 1).tocoo() + + # construct EdgeList from V2V + Nedges = len(V2Vupper.data) + V2Vupper.data = np.arange(0, Nedges) + EdgeList = np.vstack((V2Vupper.row, V2Vupper.col)).T + Nedges = EdgeList.shape[0] + + # elements to edge list + V2Vupper = V2Vupper.tocsr() + edges = np.vstack((E[:, [0, 1]], + E[:, [1, 2]], + E[:, [2, 0]])) + edges.sort(axis=1) + ElementToEdge = V2Vupper[edges[:, 0], edges[:, 1]].reshape((3, Nel)).T + + marked_edges = np.zeros((Nedges,), dtype=bool) + marked_edges[ElementToEdge[marked_elements, :].ravel()] = True + + # mark 3-2-1 triangles + nsplit = len(np.where(marked_edges == 1)[0]) + edge_num = marked_edges[ElementToEdge].sum(axis=1) + edges3 = np.where(edge_num >= 2)[0] + marked_edges[ElementToEdge[edges3, :]] = True # marked 3rd edge + nsplit = len(np.where(marked_edges == 1)[0]) + + edges1 = np.where(edge_num == 1)[0] + # edges1 = edge_num[id] # all 2 or 3 edge elements + + # new nodes (only edges3 elements) + + x_new = 0.5*(V[EdgeList[marked_edges, 0], 0]) \ + + 0.5*(V[EdgeList[marked_edges, 1], 0]) + y_new = 0.5*(V[EdgeList[marked_edges, 0], 1]) \ + + 0.5*(V[EdgeList[marked_edges, 1], 1]) + + V_new = np.vstack((x_new, y_new)).T + V = np.vstack((V, V_new)) + # indices of the new nodes + new_id = np.zeros((Nedges,), dtype=int) + # print(len(np.where(marked_edges == 1)[0])) + # print(nsplit) + new_id[marked_edges] = Nv + np.arange(0, nsplit) + # New tri's in the case of refining 3 edges + # example, 1 element + # n2 + # / | + # / | + # / | + # n5-------n4 + # / \ /| + # / \ / | + # / \ / | + # n0 --------n3-- n1 + ids = np.ones((Nel,), dtype=bool) + ids[edges3] = False + ids[edges1] = False + + E_new = np.delete(E, marked_elements, axis=0) # E[id2, :] + n0 = E[edges3, 0] + n1 = E[edges3, 1] + n2 = E[edges3, 2] + n3 = new_id[ElementToEdge[edges3, 0]].ravel() + n4 = new_id[ElementToEdge[edges3, 1]].ravel() + n5 = new_id[ElementToEdge[edges3, 2]].ravel() + + t1 = np.vstack((n0, n3, n5)).T + t2 = np.vstack((n3, n1, n4)).T + t3 = np.vstack((n4, n2, n5)).T + t4 = np.vstack((n3, n4, n5)).T + + E_new = np.vstack((E_new, t1, t2, t3, t4)) + return V, E_new + + +def l2norm(u, mesh): + """Calculate the L2 norm of a funciton on mesh (V,E) + + Parameters + ---------- + u : array + (nv,) list of function values + + mesh : object + mesh object + + Returns + ------- + val : float + the value of the L2 norm of u, ||u||_2,V + + Notes + ----- + - modepy is used to generate the quadrature points + q = modepy.XiaoGimbutasSimplexQuadrature(4,2) + + Examples + -------- + >>> import numpy as np + >>> V = np.array([[0,0], [1,0], [0,1], [1,1]]) + >>> E = np.array([[0,1,2], [2,3,1]]) + >>> X, Y = V[:, 0], V[:, 1] + >>> import fem + >>> I = fem.l2norm(X+Y, V, E, degree=1) + >>> print(I) + >>> V2, E2 = fem.generate_quadratic(V, 
E) + >>> X, Y = V2[:, 0], V2[:, 1] + >>> I = fem.l2norm(X+Y, V2, E2, degree=2) + >>> print(I) + >>> # actual (from sympy): 1.08012344973464 + """ + if mesh.degree == 1: + V = mesh.V + E = mesh.E + + if mesh.degree == 2: + V = mesh.V2 + E = mesh.E2 + + if not isinstance(u, np.ndarray): + raise ValueError('u must be ndarray') + + if V.shape[1] != 2: + raise ValueError('V should be nv x 2') + + if mesh.degree == 1 and E.shape[1] != 3: + raise ValueError('E should be nv x 3') + + if mesh.degree == 2 and E.shape[1] != 6: + raise ValueError('E should be nv x 6') + + if mesh.degree not in [1, 2]: + raise ValueError('degree = 1 or 2 supported') + + val = 0 + + # quadrature points + ww = np.array([0.44676317935602256, 0.44676317935602256, 0.44676317935602256, + 0.21990348731064327, 0.21990348731064327, 0.21990348731064327]) + xy = np.array([[-0.10810301816807008, -0.78379396366385990], + [-0.10810301816806966, -0.10810301816807061], + [-0.78379396366386020, -0.10810301816806944], + [-0.81684757298045740, -0.81684757298045920], + [0.63369514596091700, -0.81684757298045810], + [-0.81684757298045870, 0.63369514596091750]]) + xx, yy = (xy[:, 0]+1)/2, (xy[:, 1]+1)/2 + ww *= 0.5 + + if mesh.degree == 1: + I = np.arange(3) + + def basis(x, y): + return np.array([1-x-y, + x, + y]) + + if mesh.degree == 2: + I = np.arange(6) + + def basis(x, y): + return np.array([(1-x-y)*(1-2*x-2*y), + x*(2*x-1), + y*(2*y-1), + 4*x*(1-x-y), + 4*x*y, + 4*y*(1-x-y)]) + + for e in E: + x = V[e, 0] + y = V[e, 1] + + # Jacobian + jac = np.abs((x[1]-x[0])*(y[2]-y[0]) - (x[2]-x[0])*(y[1]-y[0])) + + # add up each quadrature point + for wv, xv, yv in zip(ww, xx, yy): + val += (jac / 2) * wv * np.dot(u[e[I]], basis(xv, yv))**2 + + # take the square root for the norm + return np.sqrt(val) + + +class mesh: + """Simple mesh object that holds vertices and mesh functions + """ + + def __init__(self, V, E, degree=1): + + # check to see if E is numbered 0 ... 
nv + ids = np.full((E.max()+1,), False) + ids[E.ravel()] = True + nv = np.sum(ids) + if V.shape[0] != nv: + print('fixing V and E') + I = np.where(ids)[0] + J = np.arange(E.max()+1) + J[I] = np.arange(nv) + E = J[E] + V = V[I, :] + + if not check_mesh(V, E): + raise ValueError('triangles must be counter clockwise') + + self.V = V + self.E = E + self.X = V[:, 0] + self.Y = V[:, 1] + self.degree = degree + + self.nv = nv + self.ne = E.shape[0] + + self.h = diameter(V, E) + + self.V2 = None + self.E2 = None + self.Edges = None + self.newID = None + + if degree == 2: + self.generate_quadratic() + + def generate_quadratic(self): + """generate a quadratic mesh + """ + if self.V2 is None: + self.V2, self.E2, self.Edges = generate_quadratic(self.V, self.E, return_edges=True) + self.X2 = self.V2[:, 0] + self.Y2 = self.V2[:, 1] + self.newID = self.nv + np.arange(self.Edges.shape[0]) + + def refine(self, levels): + """refine the mesh + """ + self.V2 = None + self.E2 = None + self.Edges = None + self.newID = None + for l in range(levels): + self.V, self.E = refine2dtri(self.V, self.E) + self.nv = self.V.shape[0] + self.ne = self.E.shape[0] + self.h = diameter(self.V, self.E) + self.X = self.V[:, 0] + self.Y = self.V[:, 1] + + if self.degree == 2: + self.generate_quadratic() + + def smooth(self, maxit=10, tol=0.01): + edge0 = self.E[:, [0, 0, 1, 1, 2, 2]].ravel() + edge1 = self.E[:, [1, 2, 0, 2, 0, 1]].ravel() + + nedges = edge0.shape[0] + data = np.ones((nedges,), dtype=int) + + S = sparse.coo_matrix((data, (edge0, edge1)), shape=(self.V.shape[0], self.V.shape[0])).tocsr().tocoo() + S0 = S.copy() + S.data = 0 * S.data + 1 + + W = S.sum(axis=1).ravel() + + L = (self.V[edge0, 0] - self.V[edge1, 0])**2 +\ + (self.V[edge0, 1] - self.V[edge1, 1])**2 + + L_to_low = np.where(L < 1e-14)[0] + L[L_to_low] = 1e-14 + + maxit = 10 + # find the boundary nodes for this mesh (does not support a one-element + # whole) + bid = np.where(S0.data == 1)[0] + bid = np.unique(S0.row[bid]) + + for it in range(0, maxit): + x_new = np.array(S * self.V[:, 0] / W).ravel() + y_new = np.array(S * self.V[:, 1] / W).ravel() + x_new[bid] = self.V[bid, 0] + y_new[bid] = self.V[bid, 1] + self.V[:, 0] = x_new + self.V[:, 1] = y_new + L_new = (self.V[edge0, 0] - self.V[edge1, 0])**2 +\ + (self.V[edge0, 1] - self.V[edge1, 1])**2 + L_to_low = np.where(L < 1e-14)[0] + L_new[L_to_low] = 1e-14 + + move = max(abs((L_new - L) / L_new)) # inf norm + + if move < tol: + print(it) + return + L = L_new + print(move) + + +def gradgradform(mesh, kappa=None, f=None, degree=1): + """Finite element discretization of a Poisson problem. + + - div . 
kappa(x,y) grad u = f(x,y) + + Parameters + ---------- + V : ndarray + nv x 2 list of coordinates + + E : ndarray + ne x 3 or 6 list of vertices + + kappa : function + diffusion coefficient, kappa(x,y) with vector input + + fa : function + right hand side, f(x,y) with vector input + + degree : 1 or 2 + polynomial degree of the bases (assumed to be Lagrange locally) + + Returns + ------- + A : sparse matrix + finite element matrix where A_ij = + + b : array + finite element rhs where b_ij = + + Notes + ----- + - modepy is used to generate the quadrature points + q = modepy.XiaoGimbutasSimplexQuadrature(4,2) + + Example + ------- + >>> import numpy as np + >>> import fem + >>> import scipy.sparse.linalg as sla + >>> V = np.array( + [[ 0, 0], + [ 1, 0], + [2*1, 0], + [ 0, 1], + [ 1, 1], + [2*1, 1], + [ 0,2*1], + [ 1,2*1], + [2*1,2*1], + ]) + >>> E = np.array( + [[0,1,3], + [1,2,4], + [1,4,3], + [2,5,4], + [3,4,6], + [4,5,7], + [4,7,6], + [5,8,7]]) + >>> A, b = fem.poissonfem(V, E) + >>> print(A.toarray()) + >>> print(b) + >>> f = lambda x, y : 0*x + 1.0 + >>> g = lambda x, y : 0*x + 0.0 + >>> g1 = lambda x, y : 0*x + 1.0 + >>> tol = 1e-12 + >>> X, Y = V[:,0], V[:,1] + >>> id1 = np.where(abs(Y) < tol)[0] + >>> id2 = np.where(abs(Y-2) < tol)[0] + >>> id3 = np.where(abs(X) < tol)[0] + >>> id4 = np.where(abs(X-2) < tol)[0] + >>> bc = [{'id': id1, 'g': g}, + {'id': id2, 'g': g}, + {'id': id3, 'g': g1}, + {'id': id4, 'g': g}] + >>> A, b = fem.poissonfem(V, E, f=f, bc=bc) + >>> u = sla.spsolve(A, b) + >>> print(A.toarray()) + >>> print(b) + >>> print(u) + """ + if degree not in [1, 2]: + raise ValueError('degree = 1 or 2 supported') + + if f is None: + def f(x, y): + return 0.0 + + if kappa is None: + def kappa(x, y): + return 1.0 + + if not callable(f) or not callable(kappa): + raise ValueError('f, kappa must be callable functions') + + ne = mesh.ne + + if degree == 1: + E = mesh.E + V = mesh.V + X = mesh.X + Y = mesh.Y + + if degree == 2: + E = mesh.E2 + V = mesh.V2 + X = mesh.X2 + Y = mesh.Y2 + + # allocate sparse matrix arrays + m = 3 if degree == 1 else 6 + AA = np.zeros((ne, m**2)) + IA = np.zeros((ne, m**2), dtype=int) + JA = np.zeros((ne, m**2), dtype=int) + bb = np.zeros((ne, m)) + ib = np.zeros((ne, m), dtype=int) + jb = np.zeros((ne, m), dtype=int) + + # Assemble A and b + for ei in range(0, ne): + # Step 1: set the vertices and indices + K = E[ei, :] + x0, y0 = X[K[0]], Y[K[0]] + x1, y1 = X[K[1]], Y[K[1]] + x2, y2 = X[K[2]], Y[K[2]] + + # Step 2: compute the Jacobian, inv, and det + J = np.array([[x1 - x0, x2 - x0], + [y1 - y0, y2 - y0]]) + invJ = np.linalg.inv(J.T) + detJ = np.linalg.det(J) + + if degree == 1: + # Step 3, define the gradient of the basis + dbasis = np.array([[-1, 1, 0], + [-1, 0, 1]]) + + # Step 4 + dphi = invJ.dot(dbasis) + + # Step 5, 1-point gauss quadrature + Aelem = kappa(X[K].mean(), Y[K].mean()) * (detJ / 2.0) * (dphi.T).dot(dphi) + + # Step 6, 1-point gauss quadrature + belem = f(X[K].mean(), Y[K].mean()) * (detJ / 6.0) * np.ones((3,)) + + if degree == 2: + ww = np.array([0.44676317935602256, 0.44676317935602256, 0.44676317935602256, + 0.21990348731064327, 0.21990348731064327, 0.21990348731064327]) + xy = np.array([[-0.10810301816807008, -0.78379396366385990], + [-0.10810301816806966, -0.10810301816807061], + [-0.78379396366386020, -0.10810301816806944], + [-0.81684757298045740, -0.81684757298045920], + [0.63369514596091700, -0.81684757298045810], + [-0.81684757298045870, 0.63369514596091750]]) + xx, yy = (xy[:, 0]+1)/2, (xy[:, 1]+1)/2 + ww *= 0.5 + + Aelem = 
np.zeros((m, m))
+            belem = np.zeros((m,))
+
+            for w, x, y in zip(ww, xx, yy):
+                # Step 3
+                basis = np.array([(1-x-y)*(1-2*x-2*y),
+                                  x*(2*x-1),
+                                  y*(2*y-1),
+                                  4*x*(1-x-y),
+                                  4*x*y,
+                                  4*y*(1-x-y)])
+
+                dbasis = np.array([
+                    [4*x + 4*y - 3, 4*x-1,     0, -8*x - 4*y + 4, 4*y,           -4*y],
+                    [4*x + 4*y - 3,     0, 4*y-1,           -4*x, 4*x, -4*x - 8*y + 4]
+                ])
+
+                # Step 4
+                dphi = invJ.dot(dbasis)
+
+                # Step 5
+                xt, yt = J.dot(np.array([x, y])) + np.array([x0, y0])
+                Aelem += (detJ / 2) * w * kappa(xt, yt) * dphi.T.dot(dphi)
+
+                # Step 6
+                belem += (detJ / 2) * w * f(xt, yt) * basis
+
+        # Step 7
+        AA[ei, :] = Aelem.ravel()
+        IA[ei, :] = np.repeat(K[np.arange(m)], m)
+        JA[ei, :] = np.tile(K[np.arange(m)], m)
+        bb[ei, :] = belem.ravel()
+        ib[ei, :] = K[np.arange(m)]
+        jb[ei, :] = 0
+
+    # convert matrices
+    A = sparse.coo_matrix((AA.ravel(), (IA.ravel(), JA.ravel())))
+    A.sum_duplicates()
+    b = sparse.coo_matrix((bb.ravel(), (ib.ravel(), jb.ravel()))).toarray().ravel()
+
+    # A = A.tocsr()
+    return A, b
+
+
+def divform(mesh):
+    """Calculate the (div u, p) form that arises in Stokes;
+    assumes P2-P1 (Taylor-Hood) elements.
+    """
+    if mesh.V2 is None:
+        mesh.generate_quadratic()
+
+    X, Y = mesh.X, mesh.Y
+    ne = mesh.ne
+    E = mesh.E2
+    V = mesh.V2
+
+    m1 = 6
+    m2 = 3
+    DX = np.zeros((ne, m1*m2))
+    DXI = np.zeros((ne, m1*m2), dtype=int)
+    DXJ = np.zeros((ne, m1*m2), dtype=int)
+    DY = np.zeros((ne, m1*m2))
+    DYI = np.zeros((ne, m1*m2), dtype=int)
+    DYJ = np.zeros((ne, m1*m2), dtype=int)
+
+    # Assemble BX and BY
+    for ei in range(0, ne):
+        K = E[ei, :]
+        x0, y0 = X[K[0]], Y[K[0]]
+        x1, y1 = X[K[1]], Y[K[1]]
+        x2, y2 = X[K[2]], Y[K[2]]
+
+        J = np.array([[x1 - x0, x2 - x0],
+                      [y1 - y0, y2 - y0]])
+        invJ = np.linalg.inv(J.T)
+        detJ = np.linalg.det(J)
+
+        ww = np.array([0.44676317935602256, 0.44676317935602256, 0.44676317935602256,
+                       0.21990348731064327, 0.21990348731064327, 0.21990348731064327])
+        xy = np.array([[-0.10810301816807008, -0.78379396366385990],
+                       [-0.10810301816806966, -0.10810301816807061],
+                       [-0.78379396366386020, -0.10810301816806944],
+                       [-0.81684757298045740, -0.81684757298045920],
+                       [ 0.63369514596091700, -0.81684757298045810],
+                       [-0.81684757298045870,  0.63369514596091750]])
+        xx, yy = (xy[:, 0]+1)/2, (xy[:, 1]+1)/2
+        ww *= 0.5
+
+        DXelem = np.zeros((3, 6))
+        DYelem = np.zeros((3, 6))
+
+        for w, x, y in zip(ww, xx, yy):
+            # P1 (pressure) basis
+            basis1 = np.array([1-x-y, x, y])
+
+            # P2 (velocity) basis; retained from the original, unused below
+            basis2 = np.array([(1-x-y)*(1-2*x-2*y),
+                               x*(2*x-1),
+                               y*(2*y-1),
+                               4*x*(1-x-y),
+                               4*x*y,
+                               4*y*(1-x-y)])
+
+            dbasis = np.array([
+                [4*x + 4*y - 3, 4*x-1,     0, -8*x - 4*y + 4, 4*y,           -4*y],
+                [4*x + 4*y - 3,     0, 4*y-1,           -4*x, 4*x, -4*x - 8*y + 4]
+            ])
+
+            dphi = invJ.dot(dbasis)
+
+            DXelem += (detJ / 2) * w * np.outer(basis1, dphi[0, :])
+            DYelem += (detJ / 2) * w * np.outer(basis1, dphi[1, :])
+
+        # Step 7
+        DX[ei, :] = DXelem.ravel()
+        DXI[ei, :] = np.repeat(K[np.arange(m2)], m1)
+        DXJ[ei, :] = np.tile(K[np.arange(m1)], m2)
+
+        DY[ei, :] = DYelem.ravel()
+        DYI[ei, :] = np.repeat(K[np.arange(m2)], m1)
+        DYJ[ei, :] = np.tile(K[np.arange(m1)], m2)
+
+    BX = sparse.coo_matrix((DX.ravel(), (DXI.ravel(), DXJ.ravel())))
+    BX.sum_duplicates()
+    BY = sparse.coo_matrix((DY.ravel(), (DYI.ravel(), DYJ.ravel())))
+    BY.sum_duplicates()
+    return BX, BY
+
+
+def applybc(A, b, mesh, bc):
+    """
+    bc : list
+        list of boundary conditions
+        bc = [bc1, bc2, ..., bck]
+        where bck = {'id': id,           a list of vertices for boundary "k"
+                     'g': g,             g = g(x,y) is a function for the vertices on boundary "k"
+                     'var': var          the variable, given as a start in the dof list
+                     'degree': degree    degree 
of the variable, either 1 or 2 + } + """ + + for c in bc: + if not callable(c['g']): + raise ValueError('each bc g must be callable functions') + + if 'degree' not in c.keys(): + c['degree'] = 1 + + if 'var' not in c.keys(): + c['var'] = 0 + + # now extend the BC + # for each new id, are the orignal neighboring ids in a bc? + for c in bc: + if c['degree'] == 2: + idx = c['id'] + newidx = [] + for j, ed in zip(mesh.newID, mesh.Edges): + if ed[0] in idx and ed[1] in idx: + newidx.append(j) + c['id'] = np.hstack((idx, newidx)) + + # set BC in the right hand side + # set the lifting function (1 of 3) + u0 = np.zeros((A.shape[0],)) + for c in bc: + idx = c['var'] + c['id'] + if c['degree'] == 1: + X = mesh.X + Y = mesh.Y + elif c['degree'] == 2: + X = mesh.X2 + Y = mesh.Y2 + u0[idx] = c['g'](X[idx], Y[idx]) + + # lift (2 of 3) + b = b - A * u0 + + # fix the values (3 of 3) + for c in bc: + idx = c['var'] + c['id'] + b[idx] = u0[idx] + + # set BC to identity in the matrix + # collect all BC indices (1 of 2) + Dflag = np.full((A.shape[0],), False) + for c in bc: + idx = c['var'] + c['id'] + Dflag[idx] = True + # write identity (2 of 2) + for k in range(0, len(A.data)): + i = A.row[k] + j = A.col[k] + if Dflag[i] or Dflag[j]: + if i == j: + A.data[k] = 1.0 + else: + A.data[k] = 0.0 + + return A, b + + +def stokes(mesh, fu, fv): + """Stokes Flow + """ + mesh.generate_quadratic() + Au, bu = gradgradform(mesh, f=fu, degree=2) + Av, bv = gradgradform(mesh, f=fv, degree=2) + BX, BY = divform(mesh) + + C = sparse.bmat([[Au, None, BX.T], + [None, Av, BY.T], + [BX, BY, None]]) + b = np.hstack((bu, bv, np.zeros((BX.shape[0],)))) + + return C, b + + +def model(num=0): + """A list of model (elliptic) problems + + Parameters + ---------- + num : int or string + A tag for a particular problem. See the notes below. 
+
+
+def stokes(mesh, fu, fv):
+    """Assemble the Stokes system with P2-P1 elements.
+    """
+    mesh.generate_quadratic()
+    Au, bu = gradgradform(mesh, f=fu, degree=2)
+    Av, bv = gradgradform(mesh, f=fv, degree=2)
+    BX, BY = divform(mesh)
+
+    # saddle-point block system
+    C = sparse.bmat([[Au, None, BX.T],
+                     [None, Av, BY.T],
+                     [BX, BY, None]])
+    b = np.hstack((bu, bv, np.zeros((BX.shape[0],))))
+
+    return C, b
+
+
+def model(num=0):
+    """A list of model (elliptic) problems
+
+    Parameters
+    ----------
+    num : int or string
+        A tag for a particular problem. See the notes below.
+
+    Returns
+    -------
+    A
+    b
+    V
+    E
+    f
+    kappa
+    bc
+
+    See Also
+    --------
+    poissonfem : build the FE matrix and right-hand side
+
+    Notes
+    -----
+    """
diff --git a/main.py b/main.py
new file mode 100644
index 0000000..9b82f3a
--- /dev/null
+++ b/main.py
@@ -0,0 +1,311 @@
+import numpy as np
+from MG_Agent import Agent
+#from utils import plot_learning_curves
+import Unstructured as uns
+import time
+import matplotlib.pyplot as plt
+import random
+from scipy.spatial import ConvexHull, convex_hull_plot_2d
+import pygmsh
+from Unstructured import MyMesh, grid, rand_Amesh_gen, rand_grid_gen, structured
+import fem
+from torch.utils.tensorboard import SummaryWriter
+import sys
+import torch as T
+from Scott_greedy import greedy_coarsening
+import copy
+
+writer = SummaryWriter("runs")
+
+brk = False
+
+if __name__ == '__main__':
+
+    min_sofar = 100
+    done = False
+    #grid_ = rand_grid_gen(mesh)
+    #num_nodes = grid_.num_nodes
+    #grid_.plot()
+    load_checkpoint = False
+    K = 4
+    learning_per_step = 5
+    num_steps = 1000
+    learn_every = 4
+    count = 0
+    dim_list = [32]
+    lr_list = [0.001]
+    loss_list = []
+    iterat = 0
+
+    #const_grid = rand_grid_gen(None)
+    #T.save(const_grid, 'test_FC_grid1')
+    #nn = const_grid.num_nodes
+
+    #writer.add_graph(agent.q_eval, input_to_model=rand_grid_gen(mesh).data)
+    for lr in lr_list:
+        for dim in dim_list:
+
+            agent = Agent(dim=dim, K=K, gamma=1, epsilon=1,
+                          lr=lr, mem_size=5000, batch_size=32, net_type='TAGConv',
+                          eps_min=0.01, eps_dec=1.25/num_steps, replace=10)
+            loss_list = []
+
+            if load_checkpoint:
+                agent.load_models()
+
+            for i in range(num_steps):
+
+                # learning-rate schedule (only active if num_steps is raised)
+                if i == 2000:
+                    agent.lr = lr/2
+                if i == 5000:
+                    agent.lr = lr/10
+                if i == 7500:
+                    agent.lr = lr/20
+
+                done = False
+                g_idx = np.random.randint(0, 50)
+
+                grid_ = copy.deepcopy(T.load('Train_Graphs/Graph'+str(g_idx)))
+                agent.decrement_epsilon()
+                while not done:
+
+                    observation = grid_.data
+                    list_viols = grid_.viol_nodes()[0]
+
+                    action = agent.choose_action(observation, list_viols)
+                    grid_.coarsen_node(action)
+                    num_c_nodes = len(grid_.coarse_nodes)
+                    next_list_viols = grid_.viol_nodes()[0]
+                    next_observation = grid_.data
+                    #reward = -100/grid_.num_nodes
+                    reward = -1  # 200*num_c_nodes/(grid_.num_nodes**2)
+                    done = grid_.viol_nodes()[2] == 0
+                    agent.store_transition(observation, list_viols,
+                                           None, action, reward,
+                                           next_observation, next_list_viols,
+                                           None, grid_.num_nodes, 1-int(done))
+
+                    if count % learn_every == 0:
+                        for __ in range(learning_per_step):
+                            agent.learn()
+                            loss = agent.loss.item()
+                            # rescale by the filled fraction of the replay buffer
+                            loss = loss*agent.memory.mem_size/len(agent.memory.replay_buffer)
+                            loss_list.append(loss)
+                            writer.add_scalar("training loss", loss, iterat)
+                            #writer.add_histogram('q_eval GNN conv', agent.q_eval.nn1[0].weight)
+                            # if agent.epsilon<0.05 and np.mean(loss_list[-250:])<1:
+                            #     brk = True
+                            #     done = True
+                            iterat += 1
+
+                    count += 1
+                # if agent.epsilon<0.05 and brk == True:
+                #     print ("Minimum loss reached !!!")
+                #     break
+
+                if i % 10 == 0:
+                    print("Epsilon is =", agent.epsilon)
+                    print(i)
+
+                if np.mean(loss_list[-100:]) < min_sofar:
+                    min_sofar = np.mean(loss_list[-100:])
+                    #T.save(agent.q_eval.state_dict(), "best_agent.pth")
+                if i % 100 == 0:
+                    T.save(agent.q_eval.state_dict(), "Models/MPNN/Dueling_MPNN"+str(i)+".pth")
+
+            # writer.add_hparams({'LR': lr, 'dim': dim},
+            #                    {'Loss': np.mean(loss_list[-int(len(loss_list)/4):])})
+
+    # loss_list = loss[np.nonzero(loss)[0]]
+    # count = [i+1 for i in range(len(loss_list))]
+    # plt.plot(count, loss_list)
+    # plt.xlabel('learning iteration')
+    # plt.ylabel('MSE loss')
+    # plt.yscale('log')
+    # plt.show()
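+
+
+# Schedule note (sketch): assuming decrement_epsilon subtracts eps_dec
+# linearly down to eps_min, then with epsilon starting at 1.0, eps_min =
+# 0.01 and eps_dec = 1.25/num_steps, one decrement per episode hits the
+# floor after about (1 - 0.01)/(1.25/num_steps) ~ 0.8*num_steps episodes,
+# so roughly the last fifth of training acts (near-)greedily.
+def _episodes_until_eps_min(eps=1.0, eps_min=0.01, eps_dec=1.25/1000):
+    # closed form of the repeated subtraction described above
+    return int(np.ceil((eps - eps_min)/eps_dec))   # 792 for the defaults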
+
+
+def test(net_type, dim, custom_grid):
+
+    K = 4
+    agent = Agent(dim=dim, K=K, gamma=1, epsilon=1,
+                  lr=0.001, mem_size=5000, batch_size=32, net_type=net_type,
+                  eps_min=0.01, eps_dec=1.333/5000, replace=10)
+    #agent.q_eval.load_state_dict(T.load('random_trained_agent.pth'))
+    #agent.q_eval.load_state_dict(T.load('Dueling_1_agent.pth'))
+
+    if net_type == 'TAGConv':
+        agent.q_eval.load_state_dict(T.load('Models/Dueling_batch_train_final.pth'))
+        #agent.q_eval.load_state_dict(T.load('Models/MPNN/Dueling_MPNN900.pth'))
+        #agent.q_eval.load_state_dict(T.load('Models/MPNN/Dueling_MPNN9900.pth'))
+        #agent.q_eval.load_state_dict(T.load('Models/New_TAG/Dueling_TAG9900.pth'))
+
+    if net_type == 'MPNN':
+        agent.q_eval.load_state_dict(T.load('Models/MPNN/Dueling_MPNN1800.pth'))
+
+    if net_type == 'FC':
+        agent.q_eval.load_state_dict(T.load('FC_agent_1.pth'))
+
+    agent.epsilon = 0   # act greedily at test time
+
+    Q_list = []
+    Ahat_list = []
+    A_list = []
+    done = False
+
+    if custom_grid is not None:
+        grid_ = copy.deepcopy(custom_grid)
+    else:
+        grid_ = rand_grid_gen(None)
+    grid_gr = copy.deepcopy(grid_)
+
+    while not done:
+
+        observation = grid_.data
+        with T.no_grad():
+            Q, advantage = agent.q_eval.forward(observation)
+
+        A_list.append(advantage)
+        Q_list.append(Q)
+        Ahat_list.append(advantage - advantage.max())
+        viol_nodes = grid_.viol_nodes()[0]
+        action = viol_nodes[T.argmax(Q[viol_nodes]).item()]
+
+        grid_.coarsen_node(action)
+        done = grid_.viol_nodes()[2] == 0
+
+    print("RL result", sum(grid_.active)/grid_.num_nodes)
+    #grid_.plot()
+
+    grid_gr = greedy_coarsening(grid_gr)
+
+    return grid_, grid_gr, Q_list, A_list, Ahat_list
+
+
+def node_hop_neigh(K, node, list_neighbours):
+    """Return all nodes within K hops of `node` (inclusive)."""
+    set_all = {node}
+    prev_set = set(set_all)
+    this_hop = [node]
+
+    for _ in range(K):
+        for n in this_hop:
+            set_all = set_all.union(set(list_neighbours[n]))
+        # the frontier is whatever was newly reached this hop
+        this_hop = list(set_all.difference(prev_set))
+        prev_set = set(set_all)
+
+    return list(set_all)
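+
+
+# Example (sketch): on the path graph 0-1-2-3, the 2-hop neighbourhood of
+# node 0 is {0, 1, 2}. `list_neighbours[i]` lists the nodes adjacent to
+# node i, matching how regional_update_test uses it below.
+#
+#   neighbours = [[1], [0, 2], [1, 3], [2]]
+#   assert sorted(node_hop_neigh(2, 0, neighbours)) == [0, 1, 2]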
+
+
+def regional_update_test(given_grid, net_type, Test_greedy=True):
+
+    if given_grid is None:
+        grid_ = rand_grid_gen(None)
+    else:
+        grid_ = copy.deepcopy(given_grid)
+
+    if Test_greedy:
+        grid_gr = copy.deepcopy(grid_)
+
+    K = 4
+
+    agent = Agent(dim=32, K=K, gamma=1, epsilon=1,
+                  lr=0.001, mem_size=5000, batch_size=64, net_type=net_type,
+                  eps_min=0.01, eps_dec=1.333/5000, replace=10)
+
+    if net_type == 'TAGConv':
+        #agent.q_eval.load_state_dict(T.load('Models/Dueling_batch_train_final.pth'))
+        #agent.q_eval.load_state_dict(T.load('Dueling_1_agent.pth'))
+        agent.q_eval.load_state_dict(T.load('Models/MPNN/Dueling_MPNN900.pth'))
+        #agent.q_eval.load_state_dict(T.load('Models/New_TAG/Dueling_TAG9900.pth'))
+
+    if net_type == 'MPNN':
+        agent.q_eval.load_state_dict(T.load('Models/MPNN/Dueling_MPNN1800.pth'))
+
+    agent.epsilon = 0   # act greedily at test time
+
+    done = False
+
+    T_start = time.time()
+
+    # one full forward pass to initialize the advantage of every node
+    observation = grid_.data
+    with T.no_grad():
+        Q, advantage = agent.q_eval.forward(observation)
+
+    adv_tensor = copy.deepcopy(advantage)
+
+    list_neighbours = grid_.list_neighbours
+
+    while not done:
+
+        # coarsen the violating node with the largest advantage
+        node_max = grid_.violating_nodes[T.argmax(adv_tensor[grid_.violating_nodes]).item()]
+        grid_.coarsen_node(node_max)
+
+        # only re-evaluate the K-hop neighbourhood of the chosen node,
+        # feeding the network its 2K-hop subgrid as context
+        k_hop = node_hop_neigh(K, node_max, list_neighbours)
+        k2_hop = node_hop_neigh(2*K, node_max, list_neighbours)
+
+        observation = grid_.subgrid(k2_hop)
+
+        with T.no_grad():
+            _, advantage = agent.q_eval.forward(observation)
+
+        update_list = [k2_hop.index(aa) for aa in k_hop]
+        adv_tensor[k_hop] = advantage[update_list]
+
+        done = len(grid_.violating_nodes) == 0
+
+    T_end = time.time()
+    computation_time = T_end - T_start
+
+    rl_ffrac = sum(grid_.active)/grid_.num_nodes
+    print("RL result", rl_ffrac)
+    print("number of nodes =", grid_.num_nodes)
+
+    if Test_greedy:
+        grid_gr = greedy_coarsening(grid_gr)
+        gr_ffrac = sum(grid_gr.active)/grid_gr.num_nodes
+        return grid_, rl_ffrac, grid_gr, gr_ffrac, computation_time
+
+    return grid_, rl_ffrac, computation_time
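+
+
+# Example usage (sketch): compare the regional-update policy against the
+# greedy baseline on a random grid; assumes the checkpoint files loaded
+# above exist on disk.
+#
+#   out = regional_update_test(None, 'TAGConv', Test_greedy=True)
+#   grid_rl, rl_ffrac, grid_gr, gr_ffrac, t = out
+#   print('RL fraction:', rl_ffrac, 'greedy:', gr_ffrac, 'seconds:', t)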