123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224 |
import tensorflow as tf

from inits import *
from layers import *
from metrics import *
- flags = tf.app.flags
- FLAGS = flags.FLAGS
class Model(object):
    """Base class for TF1-style graph models.

    Subclasses populate ``self.layers`` in ``_build()`` and implement
    ``_loss()`` / ``_accuracy()``; ``build()`` then wires the layers into a
    sequential graph, collects the scope's variables, and creates the
    training op.
    """

    def __init__(self, **kwargs):
        # Only 'name' and 'logging' are accepted; anything else is a typo.
        allowed_kwargs = {'name', 'logging'}
        for kwarg in kwargs.keys():
            assert kwarg in allowed_kwargs, 'Invalid keyword argument: ' + kwarg

        # Variable-scope name defaults to the lowercased class name.
        self.name = kwargs.get('name') or self.__class__.__name__.lower()
        self.logging = kwargs.get('logging', False)

        self.vars = {}          # var.name -> tf.Variable, filled by build()
        self.placeholders = {}

        self.layers = []        # Layer callables, appended by _build()
        self.activations = []   # per-layer outputs, inputs first

        self.inputs = None
        self.outputs = None

        self.loss = 0
        self.accuracy = 0
        self.optimizer = None
        self.opt_op = None

    def _build(self):
        raise NotImplementedError

    def build(self):
        """Wrapper for _build(): assemble layers, variables, metrics, train op."""
        with tf.variable_scope(self.name):
            self._build()

        # Thread the input through each layer in order; the final
        # activation becomes the model output.
        self.activations.append(self.inputs)
        for layer in self.layers:
            self.activations.append(layer(self.activations[-1]))
        self.outputs = self.activations[-1]

        # Expose this scope's variables by name for save()/load().
        scoped_vars = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope=self.name)
        self.vars = {var.name: var for var in scoped_vars}

        # Metrics, then the optimizer step over the total loss.
        self._loss()
        self._accuracy()
        self.opt_op = self.optimizer.minimize(self.loss)

    def predict(self):
        pass

    def _loss(self):
        raise NotImplementedError

    def _accuracy(self):
        raise NotImplementedError

    def save(self, sess=None):
        """Checkpoint this model's variables under tmp/<name>.ckpt."""
        if not sess:
            raise AttributeError("TensorFlow session not provided.")
        saver = tf.train.Saver(self.vars)
        save_path = saver.save(sess, "tmp/%s.ckpt" % self.name)
        print("Model saved in file: %s" % save_path)

    def load(self, sess=None):
        """Restore this model's variables from tmp/<name>.ckpt."""
        if not sess:
            raise AttributeError("TensorFlow session not provided.")
        saver = tf.train.Saver(self.vars)
        save_path = "tmp/%s.ckpt" % self.name
        saver.restore(sess, save_path)
        print("Model restored from file: %s" % save_path)
class MLP(Model):
    """Two-layer fully-connected classifier: sparse features -> hidden1 -> labels."""

    def __init__(self, placeholders, input_dim, **kwargs):
        super(MLP, self).__init__(**kwargs)

        self.placeholders = placeholders
        self.inputs = placeholders['features']
        self.input_dim = input_dim
        # self.input_dim = self.inputs.get_shape().as_list()[1]  # To be supported in future Tensorflow versions
        self.output_dim = placeholders['labels'].get_shape().as_list()[1]

        self.optimizer = tf.train.AdamOptimizer(learning_rate=FLAGS.learning_rate)
        self.build()

    def _loss(self):
        # L2 weight decay applied to the first layer's weights only.
        for weight in self.layers[0].vars.values():
            self.loss += FLAGS.weight_decay * tf.nn.l2_loss(weight)
        # Masked cross-entropy over the labeled nodes.
        self.loss += masked_softmax_cross_entropy(self.outputs,
                                                  self.placeholders['labels'],
                                                  self.placeholders['labels_mask'])

    def _accuracy(self):
        self.accuracy = masked_accuracy(self.outputs,
                                        self.placeholders['labels'],
                                        self.placeholders['labels_mask'])

    def _build(self):
        # Hidden layer: sparse input, ReLU; readout layer: linear logits.
        hidden = Dense(input_dim=self.input_dim,
                       output_dim=FLAGS.hidden1,
                       placeholders=self.placeholders,
                       act=tf.nn.relu,
                       dropout=True,
                       sparse_inputs=True,
                       logging=self.logging)
        readout = Dense(input_dim=FLAGS.hidden1,
                        output_dim=self.output_dim,
                        placeholders=self.placeholders,
                        act=lambda x: x,
                        dropout=True,
                        logging=self.logging)
        self.layers.extend([hidden, readout])

    def predict(self):
        """Class probabilities via softmax over the raw logits."""
        return tf.nn.softmax(self.outputs)
class GCN(Model):
    """Two-layer graph convolutional classifier (Kipf & Welling style)."""

    def __init__(self, placeholders, input_dim, **kwargs):
        super(GCN, self).__init__(**kwargs)

        self.placeholders = placeholders
        self.inputs = placeholders['features']
        self.input_dim = input_dim
        # self.input_dim = self.inputs.get_shape().as_list()[1]  # To be supported in future Tensorflow versions
        self.output_dim = placeholders['labels'].get_shape().as_list()[1]

        self.optimizer = tf.train.AdamOptimizer(learning_rate=FLAGS.learning_rate)
        self.build()

    def _loss(self):
        # L2 weight decay applied to the first layer's weights only.
        for weight in self.layers[0].vars.values():
            self.loss += FLAGS.weight_decay * tf.nn.l2_loss(weight)
        # Masked cross-entropy over the labeled nodes.
        self.loss += masked_softmax_cross_entropy(self.outputs,
                                                  self.placeholders['labels'],
                                                  self.placeholders['labels_mask'])

    def _accuracy(self):
        self.accuracy = masked_accuracy(self.outputs,
                                        self.placeholders['labels'],
                                        self.placeholders['labels_mask'])

    def _build(self):
        # First convolution: sparse input, ReLU; second: linear logits.
        hidden = GraphConvolution(input_dim=self.input_dim,
                                  output_dim=FLAGS.hidden1,
                                  placeholders=self.placeholders,
                                  act=tf.nn.relu,
                                  dropout=True,
                                  sparse_inputs=True,
                                  logging=self.logging)
        readout = GraphConvolution(input_dim=FLAGS.hidden1,
                                   output_dim=self.output_dim,
                                   placeholders=self.placeholders,
                                   act=lambda x: x,
                                   dropout=True,
                                   logging=self.logging)
        self.layers.extend([hidden, readout])

    def predict(self):
        """Class probabilities via softmax over the raw logits."""
        return tf.nn.softmax(self.outputs)
class GCN_Align(Model):
    """GCN for entity alignment: two transform-free GraphConvolution layers
    trained with an alignment loss over the seed pairs in ``ILL``.

    NOTE(review): ILL appears to hold pre-aligned entity pairs and AE to
    select an attribute-embedding variant of the loss — confirm against
    ``align_loss`` and the training script.
    """

    def __init__(self, placeholders, input_dim, output_dim, ILL,
                 sparse_inputs=False, featureless=True, AE=True, **kwargs):
        super(GCN_Align, self).__init__(**kwargs)

        self.placeholders = placeholders
        self.inputs = placeholders['features']
        self.input_dim = input_dim
        self.output_dim = output_dim
        self.ILL = ILL
        self.sparse_inputs = sparse_inputs
        self.featureless = featureless
        self.AE = AE

        self.optimizer = tf.train.GradientDescentOptimizer(learning_rate=FLAGS.learning_rate)
        self.build()

    def _loss(self):
        # Alignment loss over the seed pairs, parameterized by margin gamma
        # and negative-sample count k from FLAGS.
        self.loss += align_loss(self.outputs, self.ILL, FLAGS.gamma, FLAGS.k, AE=self.AE)

    def _accuracy(self):
        # Accuracy is not defined for the alignment objective; evaluation
        # happens outside the model.
        pass

    def _build(self):
        # Both convolutions skip the weight transform (transform=False) and
        # dropout; only the first may be featureless/sparse.
        first = GraphConvolution(input_dim=self.input_dim,
                                 output_dim=self.output_dim,
                                 placeholders=self.placeholders,
                                 act=tf.nn.relu,
                                 dropout=False,
                                 featureless=self.featureless,
                                 sparse_inputs=self.sparse_inputs,
                                 transform=False,
                                 init=trunc_normal,
                                 logging=self.logging)
        second = GraphConvolution(input_dim=self.output_dim,
                                  output_dim=self.output_dim,
                                  placeholders=self.placeholders,
                                  act=lambda x: x,
                                  dropout=False,
                                  transform=False,
                                  logging=self.logging)
        self.layers.extend([first, second])
|