| #!/usr/bin/env python3 |
| """ |
| Evolve network architecture on a classification dataset, while at the same time training the weights |
| with one of several learning algorithms. |
| """ |
| import joblib |
| import time |
| import torch.utils.data |
| import logging |
| import numpy as np |
| import copy |
| import os |
| import pickle |
|
|
| from networks import WeightLearningNetwork |
| from evolution import rank_by_dominance, reproduce_tournament |
| from datasets import load_preprocessed_dataset |
| from learning import train, test, train_and_evaluate, get_performance_value |
| import utils |
|
|
|
|
# Set up parameters and output dir.
params = utils.load_params(mode='wlnn')  # based on terminal input
params['script'] = 'run-wlnn-mnist.py'
writer, out_dir = utils.init_output(params, overwrite=params['overwrite_output'])
# Dir to store all networks. When the run is allowed to overwrite a previous
# output dir, this subdir may already exist; the old unconditional makedirs
# raised FileExistsError in that case, so mirror the overwrite flag here.
os.makedirs(os.path.join(out_dir, 'networks'), exist_ok=params['overwrite_output'])

# Fall back to CPU if cuda was requested but is not available on this machine.
if params['use_cuda'] and not torch.cuda.is_available():
    logging.info('use_cuda was set but cuda is not available, running on cpu')
    params['use_cuda'] = False
device = 'cuda' if params['use_cuda'] else 'cpu'
|
|
|
|
# Make runs reproducible: seed every RNG and force deterministic cuDNN
# kernels. Note that disabling the cuDNN autotuner slows down training,
# but it removes nondeterminism on GPU.
utils.seed_all(0)
torch.backends.cudnn.deterministic = True
torch.backends.cudnn.benchmark = False
|
|
|
|
# Load the (pre-flattened) dataset as torch tensors and wrap the splits
# into TensorDatasets for the training / evaluation helpers.
data = load_preprocessed_dataset(params['dataset'], flatten_images=True,
                                 use_torch=True)
train_images, train_labels, test_images, test_labels = data
train_dataset = torch.utils.data.TensorDataset(train_images, train_labels)
test_dataset = torch.utils.data.TensorDataset(test_images, test_labels)
|
|
# Create initial population.
# TODO: Make train_only_outputs a learning_rule.
train_only_outputs = (params['train_only_outputs']
                      or params['learning_rule'] == 'hebbian')
use_random_feedback = (params['learning_rule'] == 'feedback_alignment')

population = []
for _ in range(params['population_size']):
    population.append(
        WeightLearningNetwork(params['num_inputs'], params['num_outputs'],
                              params['p_initial_connection_enabled'],
                              p_add_connection=params['p_add_connection'],
                              p_add_node=params['p_add_node'],
                              inherit_weights=params['inherit_weights'],
                              train_only_outputs=train_only_outputs,
                              use_random_feedback=use_random_feedback,
                              add_only_hidden_connections=True))

# Grow each network a bit before evolution starts: one coin flip per
# existing connection, each success inserting a hidden node.
for net in population:
    for _ in range(net.get_num_connections()):
        if np.random.rand() < 0.5:
            net.add_node()
|
|
# Evaluate the networks before doing any evolution or learning, so the
# champion starts from a measured baseline rather than a random pick.
for net in population:
    net.create_torch_layers(device=device)
with joblib.Parallel(n_jobs=params['num_workers']) as parallel:
    # Select champion based on training set for consistency with evolution loop.
    objectives = parallel(joblib.delayed(test)(net, train_dataset, params,
                                               device=device)
                          for net in population)
objectives = np.array(objectives)
# `test` returns (loss, acc) per network; negate loss so higher reward is better.
rewards = -objectives[:, 0]
accs = objectives[:, 1]
best_index = rewards.argmax()
champion = {'net': copy.deepcopy(population[best_index]),
            'reward': rewards[best_index],
            'acc': accs[best_index],
            'connections': population[best_index].get_num_connections()}
# BUG FIX: the old message claimed "test set", but the evaluation above runs
# on the training set (see the comment inside the parallel block).
logging.info(f'Pre-evolution and training champion net on train set: '
             f'reward: {champion["reward"]:.3f} '
             f'(acc: {champion["acc"]:.3f})')
# Free the torch tensors again; workers re-create them as needed.
for net in population:
    net.delete_torch_layers()

# Store the current champion network (torch layers stripped first, since
# only the serializable structure is saved).
champion['net'].delete_torch_layers()
champion['net'].save(os.path.join(out_dir, 'champion_network.json'))
|
|
# Evolution loop: evaluate -> track champion -> log -> rank -> reproduce.
generation = -1  # necessary for logging info when there are 0 generations
with joblib.Parallel(n_jobs=params['num_workers']) as parallel:
    for generation in range(params['num_generations']):
        start_time_generation = time.time()

        # Evaluate fitness of all networks: each worker trains one net and
        # evaluates it; every 100th generation the trained nets are also
        # written to the networks/ subdir.
        start_time_evaluation = time.time()
        objectives = parallel(joblib.delayed(train_and_evaluate)(
            net, train_dataset, test_dataset, params, verbose=0, save_net=(generation % 100 == 0),
            filename=os.path.join(out_dir, 'networks', f'generation{generation}-net{i}.json'))
            for i, net in enumerate(population))
        objectives = np.array(objectives)  # shape: population_size, 2
        rewards = objectives[:, 0]
        accs = objectives[:, 1]
        complexities = np.array([net.get_num_connections() for net in population])
        complexities = np.maximum(complexities, 1)  # prevent 0 division
        time_evaluation = time.time() - start_time_evaluation

        # Pick best net from this generation (based on reward) and check
        # if it's better than the previously observed best net (= champion).
        start_time_champion_evaluation = time.time()
        best_index = rewards.argmax()
        if rewards[best_index] > champion['reward']:
            # In contrast to run-wann-mnist.py, we don't have to check on the
            # entire training set because the network was already evaluated on
            # the complete set.
            # TODO: Maybe train champion net on more epochs already here (it's
            # done below right now) and compare against results of previous
            # champion net. This would take quite a bit of time though because
            # I probably need to do it at almost every generation.
            # A fresh dict is built on purpose: it drops 'long_training_*'
            # keys so the periodic long-training below re-runs for this net.
            champion = {'net': copy.deepcopy(population[best_index]),
                        'reward': rewards[best_index],
                        'acc': accs[best_index],
                        'connections': population[best_index].get_num_connections()}
            # Save new champion net to file. Note that this net doesn't have weight_matrices when
            # using multiple workers (weight_matrices is only created within the worker process).
            champion['net'].delete_torch_layers()
            champion['net'].save(os.path.join(out_dir, 'champion_network.json'))
        time_champion_evaluation = time.time() - start_time_champion_evaluation

        # Write metrics to log and tensorboard.
        logging.info(f'{generation} - Best net: reward: {rewards[best_index]:.3f} '
                     f'(acc: {accs[best_index]:.3f}) - evaluation: {time_evaluation:.1f} s, '
                     f'champion evaluation: {time_champion_evaluation:.1f} s')
        writer.add_scalar('best/reward', rewards[best_index], generation)
        writer.add_scalar('best/acc', accs[best_index], generation)

        # Every 20 generations: long-train the current champion once and
        # evaluate it on the test set. The key check guarantees each distinct
        # champion is long-trained at most once (see the fresh dict above).
        if generation % 20 == 0:
            if 'long_training_reward' not in champion:
                # Train champion net for more epochs.
                # TODO: Do this more elegantly. Maybe make an additional
                # parameter num_epochs_long.
                long_params = params.copy()
                long_params['num_epochs'] = 10
                champion['net'].create_torch_layers(device)
                loss, acc = train(champion['net'], train_dataset, long_params, device=device)
                champion['long_training_reward'] = - get_performance_value(loss, period='last_epoch')
                champion['long_training_acc'] = get_performance_value(acc, period='last_epoch')

                # Evaluate this long trained net on test set.
                loss, acc = test(champion['net'], test_dataset, params, device=device)
                champion['test_reward'] = -loss
                champion['test_acc'] = acc

                # Manually delete weight matrices, so they don't block memory
                # (important on cuda).
                champion['net'].delete_torch_layers()

            # Periodic champion / population summaries.
            utils.log_champion_info(champion)
            utils.write_champion_info(writer, champion, generation)
            utils.write_networks_stats(writer, population, generation)

            utils.log_network_stats(population, writer, generation)
            logging.info('')

        # TODO: Is this necessary?
        #writer.add_histogram('final_acc', accs, generation)
        writer.add_histogram('population/acc', accs, generation)
        writer.add_histogram('population/connections', [net.get_num_connections() for net
                                                        in population], generation)

        # Store all accuracies and connections (for learning rate plots).
        for i, (net, acc) in enumerate(zip(population, accs)):
            writer.add_scalar(f'population/net{i}_acc', acc, generation)
            writer.add_scalar(f'population/net{i}_connections', net.get_num_connections(), generation)

        # Rank networks based on the evaluation metrics.
        start_time_ranking = time.time()
        # TODO: This is a dirty hack, I am using rewards for both mean_rewards
        # and max_rewards for now. Think about how to make this better. Also,
        # should maybe adapt parameters of how often complexity is used vs.
        # reward.
        ranks = rank_by_dominance(rewards, rewards, complexities,
                                  p_complexity_objective=params['p_complexity_objective'])
        time_ranking = time.time() - start_time_ranking

        # Make new population by picking parent networks via tournament
        # selection and mutating them. The champion stays preserved in its
        # own dict even if its net gets culled from the population here.
        start_time_reproduction = time.time()
        new_population = reproduce_tournament(population, ranks, params['tournament_size'],
                                              cull_ratio=params['cull_ratio'],
                                              elite_ratio=params['elite_ratio'],
                                              num_mutations=params['num_mutations_per_generation'])
        population = new_population
        time_reproduction = time.time() - start_time_reproduction

        # Per-generation timing breakdown for tensorboard.
        time_generation = time.time() - start_time_generation
        writer.add_scalar('times/complete_generation', time_generation, generation)
        writer.add_scalar('times/evaluation', time_evaluation, generation)
        writer.add_scalar('times/champion_evaluation', time_champion_evaluation, generation)
        writer.add_scalar('times/ranking', time_ranking, generation)
        writer.add_scalar('times/reproduction', time_reproduction, generation)
|
|
# Log final results and close writer.
logging.info('\nResults at the end of evolution:')
utils.log_champion_info(champion)
utils.write_networks_stats(writer, population, generation)
utils.log_network_stats(population, writer, generation)
writer.close()

# Store performance summary (objectives of the last evaluated generation;
# pre-evolution objectives when num_generations == 0).
# NOTE(review): this passes params['out_dir'] while the rest of the script
# uses the out_dir returned by utils.init_output — confirm they are the same.
utils.store_performance(objectives, out_dir=params['out_dir'])
|
|
# File: backend/api/migrations/0170_auto_20190819_0126.py
| # -*- coding: utf-8 -*- |
| # Generated by Django 1.11.21 on 2019-08-19 01:26 |
| from __future__ import unicode_literals |
|
|
| import db_comments.model_mixins |
| from django.conf import settings |
| import django.contrib.postgres.fields.jsonb |
| import django.core.serializers.json |
| from django.db import migrations, models |
| import django.db.models.deletion |
|
|
|
|
class Migration(migrations.Migration):
    """Create the compliance_report_snapshot table and relax the
    ComplianceReport one-to-one schedule/summary links to SET_NULL.

    NOTE: auto-generated (Django 1.11) and already numbered into the
    migration chain — do not edit the operations retroactively once this
    has been applied to a shared database.
    """

    dependencies = [
        ('api', '0169_add_view_compliance_report_permission'),
    ]

    operations = [
        # New snapshot model: one JSON blob per compliance report, with
        # audit timestamps and (added below) create/update user FKs.
        migrations.CreateModel(
            name='ComplianceReportSnapshot',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('create_timestamp', models.DateTimeField(auto_now_add=True, null=True)),
                ('update_timestamp', models.DateTimeField(auto_now=True, null=True)),
                ('snapshot', django.contrib.postgres.fields.jsonb.JSONField(encoder=django.core.serializers.json.DjangoJSONEncoder, null=True)),
            ],
            options={
                'db_table': 'compliance_report_snapshot',
            },
            bases=(models.Model, db_comments.model_mixins.DBComments),
        ),
        # Deleting a schedule no longer cascades to the report; the link is
        # nulled out instead (SET_NULL on each one-to-one field).
        migrations.AlterField(
            model_name='compliancereport',
            name='schedule_a',
            field=models.OneToOneField(null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='compliance_report', to='api.ScheduleA'),
        ),
        migrations.AlterField(
            model_name='compliancereport',
            name='schedule_b',
            field=models.OneToOneField(null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='compliance_report', to='api.ScheduleB'),
        ),
        migrations.AlterField(
            model_name='compliancereport',
            name='schedule_c',
            field=models.OneToOneField(null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='compliance_report', to='api.ScheduleC'),
        ),
        migrations.AlterField(
            model_name='compliancereport',
            name='schedule_d',
            field=models.OneToOneField(null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='compliance_report', to='api.ScheduleD'),
        ),
        migrations.AlterField(
            model_name='compliancereport',
            name='summary',
            field=models.OneToOneField(null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='compliance_report', to='api.ScheduleSummary'),
        ),
        # Snapshot belongs to exactly one report; dropping the report drops
        # its snapshot.
        migrations.AddField(
            model_name='compliancereportsnapshot',
            name='compliance_report',
            field=models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, to='api.ComplianceReport'),
        ),
        migrations.AddField(
            model_name='compliancereportsnapshot',
            name='create_user',
            field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='api_compliancereportsnapshot_CREATE_USER', to=settings.AUTH_USER_MODEL),
        ),
        migrations.AddField(
            model_name='compliancereportsnapshot',
            name='update_user',
            field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='api_compliancereportsnapshot_UPDATE_USER', to=settings.AUTH_USER_MODEL),
        ),
    ]
|
|
# Repository: RuigerS/Tuturials
# Advent of Code 2020, day 3: count trees ('#') hit while sliding down the
# map on the slopes (right, down) = (1,1), (3,1), (5,1), (7,1), (1,2),
# then print each count and their product. The map wraps horizontally.
# NOTE(review): the input path is relative to the working directory.
slopes = [(1, 1), (3, 1), (5, 1), (7, 1), (1, 2)]
counts = [0] * len(slopes)
positions = [0] * len(slopes)  # horizontal position per slope

# Use a context manager so the input file is closed even on error
# (the original left the handle open).
with open("./CoA/2020/data/03a.txt", "r") as f:
    for row, line in enumerate(f):
        line = line.strip()
        width = len(line)
        for i, (right, down) in enumerate(slopes):
            # A slope only visits rows that are multiples of its `down` step.
            if row % down == 0:
                if line[positions[i] % width] == "#":
                    counts[i] += 1
                positions[i] += right

# Same output as before: the five individual counts, then their product.
product = 1
for count in counts:
    print(count)
    product *= count
print(product)
| import tensorflow as tf |
| from tensorflow.keras.layers import Dense, Flatten, Conv2D |
| from tensorflow.keras import Model |
| from tensorflow.keras import regularizers |
|
|
class ConnectNN(Model):
    """Two-headed Keras model: a shared dense trunk feeding a 7-way policy
    head and a scalar value head, both tanh-activated.

    NOTE(review): the 7 policy outputs suggest a Connect-Four board
    (one per column) — confirm with the calling code.
    """

    def __init__(self):
        super(ConnectNN, self).__init__()
        # Shared trunk.
        self.d1 = Dense(100, activation='relu')
        self.d2 = Dense(100, activation='relu')

        # Policy branch: L2-regularized hidden layer, then 7 outputs.
        self.p1 = Dense(30, activation='relu',
                        kernel_regularizer=regularizers.l2(0.0001))
        self.policy_head = Dense(7, activation='tanh')

        # Value branch: small hidden layer, then a single scalar.
        self.v1 = Dense(10, activation='relu')
        self.value_head = Dense(1, activation='tanh')

    def body(self, x):
        """Run the shared trunk on *x*."""
        hidden = self.d1(x)
        return self.d2(hidden)

    def policy(self, x):
        """Return the policy-head output for input *x*."""
        return self.policy_head(self.p1(self.body(x)))

    def value(self, x):
        """Return the value-head output for input *x*."""
        return self.value_head(self.v1(self.body(x)))
|
|
# Repository: amirRamirfatahi/beautstertest
# Override the default cache to use an in-process local-memory cache for tests
# (the configured backend is LocMemCache, not memcached).
|
|
# Django CACHES setting: route the 'default' alias to LocMemCache so test
# runs never depend on an external cache server.
CACHES = {
    "default": {
        "BACKEND": "django.core.cache.backends.locmem.LocMemCache",
    },
}
|
|
# (repository metadata: 0 GitHub stars)
| #! /usr/bin/env python |
| import argparse, sys, os, errno |
| import logging |
# Configure root logging once for the whole script: timestamp, logger name
# and level prefix on every message, INFO and above.
logging.basicConfig(level=logging.INFO, format='%(asctime)s %(name)s [%(levelname)s] : %(message)s')
logger = logging.getLogger('test_model')
|
|
def prepare_output_file(filename):
    """Ensure the parent directory of *filename* exists.

    Creates all missing intermediate directories. A filename with no
    directory component (current working directory) is a no-op — the old
    version crashed with FileNotFoundError on os.makedirs('') in that case.
    An already-existing directory is tolerated; any other OSError is
    re-raised.
    """
    dirname = os.path.dirname(filename)
    if not dirname:
        return  # file lives in the cwd; nothing to create
    try:
        os.makedirs(dirname)
    except OSError as e:
        if e.errno != errno.EEXIST:
            # Bare `raise` preserves the original traceback.
            raise
|
|
if __name__ == '__main__':
    # Build the CLI: one subcommand per supported model type.
    main_parser = argparse.ArgumentParser(
        description='Train models for classification of chest X-ray radiography')
    subparsers = main_parser.add_subparsers(dest='command')

    unet_parser = subparsers.add_parser('unet_vgg16',
                                        help='a simple classifier for types')
    unet_parser.add_argument('-m', '--model-file', type=str, required=True)
    unet_parser.add_argument('-o', '--output-file', type=str, required=True,
                             help='output model file')

    args = main_parser.parse_args()
    # Re-scope the logger to the chosen subcommand.
    logger = logging.getLogger('test_model.' + args.command)

    if args.command == 'unet_vgg16':
        # Heavyweight deps are imported lazily, only when this subcommand runs.
        from models import unet_from_vgg16
        from keras.models import load_model
        from keras.utils.vis_utils import plot_model

        vgg_model = load_model(args.model_file)
        unet_model = unet_from_vgg16(vgg_model)
        plot_model(unet_model, args.output_file, show_shapes=True)
|
|
|
|
|
|
|
|
|
|
| <gh_stars>1-10 from collections import defaultdict from noise_robust_cobras.noise_robust.datastructures.constraint import Constraint from noise_robust_cobras.noise_robust.datastructures.constraint_index import ( ConstraintIndex, ) class Cycle: """ A class that represents a valid constraint cycle attributes: - constraints: a list of constraints the way they appear in the cycle (starts at a random point in the cycle) - sorted_constraints: a tuple of constraints that is sorted for __eq__ and __hash__ - number_of_CLs: the number of CL constraints in this cycle """ def __init__(self, constraints, composed_from=None, number_of_CLs=None): assert Cycle.is_valid_constraint_set_for_cycle(constraints) self.constraints = set(constraints) self.sorted_constraints = Cycle.sort_constraints(constraints) self.composed_from = set(composed_from) if composed_from is not None else {self} if number_of_CLs is None: self.number_of_CLs = sum( 1 for constraint in constraints if constraint.is_CL() ) else: self.number_of_CLs = number_of_CLs @staticmethod def compose_multiple_cycles_ordered(cycles): composed_cycle = cycles[0] for to_compose in cycles[1:]: composed_cycle = composed_cycle.compose_with(to_compose) if composed_cycle is None: break return composed_cycle @staticmethod def compose_multiple_cycles(cycles): composed_constraints = set(cycles[0].constraints) composed_from = set(cycles[0].composed_from) for to_compose in cycles[1:]: composed_constraints.symmetric_difference_update(to_compose.constraints) composed_from.symmetric_difference_update(to_compose.composed_from) if not Cycle.is_valid_constraint_set_for_cycle(composed_constraints): return None return Cycle(composed_constraints, composed_from=composed_from) @staticmethod def make_cycle_from_raw_cons(raw_constraints): constraints = Constraint.raw_constraints_to_constraints(raw_constraints) return Cycle(constraints) @staticmethod def cycle_from_instances(instances): instances = [int(i) for i in instances] raw_constraints = 
list(zip(instances[:-1], instances[1:])) + [ (instances[0], instances[-1]) ] return Cycle.make_cycle_from_raw_cons(raw_constraints) @staticmethod def cycle_from_instances_constraint_index(instances, constraint_index): instances = [int(i) for i in instances] raw_constraints = list(zip(instances[:-1], instances[1:])) + [ (instances[0], instances[-1]) ] return Cycle(constraint_index.instance_tuples_to_constraints(raw_constraints)) @staticmethod def is_valid_constraint_set_for_cycle(constraints): if len(constraints) == 0: return False # check if each instance occurs twice count = |
|
|