code
stringlengths
17
6.64M
class UnityTask():
    """Wrapper around the Unity ML-Agents Tennis environment (Windows build).

    Exposes gym-like reset()/step() on top of the single default brain and
    caches the action/observation dimensions for the agent.
    """

    def __init__(self, name):
        self.brain = None
        self.brain_name = None
        self.env = self.create_unity_env()
        self.action_space = self.brain.vector_action_space_size
        self.observation_space = self.brain.vector_observation_space_size
        print(f'Action space {self.action_space}')
        print(f'State space {self.observation_space}')
        self.name = name
        self.action_dim = self.action_space
        self.state_dim = int(np.prod(self.observation_space))
        # train_mode=True runs the Unity sim at accelerated speed on reset().
        self.train_mode = True

    def extract_env_details(self, env_info):
        """Unpack a BrainInfo into (observations, rewards, done flags)."""
        next_state = env_info.vector_observations
        reward = env_info.rewards
        done = env_info.local_done
        return (next_state, reward, done)

    def create_unity_env(self):
        """Launch the hard-coded Tennis executable and cache its default brain."""
        env = UnityEnvironment(file_name='Env\\Tennis_Windows_x86_64\\Tennis.exe')
        self.brain_name = env.brain_names[0]
        self.brain = env.brains[self.brain_name]
        return env

    def reset(self):
        """Reset the environment and return only the initial observations."""
        env_info = self.env.reset(train_mode=self.train_mode)[self.brain_name]
        return self.extract_env_details(env_info)[0]

    def step(self, actions):
        """Rescale actions from [0, 1] (Beta-policy samples) to [-1, 1] and advance.

        NOTE(review): the environment is stepped TWICE with the same actions and
        the first result discarded — looks like a frame-skip hack or a leftover
        bug; confirm intent before changing.
        """
        actions = ((actions - 0.5) * 2.0)
        self.env.step(actions)[self.brain_name]
        env_info = self.env.step(actions)[self.brain_name]
        (next_states, rewards, dones) = self.extract_env_details(env_info)
        return (next_states, rewards, np.array(dones))
class PPOAgent_Unity():
    """PPO agent driving a UnityTask: rollout collection, GAE, clipped updates.

    Hard-coded to 'cuda:0'. The original source was flattened to one line, so
    the block nesting below is a careful reconstruction — verify against the
    original history where behavior matters.
    """

    def __init__(self, config):
        self.config = config
        self.task = UnityTask('reacher')
        self.network = PPONetwork(self.config.state_dim, self.config.action_dim, 1000).to('cuda:0')
        self.opt = torch.optim.Adam(self.network.parameters(), config.lr, amsgrad=True)
        self.total_steps = 0
        # One running reward accumulator per parallel worker/agent.
        self.online_rewards = np.zeros(config.num_workers)
        self.episode_rewards = []
        self.states = self.task.reset()
        self.state_normalizer = None
        self.min_lr = (self.config.lr * 0.3)  # NOTE(review): computed but never enforced
        self.lr_scheduler = torch.optim.lr_scheduler.StepLR(self.opt, gamma=0.8, step_size=200)
        self.max_score = 0
        self.episode_score = CountScore()
        # NOTE(review): build_trajectory appends to self.score_data, which is
        # never initialized here — first terminal step would raise AttributeError.
        if config.play_only:
            self.load_model()

    def load_model(self):
        """Restore network weights from the configured checkpoint path."""
        self.network.load_state_dict(torch.load(self.config.saved_checkpoint))
        self.network.to('cuda:0')

    def update_lr(self):
        """Anneal the PPO clip range and entropy bonus as training progresses."""
        if (self.total_steps < 1000):
            return
        # NOTE(review): `% 40000` is truthy for almost every step count —
        # presumably `== 0` was intended; as written the clip shrinks early.
        if (self.total_steps % 40000):
            if (self.config.ppo_ratio_clip > 0.1):
                self.config.ppo_ratio_clip = 0.07
        if (((self.total_steps % 30000) == 0) and (self.config.entropy_weight > 0)):
            self.config.entropy_weight -= 0.04
            if (self.config.entropy_weight < 0):
                self.config.entropy_weight = 0.0

    def build_trajectory(self, memory_buffer):
        """Roll the policy forward for rollout_length steps, storing transitions."""
        states = self.states
        episode = 0  # NOTE(review): never incremented; the log line always shows Ep=0
        for _ in range(self.config.rollout_length):
            states = tensor(states)
            prediction = self.network(states)
            (next_states, rewards, terminals) = self.task.step(prediction['a'].cpu().numpy())
            self.online_rewards += rewards
            if np.any(terminals):
                # Episode finished: record the best worker's score and reset accumulators.
                self.episode_score.add_score(np.max(self.online_rewards))
                self.score_data.append(self.episode_score.mean_score())
                self.episode_rewards.append(self.online_rewards[(- 1)])
                self.online_rewards[:] = 0
            memory_buffer.add(prediction)
            # 'm' is the continuation mask: 1 while running, 0 at terminal steps.
            memory_buffer.add({'r': tensor(rewards).unsqueeze((- 1)), 'm': tensor((1 - terminals)).unsqueeze((- 1)), 's': tensor(states)})
            states = next_states
        current_score = self.episode_score.mean_score()
        print(f'Ep={episode}s current score {current_score} online rewards {self.online_rewards.mean()}')
        if (self.max_score < self.online_rewards.mean()):
            self.max_score = current_score
        return memory_buffer

    def step(self):
        """One outer PPO iteration: collect a rollout, compute GAE, train, log."""
        self.update_lr()
        if self.config.play_only:
            self.validate(False)
            return
        config = self.config
        memory_buffer = Storage(config.rollout_length)
        states = self.states
        self.network.eval()
        memory_buffer = self.build_trajectory(memory_buffer)
        current_score = self.online_rewards.mean()
        print(f'Current score {current_score}')
        if (not config.play_only):
            if ((self.max_score < current_score) and (not config.play_only)):
                # Checkpoint whole model whenever the running score improves.
                torch.save(self.network, '/checkpoint/PPO-{}.pth'.format(int(current_score)))
                self.max_score = current_score
            self.states = states
            states = tensor(states)
            prediction = self.network(states)
            memory_buffer.add(prediction)
            memory_buffer.placeholder()
            # Backward pass over the rollout: discounted returns + GAE advantages.
            advantages = tensor(np.zeros((config.num_workers, 1)))
            returns = prediction['v'].detach()
            for i in reversed(range(config.rollout_length)):
                returns = (memory_buffer.r[i] + ((config.discount * memory_buffer.m[i]) * returns))
                td_error = ((memory_buffer.r[i] + ((config.discount * memory_buffer.m[i]) * memory_buffer.v[(i + 1)])) - memory_buffer.v[i])
                advantages = ((((advantages * config.gae_tau) * config.discount) * memory_buffer.m[i]) + td_error)
                memory_buffer.adv[i] = advantages.detach()
                memory_buffer.ret[i] = returns.detach()
            batch_steps = self.train_agent(memory_buffer)
            steps = batch_steps
            self.total_steps += steps
            self.lr_scheduler.step()
            if ((self.total_steps % 50000) == 0):
                self.validate(False)
        else:
            self.validate(False)

    def train_agent(self, memory_buffer):
        """Run clipped-PPO optimization epochs over the collected rollout.

        Returns the number of minibatch updates performed.
        """
        (states, actions, log_probs_old, returns, advantages) = memory_buffer.cat(['s', 'a', 'log_pi_a', 'ret', 'adv'])
        actions = actions.detach()
        log_probs_old = log_probs_old.detach()
        # Advantage normalization stabilizes the surrogate objective.
        advantages = ((advantages - advantages.mean()) / advantages.std())
        sum_returns = 0
        sum_advantage = 0
        sum_policy_loss = 0
        sum_critic_loss = 0
        sum_entropy = 0
        batch_steps = 0
        self.network.train()
        config = self.config
        for ep in range(config.optimization_epochs):
            sampler = random_sample(np.arange(states.size(0)), config.mini_batch_size)
            for batch_indices in sampler:
                batch_indices = tensor(batch_indices).long()
                sampled_states = states[batch_indices]
                sampled_actions = actions[batch_indices]
                sampled_log_probs_old = log_probs_old[batch_indices]
                sampled_returns = returns[batch_indices]
                sampled_advantages = advantages[batch_indices]
                prediction = self.network(sampled_states.cuda(), sampled_actions.cuda())
                # Probability ratio pi_new / pi_old, clipped per PPO.
                ratio = (prediction['log_pi_a'] - sampled_log_probs_old).exp()
                obj = (ratio * sampled_advantages)
                obj_clipped = (ratio.clamp((1.0 - self.config.ppo_ratio_clip), (1.0 + self.config.ppo_ratio_clip)) * sampled_advantages)
                # Maximized objective (entropy bonus included); negated before backward.
                policy_loss = (torch.min(obj, obj_clipped).mean() + (config.entropy_weight * prediction['ent'].mean()))
                value_loss = F.smooth_l1_loss(prediction['v'], sampled_returns.view((- 1), 1))
                (sum_returns, sum_advantage, sum_policy_loss, sum_critic_loss, sum_entropy) = self.log_stats(sampled_returns, sampled_advantages, policy_loss, value_loss, prediction['ent'].mean(), batch_steps, sum_returns, sum_advantage, sum_critic_loss, sum_policy_loss, sum_entropy)
                batch_steps += 1
                self.opt.zero_grad()
                # Ascend the policy objective, descend the value loss.
                (- (policy_loss - value_loss)).backward()
                nn.utils.clip_grad_norm_(self.network.parameters(), config.gradient_clip)
                self.opt.step()
        return batch_steps

    def get_lr(self):
        """Return the current learning rate of the first optimizer param group."""
        for param_group in self.opt.param_groups:
            return param_group['lr']

    def log_stats(self, returns, advantage, loss, critic_loss, entropy, batch_step, sum_returns, sum_advantage, sum_critic, sum_loss, sum_entropy):
        """Accumulate running sums and emit per-batch scalars to the configured logger.

        Returns the updated (sum_returns, sum_advantage, sum_loss, sum_critic,
        sum_entropy) tuple for the caller to carry forward.
        """
        sum_returns += returns.mean()
        sum_advantage += advantage.mean()
        sum_critic += critic_loss
        sum_loss += loss
        sum_entropy += entropy.mean()
        logger = self.config.logger
        frame_idx = self.total_steps
        batch_count = (self.config.optimization_epochs * (self.config.rollout_length / self.config.mini_batch_size))  # NOTE(review): unused
        step_idx = (batch_step + frame_idx)
        batch_step += 1  # avoid division by zero on the first batch
        logger.add_scalar('returns', (sum_returns / batch_step), step_idx)
        logger.add_scalar('advantage', (sum_advantage / batch_step), step_idx)
        logger.add_scalar('loss_critic', (sum_critic / batch_step), step_idx)
        logger.add_scalar('entropy', (sum_entropy / batch_step), step_idx)
        logger.add_scalar('loss_total', (sum_loss / batch_step), step_idx)
        logger.add_scalar('lr', self.get_lr(), step_idx)
        return (sum_returns, sum_advantage, sum_loss, sum_critic, sum_entropy)

    def validate(self, fast_test=True):
        """Play 10 evaluation episodes and print the mean accumulated score.

        fast_test controls the Unity train_mode flag (accelerated sim) during reset.
        """
        score = np.zeros(self.config.num_workers)
        self.network.eval()
        self.task.train_mode = fast_test
        actual_score = 0
        for i in range(10):
            print(f'Testing {i} score={np.mean(score)}')
            terminals = np.zeros(2)
            states = self.task.reset()
            ep_scores = []  # NOTE(review): never populated
            while (not all(terminals)):
                states = tensor(states)
                prediction = self.network(states)
                (next_states, rewards, terminals) = self.task.step(prediction['a'].cpu().numpy())
                score += rewards
                states = next_states
        self.task.train_mode = False
        actual_score = np.mean(score)
        print(f'Ep: 100 {actual_score}')
class UnityEnv():
    """Generic Unity ML-Agents environment wrapper (single default brain).

    Like UnityTask but parameterized by executable path, and clips (rather
    than rescales) actions to [-1, 1].
    """

    def __init__(self, env_path, train_mode=True):
        self.brain = None
        self.brain_name = None
        self.train_mode = train_mode
        self.env = self.create_unity_env(env_path)
        self.action_space = self.brain.vector_action_space_size
        self.observation_space = self.brain.vector_observation_space_size
        print(f'Action space {self.action_space}')
        print(f'State space {self.observation_space}')
        self.action_dim = self.action_space
        self.state_dim = int(np.prod(self.observation_space))

    def extract_env_details(self, env_info):
        """Unpack a BrainInfo into (observations, rewards, done flags)."""
        next_state = env_info.vector_observations
        reward = env_info.rewards
        done = env_info.local_done
        return (next_state, reward, done)

    def create_unity_env(self, env_path):
        """Launch the Unity executable at env_path and cache its default brain."""
        env = UnityEnvironment(file_name=env_path)
        self.brain_name = env.brain_names[0]
        self.brain = env.brains[self.brain_name]
        return env

    def reset(self):
        """Reset the environment and return only the initial observations."""
        env_info = self.env.reset(train_mode=self.train_mode)[self.brain_name]
        return self.extract_env_details(env_info)[0]

    def step(self, actions):
        """Clip actions to [-1, 1] and advance the environment.

        NOTE(review): the environment is stepped TWICE with the same actions and
        the first result discarded — frame-skip hack or leftover bug; confirm
        before changing.
        """
        actions = np.clip(actions, (- 1), 1)
        self.env.step(actions)[self.brain_name]
        env_info = self.env.step(actions)[self.brain_name]
        (next_states, rewards, dones) = self.extract_env_details(env_info)
        return (next_states, rewards, np.array(dones))
class Config():
    """Hyper-parameter container for the PPO Unity agent."""

    # Preferred torch device, resolved once when the class is defined.
    DEVICE = torch.device('cuda:0') if torch.cuda.is_available() else torch.device('cpu')

    def __init__(self):
        """Populate every knob with its default value."""
        self.device = 'cuda:0' if torch.cuda.is_available() else 'cpu'
        # Environment dimensions.
        self.action_size = 2
        self.state_dim = 8
        self.action_dim = 2
        # Run mode and checkpointing.
        self.play_only = True
        self.saved_checkpoint = 'checkpoint/ppo.pth'
        # Optimization settings.
        self.lr = 0.0001
        self.discount = 0.97
        self.gae_tau = 0.95
        self.gradient_clip = 4.7
        self.rollout_length = 2000
        self.optimization_epochs = 10
        self.mini_batch_size = 200
        self.ppo_ratio_clip = 0.1
        self.entropy_weight = 0.09
        # Bookkeeping.
        self.log_interval = 100
        self.max_steps = 400000.0
        self.num_workers = 2
class Env_store():
    """Scratch container holding the most recent environment transition as tensors."""

    def __init__(self, dim, state_dim):
        """Pre-seed each slot by passing the dimension hints through tensor()."""
        self.network_output = None
        self.states = tensor(state_dim)
        self.actions = tensor(dim)
        self.rewards = tensor(dim)
        self.dones = tensor(dim)
        self.advantage = tensor(dim)

    def populate(self, states, rewards, dones, model_output):
        """Overwrite the stored transition with fresh rollout data."""
        self.network_output = model_output
        self.states = tensor(states)
        self.rewards = rewards
        # Continuation mask: 1 while the episode runs, 0 at terminal steps.
        self.not_dones = tensor(1 - dones).unsqueeze(-1)
class PPONetwork(nn.Module):
    """Actor-critic network for PPO with a Beta action distribution.

    The input is a stack of `frames` (3) consecutive observation vectors.
    The actor branch emits alpha/beta parameters of a 2-dim Beta policy;
    the critic branch emits a scalar state value.
    """

    def __init__(self, state_size, action_size, hidden_size):
        """Build the shared trunk plus actor and critic heads.

        Params
        ======
            state_size (int): dimension of a single observation frame
            action_size (int): dimension of each action
            hidden_size (int): width of the first hidden layer
        """
        super(PPONetwork, self).__init__()
        second_hidden_size = 500
        third = (second_hidden_size - 100)  # 400-wide body layers
        frames = 3   # stacked observation frames per input
        agents = 2   # NOTE(review): unused local — presumably the two tennis agents
        self.input_size = (state_size * frames)
        self.input = nn.Linear(self.input_size, hidden_size)
        self.hidden = nn.Linear(hidden_size, second_hidden_size)
        self.actor_body = nn.Linear(third, third)
        self.actor_head = nn.Linear(third, action_size)  # deterministic 'mean' output only
        self.critic_body = nn.Linear(third, third)
        self.critic_head = nn.Linear(third, 1)
        self.policy_body = nn.Linear(second_hidden_size, third)
        self.policy_head = nn.Linear(third, third)  # NOTE(review): never used in forward()
        init_layers = [self.input, self.hidden, self.actor_body, self.critic_body, self.policy_body]
        self.init_weights(init_layers)
        self.batch_norm = nn.BatchNorm1d(second_hidden_size)
        self.batch_norm_input = nn.BatchNorm1d(hidden_size)
        # Beta-distribution parameter heads (2 actions each), scaled-down init.
        self.alpha = nn.Linear(third, 2, bias=False)
        self.beta = nn.Linear(third, 2, bias=False)
        self.alpha.weight.data.mul_(0.125)
        self.beta.weight.data.mul_(0.125)
        self.std = nn.Parameter(torch.zeros(2))  # NOTE(review): unused by forward()
        self.state_size = state_size
        device = 'cuda:0'  # hard-coded CUDA device; fails on CPU-only hosts
        self.to(device)
        summary(self, (1, self.input_size))  # prints a torchsummary layer report

    def init_weights(self, layers):
        """Kaiming-normal weight init; biases scaled toward zero."""
        for layer in layers:
            nn.init.kaiming_normal_(layer.weight)
            layer.bias.data.mul_(0.1)

    def forward(self, state, action=None):
        """Return {'a', 'log_pi_a', 'ent', 'mean', 'v'} for a (batch of) state(s).

        If `action` is supplied its log-probability under the current policy
        is evaluated instead of sampling (used during PPO updates).
        """
        x = state.view((- 1), self.input_size)
        x = F.leaky_relu(self.batch_norm_input(self.input(x)))
        x = F.leaky_relu(self.batch_norm(self.hidden(x)))
        x = F.leaky_relu(self.policy_body(x))
        act_x = F.tanh(self.actor_body(x))  # NOTE: F.tanh is deprecated in newer torch (use torch.tanh)
        mean = F.tanh(self.actor_head(act_x))
        # +1 keeps both Beta parameters > 1, so the density is unimodal on (0, 1).
        alpha = (F.softplus(self.alpha(act_x)) + 1)
        beta = (F.softplus(self.beta(act_x)) + 1)
        policy_dist = torch.distributions.Beta(alpha, beta)
        if (action is None):
            action = policy_dist.sample()
        log_prob = policy_dist.log_prob(action).sum((- 1)).unsqueeze((- 1))
        entropy = policy_dist.entropy().sum((- 1)).unsqueeze((- 1))
        critic_x = F.leaky_relu(self.critic_body(x))
        value = self.critic_head(critic_x)
        return {'a': action, 'log_pi_a': log_prob, 'ent': entropy, 'mean': mean, 'v': value}
class Storage():
    """Rollout storage: one Python list per key, logically capped at `size` entries."""

    def __init__(self, size, keys=None):
        """Register the default rollout keys plus any caller-supplied extras."""
        base = [] if keys is None else keys
        self.keys = base + ['s', 'a', 'r', 'm', 'v', 'q', 'pi', 'log_pi', 'ent',
                            'adv', 'ret', 'q_a', 'log_pi_a', 'mean']
        self.size = size
        self.reset()

    def add(self, data):
        """Append each value in `data` under its (already registered) key."""
        for key, value in data.items():
            assert key in self.keys
            getattr(self, key).append(value)

    def placeholder(self):
        """Fill any still-empty key with `size` None entries so indexing is uniform."""
        for key in self.keys:
            if not getattr(self, key):
                setattr(self, key, [None] * self.size)

    def reset(self):
        """Clear every registered key back to an empty list."""
        for key in self.keys:
            setattr(self, key, [])

    def cat(self, keys):
        """Lazily concatenate the first `size` stored tensors for each requested key."""
        columns = [getattr(self, key)[:self.size] for key in keys]
        return map(lambda entries: torch.cat(entries, dim=0), columns)
def random_sample(indices, batch_size):
    """Yield shuffled index batches of `batch_size`; a short remainder batch comes last."""
    shuffled = np.asarray(np.random.permutation(indices))
    full_len = (len(shuffled) // batch_size) * batch_size
    for batch in shuffled[:full_len].reshape(-1, batch_size):
        yield batch
    remainder = len(shuffled) % batch_size
    if remainder:
        yield shuffled[-remainder:]
def tensor(x):
    """Coerce `x` to a float32 torch tensor on the configured device.

    Existing tensors pass through unchanged (no copy, no device move).
    """
    if isinstance(x, torch.Tensor):
        return x
    return torch.tensor(x, device=Config.DEVICE, dtype=torch.float32)
@jit
def function(x):
    # Identity function under numba's lazy @jit — signature inferred on first call.
    return x
@njit
def njit_f(x):
    # Identity function compiled in numba nopython mode (@njit).
    return x
@jit('int32(int32, int32)')
def int32_sum(a, b):
    # Eagerly compiled with an explicit int32 signature — compiles at import time.
    return (a + b)
@jit
def int32_sum_r1(a: int, b: int):
    # Lazy-jitted variant: Python type annotations are hints only; numba still
    # infers the machine types from the first call.
    return (a + b)
def list_norm_inplace(buff):
    """Standardize `buff` in place: subtract the mean, divide by the population std."""
    mean_val = np.mean(buff)
    std_val = np.std(buff)
    for idx, value in enumerate(buff):
        buff[idx] = (value - mean_val) / std_val
def plot_durations(episode_durations):
    """Plot episode durations and, once 100+ episodes exist, a 100-episode moving average."""
    plt.figure(2)
    plt.clf()
    durations_t = TC.FloatTensor(episode_durations)
    plt.title('Training...')
    plt.xlabel('Episode')
    plt.ylabel('Duration')
    plt.plot(durations_t.numpy())
    if (len(durations_t) >= 100):
        # 100-wide sliding mean, left-padded with zeros so both curves align.
        means = durations_t.unfold(0, 100, 1).mean(1).view((- 1))
        means = TC.cat((TC.zeros(99), means))
        plt.plot(means.numpy())
    plt.show()
def plot_durations_ii(ii, episode_durations, ee, ee_duration=100):
    """Record the finished episode's length (ii + 1); refresh the plot every ee_duration episodes."""
    episode_durations.append((ii + 1))
    if (((ee + 1) % ee_duration) == 0):
        clear_output()  # IPython display helper — assumes a notebook context
        plot_durations(episode_durations)
class PGNET(nn.Module):
    """Tiny policy network: state -> probability (in (0, 1)) of taking action 1."""

    def __init__(self, num_state):
        super(PGNET, self).__init__()
        self.fc_in = nn.Linear(num_state, 24)
        self.fc_hidden = nn.Linear(24, 36)
        self.fc_out = nn.Linear(36, 1)

    def forward(self, x):
        """Two ReLU layers followed by a sigmoid output."""
        hidden = F.relu(self.fc_in(x))
        hidden = F.relu(self.fc_hidden(hidden))
        return TC.sigmoid(self.fc_out(hidden))
class PGNET_AGENT(PGNET):
    """Training driver built on PGNET.

    NOTE(review): this fragment only defines run(); it relies on num_episode,
    run_episode and train_episode, none of which PGNET defines — presumably a
    fuller version of this class (see the later PGNET_AGENT) supplies them.
    """

    def run(self, env):
        # Alternate: play one episode, then (possibly) do a training update.
        for ee in range(self.num_episode):
            self.run_episode(env, ee)
            self.train_episode(ee)
def list_norm_inplace(buff):
    """Standardize `buff` in place: subtract the mean, divide by the population std."""
    mean_val = np.mean(buff)
    std_val = np.std(buff)
    for idx, value in enumerate(buff):
        buff[idx] = (value - mean_val) / std_val
def plot_durations(episode_durations):
    """Plot episode durations and, once 100+ episodes exist, a 100-episode moving average."""
    plt.figure(2)
    plt.clf()
    durations_t = TC.FloatTensor(episode_durations)
    plt.title('Training...')
    plt.xlabel('Episode')
    plt.ylabel('Duration')
    plt.plot(durations_t.numpy())
    if (len(durations_t) >= 100):
        # 100-wide sliding mean, left-padded with zeros so both curves align.
        means = durations_t.unfold(0, 100, 1).mean(1).view((- 1))
        means = TC.cat((TC.zeros(99), means))
        plt.plot(means.numpy())
    plt.show()
def plot_durations_ii(ii, episode_durations, ee, ee_duration=100):
    """Record the finished episode's length (ii + 1); refresh the plot every ee_duration episodes."""
    episode_durations.append((ii + 1))
    if (((ee + 1) % ee_duration) == 0):
        clear_output()  # IPython display helper — assumes a notebook context
        plot_durations(episode_durations)
class PGNET(nn.Module):
    """Tiny policy network: state -> probability (in (0, 1)) of taking action 1."""

    def __init__(self, num_state):
        super(PGNET, self).__init__()
        self.fc_in = nn.Linear(num_state, 24)
        self.fc_hidden = nn.Linear(24, 36)
        self.fc_out = nn.Linear(36, 1)

    def forward(self, x):
        """Two ReLU layers followed by a sigmoid output."""
        hidden = F.relu(self.fc_in(x))
        hidden = F.relu(self.fc_hidden(hidden))
        return TC.sigmoid(self.fc_out(hidden))
class PGNET_MACHINE(PGNET):
    """REINFORCE-style trainer for PGNET on a binary-action gym environment.

    Collects num_batch episodes into flat buffers, then does one batched
    policy-gradient update. The original source was flattened to one line, so
    the statement nesting below is a careful reconstruction.
    """

    def __init__(self, num_state, render_flag=False):
        self.forget_factor = 0.99   # reward discount factor
        self.learning_rate = 0.01
        self.num_episode = 5000
        self.num_batch = 5          # train once every num_batch episodes
        self.render_flag = render_flag
        self.steps_in_batch = 0
        self.episode_durations = []
        super(PGNET_MACHINE, self).__init__(num_state)
        self.optimizer = TC.optim.RMSprop(self.parameters(), lr=self.learning_rate)
        self.init_buff()

    def forward(self, state):
        """Wrap the base network's probability output in a Bernoulli distribution."""
        state_var = Variable(TC.from_numpy(state).float())
        prob = super(PGNET_MACHINE, self).forward(state_var)
        return Bernoulli(prob)

    def push_buff_done(self, reward, state, action, done_flag=False):
        """Append one transition; a terminal step stores reward 0 as an episode marker."""
        if done_flag:
            self.reward_buff.append(0)
        else:
            self.reward_buff.append(reward)
        self.state_buff.append(state)
        self.action_buff.append(action)

    def pull_buff(self, ii):
        """Return the (reward, state, action) triple stored at index ii."""
        return (self.reward_buff[ii], self.state_buff[ii], self.action_buff[ii])

    def init_buff(self):
        """Clear the transition buffers."""
        self.reward_buff = []
        self.state_buff = []
        self.action_buff = []

    def transform_discount_reward(self, steps):
        """Replace raw rewards with discounted returns (reset at the 0-reward
        terminal markers), then normalize the whole buffer in place."""
        future_reward = 0
        for ii in reversed(range(steps)):
            if (self.reward_buff[ii] == 0):
                future_reward = 0
            else:
                future_reward = ((future_reward * self.forget_factor) + self.reward_buff[ii])
            self.reward_buff[ii] = future_reward
        list_norm_inplace(self.reward_buff)

    def train(self, steps):
        """One REINFORCE update: accumulate -log pi(a) * return over the batch,
        then take a single optimizer step and clear the buffers."""
        self.transform_discount_reward(steps)
        self.optimizer.zero_grad()
        for ii in range(steps):
            (reward, state, action) = self.pull_buff(ii)
            action_var = Variable(TC.FloatTensor([float(action)]))
            policy = self.forward(state)
            loss = ((- policy.log_prob(action_var)) * reward)
            loss.backward()  # gradients accumulate across the whole batch
        self.optimizer.step()
        self.init_buff()

    def step(self, env, state, ee, ii, ee_duration=100):
        """Sample an action, advance the env one step, and record the transition."""
        policy = self.forward(state)
        action = policy.sample().data.numpy().astype(int)[0]
        (next_state, reward, done_flag, _) = env.step(action)
        if self.render_flag:
            env.render()
        self.push_buff_done(reward, state, action, done_flag)
        self.steps_in_batch += 1
        state = next_state
        return (state, done_flag)

    def run_episode(self, env, ee):
        """Play one full episode, logging its duration when it terminates."""
        state = env.reset()
        for ii in count():
            (state, done_flag) = self.step(env, state, ee, ii, ee_duration=100)
            if done_flag:
                plot_durations_ii(ii, self.episode_durations, ee, ee_duration=100)
                break

    def train_episode(self, ee):
        """Trigger a training update every num_batch episodes."""
        if ((ee > 0) and ((ee % self.num_batch) == 0)):
            self.train(self.steps_in_batch)
            self.steps_in_batch = 0

    def run(self, env):
        """Main loop: play and (periodically) train for num_episode episodes."""
        for ee in range(self.num_episode):
            self.run_episode(env, ee)
            self.train_episode(ee)
def main():
    """Train the policy-gradient machine on CartPole-v0."""
    env = gym.make('CartPole-v0')
    mypgnet = PGNET_MACHINE(env.observation_space.shape[0], render_flag=False)
    mypgnet.run(env)
    env.close()
def list_norm_inplace(buff):
    """Standardize `buff` in place: subtract the mean, divide by the population std."""
    mean_val = np.mean(buff)
    std_val = np.std(buff)
    for idx, value in enumerate(buff):
        buff[idx] = (value - mean_val) / std_val
def plot_durations(episode_durations):
    """Plot episode durations and, once 100+ episodes exist, a 100-episode moving average."""
    plt.figure(2)
    plt.clf()
    durations_t = TC.FloatTensor(episode_durations)
    plt.title('Training...')
    plt.xlabel('Episode')
    plt.ylabel('Duration')
    plt.plot(durations_t.numpy())
    if (len(durations_t) >= 100):
        # 100-wide sliding mean, left-padded with zeros so both curves align.
        means = durations_t.unfold(0, 100, 1).mean(1).view((- 1))
        means = TC.cat((TC.zeros(99), means))
        plt.plot(means.numpy())
    plt.show()
def plot_durations_ii(ii, episode_durations, ee, ee_duration=100):
    """Record the finished episode's length (ii + 1); refresh the plot every ee_duration episodes."""
    episode_durations.append((ii + 1))
    if (((ee + 1) % ee_duration) == 0):
        clear_output()  # IPython display helper — assumes a notebook context
        plot_durations(episode_durations)
class PGNET(nn.Module):
    """Tiny policy network: state -> probability (in (0, 1)) of taking action 1."""

    def __init__(self, num_state):
        super(PGNET, self).__init__()
        self.fc_in = nn.Linear(num_state, 24)
        self.fc_hidden = nn.Linear(24, 36)
        self.fc_out = nn.Linear(36, 1)

    def forward(self, x):
        """Two ReLU layers followed by a sigmoid output."""
        hidden = F.relu(self.fc_in(x))
        hidden = F.relu(self.fc_hidden(hidden))
        return TC.sigmoid(self.fc_out(hidden))
class PGNET_AGENT(PGNET):
    """REINFORCE-style trainer for PGNET (modern super() variant of PGNET_MACHINE).

    Collects num_batch episodes into flat buffers, then does one batched
    policy-gradient update. The original source was flattened to one line, so
    the statement nesting below is a careful reconstruction.
    """

    def __init__(self, num_state, render_flag=False):
        self.forget_factor = 0.99   # reward discount factor
        self.learning_rate = 0.01
        self.num_episode = 5000
        self.num_batch = 5          # train once every num_batch episodes
        self.render_flag = render_flag
        self.steps_in_batch = 0
        self.episode_durations = []
        super().__init__(num_state)
        self.optimizer = TC.optim.RMSprop(self.parameters(), lr=self.learning_rate)
        self.init_buff()

    def forward(self, state):
        """Wrap the base network's probability output in a Bernoulli distribution."""
        state_var = Variable(TC.from_numpy(state).float())
        prob = super().forward(state_var)
        return Bernoulli(prob)

    def push_buff_done(self, reward, state, action, done_flag=False):
        """Append one transition; a terminal step stores reward 0 as an episode marker."""
        if done_flag:
            self.reward_buff.append(0)
        else:
            self.reward_buff.append(reward)
        self.state_buff.append(state)
        self.action_buff.append(action)

    def pull_buff(self, ii):
        """Return the (reward, state, action) triple stored at index ii."""
        return (self.reward_buff[ii], self.state_buff[ii], self.action_buff[ii])

    def init_buff(self):
        """Clear the transition buffers."""
        self.reward_buff = []
        self.state_buff = []
        self.action_buff = []

    def transform_discount_reward(self, steps):
        """Replace raw rewards with discounted returns (reset at the 0-reward
        terminal markers), then normalize the whole buffer in place."""
        future_reward = 0
        for ii in reversed(range(steps)):
            if (self.reward_buff[ii] == 0):
                future_reward = 0
            else:
                future_reward = ((future_reward * self.forget_factor) + self.reward_buff[ii])
            self.reward_buff[ii] = future_reward
        list_norm_inplace(self.reward_buff)

    def train(self, steps):
        """One REINFORCE update: accumulate -log pi(a) * return over the batch,
        then take a single optimizer step and clear the buffers."""
        self.transform_discount_reward(steps)
        self.optimizer.zero_grad()
        for ii in range(steps):
            (reward, state, action) = self.pull_buff(ii)
            action_var = Variable(TC.FloatTensor([float(action)]))
            policy = self.forward(state)
            loss = ((- policy.log_prob(action_var)) * reward)
            loss.backward()  # gradients accumulate across the whole batch
        self.optimizer.step()
        self.init_buff()

    def step(self, env, state, ee, ii, ee_duration=100):
        """Sample an action, advance the env one step, and record the transition."""
        policy = self.forward(state)
        action = policy.sample().data.numpy().astype(int)[0]
        (next_state, reward, done_flag, _) = env.step(action)
        if self.render_flag:
            env.render()
        self.push_buff_done(reward, state, action, done_flag)
        self.steps_in_batch += 1
        state = next_state
        return (state, done_flag)

    def run_episode(self, env, ee):
        """Play one full episode, logging its duration when it terminates."""
        state = env.reset()
        for ii in count():
            (state, done_flag) = self.step(env, state, ee, ii, ee_duration=100)
            if done_flag:
                plot_durations_ii(ii, self.episode_durations, ee, ee_duration=100)
                break

    def train_episode(self, ee):
        """Trigger a training update every num_batch episodes."""
        if ((ee > 0) and ((ee % self.num_batch) == 0)):
            self.train(self.steps_in_batch)
            self.steps_in_batch = 0

    def run(self, env):
        """Main loop: play and (periodically) train for num_episode episodes."""
        for ee in range(self.num_episode):
            self.run_episode(env, ee)
            self.train_episode(ee)
class CDENSE(Layer):
    """Complex-valued dense Keras layer operating on [real, imag] tensor pairs.

    Inputs and outputs are two real tensors; internally they are combined into
    a complex tensor for y = x @ W + b with complex W and b.
    """

    def __init__(self, No, **kwargs):
        self.No = No  # number of output units
        super().__init__(**kwargs)

    def build(self, inshape_l):
        """Create real/imag weight halves and cache their complex combination."""
        inshape = inshape_l[0]  # both inputs share one shape
        self.w_r = self.add_weight('w_r', (inshape[1], self.No), initializer=igu)
        self.w_i = self.add_weight('w_i', (inshape[1], self.No), initializer=igu)
        self.b_r = self.add_weight('b_r', (self.No,), initializer=iz)
        self.b_i = self.add_weight('b_i', (self.No,), initializer=iz)
        self.w = tf.complex(self.w_r, self.w_i)
        self.b = tf.complex(self.b_r, self.b_i)
        super().build(inshape)

    def call(self, x_l):
        """Complex affine transform; returns [real_part, imag_part]."""
        (x_r, x_i) = x_l
        x = tf.complex(x_r, x_i)
        y = (tf.matmul(x, self.w) + self.b)
        # NOTE(review): tf.real/tf.imag are TF1 names (tf.math.real/imag in TF2).
        y_r = tf.real(y)
        y_i = tf.imag(y)
        return [y_r, y_i]

    def compute_output_shape(self, inshape_l):
        # One (batch, No) shape per output branch.
        return [(inshape_l[0], self.No), (inshape_l[0], self.No)]
def modeling(input_shape):
    """Build a two-input/two-output Keras model wrapping a single CDENSE(1) layer."""
    x_r = keras.layers.Input(input_shape)
    x_i = keras.layers.Input(input_shape)
    [y_r, y_i] = CDENSE(1, input_shape=(1,))([x_r, x_i])
    return keras.models.Model([x_r, x_i], [y_r, y_i])
def cfit(model, x, y, **kwargs):
    """Fit a real-valued two-branch model on complex data.

    Splits both x and y into [real, imag] lists before delegating to model.fit;
    extra keyword arguments are forwarded unchanged.
    """
    split = lambda z: [np.real(z), np.imag(z)]
    return model.fit(split(x), split(y), **kwargs)
def cpredict(model, x, **kwargs):
    """Run the two-branch model on complex input and recombine to complex output.

    Note: **kwargs are accepted but (as in the original) not forwarded to predict.
    """
    y_l = model.predict([np.real(x), np.imag(x)])
    return y_l[0] + 1j * y_l[1]
def cget_weights(model):
    """Recombine the model's four real weight arrays into complex weight/bias lists.

    Assumes get_weights() returns exactly [w_r, w_i, b_r, b_i] (the CDENSE layout).
    """
    w_r, w_i, b_r, b_i = model.get_weights()
    weight = w_r + 1j * w_i
    bias = b_r + 1j * b_i
    return ([weight], [bias])
def cmain():
    """End-to-end demo: fit the complex dense model on two samples, then
    print predictions on held-out points and the recovered complex weights."""
    model = modeling((1,))
    model.compile(keras.optimizers.sgd(), 'mse')
    # Synthetic complex line: y = (2 + 1j) * x + (1 + 2j).
    x = (np.array([0, 1, 2, 3, 4]) + (1j * np.array([4, 3, 2, 1, 0])))
    y = ((x * (2 + 1j)) + (1 + 2j))
    h = cfit(model, x[:2], y[:2], epochs=5000, verbose=0)
    y_pred = cpredict(model, x[2:])
    print('Targets:', y[2:])
    print(y_pred)
    [w, b] = cget_weights(model)
    print('weight:', w)
    print('bias:', b)
def ANN_models_func(Nin, Nh, Nout):
    """Build a one-hidden-layer softmax classifier with the Keras functional API."""
    x = layers.Input(shape=(Nin,))
    h = layers.Activation('relu')(layers.Dense(Nh)(x))
    y = layers.Activation('softmax')(layers.Dense(Nout)(h))
    model = models.Model(x, y)
    model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])
    return model
def ANN_seq_func(Nin, Nh, Nout):
    """Same classifier as ANN_models_func, built with the Sequential API."""
    model = models.Sequential()
    model.add(layers.Dense(Nh, activation='relu', input_shape=(Nin,)))
    model.add(layers.Dense(Nout, activation='softmax'))
    model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])
    return model
class ANN_models_class(models.Model):
    """One-hidden-layer softmax classifier as a Model subclass (graph built in __init__)."""

    def __init__(self, Nin, Nh, Nout):
        hidden = layers.Dense(Nh)
        output = layers.Dense(Nout)
        relu = layers.Activation('relu')
        softmax = layers.Activation('softmax')
        x = layers.Input(shape=(Nin,))
        h = relu(hidden(x))
        y = softmax(output(h))
        super().__init__(x, y)
        self.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])
class ANN_seq_class(models.Sequential):
    """One-hidden-layer softmax classifier as a Sequential subclass."""

    def __init__(self, Nin, Nh, Nout):
        super().__init__()
        self.add(layers.Dense(Nh, activation='relu', input_shape=(Nin,)))
        self.add(layers.Dense(Nout, activation='softmax'))
        self.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])
def Data_func():
    """Load MNIST: one-hot labels, flatten 28x28 images to 784 vectors, scale to [0, 1]."""
    ((X_train, y_train), (X_test, y_test)) = datasets.mnist.load_data()
    Y_train = np_utils.to_categorical(y_train)
    Y_test = np_utils.to_categorical(y_test)
    (L, W, H) = X_train.shape
    X_train = X_train.reshape((- 1), (W * H))
    X_test = X_test.reshape((- 1), (W * H))
    X_train = (X_train / 255.0)
    X_test = (X_test / 255.0)
    return ((X_train, Y_train), (X_test, Y_test))
def plot_acc(history, title=None):
    """Plot training/validation accuracy from a Keras History (or its .history dict)."""
    if (not isinstance(history, dict)):
        history = history.history
    # NOTE(review): 'acc'/'val_acc' are the Keras-1/TF1 metric names; TF2 uses 'accuracy'.
    plt.plot(history['acc'])
    plt.plot(history['val_acc'])
    if (title is not None):
        plt.title(title)
    plt.ylabel('Accuracy')
    plt.xlabel('Epoch')
    plt.legend(['Training', 'Verification'], loc=0)
def plot_loss(history, title=None):
    """Plot training/validation loss from a Keras History (or its .history dict)."""
    if (not isinstance(history, dict)):
        history = history.history
    plt.plot(history['loss'])
    plt.plot(history['val_loss'])
    if (title is not None):
        plt.title(title)
    plt.ylabel('Loss')
    plt.xlabel('Epoch')
    plt.legend(['Training', 'Verification'], loc=0)
def main():
    """Train and evaluate the single-hidden-layer ANN classifier on MNIST."""
    Nin = 784
    Nh = 100
    number_of_class = 10
    Nout = number_of_class
    model = ANN_seq_class(Nin, Nh, Nout)
    ((X_train, Y_train), (X_test, Y_test)) = Data_func()
    history = model.fit(X_train, Y_train, epochs=15, batch_size=100, validation_split=0.2)
    performace_test = model.evaluate(X_test, Y_test, batch_size=100)
    print('Test Loss and Accuracy ->', performace_test)
    plot_loss(history)
    plt.show()
    plot_acc(history)
    plt.show()
class ANN(models.Model):
    """One-hidden-layer regression network (linear output, MSE loss, SGD)."""

    def __init__(self, Nin, Nh, Nout):
        hidden = layers.Dense(Nh)
        output = layers.Dense(Nout)
        relu = layers.Activation('relu')
        x = layers.Input(shape=(Nin,))
        h = relu(hidden(x))
        y = output(h)  # no activation: raw regression output
        super().__init__(x, y)
        self.compile(loss='mse', optimizer='sgd')
def Data_func():
    """Load Boston housing data; min-max scale features using train-set statistics only."""
    ((X_train, y_train), (X_test, y_test)) = datasets.boston_housing.load_data()
    scaler = preprocessing.MinMaxScaler()
    X_train = scaler.fit_transform(X_train)
    X_test = scaler.transform(X_test)
    return ((X_train, y_train), (X_test, y_test))
def main():
    """Train and evaluate the regression ANN on Boston housing, then plot the loss."""
    Nin = 13
    Nh = 5
    Nout = 1
    model = ANN(Nin, Nh, Nout)
    ((X_train, y_train), (X_test, y_test)) = Data_func()
    history = model.fit(X_train, y_train, epochs=100, batch_size=100, validation_split=0.2, verbose=2)
    performace_test = model.evaluate(X_test, y_test, batch_size=100)
    print('\nTest Loss -> {:.2f}'.format(performace_test))
    plot_loss(history)
    plt.show()
class DNN(models.Sequential):
    """Two-hidden-layer softmax classifier (no dropout)."""

    def __init__(self, Nin, Nh_l, Nout):
        super().__init__()
        self.add(layers.Dense(Nh_l[0], activation='relu', input_shape=(Nin,), name='Hidden-1'))
        self.add(layers.Dense(Nh_l[1], activation='relu', name='Hidden-2'))
        self.add(layers.Dense(Nout, activation='softmax'))
        self.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])
class DNN(models.Sequential):
    """Two-hidden-layer softmax classifier (duplicate of the earlier DNN definition)."""

    def __init__(self, Nin, Nh_l, Nout):
        super().__init__()
        self.add(layers.Dense(Nh_l[0], activation='relu', input_shape=(Nin,), name='Hidden-1'))
        self.add(layers.Dense(Nh_l[1], activation='relu', name='Hidden-2'))
        self.add(layers.Dense(Nout, activation='softmax'))
        self.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])
def Data_func():
    """Load MNIST: one-hot labels, flatten 28x28 images to 784 vectors, scale to [0, 1]."""
    ((X_train, y_train), (X_test, y_test)) = datasets.mnist.load_data()
    Y_train = np_utils.to_categorical(y_train)
    Y_test = np_utils.to_categorical(y_test)
    (L, W, H) = X_train.shape
    X_train = X_train.reshape((- 1), (W * H))
    X_test = X_test.reshape((- 1), (W * H))
    X_train = (X_train / 255.0)
    X_test = (X_test / 255.0)
    return ((X_train, Y_train), (X_test, Y_test))
def main():
    """Train and evaluate the two-hidden-layer DNN on MNIST, then plot histories."""
    Nin = 784
    Nh_l = [100, 50]
    number_of_class = 10
    Nout = number_of_class
    ((X_train, Y_train), (X_test, Y_test)) = Data_func()
    model = DNN(Nin, Nh_l, Nout)
    history = model.fit(X_train, Y_train, epochs=10, batch_size=100, validation_split=0.2)
    performace_test = model.evaluate(X_test, Y_test, batch_size=100)
    print('Test Loss and Accuracy ->', performace_test)
    plot_acc(history)
    plt.show()
    plot_loss(history)
    plt.show()
class DNN(models.Sequential):
    """Two-hidden-layer softmax classifier with per-layer dropout rates Pd_l."""

    def __init__(self, Nin, Nh_l, Pd_l, Nout):
        super().__init__()
        self.add(layers.Dense(Nh_l[0], activation='relu', input_shape=(Nin,), name='Hidden-1'))
        self.add(layers.Dropout(Pd_l[0]))
        self.add(layers.Dense(Nh_l[1], activation='relu', name='Hidden-2'))
        self.add(layers.Dropout(Pd_l[1]))
        self.add(layers.Dense(Nout, activation='softmax'))
        self.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])
def Data_func():
    """Load CIFAR-10: one-hot labels, flatten 32x32x3 images, scale to [0, 1]."""
    ((X_train, y_train), (X_test, y_test)) = datasets.cifar10.load_data()
    Y_train = np_utils.to_categorical(y_train)
    Y_test = np_utils.to_categorical(y_test)
    (L, W, H, C) = X_train.shape
    X_train = X_train.reshape((- 1), ((W * H) * C))
    X_test = X_test.reshape((- 1), ((W * H) * C))
    X_train = (X_train / 255.0)
    X_test = (X_test / 255.0)
    return ((X_train, Y_train), (X_test, Y_test))
def main(Pd_l=[0.0, 0.0]):
    """Train the dropout DNN on CIFAR-10 and plot accuracy/loss histories.

    NOTE(review): Pd_l is a mutable default argument; harmless here since it is
    never mutated, but a None sentinel would be the safer idiom.
    (Plot titles are Korean: "(a) accuracy over training", "(b) loss over training".)
    """
    Nh_l = [100, 50]
    number_of_class = 10
    Nout = number_of_class
    ((X_train, Y_train), (X_test, Y_test)) = Data_func()
    model = DNN(X_train.shape[1], Nh_l, Pd_l, Nout)
    history = model.fit(X_train, Y_train, epochs=100, batch_size=100, validation_split=0.2)
    performace_test = model.evaluate(X_test, Y_test, batch_size=100)
    print('Test Loss and Accuracy ->', performace_test)
    plot_acc(history, '(a) ν•™μŠ΅μ„ ν†΅ν•œ μ •ν™•λ„μ˜ λ³€ν™”')
    plt.show()
    plot_loss(history, '(b) ν•™μŠ΅μ„ ν†΅ν•œ μ†μ‹€μ˜ λ³€ν™”')
    plt.show()
class CNN(models.Sequential):
    """LeNet-style CNN: two conv layers, max-pool, dropout, dense softmax head."""

    def __init__(self, input_shape, num_classes):
        super().__init__()
        self.add(layers.Conv2D(32, kernel_size=(3, 3), activation='relu', input_shape=input_shape))
        self.add(layers.Conv2D(64, (3, 3), activation='relu'))
        self.add(layers.MaxPooling2D(pool_size=(2, 2)))
        self.add(layers.Dropout(0.25))
        self.add(layers.Flatten())
        self.add(layers.Dense(128, activation='relu'))
        self.add(layers.Dropout(0.5))
        self.add(layers.Dense(num_classes, activation='softmax'))
        self.compile(loss=keras.losses.categorical_crossentropy, optimizer='rmsprop', metrics=['accuracy'])
class DATA():
    """MNIST container: reshapes to the backend's channel order, scales to
    [0, 1], and one-hot encodes the labels."""

    def __init__(self):
        num_classes = 10
        ((x_train, y_train), (x_test, y_test)) = datasets.mnist.load_data()
        (img_rows, img_cols) = x_train.shape[1:]
        # Channel axis position depends on the Keras backend configuration.
        if (backend.image_data_format() == 'channels_first'):
            x_train = x_train.reshape(x_train.shape[0], 1, img_rows, img_cols)
            x_test = x_test.reshape(x_test.shape[0], 1, img_rows, img_cols)
            input_shape = (1, img_rows, img_cols)
        else:
            x_train = x_train.reshape(x_train.shape[0], img_rows, img_cols, 1)
            x_test = x_test.reshape(x_test.shape[0], img_rows, img_cols, 1)
            input_shape = (img_rows, img_cols, 1)
        x_train = x_train.astype('float32')
        x_test = x_test.astype('float32')
        x_train /= 255
        x_test /= 255
        y_train = keras.utils.to_categorical(y_train, num_classes)
        y_test = keras.utils.to_categorical(y_test, num_classes)
        self.input_shape = input_shape
        self.num_classes = num_classes
        (self.x_train, self.y_train) = (x_train, y_train)
        (self.x_test, self.y_test) = (x_test, y_test)
def main():
    """Train and evaluate the CNN on MNIST, then plot loss/accuracy histories."""
    batch_size = 128
    epochs = 10
    data = DATA()
    model = CNN(data.input_shape, data.num_classes)
    history = model.fit(data.x_train, data.y_train, batch_size=batch_size, epochs=epochs, validation_split=0.2)
    score = model.evaluate(data.x_test, data.y_test)
    print()
    print('Test loss:', score[0])
    print('Test accuracy:', score[1])
    plot_loss(history)
    plt.show()
    plot_acc(history)
    plt.show()
class Machine(aicnn.Machine):
    """CIFAR-10 specialization of the generic aicnn.Machine."""

    def __init__(self):
        # The test split is unused here; the base class handles splitting.
        (X, y), (_, _) = datasets.cifar10.load_data()
        super().__init__(X, y, nb_classes=10)
def main():
    """Entry point: construct the CIFAR-10 machine and train it."""
    Machine().run()
class Data():
    """IMDB review data, tokenized and padded to a fixed length."""

    def __init__(self, max_features=20000, maxlen=80):
        (train_x, train_y), (test_x, test_y) = imdb.load_data(num_words=max_features)
        # Pad/truncate every review to exactly `maxlen` tokens.
        self.x_train = sequence.pad_sequences(train_x, maxlen=maxlen)
        self.y_train = train_y
        self.x_test = sequence.pad_sequences(test_x, maxlen=maxlen)
        self.y_test = test_y
class RNN_LSTM(models.Model):
    """Embedding -> LSTM -> sigmoid binary classifier for sentiment."""

    def __init__(self, max_features, maxlen):
        inp = layers.Input((maxlen,))
        emb = layers.Embedding(max_features, 128)(inp)
        hid = layers.LSTM(128, dropout=0.2, recurrent_dropout=0.2)(emb)
        out = layers.Dense(1, activation='sigmoid')(hid)
        super().__init__(inp, out)
        self.compile(loss='binary_crossentropy', optimizer='adam',
                     metrics=['accuracy'])
class Machine():
    """Couples the IMDB Data loader with the RNN_LSTM model."""

    def __init__(self, max_features=20000, maxlen=80):
        self.data = Data(max_features, maxlen)
        self.model = RNN_LSTM(max_features, maxlen)

    def run(self, epochs=3, batch_size=32):
        """Train on the training split and report test loss/accuracy."""
        print('Training stage')
        print('==============')
        self.model.fit(self.data.x_train, self.data.y_train,
                       batch_size=batch_size, epochs=epochs,
                       validation_data=(self.data.x_test, self.data.y_test))
        score, acc = self.model.evaluate(self.data.x_test, self.data.y_test,
                                         batch_size=batch_size)
        print('Test performance: accuracy={0}, loss={1}'.format(acc, score))
def main():
    """Entry point: train and evaluate the sentiment model."""
    machine = Machine()
    machine.run()
def main():
    """Entry point: run the time-series machine for 400 epochs."""
    Machine().run(epochs=400)
class Machine():
    """Train the airline-passenger LSTM and visualize its predictions."""

    def __init__(self):
        self.data = Dataset()
        # Input shape excludes the batch dimension.
        shape = self.data.X.shape[1:]
        self.model = rnn_model(shape)

    def run(self, epochs=400):
        """Fit the model, then plot the training history, validation
        results, a per-sample bar comparison, and all-data predictions."""
        d = self.data
        X_train, X_test, y_train, y_test = d.X_train, d.X_test, d.y_train, d.y_test
        X, y = d.X, d.y
        m = self.model
        h = m.fit(X_train, y_train, epochs=epochs,
                  validation_data=[X_test, y_test], verbose=0)
        skeras.plot_loss(h)
        plt.title('History of training')
        plt.show()
        yp = m.predict(X_test)
        print('Loss:', m.evaluate(X_test, y_test))
        # Bug fix: the legend labels were swapped (and 'Original' was
        # misspelled) — `yp` is the model prediction, `y_test` the data.
        plt.plot(yp, label='Prediction')
        plt.plot(y_test, label='Original')
        plt.legend(loc=0)
        plt.title('Validation Results')
        plt.show()
        yp = m.predict(X_test).reshape(-1)
        print('Loss:', m.evaluate(X_test, y_test))
        print(yp.shape, y_test.shape)
        df = pd.DataFrame()
        df['Sample'] = list(range(len(y_test))) * 2
        df['Normalized #Passengers'] = np.concatenate([y_test, yp], axis=0)
        df['Type'] = ['Original'] * len(y_test) + ['Prediction'] * len(yp)
        plt.figure(figsize=(7, 5))
        sns.barplot(x='Sample', y='Normalized #Passengers', hue='Type', data=df)
        plt.ylabel('Normalized #Passengers')
        plt.show()
        yp = m.predict(X)
        # Same label fix for the all-data plot.
        plt.plot(yp, label='Prediction')
        plt.plot(y, label='Original')
        plt.legend(loc=0)
        plt.title('All Results')
        plt.show()
def rnn_model(shape):
    """Build and compile a one-layer LSTM regressor (10 units -> 1 output)."""
    inputs = layers.Input(shape=shape)
    hidden = layers.LSTM(10)(inputs)
    outputs = layers.Dense(1)(hidden)
    model = models.Model(inputs, outputs)
    model.compile('adam', 'mean_squared_error')
    model.summary()
    return model
class Dataset():
    """Windowed airline-passenger series with an 80/20 train/test split."""

    def __init__(self, fname='international-airline-passengers.csv', D=12):
        normalized = load_data(fname=fname)
        X, y = get_Xy(normalized, D=D)
        # Fixed random_state keeps the split reproducible across runs.
        split = model_selection.train_test_split(X, y, test_size=0.2,
                                                 random_state=42)
        self.X, self.y = X, y
        self.X_train, self.X_test, self.y_train, self.y_test = split
def load_data(fname='international-airline-passengers.csv'):
    """Load monthly passenger counts, plot the raw and normalized series,
    and return the series standardized and shrunk by a factor of 5."""
    # skipfooter requires the python engine; column 1 holds the counts.
    frame = pd.read_csv(fname, usecols=[1], engine='python', skipfooter=3)
    series = frame.values.reshape(-1)
    plt.plot(series)
    plt.xlabel('Time')
    plt.ylabel('#Passengers')
    plt.title('Original Data')
    plt.show()
    # Standardize, then divide by 5 so values fall well inside [-1, 1].
    series_dn = (series - np.mean(series)) / np.std(series) / 5
    plt.plot(series_dn)
    plt.xlabel('Time')
    plt.ylabel('Normalized #Passengers')
    plt.title('Normalized data by $E[]$ and $5\\sigma$')
    plt.show()
    return series_dn
def get_Xy(data, D=12):
    """Build sliding-window samples from a 1-D series.

    Each sample X[i] is the window data[i:i+D] (shaped (D, 1) for an RNN)
    and its target y[i] is the next value data[i+D].

    Args:
        data: 1-D array-like time series of length N.
        D: window length; must satisfy N > D.

    Returns:
        (X, y) with X.shape == (N - D, D, 1) and y.shape == (N - D,).
    """
    X_l = []
    y_l = []
    N = len(data)
    assert N > D, 'N should be larger than D, where N is len(data)'
    # Bug fix: range(N - D) yields every window whose target index i+D is
    # in bounds; the original range(N - D - 1) dropped the last sample.
    for ii in range(N - D):
        X_l.append(data[ii:ii + D])
        y_l.append(data[ii + D])
    X = np.array(X_l)
    X = X.reshape(X.shape[0], X.shape[1], 1)
    y = np.array(y_l)
    print(X.shape, y.shape)
    return (X, y)
class AE(models.Model):
    """Single-hidden-layer autoencoder: x -> z (relu) -> y (sigmoid)."""

    def __init__(self, x_nodes=784, z_dim=36):
        x = layers.Input(shape=(x_nodes,))
        z = layers.Dense(z_dim, activation='relu')(x)
        y = layers.Dense(x_nodes, activation='sigmoid')(z)
        super().__init__(x, y)
        # Keep tensor handles for building Encoder/Decoder sub-models.
        self.x = x
        self.z = z
        self.z_dim = z_dim
        self.compile(optimizer='adadelta', loss='binary_crossentropy',
                     metrics=['accuracy'])

    def Encoder(self):
        """Sub-model mapping an input image to its latent code."""
        return models.Model(self.x, self.z)

    def Decoder(self):
        """Sub-model mapping a latent code back to an image, reusing the
        trained output layer."""
        z = layers.Input(shape=(self.z_dim,))
        return models.Model(z, self.layers[-1](z))
def show_ae(autoencoder):
    """Plot originals, latent codes (stem plots), and reconstructions for
    the first 10 images of the module-level X_test."""
    encoder = autoencoder.Encoder()
    decoder = autoencoder.Decoder()
    codes = encoder.predict(X_test)
    recons = decoder.predict(codes)
    n = 10
    plt.figure(figsize=(20, 6))
    for i in range(n):
        # Row 1: original image.
        ax = plt.subplot(3, n, i + 1)
        plt.imshow(X_test[i].reshape(28, 28))
        plt.gray()
        ax.get_xaxis().set_visible(False)
        ax.get_yaxis().set_visible(False)
        # Row 2: latent code as a stem plot.
        ax = plt.subplot(3, n, i + 1 + n)
        plt.stem(codes[i].reshape(-1))
        plt.gray()
        ax.get_xaxis().set_visible(False)
        ax.get_yaxis().set_visible(False)
        # Row 3: reconstruction from the code.
        ax = plt.subplot(3, n, i + 1 + 2 * n)
        plt.imshow(recons[i].reshape(28, 28))
        plt.gray()
        ax.get_xaxis().set_visible(False)
        ax.get_yaxis().set_visible(False)
    plt.show()
def main():
    """Train the dense autoencoder on module-level X_train and visualize."""
    autoencoder = AE(784, 36)
    history = autoencoder.fit(X_train, X_train, epochs=10, batch_size=256,
                              shuffle=True, validation_data=(X_test, X_test))
    plot_acc(history, '(a) ν•™μŠ΅ 경과에 λ”°λ₯Έ 정확도 λ³€ν™” 좔이')
    plt.show()
    plot_loss(history, '(b) ν•™μŠ΅ 경과에 λ”°λ₯Έ 손싀값 λ³€ν™” 좔이')
    plt.show()
    show_ae(autoencoder)
    plt.show()
def Conv2D(filters, kernel_size, padding='same', activation='relu'):
    """Shorthand for layers.Conv2D with same-padding/ReLU defaults."""
    return layers.Conv2D(filters, kernel_size,
                         padding=padding, activation=activation)
class AE(models.Model):
    """Convolutional autoencoder: conv/pool encoder down to a 1-channel
    bottleneck, conv/upsample decoder back to the input resolution."""

    def __init__(self, org_shape=(1, 28, 28)):
        original = layers.Input(shape=org_shape)
        # Encoder: two conv+pool stages, then a 7x7 conv bottleneck.
        h = Conv2D(4, (3, 3))(original)
        h = layers.MaxPooling2D((2, 2), padding='same')(h)
        h = Conv2D(8, (3, 3))(h)
        h = layers.MaxPooling2D((2, 2), padding='same')(h)
        z = Conv2D(1, (7, 7))(h)
        # Decoder: convs interleaved with upsampling.
        h = Conv2D(16, (3, 3))(z)
        h = layers.UpSampling2D((2, 2))(h)
        h = Conv2D(8, (3, 3))(h)
        h = layers.UpSampling2D((2, 2))(h)
        h = Conv2D(4, (3, 3))(h)
        decoded = Conv2D(1, (3, 3), activation='sigmoid')(h)
        super().__init__(original, decoded)
        self.compile(optimizer='adadelta', loss='binary_crossentropy',
                     metrics=['accuracy'])
def show_ae(autoencoder, data):
    """Show 10 test images next to their autoencoder reconstructions."""
    x_test = data.x_test
    decoded_imgs = autoencoder.predict(x_test)
    print(decoded_imgs.shape, data.x_test.shape)
    # Collapse the channel axis regardless of backend data format.
    if backend.image_data_format() == 'channels_first':
        N, n_ch, n_i, n_j = x_test.shape
    else:
        N, n_i, n_j, n_ch = x_test.shape
    x_test = x_test.reshape(N, n_i, n_j)
    decoded_imgs = decoded_imgs.reshape(decoded_imgs.shape[0], n_i, n_j)
    n = 10
    plt.figure(figsize=(20, 4))
    for i in range(n):
        # Top row: original; bottom row: reconstruction.
        ax = plt.subplot(2, n, i + 1)
        plt.imshow(x_test[i], cmap='gray')
        ax.get_xaxis().set_visible(False)
        ax.get_yaxis().set_visible(False)
        ax = plt.subplot(2, n, i + 1 + n)
        plt.imshow(decoded_imgs[i], cmap='gray')
        ax.get_xaxis().set_visible(False)
        ax.get_yaxis().set_visible(False)
    plt.show()
def main(epochs=20, batch_size=128):
    """Train the convolutional autoencoder and plot curves plus samples."""
    data = DATA()
    autoencoder = AE(data.input_shape)
    history = autoencoder.fit(data.x_train, data.x_train, epochs=epochs,
                              batch_size=batch_size, shuffle=True,
                              validation_split=0.2)
    plot_acc(history, '(a) 정확도 ν•™μŠ΅ 곑선')
    plt.show()
    plot_loss(history, '(b) 손싀 ν•™μŠ΅ 곑선')
    plt.show()
    show_ae(autoencoder, data)
    plt.show()
def add_decorate(x):
    """Append each feature's squared deviation from the row mean.

    axis=-1 is the last (feature) dimension, so the output is twice
    as wide as the input: [x, (x - mean(x))^2].
    """
    mean = K.mean(x, axis=-1, keepdims=True)
    sq_dev = K.square(x - mean)
    return K.concatenate([x, sq_dev], axis=-1)
def add_decorate_shape(input_shape):
    """Output shape for add_decorate: the feature axis doubles because
    the squared deviations are concatenated onto the input."""
    shape = list(input_shape)
    assert len(shape) == 2
    return (shape[0], shape[1] * 2)
def model_compile(model):
    """Compile *model* for binary discrimination using the shared
    module-level `adam` optimizer."""
    settings = dict(loss='binary_crossentropy', optimizer=adam,
                    metrics=['accuracy'])
    return model.compile(**settings)
class GAN():
    """Vanilla GAN on 1-D vectors.

    The discriminator sees its input augmented with squared deviations
    (add_decorate); the generator is a per-element 1x1 Conv1D stack.
    GD stacks G and a weight-frozen D for generator updates.
    """

    def __init__(self, ni_D, nh_D, nh_G):
        # ni_D: input width; nh_D / nh_G: hidden sizes of D / G.
        self.ni_D = ni_D
        self.nh_D = nh_D
        self.nh_G = nh_G
        self.D = self.gen_D()
        self.G = self.gen_G()
        self.GD = self.make_GD()

    def gen_D(self):
        """Build and compile the discriminator."""
        ni_D = self.ni_D
        nh_D = self.nh_D
        D = models.Sequential()
        D.add(Lambda(add_decorate, output_shape=add_decorate_shape,
                     input_shape=(ni_D,)))
        D.add(Dense(nh_D, activation='relu'))
        D.add(Dense(nh_D, activation='relu'))
        D.add(Dense(1, activation='sigmoid'))
        model_compile(D)
        return D

    def gen_G(self):
        """Build and compile the generator."""
        ni_D = self.ni_D
        # Bug fix: this previously read self.nh_D, silently ignoring the
        # nh_G constructor argument.
        nh_G = self.nh_G
        G = models.Sequential()
        G.add(Reshape((ni_D, 1), input_shape=(ni_D,)))
        G.add(Conv1D(nh_G, 1, activation='relu'))
        G.add(Conv1D(nh_G, 1, activation='sigmoid'))
        G.add(Conv1D(1, 1))
        G.add(Flatten())
        model_compile(G)
        return G

    def make_GD(self):
        """Stack G then D; D's weights are frozen inside the stacked model
        so generator updates do not touch the discriminator."""
        G, D = self.G, self.D
        GD = models.Sequential()
        GD.add(G)
        GD.add(D)
        D.trainable = False
        model_compile(GD)
        D.trainable = True
        return GD

    def D_train_on_batch(self, Real, Gen):
        """One D step: real samples labeled 1, generated labeled 0."""
        D = self.D
        X = np.concatenate([Real, Gen], axis=0)
        y = np.array([1] * Real.shape[0] + [0] * Gen.shape[0])
        D.train_on_batch(X, y)

    def GD_train_on_batch(self, Z):
        """One G step: push D's output toward 1 for generated samples."""
        GD = self.GD
        y = np.array([1] * Z.shape[0])
        GD.train_on_batch(Z, y)
class Data():
    """Samplers for the 1-D GAN demo.

    real_sample(n_batch): n_batch rows drawn from N(mu, sigma^2).
    in_sample(n_batch): n_batch rows of uniform [0, 1) latent noise.
    """

    def __init__(self, mu, sigma, ni_D):
        def real_sample(n_batch):
            return np.random.normal(mu, sigma, (n_batch, ni_D))

        def in_sample(n_batch):
            return np.random.rand(n_batch, ni_D)

        self.real_sample = real_sample
        self.in_sample = in_sample
class Machine():
    """Orchestrates GAN training on 1-D Gaussian data.

    Each `train_each` round performs `n_iter_D` discriminator updates
    followed by `n_iter_G` generator updates through the stacked GD model.
    """

    def __init__(self, n_batch=10, ni_D=100):
        # Parameters of the "real" target distribution N(4, 1.25^2).
        data_mean = 4
        data_stddev = 1.25
        # Per-round update counts: 1 D step, then 5 G steps.
        self.n_iter_D = 1
        self.n_iter_G = 5
        self.data = Data(data_mean, data_stddev, ni_D)
        self.gan = GAN(ni_D=ni_D, nh_D=50, nh_G=50)
        self.n_batch = n_batch

    def train_D(self):
        """One discriminator update on a real batch plus a generated batch."""
        gan = self.gan
        n_batch = self.n_batch
        data = self.data
        Real = data.real_sample(n_batch)
        Z = data.in_sample(n_batch)
        Gen = gan.G.predict(Z)
        # Unfreeze D for its own update step.
        gan.D.trainable = True
        gan.D_train_on_batch(Real, Gen)

    def train_GD(self):
        """One generator update through the frozen discriminator."""
        gan = self.gan
        n_batch = self.n_batch
        data = self.data
        Z = data.in_sample(n_batch)
        gan.D.trainable = False
        gan.GD_train_on_batch(Z)

    def train_each(self):
        """One alternation round: n_iter_D D-steps then n_iter_G G-steps."""
        for it in range(self.n_iter_D):
            self.train_D()
        for it in range(self.n_iter_G):
            self.train_GD()

    def train(self, epochs):
        """Run `epochs` alternation rounds."""
        for epoch in range(epochs):
            self.train_each()

    def test(self, n_test):
        """Generate `n_test` samples; returns (Gen, Z)."""
        gan = self.gan
        data = self.data
        Z = data.in_sample(n_test)
        Gen = gan.G.predict(Z)
        return (Gen, Z)

    def show_hist(self, Real, Gen, Z):
        """Overlay histograms of real, generated, and input-noise samples."""
        plt.hist(Real.reshape((- 1)), histtype='step', label='Real')
        plt.hist(Gen.reshape((- 1)), histtype='step', label='Generated')
        plt.hist(Z.reshape((- 1)), histtype='step', label='Input')
        plt.legend(loc=0)

    def test_and_show(self, n_test):
        """Generate samples, plot them against fresh real samples,
        and print summary statistics."""
        data = self.data
        (Gen, Z) = self.test(n_test)
        Real = data.real_sample(n_test)
        self.show_hist(Real, Gen, Z)
        Machine.print_stat(Real, Gen)

    def run_epochs(self, epochs, n_test):
        """Train the GAN for `epochs` rounds, then compare the original
        and generated distributions visually."""
        self.train(epochs)
        self.test_and_show(n_test)

    def run(self, n_repeat=200, n_show=200, n_test=100):
        """Full loop: n_repeat stages of n_show training rounds each,
        showing the histogram comparison after every stage."""
        for ii in range(n_repeat):
            print('Stage', ii, '(Epoch: {})'.format((ii * n_show)))
            self.run_epochs(n_show, n_test)
            plt.show()

    @staticmethod
    def print_stat(Real, Gen):
        """Print mean/std of the real vs. generated samples."""
        def stat(d):
            return (np.mean(d), np.std(d))
        print('Mean and Std of Real:', stat(Real))
        print('Mean and Std of Gen:', stat(Gen))
class GAN_Pure(GAN):
    """GAN variant whose discriminator sees only the raw input
    (no add_decorate feature augmentation)."""

    def __init__(self, ni_D, nh_D, nh_G):
        super().__init__(ni_D, nh_D, nh_G)

    def gen_D(self):
        """Plain MLP discriminator without the decoration Lambda layer."""
        D = models.Sequential()
        D.add(Dense(self.nh_D, activation='relu', input_shape=(self.ni_D,)))
        D.add(Dense(self.nh_D, activation='relu'))
        D.add(Dense(1, activation='sigmoid'))
        model_compile(D)
        return D
class Machine_Pure(Machine):
    """Machine variant driving GAN_Pure instead of GAN."""

    def __init__(self, n_batch=10, ni_D=100):
        data_mean = 4
        data_stddev = 1.25
        # Bug fix: the inherited train_each() reads n_iter_D / n_iter_G,
        # which this override never set, so train() raised AttributeError.
        # Mirror Machine.__init__'s defaults here.
        self.n_iter_D = 1
        self.n_iter_G = 5
        self.data = Data(data_mean, data_stddev, ni_D)
        self.gan = GAN_Pure(ni_D=ni_D, nh_D=50, nh_G=50)
        self.n_batch = n_batch
def main():
    """Run the 1-D GAN demo with single-sample batches."""
    machine = Machine(n_batch=1, ni_D=100)
    machine.run(n_repeat=200, n_show=200, n_test=100)
class Machine(aigen.Machine_Generator):
    """CIFAR-10 machine using a 2% subsample and rotation augmentation."""

    def __init__(self):
        (x_train, y_train), (_, _) = datasets.cifar10.load_data()
        # Keep only 2% of the training set for a quick demo run.
        _, X, _, y = model_selection.train_test_split(x_train, y_train,
                                                      test_size=0.02)
        X = X.astype(float)
        super().__init__(X, y, nb_classes=10,
                         gen_param_dict={'rotation_range': 10})
def main():
    """Entry point: run the generator-augmented CIFAR-10 machine."""
    Machine().run()
class Machine(aiprt.Machine_Generator):
    """CIFAR-10 machine (2% subsample) for the pretrained-model demo."""

    def __init__(self):
        (x_train, y_train), (_, _) = datasets.cifar10.load_data()
        # Keep only 2% of the training set for a quick demo run.
        _, X, _, y = model_selection.train_test_split(x_train, y_train,
                                                      test_size=0.02)
        super().__init__(X.astype(float), y, nb_classes=10)
def main():
    """Entry point: run the pretrained-model CIFAR-10 machine."""
    Machine().run()
def Lambda_with_lambda():
    """Demonstrate a Keras Lambda layer wrapping an inline lambda (x + 1)."""
    from keras.layers import Lambda, Input
    from keras.models import Model
    inp = Input((1,))
    out = Lambda(lambda v: v + 1)(inp)
    model = Model(inp, out)
    result = model.predict_on_batch([1, 2, 3])
    print('np.array([1,2,3]) + 1:')
    print(result)
def Lambda_function():
    """Demonstrate a Lambda layer with named processing/shape functions.

    The layer computes x**2 + 2x + 1 == (x + 1)**2 elementwise.
    """
    from keras.layers import Lambda, Input
    from keras.models import Model

    def kproc(x):
        return (x ** 2) + (2 * x) + 1

    def kshape(input_shape):
        # Elementwise op: output shape equals input shape.
        return input_shape

    x = Input((1,))
    y = Lambda(kproc, kshape)(x)
    m = Model(x, y)
    yp = m.predict_on_batch([1, 2, 3])
    # Bug fix: the banner previously claimed "+ 1" (copied from the
    # neighboring demo), but this layer computes (x + 1)**2.
    print('(np.array([1,2,3]) + 1)**2:')
    print(yp)
def Backend_for_Lambda():
    """Lambda layer built on keras.backend ops: concatenate x with its
    absolute and squared deviations from the row mean (width triples)."""
    from keras.layers import Lambda, Input
    from keras.models import Model
    from keras import backend as K

    def proc(t):
        mu = K.mean(t, axis=1, keepdims=True)
        return K.concatenate([t, K.abs(t - mu), K.square(t - mu)], axis=1)

    def out_shape(in_shape):
        # Feature axis triples: [x, |x-m|, (x-m)^2].
        shape = list(in_shape)
        shape[1] *= 3
        return tuple(shape)

    inp = Input((3,))
    out = Lambda(proc, out_shape)(inp)
    model = Model(inp, out)
    print(model.predict_on_batch([[1, 2, 3], [3, 4, 8]]))
def TF_for_Lamda():
    """Lambda layer implemented with raw TensorFlow ops (same math as
    Backend_for_Lambda: concat x with |x - m| and (x - m)**2)."""
    from keras.layers import Lambda, Input
    from keras.models import Model
    import tensorflow as tf

    def kproc_concat(x):
        # Fix: `keep_dims` was deprecated in TF 1.x and removed in TF 2.x;
        # the parameter is `keepdims`.
        m = tf.reduce_mean(x, axis=1, keepdims=True)
        d1 = tf.abs(x - m)
        d2 = tf.square(x - m)
        return tf.concat([x, d1, d2], axis=1)

    def kshape_concat(input_shape):
        # Feature axis triples: [x, |x-m|, (x-m)^2].
        output_shape = list(input_shape)
        output_shape[1] *= 3
        return tuple(output_shape)

    x = Input((3,))
    y = Lambda(kproc_concat, kshape_concat)(x)
    m = Model(x, y)
    yp = m.predict_on_batch([[1, 2, 3], [3, 4, 8]])
    print(yp)
def main():
    """Run all four Lambda-layer demonstrations in sequence."""
    demos = [
        ('Lambda with lambda', Lambda_with_lambda),
        ('Lambda function', Lambda_function),
        ('Backend for Lambda', Backend_for_Lambda),
        ('TF for Lambda', TF_for_Lamda),
    ]
    for banner, demo in demos:
        print(banner)
        demo()
class SFC(Layer):
    """Simple fully-connected custom Keras layer: call(x) = x . w + b."""

    def __init__(self, No, **kwargs):
        # No: number of output units.
        self.No = No
        super().__init__(**kwargs)

    def build(self, inshape):
        # Trainable kernel (in_features, No) and bias (No,).
        # NOTE(review): `igu` and `iz` initializers are defined elsewhere
        # in the file — presumably glorot-uniform and zeros; confirm there.
        self.w = self.add_weight('w', (inshape[1], self.No), initializer=igu)
        self.b = self.add_weight('b', (self.No,), initializer=iz)
        super().build(inshape)

    def call(self, x):
        return (K.dot(x, self.w) + self.b)

    def compute_output_shape(self, inshape):
        # Batch dimension preserved; feature dimension becomes No.
        return (inshape[0], self.No)
def main():
    """Fit the custom SFC layer to y = 2x + 1 on two points, then predict
    the held-out points."""
    x = np.array([0, 1, 2, 3, 4])
    y = (x * 2) + 1
    model = keras.models.Sequential()
    model.add(SFC(1, input_shape=(1,)))
    model.compile('SGD', 'mse')
    # Train on the first two points only; the rest are held out.
    model.fit(x[:2], y[:2], epochs=1000, verbose=0)
    print('Targets:', y[2:])
    print('Predictions:', model.predict(x[2:]).flatten())
class DNN():
    """Two-hidden-layer softmax classifier built as a TF1-style graph
    (placeholders + session) using Keras layers as graph ops.
    """

    def __init__(self, Nin, Nh_l, Nout):
        # Graph inputs: feature vectors X and one-hot labels L.
        self.X_ph = tf.placeholder(tf.float32, shape=(None, Nin))
        self.L_ph = tf.placeholder(tf.float32, shape=(None, Nout))
        # Hidden stack: Dense+Dropout twice, then a softmax output layer.
        H = Dense(Nh_l[0], activation='relu')(self.X_ph)
        H = Dropout(0.5)(H)
        H = Dense(Nh_l[1], activation='relu')(H)
        H = Dropout(0.25)(H)
        self.Y_tf = Dense(Nout, activation='softmax')(H)
        # Loss, optimizer step, per-sample accuracy, and the variable
        # initializer op, all exposed for the training driver.
        self.Loss_tf = tf.reduce_mean(categorical_crossentropy(self.L_ph, self.Y_tf))
        self.Train_tf = tf.train.AdamOptimizer().minimize(self.Loss_tf)
        self.Acc_tf = categorical_accuracy(self.L_ph, self.Y_tf)
        self.Init_tf = tf.global_variables_initializer()
def Data_func():
    """Load MNIST, flatten images to 784-vectors scaled to [0, 1], and
    one-hot encode the labels.

    Returns:
        ((X_train, Y_train), (X_test, Y_test)) ready for a dense network.
    """
    (X_train, y_train), (X_test, y_test) = datasets.mnist.load_data()
    Y_train = np_utils.to_categorical(y_train)
    Y_test = np_utils.to_categorical(y_test)
    _, W, H = X_train.shape
    # Flatten each image and scale the 0..255 pixel values to [0, 1].
    X_train = X_train.reshape(-1, W * H) / 255.0
    X_test = X_test.reshape(-1, W * H) / 255.0
    return ((X_train, Y_train), (X_test, Y_test))
def run(model, data, sess, epochs, batch_size=100):
    """Train the placeholder-graph DNN with mini-batch SGD and report
    test loss/accuracy after every epoch.

    Args:
        model: DNN instance exposing *_ph placeholders and *_tf ops.
        data: ((X_train, Y_train), (X_test, Y_test)) tuple.
        sess: an active tf.Session.
        epochs: number of passes over the training set.
        batch_size: mini-batch size (a trailing partial batch is dropped).
    """
    (X_train, Y_train), (X_test, Y_test) = data
    sess.run(model.Init_tf)
    with sess.as_default():
        N_tr = X_train.shape[0]
        for epoch in range(epochs):
            for b in range(N_tr // batch_size):
                # Bug fix: slices used (b-1)..b, which made batch 0 the
                # empty slice X[-100:0] and skipped the final batch;
                # use b..(b+1) instead.
                X_tr_b = X_train[batch_size * b:batch_size * (b + 1)]
                Y_tr_b = Y_train[batch_size * b:batch_size * (b + 1)]
                model.Train_tf.run(feed_dict={model.X_ph: X_tr_b,
                                              model.L_ph: Y_tr_b,
                                              K.learning_phase(): 1})
            # Evaluate on the test set with dropout disabled (phase 0).
            loss = sess.run(model.Loss_tf,
                            feed_dict={model.X_ph: X_test,
                                       model.L_ph: Y_test,
                                       K.learning_phase(): 0})
            acc = model.Acc_tf.eval(feed_dict={model.X_ph: X_test,
                                               model.L_ph: Y_test,
                                               K.learning_phase(): 0})
            print('Epoch {0}: loss = {1:.3f}, acc = {2:.3f}'.format(
                epoch, loss, np.mean(acc)))
def main():
    """Build the placeholder-graph DNN and train it for 10 epochs using
    the module-level TF session."""
    Nin = 784
    Nh_l = [100, 50]
    Nout = 10  # number of classes
    data = Data_func()
    model = DNN(Nin, Nh_l, Nout)
    run(model, data, sess, 10, 100)
class CNN(Model):
    """Functional-API CNN that also exposes two feature sub-models:
    cl_part (up to the flattened conv features) and fl_part (up to the
    last hidden dense layer)."""

    def __init__(self, nb_classes, in_shape=None):
        self.nb_classes = nb_classes
        self.in_shape = in_shape
        # Build the graph first so self.x / self.y exist for Model.__init__.
        self.build_model()
        super().__init__(self.x, self.y)
        self.compile()

    def build_model(self):
        """Assemble the layer graph and record input/output plus taps."""
        x = Input(self.in_shape)
        h = Conv2D(32, kernel_size=(3, 3), activation='relu',
                   input_shape=self.in_shape)(x)
        h = Conv2D(64, (3, 3), activation='relu')(h)
        h = MaxPooling2D(pool_size=(2, 2))(h)
        h = Dropout(0.25)(h)
        h = Flatten()(h)
        z_cl = h  # tap: flattened convolutional features
        h = Dense(128, activation='relu')(h)
        h = Dropout(0.5)(h)
        z_fl = h  # tap: last hidden representation
        y = Dense(self.nb_classes, activation='softmax', name='preds')(h)
        self.cl_part = Model(x, z_cl)
        self.fl_part = Model(x, z_fl)
        self.x, self.y = x, y

    def compile(self):
        """Compile with fixed loss/optimizer (overrides Model.compile)."""
        Model.compile(self, loss='categorical_crossentropy',
                      optimizer='adadelta', metrics=['accuracy'])