index
int64 | repo_name
string | branch_name
string | path
string | content
string | import_graph
string |
|---|---|---|---|---|---|
28,925,898
|
sidney-tio/rl-playground
|
refs/heads/master
|
/rl_trainer.py
|
import copy
import os
import logging
import sys
import random
import torch
import numpy as np
import torch.nn as nn
import torch.nn.functional as F
from torch import optim
from rl_networks import ConvMLPNetwork
from utilities.rl_utils import flip_array, vectorize_world_state, setup_logger, init_layers
from utilities.Epsilon_Greedy_Exploration import Epsilon_Greedy_Exploration
from utilities.Utility_Functions import normalise_rewards, create_actor_distribution
class PPOTrainer():
    """Master class to orchestrate training of PPO Algorithm in Overcooked AI"""

    def __init__(self, config):
        # config is expected to expose: results_filepath, seed, use_GPU,
        # reward_horizon, old_policy_path, save_model, model_path, train,
        # and a hyperparameters dict -- TODO confirm full contract.
        self.config = config
        self.logger = setup_logger(self.config.results_filepath)
        self.set_random_seeds(self.config.seed)
        self.device = "cuda:0" if self.config.use_GPU and torch.cuda.is_available() else "cpu"
        self.exploration_strategy = Epsilon_Greedy_Exploration(self.config)
        self.episode_number = 0
        self.timesteps = 0
        self.reward_horizon = self.config.reward_horizon
        # Full history accumulated across learning rounds.
        self.all_states = []
        self.all_actions = []
        self.all_rewards = []
        # Buffers for the current learning round (one entry per agent per episode).
        self.states_batched = []
        self.actions_batched = []
        self.rewards_batched = []
        self.average_score_required_to_win = 100  # reward for 1 serve
        # Networks are built lazily in first_step(), once the number of
        # agents (and hence the observation layers) is known.
        self.init_step = False

    def first_step(self, world_state):
        """Lazily builds the new/old policy networks and the optimiser on the
        first observed world state (obs_space depends on the agent count)."""
        self.layers = init_layers(len(world_state['agents']))
        self.config.hyperparameters["obs_space"] = len(self.layers)
        self.policy_new = self.create_NN(self.config.hyperparameters["obs_space"],
                                         self.config.hyperparameters["action_space"],
                                         self.config.hyperparameters["nn_params"])
        self.policy_old = self.create_NN(self.config.hyperparameters["obs_space"],
                                         self.config.hyperparameters["action_space"],
                                         self.config.hyperparameters["nn_params"])
        # Optionally warm-start the new policy from a saved checkpoint.
        if self.config.old_policy_path:
            self.policy_new.load_state_dict(torch.load(self.config.old_policy_path))
        # The old policy always starts as an exact copy of the new policy.
        self.policy_old.load_state_dict(copy.deepcopy(self.policy_new.state_dict()))
        self.policy_new_optim = optim.Adam(
            self.policy_new.parameters(), lr=self.config.hyperparameters['learning_rate'], eps=1e-4)
        self.init_step = True

    def init_batch_lists(self):
        """Archives the current round's buffers into the all_* history lists
        and resets the per-round buffers."""
        if self.states_batched:
            self.all_states.extend(self.states_batched)
            self.all_actions.extend(self.actions_batched)
            self.all_rewards.extend(self.rewards_batched)
        self.states_batched = []
        self.actions_batched = []
        self.rewards_batched = []

    def setup_agents(self, agent_list):
        """Registers the agent ids that will play and resets per-episode state."""
        self.agents = agent_list
        self.num_agents = len(agent_list)
        self.reset_game()

    def update_learning_rate(self, starting_lr, optimizer):
        """Lowers the learning rate according to how close we are to the solution"""
        if len(self.rewards_batched) > 0:
            # Last cumulative reward of the most recent episode in the batch.
            last_rolling_score = self.rewards_batched[-1][-1]
            if last_rolling_score > 0.75 * self.average_score_required_to_win:
                new_lr = starting_lr / 100.0
            elif last_rolling_score > 0.6 * self.average_score_required_to_win:
                new_lr = starting_lr / 20.0
            elif last_rolling_score > 0.5 * self.average_score_required_to_win:
                new_lr = starting_lr / 10.0
            elif last_rolling_score > 0.25 * self.average_score_required_to_win:
                new_lr = starting_lr / 2.0
            else:
                new_lr = starting_lr
            for g in optimizer.param_groups:
                g['lr'] = new_lr
            # Log only occasionally to avoid flooding the log file.
            if random.random() < 0.001:
                self.logger.info("Learning rate {}".format(new_lr))

    def reset_game(self):
        """Clears the per-episode state/action/reward traces for every agent."""
        self.current_episode_state = {}
        self.current_episode_action = {}
        self.current_episode_reward = {}
        for agent in self.agents:
            self.current_episode_state[agent] = []
            self.current_episode_action[agent] = []
            self.current_episode_reward[agent] = []

    def pick_action(self, state, exploration_episilon):
        """Returns an action for the given vectorised state: uniformly random
        with probability epsilon (if random_policy is enabled), otherwise
        sampled from the new policy's categorical distribution."""
        if self.config.hyperparameters['random_policy'] and random.random() <= exploration_episilon:
            action = random.randint(0, self.config.hyperparameters['action_space'] - 1)
            return action
        state = torch.from_numpy(state).float()
        actor_output = self.policy_new.forward(state)
        action_distribution = create_actor_distribution(
            "DISCRETE", actor_output, self.config.hyperparameters['action_space'])
        action = action_distribution.sample().cpu()
        return action.item()

    def step(self, agent_id, world_state):
        """Chooses an action for agent_id given the raw world state and records
        the (state, action) pair for the current episode."""
        # Build networks on the very first call, when obs_space becomes known.
        if not self.init_step:
            self.first_step(world_state)
        self.exploration_epsilon = self.exploration_strategy.get_updated_epsilon_exploration(
            {"episode_number": self.episode_number})
        world_state_np = vectorize_world_state(world_state, self.layers)
        # Flip the observation so it is always from this agent's perspective.
        flipped_arr = flip_array(agent_id, world_state_np, self.layers)
        action = self.pick_action(flipped_arr, self.exploration_epsilon)
        self.current_episode_state[agent_id].append(flipped_arr[0])
        self.current_episode_action[agent_id].append(action)
        return action

    def policy_learn(self):
        """Runs one PPO learning round over the batched episodes, optionally
        saves the model, then archives/clears the batch buffers."""
        all_discounted_returns = self.calculate_all_discounted_returns()
        if self.config.hyperparameters["normalise_rewards"]:
            all_discounted_returns = normalise_rewards(all_discounted_returns)
        # number of epochs
        for _ in range(self.config.hyperparameters["learning_iterations_per_round"]):
            all_ratio_of_policy_probabilities = self.calculate_all_ratio_of_policy_probabilities()
            loss = self.calculate_loss([all_ratio_of_policy_probabilities], all_discounted_returns)
            self.take_policy_new_optimisation_step(loss)
        if self.config.save_model:
            torch.save(self.policy_new.state_dict(), self.config.model_path)
        self.init_batch_lists()

    def end_episode(self):
        """Closes out an episode: batches its traces, triggers a learning round
        every episodes_per_learning_round episodes, writes results, resets."""
        self.episode_number += 1
        self.logger.info("Epsilon = {:.4f} @ Episode {}".format(
            self.exploration_epsilon, self.episode_number))
        self.states_batched.extend(list(self.current_episode_state.values()))
        self.actions_batched.extend(list(self.current_episode_action.values()))
        self.rewards_batched.extend(list(self.current_episode_reward.values()))
        if (self.episode_number % self.config.hyperparameters['episodes_per_learning_round'] == 0) and (self.config.train):
            self.policy_learn()
            self.update_learning_rate(
                self.config.hyperparameters['learning_rate'], self.policy_new_optim)
            # After learning, the old policy catches up with the new one.
            self.equalise_policies()
        self.write_results()
        self.reset_game()
        self.logger.info("======== END OF EPISODE =======")

    def calculate_all_ratio_of_policy_probabilities(self):
        """For each action calculates the ratio of the probability that the new policy would have picked the action vs.
        the probability the old policy would have picked it. This will then be used to inform the loss"""
        all_states = [state for states in self.states_batched for state in states]
        all_actions = [[action] for actions in self.actions_batched for action in actions]
        all_states = torch.stack([torch.Tensor(states).float().to(self.device)
                                  for states in all_states])
        all_actions = torch.stack([torch.Tensor(actions).float().to(self.device)
                                   for actions in all_actions])
        # NOTE(review): this reshapes the (N, 1) action tensor to
        # (-1, N); presumably to give log_prob one row of N actions --
        # confirm against the distribution's expected shape.
        all_actions = all_actions.view(-1, len(all_states))
        new_policy_distribution_log_prob = self.calculate_log_probability_of_actions(
            self.policy_new, all_states, all_actions)
        old_policy_distribution_log_prob = self.calculate_log_probability_of_actions(
            self.policy_old, all_states, all_actions)
        # Small epsilon in the denominator guards against division by zero.
        ratio_of_policy_probabilities = torch.exp(
            new_policy_distribution_log_prob) / (torch.exp(old_policy_distribution_log_prob) + 1e-8)
        return ratio_of_policy_probabilities

    def calculate_log_probability_of_actions(self, policy, states, actions):
        """Calculates the log probability of an action occuring given a policy and starting state"""
        policy_output = policy.forward(states).to(self.device)
        policy_distribution = create_actor_distribution(
            "DISCRETE", policy_output, self.config.hyperparameters["action_space"])
        policy_distribution_log_prob = policy_distribution.log_prob(actions)
        return policy_distribution_log_prob

    def calculate_loss(self, all_ratio_of_policy_probabilities, all_discounted_returns):
        """Calculates the PPO loss"""
        all_ratio_of_policy_probabilities = torch.squeeze(
            torch.stack(all_ratio_of_policy_probabilities))
        # Clamp to a huge finite range purely to avoid inf/NaN propagation.
        all_ratio_of_policy_probabilities = torch.clamp(input=all_ratio_of_policy_probabilities,
                                                        min=-sys.maxsize,
                                                        max=sys.maxsize)
        all_discounted_returns = torch.tensor(
            all_discounted_returns).to(all_ratio_of_policy_probabilities)
        # Standard PPO clipped surrogate objective: take the pessimistic
        # (elementwise minimum) of the unclipped and clipped terms.
        potential_loss_value_1 = all_discounted_returns * all_ratio_of_policy_probabilities
        potential_loss_value_2 = all_discounted_returns * \
            self.clamp_probability_ratio(all_ratio_of_policy_probabilities)
        loss = torch.min(potential_loss_value_1, potential_loss_value_2)
        # Negate because the optimiser minimises but we maximise the objective.
        loss = -torch.mean(loss)
        self.logger.info(f'Loss: {loss}')
        return loss

    def take_policy_new_optimisation_step(self, loss):
        """Takes an optimisation step for the new policy"""
        self.policy_new_optim.zero_grad()  # reset gradients to 0
        loss.backward()  # this calculates the gradients
        torch.nn.utils.clip_grad_norm_(self.policy_new.parameters(), self.config.hyperparameters[
            "gradient_clipping_norm"])  # clip gradients to help stabilise training
        self.policy_new_optim.step()  # this applies the gradients

    def clamp_probability_ratio(self, value):
        """Clamps a value between a certain range determined by hyperparameter clip epsilon"""
        return torch.clamp(input=value, min=1.0 - self.config.hyperparameters["clip_epsilon"],
                           max=1.0 + self.config.hyperparameters["clip_epsilon"])

    def equalise_policies(self):
        """Sets the old policy's parameters equal to the new policy's parameters"""
        for old_param, new_param in zip(self.policy_old.parameters(), self.policy_new.parameters()):
            old_param.data.copy_(new_param.data)

    def create_NN(self, input_dim, output_dim, hyperparameters):
        """Factory for the conv+MLP policy network."""
        return ConvMLPNetwork(input_dim, output_dim, hyperparameters)

    def anneal_reward(self, reward):
        """Scales a positive reward down linearly as timesteps approach the
        reward horizon; a -1 penalty is passed through unchanged."""
        if reward == -1:
            return -1
        else:
            reward_annealed = reward * (self.reward_horizon - self.timesteps)/self.reward_horizon
            return reward_annealed

    def receive_rewards(self, rewards):
        """Appends the annealed, cumulative reward for each agent for this
        timestep, then advances the global timestep counter."""
        for agent_id in self.agents:
            reward_annealed = self.anneal_reward(rewards[agent_id])
            if not self.current_episode_reward[agent_id]:
                self.current_episode_reward[agent_id].append(reward_annealed)
            else:
                # Rewards are stored as a running (cumulative) sum.
                accum_rewards = self.current_episode_reward[agent_id][-1] + reward_annealed
                self.current_episode_reward[agent_id].append(accum_rewards)
        self.timesteps += 1

    def set_random_seeds(self, random_seed=None):
        """Sets all possible random seeds so results can be reproduced"""
        # NOTE(review): `not random_seed` is also true for seed 0, so an
        # explicit seed of 0 would be replaced by a random one -- confirm
        # whether `random_seed is None` was intended.
        if not random_seed:
            random_seed = np.random.randint(100)
        self.logger.info("Random seed @ {}".format(random_seed))
        os.environ['PYTHONHASHSEED'] = str(random_seed)
        torch.backends.cudnn.deterministic = True
        torch.backends.cudnn.benchmark = False
        torch.manual_seed(random_seed)
        random.seed(random_seed)
        np.random.seed(random_seed)
        if torch.cuda.is_available():
            torch.cuda.manual_seed_all(random_seed)
            torch.cuda.manual_seed(random_seed)

    def calculate_all_discounted_returns(self):
        """Calculates the cumulative discounted return for each episode which we will then use in a learning iteration"""
        all_discounted_returns = []
        for episode in range(len(self.states_batched)):
            # Seed with 0 so the backward recursion has a base case; the
            # seed element is dropped below.
            discounted_returns = [0]
            for ix in range(len(self.states_batched[episode])):
                # Walk the rewards backwards, discounting as we go.
                return_value = self.rewards_batched[episode][-(
                    ix + 1)] + self.config.hyperparameters["discount_rate"]*discounted_returns[-1]
                discounted_returns.append(return_value)
            discounted_returns = discounted_returns[1:]
            # Reverse back into chronological order before extending.
            all_discounted_returns.extend(discounted_returns[::-1])
        return all_discounted_returns

    def write_to_output(self, mode, array):
        """Persists episode data: states go to per-agent .npy files, anything
        else is appended as text to <results_filepath><mode>.txt."""
        if mode == 'state':
            for agent, state in array.items():
                filepath = f"{self.config.results_filepath}state/agent{agent}_{self.episode_number}.npy"
                # NOTE(review): np.save is given the path, not the open
                # handle, so the file is effectively opened twice; the
                # explicit close() inside `with` is also redundant.
                with open(filepath, 'wb+') as output:
                    np.save(filepath, np.array(state))
                    output.close()
        else:
            filepath = self.config.results_filepath + mode + '.txt'
            with open(filepath, 'a+') as output:
                output.write(str(array) + '\n')
                output.close()

    def write_results(self):
        """Dumps the current episode's states, rewards and actions to disk."""
        self.write_to_output('state', self.current_episode_state)
        self.write_to_output('reward', self.current_episode_reward)
        self.write_to_output('action', self.current_episode_action)

    def log_explicit_results(self, chop_rewards, cook_rewards, serve_rewards):
        """Logs the explicit chop/cook/serve reward breakdown and appends it
        to the 'explicit' output file."""
        self.logger.info(f'Current explicit CHOP rewards: {chop_rewards}')
        self.logger.info(f'Current explicit COOK rewards: {cook_rewards}')
        self.logger.info(f'Current explicit SERVE rewards: {serve_rewards}')
        current_episode_explicit = (chop_rewards, cook_rewards, serve_rewards)
        self.write_to_output('explicit', current_episode_explicit)
|
{"/env.py": ["/rl_trainer.py"], "/rl_trainer.py": ["/ac_base.py", "/rl_networks.py"], "/ac_base.py": ["/rl_networks.py"]}
|
28,925,899
|
sidney-tio/rl-playground
|
refs/heads/master
|
/env.py
|
import gym

# Minimal smoke test: run CartPole with random actions for 100 steps,
# printing each (observation, reward, done, info) tuple from env.step().
env = gym.make('CartPole-v0')
env.reset()
for _ in range(100):
    env.render()
    print(env.step(env.action_space.sample()))
env.close()
|
{"/env.py": ["/rl_trainer.py"], "/rl_trainer.py": ["/ac_base.py", "/rl_networks.py"], "/ac_base.py": ["/rl_networks.py"]}
|
28,985,455
|
ekaster/Project2-2020-US-Presidential-Election
|
refs/heads/master
|
/app.py
|
from flask import render_template
from flask import Flask, jsonify
from fetch_from_db import fetch_states, fetch_national, fetch_popular, fetch_table
from bson import json_util, ObjectId
from flask.json import JSONEncoder
# JSON encoder that delegates to bson's json_util so Mongo documents
# (ObjectId, dates, ...) serialise cleanly through jsonify.
class CustomJSONEncoder(JSONEncoder):
    def default(self, obj): return json_util.default(obj)

app = Flask(__name__)
app.static_folder = 'static'
app.json_encoder = CustomJSONEncoder

# Define routes
# Homepage
@app.route("/")
def welcome():
    return render_template("index.html")

# Comparisons
@app.route("/comparisons")
def comparisons():
    return render_template("comparisons.html")

# Timeline/Table
@app.route("/timeline")
def timeline():
    return render_template("timeline.html")

# Votepower
@app.route("/votepower")
def votepower():
    return render_template("votepower.html")

# Data Table
@app.route("/datatable")
def datatable():
    return render_template("datatable.html")

# About/Resources/Sources
@app.route("/about")
def about():
    return render_template("about.html")

# API Homepage: plain-HTML index of the available JSON endpoints.
@app.route('/api/v1.0')
def apis():
    return(
        f'<h1 align=center>2020 Presidential Election API</h1><br/>'
        f'<b>List of available routes </b> - <i>access data using paths below:</i><br/>'
        f'<a href="/api/v1.0/states">/api/states</a><br/>'
        f'<a href="/api/v1.0/national">/api/national</a><br/>'
        f'<a href="/api/v1.0/popular">/api/popular</a><br/>'
        f'<a href="/api/v1.0/table">/api/table</a><br/>'
    )

# All API items: each endpoint returns one Mongo collection as JSON.
@app.route('/api/v1.0/states')
def get_items():
    states = fetch_states()
    return jsonify(states)

@app.route('/api/v1.0/national')
def get_national():
    national = fetch_national()
    return jsonify(national)

@app.route('/api/v1.0/popular')
def get_popular():
    popular = fetch_popular()
    return jsonify(popular)

@app.route('/api/v1.0/table')
def get_table():
    table = fetch_table()
    return jsonify(table)

if __name__ == '__main__':
    app.run(debug=True)
|
{"/app.py": ["/fetch_from_db.py"]}
|
28,985,456
|
ekaster/Project2-2020-US-Presidential-Election
|
refs/heads/master
|
/fetch_from_db.py
|
import pymongo
from pymongo import MongoClient
# from secrets import credentials
def get_db():
    """Connect to the MongoDB Atlas cluster and return the project2_ database."""
    # NOTE(review): credentials are hard-coded in the URI -- move them into
    # configuration/secrets before deploying.
    connection = f'mongodb+srv://dbUser:Project2@cluster0.zfx73.mongodb.net/project2_?retryWrites=true&w=majority'
    return MongoClient(connection).project2_
def fetch_states():
    """Return the state toplines (state, vpi, tipping) as a list of documents."""
    projection = {"state": 1, "vpi": 1, "tipping": 1}
    return list(get_db().presidential_state_toplines_2020.find({}, projection))
def fetch_popular():
    """Return per-state vote-share documents (modeldate, state, inc/chal share)."""
    projection = {"modeldate": 1, "state": 1, "voteshare_inc": 1, "voteshare_chal": 1}
    return list(get_db().presidential_state_toplines_2020.find({}, projection))
def fetch_national():
    """Return the national toplines (modeldate, electoral votes inc/chal)."""
    projection = {"modeldate": 1, "ev_inc": 1, "ev_chal": 1}
    return list(get_db().presidential_national_toplines_2020.find({}, projection))
def fetch_table():
    """Return the full per-state table used by the datatable page."""
    projection = {"modeldate": 1, "state": 1, "tipping": 1, "vpi": 1, "winstate_inc": 1,
                  "winstate_chal": 1, "voteshare_inc": 1, "voteshare_chal": 1, "margin": 1}
    return list(get_db().presidential_state_toplines_2020.find({}, projection))
# Manual smoke test: dump the states and national collections to stdout.
if __name__ == '__main__':
    print(fetch_states())
    print(fetch_national())
|
{"/app.py": ["/fetch_from_db.py"]}
|
29,005,726
|
Trateotu/course-rl-project
|
refs/heads/main
|
/main.py
|
from constants import *
from env import Simulator
from agent import Net
def main():
    """Train a DQN-style agent sequentially over a set of mazes, logging
    exploration/test figures to TensorBoard and saving Q-value grids."""
    # load datasets
    mazes = np.load('datasets/mazes.npy')
    paths_length = np.load('datasets/paths_length.npy')
    agent = Net().to(device)
    # sim = Simulator(mazes[0])
    # sim = Simulator(mazes[314])
    # T = int(paths_length[0]*3)
    # T = int(paths_length[0] * 2)
    # frq = np.zeros((GRID_SIZE, GRID_SIZE))
    # One 66x66 Q-value grid per maze -- TODO confirm the 66x66 shape
    # matches agent.get_Q_grid's output.
    Q_values_mazes = np.zeros((mazes.shape[0], 66, 66))
    # train over multiple MDPs
    for i, maze in enumerate(tqdm(mazes)):
        # define simulator, horizon (maximum number of steps per training episode) and set epsilon parameter to its initial value every new MDP
        sim = Simulator(maze, i)
        T = int(paths_length[i] * 3)
        frq = np.zeros((GRID_SIZE, GRID_SIZE))  # This grid is used to visualize which regions of the maze the agent visits the most
        agent.epsilon = agent.epsilon0
        # start training of the maze
        for e in range(EPISODES):
            sim.reset()
            st = np.expand_dims(np.expand_dims(sim.grid.copy(), 0), 0)
            # st = np.expand_dims(np.reshape(sim.grid.copy(), -1), 0)
            tot_reward = 0
            final_r = 0
            # move in the maze for at most T steps following the exploration strategy (epsilon greedy) and push to the memory buffer each step
            for t in range(T):
                frq[sim.actual_pos_x][sim.actual_pos_y] += 1
                a = agent.get_action(st)
                r, done = sim.step(a)
                st1 = np.expand_dims(np.expand_dims(sim.grid.copy(), 0), 0)
                # st1 = np.expand_dims(np.reshape(sim.grid.copy(), -1), 0)
                tot_reward += r
                final_r = r
                # Transition stored as (s, a, r, not-done, s'); the
                # inverted done flag presumably acts as a continuation mask.
                agent.push_memory(st, a, r, (not done), st1)
                if done:
                    break
                st = st1
            # Update the networks
            agent.update_Q()
            agent.update_target(e, sim.grid.copy())
            agent.write_reward(tot_reward, final_r)
            # perform a test of the policy where there is no exploration
            if e % 1000 == 999:
                sim.reset()
                st = np.expand_dims(np.expand_dims(sim.grid.copy(), 0), 0)
                # st = np.expand_dims(np.reshape(sim.grid.copy(), -1), 0)
                tot_reward = 0
                # Start from the maze layout scaled so walls show up
                # distinctly when the visit counts are added.
                grid_frq = -10 * sim.grid.copy()
                for t in range(T):
                    a = agent.get_action(st, test=True)
                    r, done = sim.step(a)
                    st1 = np.expand_dims(np.expand_dims(sim.grid.copy(), 0), 0)
                    # st1 = np.expand_dims(np.reshape(sim.grid.copy(), -1), 0)
                    tot_reward += r
                    if done:
                        break
                    grid_frq[sim.actual_pos_x, sim.actual_pos_y] += 1
                    st = st1
                agent.writer.add_scalar("Test reward", tot_reward, int(i * 50 + e / 1000))
                fig1 = plt.figure()
                plt.imshow(frq)
                agent.writer.add_figure('Exploration frq', fig1, int(i * 50 + e / 1000))
                fig2 = plt.figure()
                plt.imshow(grid_frq)
                agent.writer.add_figure("Test path", fig2, int(i * 50 + e / 1000))
        # Once trained in a new maze, test the perfrormance in the previous mazes.
        if i != 0:
            tot_reward = 0
            for x, temp_maze in enumerate(mazes[:i]):
                sim = Simulator(temp_maze, x)
                T = int(paths_length[x] * 3)
                # sim.reset()
                st = np.expand_dims(np.expand_dims(sim.grid.copy(), 0), 0)
                # st = np.expand_dims(np.reshape(sim.grid.copy(), -1), 0)
                tmp_reward = 0
                for t in range(T):
                    a = agent.get_action(st, test=True)
                    r, done = sim.step(a)
                    st1 = np.expand_dims(np.expand_dims(sim.grid.copy(), 0), 0)
                    # st1 = np.expand_dims(np.reshape(sim.grid.copy(), -1), 0)
                    tot_reward += r
                    tmp_reward += r
                    if done:
                        break
                    st = st1
                print('maze: ' + str(x) + ' reward: ' + str(tmp_reward), end=' ')
            print()
            agent.writer.add_scalar("Previous mazes average reward", tot_reward / i, int(i))
        # Snapshot the learned Q-values for this maze and persist progress
        # after every maze so a crash doesn't lose earlier results.
        Q_values_mazes[i] = agent.get_Q_grid(maze)
        np.save('Q_values_retraining_NO_eps_decay.npy', Q_values_mazes)
        print()

if __name__ == '__main__':
    main()
|
{"/agent.py": ["/constants.py"], "/meta_agent.py": ["/constants.py"], "/maml.py": ["/constants.py", "/meta_agent.py", "/maze_gen.py"], "/main.py": ["/constants.py", "/agent.py", "/maze_gen.py"]}
|
29,005,727
|
Trateotu/course-rl-project
|
refs/heads/main
|
/constants.py
|
from tqdm import tqdm
import torch
import torch.nn as nn
import torch.optim as optim
from torch.utils.tensorboard import SummaryWriter
import random
import numpy as np
import matplotlib.pyplot as plt
from scipy import interpolate
# Shared training device: first CUDA GPU when available, else CPU.
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
# Grid cell markers used when encoding a maze as a numeric array.
POS_VAL = 20
GOAL_VAL = 10
OBSTACLE_VAL = 1  # FIXED
GRID_SIZE = 22  # FIXED
# Reward issued on a terminal failure -- presumably death/invalid move; verify in env.
RWD_DEATH = 0
# Training episodes per maze.
EPISODES = 50000
# TensorBoard log directory for this experiment run.
LOG_DIR = './logs/exp5'
|
{"/agent.py": ["/constants.py"], "/meta_agent.py": ["/constants.py"], "/maml.py": ["/constants.py", "/meta_agent.py", "/maze_gen.py"], "/main.py": ["/constants.py", "/agent.py", "/maze_gen.py"]}
|
29,089,290
|
Martincic/kova-je-nasa
|
refs/heads/master
|
/slave.py
|
import time
import board
import digitalio
from circuitpython_nrf24l01.rf24 import RF24
from Sensors import Sensors
# change these (digital output) pins accordingly
ce = digitalio.DigitalInOut(board.D17)
csn = digitalio.DigitalInOut(board.D16)
# using board.SPI() automatically selects the MCU's
# available SPI pins, board.SCK, board.MOSI, board.MISO
spi = board.SPI()  # init spi bus object
nrf = RF24(spi, csn, ce)
nrf.ack = True  # enable ack upon recieving packets
# set power level
nrf.pa_level = -12
# addresses needs to be in a buffer protocol object (bytearray)
address = [b"1Node", b"2Node"]
# using bool so TX and RX can switch with simple not
radio_number = True
# set TX address of RX node into the TX pipe
nrf.open_tx_pipe(address[radio_number])  # always uses pipe 0
# set RX address of TX node into an RX pipe
nrf.open_rx_pipe(1, address[not radio_number])  # using pipe 1

def listen(timeout=3):
    """Listen for incoming questions for `timeout` seconds and reply with the
    matching sensor answer; the timeout resets after every handled request."""
    nrf.listen = True  # put radio into RX mode and power up
    start_timer = time.monotonic()  # used as a timeout
    while (time.monotonic() - start_timer) < timeout:
        if nrf.available():
            length = nrf.any()  # grab payload length info
            question = nrf.read(length)  # clears info from any() and nrf.pipe
            nrf.listen = False  # put the radio in TX mode
            result = False
            ack_timeout = time.monotonic_ns() + 200000000
            while not result and time.monotonic_ns() < ack_timeout:
                # try to send reply for 200 milliseconds (at most)
                answer = bytes(Sensors.getAnswer(question), 'utf-8')  # convert answer to bytes
                result = nrf.send(answer)
            nrf.listen = True  # put the radio back in RX mode
            if not result:
                print("Response failed or timed out")
            start_timer = time.monotonic()  # reset timeout
    nrf.listen = False  # put the nRF24L01 in TX mode + Standby-I power state

# NOTE(review): this rebinds the imported Sensors *class* name to an
# instance; it works once but shadows the class for the rest of the module.
Sensors = Sensors()

if __name__ == "__main__":
    try:
        # Refresh cached sensor answers, then serve requests, forever.
        while True:
            Sensors.populateAnswers()
            listen()
    except KeyboardInterrupt:
        print(" Keyboard Interrupt detected. Powering down radio...")
        nrf.power = False
|
{"/master.py": ["/Database.py"], "/slave.py": ["/Sensors.py"]}
|
29,089,291
|
Martincic/kova-je-nasa
|
refs/heads/master
|
/master.py
|
import time
import board
import digitalio
from Database import Database
from circuitpython_nrf24l01.rf24 import RF24
# change these (digital output) pins accordingly
ce = digitalio.DigitalInOut(board.D4)
csn = digitalio.DigitalInOut(board.D5)
# using board.SPI() automatically selects the MCU's
# available SPI pins, board.SCK, board.MOSI, board.MISO
spi = board.SPI()  # init spi bus object
nrf = RF24(spi, csn, ce)
nrf.ack = True  # enable ack upon recieving packets
# set power level
nrf.pa_level = -12
# addresses needs to be in a buffer protocol object (bytearray)
address = [b"1Node", b"2Node"]
# using bool so TX and RX can switch with simple not
radio_number = True
# set TX address of RX node into the TX pipe
nrf.open_tx_pipe(address[radio_number])  # always uses pipe 0
# set RX address of TX node into an RX pipe
nrf.open_rx_pipe(1, address[not radio_number])  # using pipe 1

def askQuestion(question, count=5):  # count = times question is asked
    """Transmit `question`, wait up to 200ms for a reply, and return the raw
    reply bytes; retries up to `count` times and returns None implicitly on
    total failure."""
    nrf.listen = False  # ensures the nRF24L01 is in TX mode
    while count:
        # construct a payload to send
        buffer = bytes(question, 'utf-8')
        answer = nrf.send(buffer)  # save the response (ACK payload)
        if not answer:
            print("send() failed or timed out")
        else:  # sent successful; listen for a response
            nrf.listen = True  # switch to RX mode
            timeout = time.monotonic_ns() + 200000000  # set timeout 200ms
            while not nrf.available() and time.monotonic_ns() < timeout:
                # this loop hangs until response is received or timed out
                pass
            nrf.listen = False  # switch to TX mode
            print(
                "Transmission successful! Sent: {}?".format(
                    buffer.decode("utf-8")
                ),
                end=" ",
            )
            if nrf.pipe is None:  # is there a payload?
                print("Received no response.")
            else:
                length = nrf.any()
                pipe_number = nrf.pipe
                received = nrf.read()  # grab the response & return it
                print("Receieved: {}".format(bytes(received).decode("utf-8")))
                return received
        count -= 1

if __name__ == "__main__":
    # array of questions/sensors/database tables (they match exactly)
    questions = ['temp', 'humid', 'pressure']
    Connection = Database()  # init database class
    try:
        # Poll each sensor in turn and store the decoded reading; a missing
        # reply (askQuestion returned None) raises AttributeError on
        # .decode and is deliberately skipped.
        while True:
            for question in questions:
                try:
                    answer = askQuestion(question)
                    answer = answer.decode("utf-8")
                    Connection.storeValue(question, answer)
                except AttributeError:
                    pass
            time.sleep(5)
    except KeyboardInterrupt:
        print(" Keyboard Interrupt detected. Powering down radio...")
        nrf.power = False
|
{"/master.py": ["/Database.py"], "/slave.py": ["/Sensors.py"]}
|
29,117,310
|
castaned/gem-daq-code
|
refs/heads/develop
|
/gemdaq-testing/setup/scripts/python/ctp7_test.py
|
#!/bin/env python
import sys, re
import time, datetime, os
sys.path.append('${GEM_PYTHON_PATH}')
import uhal
from registers_uhal import *
#from glib_clock_src import *
#from optparse import OptionParser
#parser = OptionParser()
#(options, args) = parser.parse_args()
# NOTE(review): this script uses Python 2 print statements; it will not run
# under Python 3 without conversion.
uhal.setLogLevelTo( uhal.LogLevel.FATAL )
# NOTE(review): ipaddr is only used for display below; the actual connection
# goes through the IPBus-over-TCP URI.
ipaddr = '192.168.250.53'
address_table = "file://${GEM_ADDRESS_TABLE_PATH}/glib_address_table.xml"
uri = "ipbustcp-2.0://eagle45:60002"
ctp7 = uhal.getDevice( "CTP7" , uri, address_table )
########################################
# IP address
########################################
print
print "--=======================================--"
print " Opening CTP7 with IP", ipaddr
print "--=======================================--"
print
print
print "--=======================================--"
print "-> DAQ INFORMATION"
print "--=======================================--"
print
# Dump the DAQ control/status registers via the uHAL register helpers.
print "-> DAQ control reg :0x%08x"%(readRegister(ctp7,"GLIB.DAQ.CONTROL"))
print "-> DAQ status reg :0x%08x"%(readRegister(ctp7,"GLIB.DAQ.STATUS"))
|
{"/ldqm-browser/LightDQM/LightDQM/urls.py": ["/ldqm-browser/LightDQM/LightDQM/views.py"]}
|
29,144,831
|
lyclqq/BreezeAdmin
|
refs/heads/master
|
/config.py
|
import os
import datetime
# Flask debug mode -- disable in production.
DEBUG=True
# Fresh random secret on every start: existing sessions are invalidated on restart.
SECRET_KEY =os.urandom(24)
# Sessions expire after 20 minutes of inactivity.
PERMANENT_SESSION_LIFETIME = datetime.timedelta(minutes=20)
# Windows-style relative upload path -- presumably deployed on Windows; verify.
UPLOAD_FOLDER='static\\files\\'
|
{"/main.py": ["/common/__init__.py", "/controller/admin.py"]}
|
29,144,832
|
lyclqq/BreezeAdmin
|
refs/heads/master
|
/main.py
|
#coding=utf-8
from flask import Flask,render_template,request,make_response,session,jsonify,current_app,redirect,flash,url_for
from io import BytesIO
from common import LoginForm,getKey,getVerifyCode,userLogin
app=Flask(__name__,static_url_path='/',template_folder='templates')
app.config.from_pyfile("config.py")

# Custom 404 page
@app.errorhandler(404)
def page_not_found(e):
    return 'there is not'

# Check login status before every request
@app.before_request
def islogin():
    url = request.path
    # Paths and static assets that do not require authentication
    pass_list = ['/login','/code','/','/imgCode','/css','/fonts','/img','/static/js','/ueditor']
    suffix = url.endswith('.png') or url.endswith('.jpg') or url.endswith('.css')
    if request.path in pass_list or suffix:
        return None
    if not session.get("username"):
        return redirect("/login")

@app.route('/imgCode')
def imgCode():
    # Serve a freshly generated CAPTCHA image.
    return getImgCode()

@app.route('/login',methods=['POST','GET'])
def login():
    """Login page: on POST, verify the CAPTCHA first, then the credentials."""
    form = LoginForm()
    if request.method == 'POST':
        captcha = request.form.get('verify_code')
        username = request.form.get('username')
        password = request.form.get('password')
        if session.get('imageCode')==captcha:
            if userLogin(username=username,password=password):
                # Login succeeded -- redirect to the admin page.
                return redirect(url_for('admin.admin'))
            else:
                flash('用户名或密码错误')
                return render_template('login.html', form=form)
        else:
            flash('验证码错误')
            return render_template('login.html',form=form)
    else:
        return render_template('login.html',form=form)

# Generate the CAPTCHA image response
def getImgCode():
    """Render the CAPTCHA key as a JPEG and return it as an image response."""
    imgKey = getKey()
    image = getVerifyCode(imgKey)
    buf = BytesIO()
    image.save(buf, 'jpeg')
    buf_str = buf.getvalue()
    # Return buf_str to the client with the appropriate header
    response = make_response(buf_str)
    response.headers['Content-Type'] = 'image/gif'
    # Store the CAPTCHA string in the session for later verification
    session['imageCode'] = imgKey
    return response

if __name__=='__main__':
    # Register the admin blueprint (imported here to avoid a circular import)
    from controller.admin import *
    app.register_blueprint(admin_con)
    app.run('0.0.0.0', port=80, debug=True)
|
{"/main.py": ["/common/__init__.py", "/controller/admin.py"]}
|
29,144,833
|
lyclqq/BreezeAdmin
|
refs/heads/master
|
/controller/admin.py
|
from flask import Blueprint,render_template,jsonify,session,current_app
import json
admin_con = Blueprint('admin', __name__)

# Development-only route: seed the session with a demo user.
@admin_con.route('/set')
def temp_set():
    session['username'] = '刘德华'  # user name
    session['usermenu'] = '101010'  # menu permission bits: '1' = allowed, '0' = denied
    return 'set ok'

@admin_con.route('/admin')
def admin():
    """Render the admin page showing only the menu items this user may see.

    Reads the full menu list (one JSON array on the first line of menu.json
    under UPLOAD_FOLDER) and filters it by the per-user permission bitstring
    stored in the session.
    """
    username = session.get('username')
    usermenu = str(session.get('usermenu'))
    # Read all menus from menu.json; use a context manager so the file
    # handle is always closed (it previously leaked).
    with open(current_app.config['UPLOAD_FOLDER'] + 'menu.json', 'r') as f:
        allmenu = json.loads(f.readline())
    menu = []
    # Build the user's menu from the permission bits, indexed by item id.
    for item in allmenu:
        ii = item.get('id')
        if usermenu[ii] == '1':
            menu.append(item)
    return render_template('temp.html', username=username, menu=menu)
|
{"/main.py": ["/common/__init__.py", "/controller/admin.py"]}
|
29,144,834
|
lyclqq/BreezeAdmin
|
refs/heads/master
|
/common/__init__.py
|
from flask import session
from wtforms import StringField,PasswordField,SubmitField,Form,widgets
from wtforms.validators import DataRequired,Length,Email
from PIL import Image, ImageFont, ImageDraw, ImageFilter
import random
import string
# Generate the CAPTCHA text: 4 distinct random digits.
def getKey():
    return ''.join(random.sample(string.digits, 4))

# Generate a random glyph colour (dark-ish RGB so it reads on white).
def rndColor():
    return (random.randint(16, 128), random.randint(16, 128), random.randint(16, 128))

# Render the graphical CAPTCHA for the given 4-character key.
def getVerifyCode(imgKey):
    width, height = 120, 50
    # New image canvas
    im = Image.new('RGB', (width, height), 'white')
    # Font
    font = ImageFont.truetype('app/static/arial.ttf', 40)
    # Draw object
    draw = ImageDraw.Draw(im)
    # Draw each character with a small random positional jitter
    for item in range(4):
        draw.text((5 + random.randint(-3, 3) + 23 * item, 5 + random.randint(-3, 3)),text=imgKey[item], fill=rndColor(), font=font)
    return im

# Verify username/password and inject permissions into the session.
# NOTE(review): this is a stub -- it ignores its arguments, accepts any
# credentials and always logs in the same hard-coded user.
def userLogin(username,password):
    session['username']='刘德华'  # user name
    session['usermenu']='101010'  # menu permission bits: '1' = allowed, '0' = denied
    return True

class LoginForm(Form):
    """Login form: username, password and CAPTCHA fields."""
    # NOTE(review): this first declaration is dead code -- it is immediately
    # overwritten by the assignment below.
    username = StringField('用户名', validators=[DataRequired(), Length(1, 20)])
    username = StringField(
        validators=[
            DataRequired(message='用户名不能为空'),
            Length(min=4, max=18, message='用户名长度必须大于%(min)d且小于%(max)d')
        ],
        widget=widgets.TextInput(),
        render_kw={'class': 'form-control',
                   "placeholder":"输入注册用户名"}
    )
    password = PasswordField(
        # label='用户密码:',
        validators=[
            DataRequired(message='密码不能为空'),
        ],
        widget=widgets.PasswordInput(),
        render_kw={'class': 'form-control',
                   "placeholder": "输入用户密码"}
    )
    verify_code = StringField('验证码', validators=[DataRequired(), Length(1, 4)],render_kw={'class': 'form-control',
                              "placeholder":"输入验证码"})
    submit = SubmitField('登录',render_kw={'class':'btn btn-block btn-info'})
|
{"/main.py": ["/common/__init__.py", "/controller/admin.py"]}
|
29,157,554
|
JiahaoYao/mesa-safe-rl
|
refs/heads/main
|
/analyze_runs.py
|
import os.path as osp
import numpy as np
import pickle
import matplotlib.pyplot as plt
# Maps experiment name -> {algs: {alg label -> run directory under runs/},
# outfile: output figure path}.
experiment_map = {
    "pointbot0": {
        "algs": {
            "recovery":
            "2020-04-15_18-07-44_SAC_simplepointbot0_Gaussian_",
            "sac_norecovery":
            "2020-04-15_17-18-59_SAC_simplepointbot0_Gaussian_",
            "sac_penalty1":
            "2020-04-15_17-21-40_SAC_simplepointbot0_Gaussian_",
            "sac_penalty10":
            "2020-04-15_18-01-42_SAC_simplepointbot0_Gaussian_",
            "sac_penalty100":
            "2020-04-15_18-24-50_SAC_simplepointbot0_Gaussian_",
            # NOTE(review): "sac_lagrange_fixed" appears twice; the second
            # entry silently overwrites the first, so only the 15-27-49 run
            # is ever plotted. Confirm which run was intended.
            "sac_lagrange_fixed":
            "2020-05-21_13-32-32_SAC_simplepointbot0_Gaussian_",
            "sac_lagrange_fixed":
            "2020-05-21_15-27-49_SAC_simplepointbot0_Gaussian_",
            "sac_ddpg_recovery":
            "2020-05-21_14-30-13_SAC_simplepointbot0_Gaussian_"
        },
        "outfile": "pointbot0.png"
    },
    "pointbot1": {
        "algs": {
            "recovery": "2020-04-15_20-35-58_SAC_simplepointbot1_Gaussian_",
            "sac_norecovery":
            "2020-04-15_21-42-14_SAC_simplepointbot1_Gaussian_",
            "sac_penalty1":
            "2020-04-15_21-42-32_SAC_simplepointbot1_Gaussian_",
            "sac_penalty10":
            "2020-04-15_21-43-02_SAC_simplepointbot1_Gaussian_",
            "sac_penalty100":
            "2020-04-15_21-43-28_SAC_simplepointbot1_Gaussian_",
            "q-filter": "2020-04-16_12-47-18_SAC_simplepointbot1_Gaussian_"
        },
        "outfile": "pointbot1.png"
    }
}
# Human-readable legend labels per algorithm key.
names = {
    "sac_norecovery": "SAC",
    "sac_penalty1": "SAC (penalty 1)",
    "sac_penalty10": "SAC (penalty 10)",
    "sac_penalty100": "SAC (penalty 100)",
    "recovery": "SAC + Recovery",
    "q-filter": "Q-Filter",
    "sac_lagrange_fixed": "SAC + Recovery + Critic Ascent",
    "sac_ddpg_recovery": "SAC + DDPG Recovery"
}
# Plot line colours per algorithm key.
colors = {
    "sac_norecovery": "g",
    "sac_penalty1": "orange",
    "sac_penalty10": "black",
    "sac_penalty100": "purple",
    "recovery": "red",
    "q-filter": "blue",
    "sac_lagrange_fixed": "blue",
    "sac_ddpg_recovery": "grey"
}
def plot_experiment(experiment):
    """Plot cumulative constraint violations and episode rewards per algorithm.

    Loads ``runs/<exp_dir>/run_stats.pkl`` for every algorithm listed in
    ``experiment_map[experiment]["algs"]`` and saves a two-panel figure to
    the experiment's ``outfile``.
    """
    # NOTE(review): `fig` is never used after creation.
    fig, axs = plt.subplots(2, figsize=(16, 9))
    axs[0].title.set_text("Constraint Violations vs. Episode")
    # axs[0].set_ylim(-0.1, 1.1)
    axs[0].set_xlabel("Episode")
    axs[0].set_ylabel("Num Constraint Violations")
    axs[1].title.set_text("Reward vs. Episode")
    axs[1].set_ylim(-4000, -1000)
    axs[1].set_xlabel("Episode")
    axs[1].set_ylabel("Reward")
    for alg in experiment_map[experiment]["algs"]:
        exp_dir = experiment_map[experiment]["algs"][alg]
        fname = osp.join("runs", exp_dir, "run_stats.pkl")
        with open(fname, "rb") as f:
            data = pickle.load(f)
        train_stats = data['train_stats']
        # Per-trajectory: list of per-step constraint flags and summed reward.
        train_violations = []
        train_rewards = []
        for traj_stats in train_stats:
            train_violations.append([])
            train_rewards.append(0)
            for step_stats in traj_stats:
                train_violations[-1].append(step_stats['constraint'])
                train_rewards[-1] += step_stats['reward']
        # A trajectory counts as one violation if any step violated.
        # NOTE(review): np.array over the nested list assumes all
        # trajectories have equal length -- TODO confirm.
        train_violations = np.array(train_violations).sum(1) > 0
        train_violations = np.cumsum(train_violations)
        train_rewards = np.array(train_rewards)
        axs[0].plot(train_violations, c=colors[alg], label=names[alg])
        axs[1].plot(train_rewards, c=colors[alg], label=names[alg])
    axs[0].legend(loc="lower right")
    axs[1].legend(loc="lower right")
    plt.savefig(experiment_map[experiment]["outfile"])
    # plt.show()
# Experiment to plot when run as a script.
experiment = "pointbot0"
if __name__ == '__main__':
    plot_experiment(experiment)
|
{"/supplement_plots.py": ["/plotting_utils.py"], "/analyze_runs_brijen.py": ["/plotting_utils.py"], "/gen_maze_demos.py": ["/env/maze.py", "/env/mazes.py"], "/analyze_runs_ashwin.py": ["/plotting_utils.py"], "/main.py": ["/sac.py", "/gen_pointbot0_demos.py", "/env/cartpole.py", "/env/half_cheetah_disabled.py", "/env/ant_disabled.py"], "/env/image_maze.py": ["/env/maze_const_images.py"], "/constraint.py": ["/utils.py"], "/analyze_runs_michael.py": ["/plotting_utils.py"], "/env/maze.py": ["/env/maze_const.py"], "/sac.py": ["/utils.py", "/constraint.py", "/run_multitask.py"], "/gen_pointbot_demos.py": ["/env/simplepointbot1.py"], "/env/mazes.py": ["/env/maze_const.py", "/env/maze.py"], "/gen_cartpole_demos.py": ["/env/cartpole.py"]}
|
29,157,555
|
JiahaoYao/mesa-safe-rl
|
refs/heads/main
|
/env/tall_cartgripper.py
|
'''
All cartgripper env modules built on cartrgipper implementation in
https://github.com/SudeepDasari/visual_foresight
'''
import copy
import cv2
import numpy as np
from dmbrl.env.cartgripper_env.cartgripper_rot_grasp import CartgripperRotGraspEnv
from dmbrl.env.util.action_util import no_rot_dynamics, clip_target_qpos
from dmbrl.env.cartgripper_env.util.sensor_util import is_touching
from gym.spaces import Box
# Default environment parameter overrides merged in TallCartgripperEnv.__init__
# (empty in this repository; callers supply env_params instead).
ENV_PARAMS = {}
def zangle_to_quat(zangle):
    """
    Convert a rotation of ``zangle`` radians about the z axis into a unit
    quaternion in (w, x, y, z) order.
    """
    half_angle = zangle / 2
    return np.array([np.cos(half_angle), 0, 0, np.sin(half_angle)])
def quat_to_zangle(quat):
    """
    Recover the z-axis rotation angle (rad) from a quaternion (w, x, y, z)
    whose rotation axis is the z axis, i.e. x = y = 0.
    """
    w, z = quat[0], quat[3]
    angle = np.arctan2(2 * w * z, 1 - 2 * z**2)
    return np.array([angle])
# repository specific params
class TallCartgripperEnv(CartgripperRotGraspEnv):
    """Cartgripper grasping env with a 4-dim action space and an image goal.

    Extends CartgripperRotGraspEnv with autograsp-style parameter merging,
    a rendered goal image (see generate_goal_image), and helpers for
    scripted grasps, object masks, and topple detection.
    """

    def __init__(self, env_params={}, reset_state=None):
        # NOTE(review): mutable default argument; tolerable here only because
        # env_params is deep-copied and never mutated.
        assert 'mode_rel' not in env_params, "Autograsp sets mode_rel"
        # Layer caller overrides on top of the module-level defaults.
        params = copy.deepcopy(ENV_PARAMS)
        new_params = copy.deepcopy(env_params)
        for k in new_params:
            params[k] = new_params[k]
        # Flatten a nested 'autograsp' dict into the top-level params.
        if 'autograsp' in params:
            ag_dict = params.pop('autograsp')
            for k in ag_dict:
                params[k] = ag_dict[k]
        super().__init__(params, reset_state)
        self._adim = 4  # action dims: x, y, z, gripper
        self._goal_reached, self._ground_zs = False, None
        self.unwrapped = self
        self.ac_low_bound = np.array([-0.25, -0.25, -0.25, -0.25])
        self.ac_high_bound = np.array([0.25, 0.25, 0.25, 0.25])
        self.action_space = Box(self.ac_low_bound, self.ac_high_bound)
        self.goal_image_shape = (48, 64, 3)  # H x W x C of goal render
        # NOTE(review): observation_space is a plain shape tuple, not a
        # gym.spaces object -- confirm downstream consumers expect this.
        self.observation_space = self.goal_image_shape
        # self.goal_image = None
    def _default_hparams(self):
        """Extend parent hparams with autograsp/workspace parameters."""
        ag_params = {
            'x_range': 0.06,
            'y_range': 0.06,
            # 'x_range': 0.12,
            # 'y_range': 0.12,
            # 'default_y': 0.,
            'default_theta': 0.,
            'no_motion_goal': False,
            'reopen': False,
            'zthresh': -0.06,
            'touchthresh': 0.0,
            'lift_height': 0.01,
            'pos_lower_bound': np.array([-0.2, -0.15]),
            'pos_upper_bound': np.array([0.2, 0.15])
        }
        parent_params = super()._default_hparams()
        parent_params.set_hparam('finger_sensors', True)
        parent_params.set_hparam('ncam', 2)
        for k in ag_params:
            parent_params.add_hparam(k, ag_params[k])
        return parent_params
    def _init_dynamics(self):
        """Reset per-episode state and record resting object heights."""
        self._goal_reached = False
        self._gripper_closed = False
        self._ground_zs = self._last_obs['object_poses_full'][:, 2].copy()
    def _next_qpos(self, action):
        """Map a 4-dim action to the next target qpos via autograsp dynamics."""
        # print("action", action)
        assert action.shape[0] == self._adim
        gripper_z = self._previous_target_qpos[2]
        z_thresh = self._hp.zthresh
        # True once any object has been lifted >1cm off its resting height.
        delta_z_cond = np.amax(self._last_obs['object_poses_full'][:, 2] -
                               self._ground_zs) > 0.01
        target, self._gripper_closed = no_rot_dynamics(
            self._previous_target_qpos, action, self._gripper_closed,
            gripper_z, z_thresh, self._hp.reopen, delta_z_cond)
        target = clip_target_qpos(target, self._hp.pos_lower_bound,
                                  self._hp.pos_upper_bound)
        return target
    def _post_step(self):
        #if np.amax(self._last_obs['object_poses_full'][:, 2] - self._ground_zs) > 0.05:
        # NOTE(review): the lift condition above is commented out, so every
        # step unconditionally marks the goal as reached.
        self._goal_reached = True
    def cost_fn(self, obs):
        """Squared-pixel distance between obs and the stored goal image."""
        # NOTE: obs_cost_fn takes in a processed obs right now
        return np.sum((obs - self.goal_image)**2)
    def has_goal(self):
        return True
    def is_stable(self, obs):
        return self._goal_reached
    def get_object_poses(self, idx=None):
        """Return all object poses, or a single object's pose if idx given."""
        if idx is None:
            return self._last_obs['object_poses_full']
        return self._last_obs['object_poses_full'][idx]
    def get_object_mask(self, idx, obs):
        # NOTE(review): computes the pose but returns None implicitly --
        # this method looks unfinished.
        pose = self.get_object_poses(idx)
    def get_grasp_action(self, idx=1, noise_std=0.00, drop=False):
        """Scripted grasp controller: align y, then x, then y, then lift.

        Returns a noisy 4-dim action toward grasping object 1.
        """
        position = np.copy(self.sim.get_state().qpos[:])
        # NOTE(review): object index 1 is hard-coded; the `idx` parameter is
        # ignored.
        obj_position = self.get_object_poses(1)[:3]
        control = np.zeros(self._adim)
        gain = 1
        if np.abs(position[1] - 0.15) > 0.02 and np.abs(
                position[0] - obj_position[0]) > 0.04:
            print(0)
            control[1] = 0.15 - position[1]
        elif np.abs(position[0] - obj_position[0]) > 0.04:
            print(1)
            control[0] = obj_position[0] - position[0]
        elif np.abs(position[1] - obj_position[1]) > 0.02:
            print(2)
            control[1] = obj_position[1] - position[1]
        elif drop:
            control[3] = 0.01
            noise_std = 0.03
        else:
            print(3)
            control[3] = 0.1
            control[2] = 0.015
        # Extra jitter on the x/y components only.
        control[:-2] += np.random.randn(self._adim - 2) * 0.01
        # print(position[:3], obj_position, control)
        return control * gain + np.random.randn(self._adim) * noise_std
    def _create_pos(self):
        """Randomize object x positions (y fixed) with >=3cm separation."""
        object_poses = super()._create_pos()
        positions = []
        for i in range(self.num_objects):
            object_poses[i][0] = np.random.uniform(-self._hp.x_range,
                                                   self._hp.x_range)
            # object_poses[i][1] = np.random.uniform(-self._hp.y_range, self._hp.y_range)
            object_poses[i][1] = -0.12
            object_poses[i][3:] = zangle_to_quat(self._hp.default_theta)
            # Rejection-sample x until this object is >=3cm from all others.
            while len(positions) > 0 and \
                np.linalg.norm(np.array(positions) - np.array([object_poses[i][0], object_poses[i][1]]), axis=1).min() < 0.03:
                object_poses[i][0] = np.random.uniform(-self._hp.x_range,
                                                       self._hp.x_range)
                # object_poses[i][1] = np.random.uniform(-self._hp.y_range, self._hp.y_range)
                object_poses[i][1] = -0.12
            positions.append((object_poses[i][0], object_poses[i][1]))
        return object_poses
    def goal_reached(self):
        return self._goal_reached
    def generate_goal_image(self):
        """Roll out a fixed scripted motion, render, and store the goal image."""
        self.reset(randomize_objects=False)
        # 5 steps of +x motion followed by 5 steps of -z motion.
        actions = np.tile(np.array([0, 0, -0.02, 0]), (5, 1))
        actions = np.vstack(
            [np.tile(np.array([0.02, 0, 0, 0]), (5, 1)), actions])
        for ac in actions:
            obs = self.step(ac)
        # im = obs[0]['images'][0]
        im = self.render()[0]
        target_img_height, target_img_width, _ = self.goal_image_shape
        im = cv2.resize(
            im, (target_img_width, target_img_height),
            interpolation=cv2.INTER_AREA)
        self.goal_image = im
        print("GOAL_IMAGE", self.goal_image.shape)
        self.reset(randomize_objects=False)
        # NOTE(review): scipy.misc.imsave was removed in scipy >= 1.3.
        import scipy.misc
        scipy.misc.imsave("goal_image.jpg", im)
        return im
    def get_armpos(self, object_pos):
        """Parent arm pose with zeroed wrist angle and fixed finger offsets."""
        xpos0 = super().get_armpos(object_pos)
        xpos0[3] = 0
        xpos0[4:6] = [0.05, -0.05]
        return xpos0
    def topple_check(self, debug=False):
        """Return True if any object tipped (>15 deg roll/pitch) or pose is NaN.

        Converts each object quaternion to euler angles and checks roll/pitch.
        """
        quat = self.get_object_poses()[:, 3:]
        phi = np.arctan2(
            2 *
            (np.multiply(quat[:, 0], quat[:, 1]) + quat[:, 2] * quat[:, 3]),
            1 - 2 * (np.power(quat[:, 1], 2) + np.power(quat[:, 2], 2)))
        theta = np.arcsin(2 * (np.multiply(quat[:, 0], quat[:, 2]) -
                               np.multiply(quat[:, 3], quat[:, 1])))
        psi = np.arctan2(
            2 * (np.multiply(quat[:, 0], quat[:, 3]) + np.multiply(
                quat[:, 1], quat[:, 2])),
            1 - 2 * (np.power(quat[:, 2], 2) + np.power(quat[:, 3], 2)))
        # Keep only roll/pitch (drop yaw), in degrees.
        euler = np.stack([phi, theta, psi]).T[:, :2] * 180. / np.pi
        if debug:
            return np.abs(euler).max() > 15 or np.isnan(euler).sum() > 0, euler
        return np.abs(euler).max() > 15 or np.isnan(euler).sum() > 0
    def true_cost(self):
        """-1 if object 1 is lifted above 8cm, else 0."""
        return -(self.get_object_poses()[1, 2] > 0.08).astype(int)
    @staticmethod
    def get_real_state(env, sim_state, actions=[]):
        """Set env to sim_state, replay actions, and return a render."""
        env.set_state(sim_state)
        env.sim.forward()
        for action in actions:
            env.step(action)
        im = env.render()[0]
        # NOTE(review): `state` is computed but never used or returned.
        state = env.sim.get_state()
        return im
    @staticmethod
    def get_object_masks_from_sim_state(env, sim_state):
        """Render each object in isolation and return per-object masks."""
        im_list = []
        num_objects = (len(sim_state.qpos) - 6) // 7
        original_qpos = sim_state.qpos.copy()
        for i in range(num_objects):
            new_qpos = original_qpos.copy()
            # Move the arm out of frame.
            new_qpos[:3] = [1.5, 0.2, 0.05]
            # Move every other object out of frame so only object i renders.
            for j in range(num_objects):
                if i == j:
                    continue
                else:
                    obj_idx = j * 7 + 6
                    new_qpos[obj_idx:obj_idx + 3] = [1.2, 0.2, 0.05]
            sim_state.qpos[:] = new_qpos
            env.sim.set_state(sim_state)
            env.sim.forward()
            im = env.render()[0]
            im_list.append(im)
        sim_state.qpos[:] = original_qpos
        masks = []
        for im in im_list:
            # mask = np.zeros_like(hsv)
            # for i in range(3):
            #     mask[:,:, i] = hsv[:,:,0] > 40
            # frame = np.multiply(hsv, mask)
            # NOTE(review): `hsv` is never defined in this scope, so calling
            # this method raises NameError -- presumably `im` was meant to be
            # converted to HSV first (e.g. via cv2.cvtColor). TODO confirm.
            mask = hsv[:, :, 0] > 40
            masks.append(mask)
        return masks
    @staticmethod
    def get_mask(im):
        """Threshold an RGB render into two stacked color masks (H x W x 2)."""
        # import matplotlib.pyplot as plt
        # plt.imshow(im)
        # plt.show()
        # First mask: strong red, weak green/blue.
        mask = np.logical_and(im[:, :, 0] > 65, im[:, :, 1] < 65)
        mask = np.logical_and(mask, im[:, :, 2] < 40)
        mask1 = mask.copy()
        # Second mask: red+green dominant over blue.
        mask = np.logical_and(im[:, :, 0] > 60, im[:, :, 1] > 40)
        mask = np.logical_and(mask, im[:, :, 2] < im[:, :, 1] / 2)
        mask2 = mask.copy()
        # mask = np.expand_dims((mask1), axis=-1)
        mask = np.stack((mask1, mask2), axis=-1)
        # mask = np.logical_and(np.max(im, axis=2) - np.min(im, axis=2) > 10, np.abs(im.max(2) - 82 ) > 10)
        # mask = np.logical_and(im[:,:,0] > 60, mask)
        # plt.imshow(mask2.squeeze())
        # plt.show()
        # mask2 = np.maximum(np.stack((mask1, mask1, mask1), axis=-1), np.stack((mask2, mask2, mask2), axis=-1))
        # im2 = np.multiply(im, mask2)
        # print(im2.shape)
        # plt.imshow(im2)
        # plt.show()
        # assert len(mask.shape) == 3, mask.shape
        return mask
|
{"/supplement_plots.py": ["/plotting_utils.py"], "/analyze_runs_brijen.py": ["/plotting_utils.py"], "/gen_maze_demos.py": ["/env/maze.py", "/env/mazes.py"], "/analyze_runs_ashwin.py": ["/plotting_utils.py"], "/main.py": ["/sac.py", "/gen_pointbot0_demos.py", "/env/cartpole.py", "/env/half_cheetah_disabled.py", "/env/ant_disabled.py"], "/env/image_maze.py": ["/env/maze_const_images.py"], "/constraint.py": ["/utils.py"], "/analyze_runs_michael.py": ["/plotting_utils.py"], "/env/maze.py": ["/env/maze_const.py"], "/sac.py": ["/utils.py", "/constraint.py", "/run_multitask.py"], "/gen_pointbot_demos.py": ["/env/simplepointbot1.py"], "/env/mazes.py": ["/env/maze_const.py", "/env/maze.py"], "/gen_cartpole_demos.py": ["/env/cartpole.py"]}
|
29,157,556
|
JiahaoYao/mesa-safe-rl
|
refs/heads/main
|
/utils.py
|
'''
Built on on SAC implementation from
https://github.com/pranz24/pytorch-soft-actor-critic
except for video processing uitls, which are built on
Goal-Aware Prediction: Learning to Model What Matters (ICML 2020)
'''
import os
import cv2
import numpy as np
import plotly
from plotly.graph_objs import Scatter
from plotly.graph_objs.scatter import Line
import math
import torch
# Plots min, max and mean + standard deviation bars of a population over time
def lineplot(xs, ys_population, title, path='', xaxis='episode'):
    """Write an offline plotly line chart to ``<path>/<title>.html``.

    If each element of ``ys_population`` is a list/tuple, plot max, +/-1 std
    band around the mean, min, and median; otherwise plot a single line.
    """
    max_colour, mean_colour, std_colour, transparent = 'rgb(0, 132, 180)', 'rgb(0, 172, 237)', 'rgba(29, 202, 255, 0.2)', 'rgba(0, 0, 0, 0)'
    if isinstance(ys_population[0], list) or isinstance(
            ys_population[0], tuple):
        ys = np.asarray(ys_population, dtype=np.float32)
        ys_min, ys_max, ys_mean, ys_std, ys_median = ys.min(1), ys.max(
            1), ys.mean(1), ys.std(1), np.median(ys, 1)
        ys_upper, ys_lower = ys_mean + ys_std, ys_mean - ys_std
        trace_max = Scatter(
            x=xs,
            y=ys_max,
            line=Line(color=max_colour, dash='dash'),
            name='Max')
        # The upper/lower traces are invisible; 'tonexty' fills between them
        # to draw the +/-1 std band around the mean.
        trace_upper = Scatter(
            x=xs,
            y=ys_upper,
            line=Line(color=transparent),
            name='+1 Std. Dev.',
            showlegend=False)
        trace_mean = Scatter(
            x=xs,
            y=ys_mean,
            fill='tonexty',
            fillcolor=std_colour,
            line=Line(color=mean_colour),
            name='Mean')
        trace_lower = Scatter(
            x=xs,
            y=ys_lower,
            fill='tonexty',
            fillcolor=std_colour,
            line=Line(color=transparent),
            name='-1 Std. Dev.',
            showlegend=False)
        trace_min = Scatter(
            x=xs,
            y=ys_min,
            line=Line(color=max_colour, dash='dash'),
            name='Min')
        trace_median = Scatter(
            x=xs, y=ys_median, line=Line(color=max_colour), name='Median')
        data = [
            trace_upper, trace_mean, trace_lower, trace_min, trace_max,
            trace_median
        ]
    else:
        data = [Scatter(x=xs, y=ys_population, line=Line(color=mean_colour))]
    plotly.offline.plot(
        {
            'data':
            data,
            'layout':
            dict(
                title=title,
                xaxis={'title': xaxis},
                yaxis={'title': title})
        },
        filename=os.path.join(path, title + '.html'),
        auto_open=False)
def write_video(frames, title, path=''):
    """Save a list of C x H x W float frames (values in [0, 1]) to ``<title>.mp4``.

    Frames are scaled to uint8 and channel-reversed because cv2.VideoWriter
    expects H x W x C images in BGR order. Output runs at 30 fps.
    """
    frames = np.multiply(np.stack(frames, axis=0).transpose(
        0, 2, 3, 1), 255).clip(0, 255).astype(
            np.uint8)[:, :, :, ::-1]  # VideoWrite expects H x W x C in BGR
    _, H, W, _ = frames.shape
    writer = cv2.VideoWriter(
        os.path.join(path, '%s.mp4' % title), cv2.VideoWriter_fourcc(*'mp4v'),
        30., (W, H), True)
    for frame in frames:
        writer.write(frame)
    writer.release()
def create_log_gaussian(mean, log_std, t):
    """Log-density of a diagonal Gaussian N(mean, exp(log_std)^2) at t.

    Sums over the last dimension:
    log p = -0.5 * sum(z^2) - sum(log_std) - 0.5 * d * log(2*pi),
    where z = (t - mean) / exp(log_std) and d is the last-dim size.
    """
    # FIX: the quadratic term previously squared (0.5 * z), yielding
    # -0.25 * z^2 instead of the Gaussian exponent -0.5 * z^2.
    quadratic = -0.5 * ((t - mean) / log_std.exp()).pow(2)
    l = mean.shape
    log_z = log_std  # per-dim log normalizer contribution
    z = l[-1] * math.log(2 * math.pi)  # constant term, d * log(2*pi)
    log_p = quadratic.sum(dim=-1) - log_z.sum(dim=-1) - 0.5 * z
    return log_p
def logsumexp(inputs, dim=None, keepdim=False):
    """Numerically stable log(sum(exp(inputs))) along ``dim``.

    When ``dim`` is None the tensor is flattened first. Subtracting the
    per-slice max before exponentiating avoids overflow.
    """
    if dim is None:
        inputs = inputs.view(-1)
        dim = 0
    max_val, _ = torch.max(inputs, dim=dim, keepdim=True)
    result = max_val + (inputs - max_val).exp().sum(dim=dim, keepdim=True).log()
    return result if keepdim else result.squeeze(dim)
def soft_update(target, source, tau):
    """Polyak-average source parameters into target in place.

    For each parameter pair: theta_target <- (1 - tau) * theta_target
    + tau * theta_source.
    """
    for tgt, src in zip(target.parameters(), source.parameters()):
        blended = tgt.data * (1.0 - tau) + src.data * tau
        tgt.data.copy_(blended)
def hard_update(target, source):
    """Copy every source parameter tensor into the matching target parameter."""
    for dst, src in zip(target.parameters(), source.parameters()):
        dst.data.copy_(src.data)
|
{"/supplement_plots.py": ["/plotting_utils.py"], "/analyze_runs_brijen.py": ["/plotting_utils.py"], "/gen_maze_demos.py": ["/env/maze.py", "/env/mazes.py"], "/analyze_runs_ashwin.py": ["/plotting_utils.py"], "/main.py": ["/sac.py", "/gen_pointbot0_demos.py", "/env/cartpole.py", "/env/half_cheetah_disabled.py", "/env/ant_disabled.py"], "/env/image_maze.py": ["/env/maze_const_images.py"], "/constraint.py": ["/utils.py"], "/analyze_runs_michael.py": ["/plotting_utils.py"], "/env/maze.py": ["/env/maze_const.py"], "/sac.py": ["/utils.py", "/constraint.py", "/run_multitask.py"], "/gen_pointbot_demos.py": ["/env/simplepointbot1.py"], "/env/mazes.py": ["/env/maze_const.py", "/env/maze.py"], "/gen_cartpole_demos.py": ["/env/cartpole.py"]}
|
29,157,557
|
JiahaoYao/mesa-safe-rl
|
refs/heads/main
|
/main.py
|
'''
Built on on SAC implementation from
https://github.com/pranz24/pytorch-soft-actor-critic
'''
# -*- coding: utf-8 -*-
import argparse
import datetime
import gym
import os.path as osp
import pickle
import numpy as np
import itertools
import torch
from sac import SAC
from tensorboardX import SummaryWriter
from replay_memory import ReplayMemory, ConstraintReplayMemory
from MPC import MPC
from VisualRecovery import VisualRecovery
from dotmap import DotMap
from config import create_config
import os
from env.simplepointbot0 import SimplePointBot
import moviepy.editor as mpy
from video_recorder import VideoRecorder
import cv2
from model import VisualEncoderAttn, TransitionModel, VisualReconModel
from torch import nn, optim
from gen_pointbot0_demos import get_random_transitions_pointbot0
from gen_pointbot1_demos import get_random_transitions_pointbot1
from env.cartpole import transition_function
from env.half_cheetah_disabled import HalfCheetahEnv
from env.ant_disabled import AntEnv
# Active compute device: CUDA when available, otherwise CPU.
TORCH_DEVICE = torch.device(
    'cuda') if torch.cuda.is_available() else torch.device('cpu')
# Move array-like data to the active device as a float tensor.
# FIX: previously hard-coded .to('cuda'), which crashed on CPU-only machines
# even though TORCH_DEVICE correctly fell back to cpu.
torchify = lambda x: torch.FloatTensor(x).to(TORCH_DEVICE)
def linear_schedule(startval, endval, endtime):
    """Return f(t) that interpolates linearly from ``startval`` at t=0 to
    ``endval`` at t=``endtime`` and stays constant at ``endval`` afterwards."""
    def schedule(t):
        if t < endtime:
            return startval + t / endtime * (endval - startval)
        return endval
    return schedule
def set_seed(seed, env):
    """Seed torch, numpy, and the environment for reproducibility.

    FIX: previously read the module-level ``args.seed`` instead of the
    ``seed`` parameter, so the argument was silently ignored (and the
    function crashed if ``args`` was not in scope).
    """
    torch.manual_seed(seed)
    np.random.seed(seed)
    env.seed(seed)
def dump_logs(test_rollouts, train_rollouts, logdir):
    """Pickle test/train rollout stats to ``<logdir>/run_stats.pkl``."""
    payload = {"test_stats": test_rollouts, "train_stats": train_rollouts}
    out_path = osp.join(logdir, "run_stats.pkl")
    with open(out_path, "wb") as f:
        pickle.dump(payload, f)
def print_episode_info(rollout):
    """Print a short summary of one rollout: final reward, final state (if
    low-dimensional), and the number of constraint violations."""
    violations = sum(
        int(step['constraint']) for step in rollout if 'constraint' in step)
    last = rollout[-1]
    if "reward" in last and "state" in last:
        print("final reward: %f" % last["reward"])
        # Only print the state itself when it is not an image (rank < 3).
        if len(last["state"].shape) < 3:
            print(last["state"])
    print("num violations: %d" % violations)
def recovery_config_setup(args):
    """Build the MPC/recovery controller config from CLI args.

    NOTE(review): reads the module-level ``logdir`` (defined later in the
    script), not a parameter -- calling this before ``logdir`` exists raises
    NameError.
    """
    ctrl_args = DotMap(**{key: val for (key, val) in args.ctrl_arg})
    cfg = create_config(args.env_name, "MPC", ctrl_args, args.override, logdir)
    cfg.ctrl_cfg.pred_time = args.pred_time
    cfg.ctrl_cfg.opt_cfg.reachability_hor = args.reachability_hor
    # Exactly one of the value-function flags must be set.
    if args.use_value:
        cfg.ctrl_cfg.use_value = True
    elif args.use_qvalue:
        cfg.ctrl_cfg.use_qvalue = True
    else:
        assert (False)
    cfg.pprint()
    return cfg
def experiment_setup(logdir, args):
    """Construct the environment, SAC agent, and (optionally) recovery policy.

    Returns ``(agent, recovery_policy, env)``. The recovery policy is either
    an MPC controller or a VisualRecovery model depending on
    ``args.vismpc_recovery``; it is None when recovery is disabled or handled
    by DDPG/Q-sampling inside the agent.
    """
    if args.use_recovery and not args.disable_learned_recovery and not (
            args.ddpg_recovery or args.Q_sampling_recovery):
        cfg = recovery_config_setup(args)
        env = cfg.ctrl_cfg.env
        if not args.vismpc_recovery:
            recovery_policy = MPC(cfg.ctrl_cfg)
        else:
            # Visual-MPC recovery: build encoder/dynamics/reconstruction
            # models and their optimizers.
            encoder = VisualEncoderAttn(
                args.env_name, args.hidden_size, ch=3).to(device=TORCH_DEVICE)
            transition_model = TransitionModel(
                args.hidden_size,
                env.action_space.shape[0]).to(device=TORCH_DEVICE)
            residual_model = VisualReconModel(
                args.env_name, args.hidden_size).to(device=TORCH_DEVICE)
            dynamics_param_list = list(transition_model.parameters()) + list(
                residual_model.parameters()) + list(encoder.parameters())
            dynamics_optimizer = optim.Adam(
                dynamics_param_list, lr=3e-4, eps=1e-4)
            dynamics_finetune_optimizer = optim.Adam(
                transition_model.parameters(), lr=3e-4, eps=1e-4)
            if args.load_vismpc:
                # Load pretrained weights; maze checkpoints use a different
                # training-iteration filename.
                if 'maze' in args.env_name:
                    model_dicts = torch.load(
                        os.path.join('models', args.model_fname,
                                     'model_19500.pth'))
                else:
                    model_dicts = torch.load(
                        os.path.join('models', args.model_fname,
                                     'model_199900.pth'))
                transition_model.load_state_dict(
                    model_dicts['transition_model'])
                residual_model.load_state_dict(model_dicts['residual_model'])
                encoder.load_state_dict(model_dicts['encoder'])
                dynamics_optimizer.load_state_dict(
                    model_dicts['dynamics_optimizer'])
            else:
                # NOTE(review): rebinds the `logdir` parameter to the model
                # save directory for the VisualRecovery config below.
                logdir = os.path.join('models', args.model_fname)
                os.makedirs(logdir, exist_ok=True)
        if args.vismpc_recovery:
            cfg.ctrl_cfg.encoder = encoder
            cfg.ctrl_cfg.transition_model = transition_model
            cfg.ctrl_cfg.residual_model = residual_model
            cfg.ctrl_cfg.dynamics_optimizer = dynamics_optimizer
            cfg.ctrl_cfg.dynamics_finetune_optimizer = dynamics_finetune_optimizer
            cfg.ctrl_cfg.hidden_size = args.hidden_size
            cfg.ctrl_cfg.beta = args.beta
            cfg.ctrl_cfg.logdir = logdir
            cfg.ctrl_cfg.batch_size = args.batch_size
            recovery_policy = VisualRecovery(cfg.ctrl_cfg)
    else:
        recovery_policy = None
        if "HalfCheetah" in args.env_name:
            env = HalfCheetahEnv()
        elif "Ant-Disabled" in args.env_name:
            env = AntEnv()
        else:
            env = gym.make(ENV_ID[args.env_name])
    set_seed(args.seed, env)
    agent = agent_setup(env, logdir, args)
    if args.use_recovery and not args.disable_learned_recovery and not (
            args.ddpg_recovery or args.Q_sampling_recovery):
        # Hand the learned safety value function to the recovery controller.
        if args.use_value:
            recovery_policy.update_value_func(agent.V_safe)
        elif args.use_qvalue:
            recovery_policy.update_value_func(agent.Q_safe)
    return agent, recovery_policy, env
def agent_setup(env, logdir, args):
    """Construct the SAC agent for ``env``.

    A second environment instance (``tmp_env``) is created and passed to SAC;
    it is None for the reacher task.
    """
    if "HalfCheetah" in args.env_name:
        tmp_env = HalfCheetahEnv()
    elif "Ant-Disabled" in args.env_name:
        tmp_env = AntEnv()
    elif "reacher" in args.env_name:
        tmp_env = None
    else:
        tmp_env = gym.make(ENV_ID[args.env_name])
    agent = SAC(
        env.observation_space,
        env.action_space,
        args,
        logdir,
        tmp_env=tmp_env)
    return agent
def get_action(state, env, agent, recovery_policy, args, train=True):
    """Select a task action, and substitute a recovery action if unsafe.

    Returns ``(action, real_action, recovery)`` where ``action`` is the task
    policy's proposal, ``real_action`` is what is actually executed, and
    ``recovery`` flags whether the recovery mechanism was triggered.

    NOTE(review): reads the module-level ``total_numsteps`` for the
    random-exploration warmup check.
    """
    def recovery_thresh(state, action, agent, recovery_policy, args):
        # Decide whether the proposed action is unsafe enough to trigger
        # recovery, using the safety critic (or reachability/lookahead tests).
        if not args.use_recovery:
            return False
        critic_val = agent.safety_critic.get_value(
            torchify(state).unsqueeze(0),
            torchify(action).unsqueeze(0))
        if args.reachability_test: # reachability test combined with safety check
            return not recovery_policy.reachability_test(
                state, action, args.eps_safe)
        if args.lookahead_test:
            return not recovery_policy.lookahead_test(state, action,
                                                      args.eps_safe)
        # Standard mode: trigger when predicted constraint-violation risk
        # exceeds eps_safe; pred_time mode: trigger when predicted time to
        # violation drops below t_safe.
        if critic_val > args.eps_safe and not args.pred_time:
            return True
        elif critic_val < args.t_safe and args.pred_time:
            return True
        return False
    policy_state = state
    if args.start_steps > total_numsteps and train:
        action = env.action_space.sample()  # Sample random action
    elif train:
        action = agent.select_action(policy_state)  # Sample action from policy
    else:
        action = agent.select_action(
            policy_state, eval=True)  # Sample action from policy
    # print("test", test)
    if recovery_thresh(state, action, agent, recovery_policy, args):
        recovery = True
        if not args.disable_learned_recovery:
            if args.ddpg_recovery or args.Q_sampling_recovery:
                real_action = agent.safety_critic.select_action(state)
            else:
                real_action = recovery_policy.act(state, 0)
        else:
            real_action = env.safe_action(state)
    else:
        recovery = False
        real_action = np.copy(action)
    return action, real_action, recovery
# Maps the CLI --env-name shorthand to the registered gym environment id.
ENV_ID = {
    'simplepointbot0': 'SimplePointBot-v0',
    'simplepointbot1': 'SimplePointBot-v1',
    'cliffwalker': 'CliffWalker-v0',
    'cliffcheetah': 'CliffCheetah-v0',
    'maze': 'Maze-v0',
    'maze_1': 'Maze1-v0',
    'maze_2': 'Maze2-v0',
    'maze_3': 'Maze3-v0',
    'maze_4': 'Maze4-v0',
    'maze_5': 'Maze5-v0',
    'maze_6': 'Maze6-v0',
    'image_maze': 'ImageMaze-v0',
    'shelf_env': 'Shelf-v0',
    'shelf_dynamic_env': 'ShelfDynamic-v0',
    'shelf_long_env': 'ShelfLong-v0',
    'shelf_dynamic_long_env': 'ShelfDynamicLong-v0',
    'shelf_reach_env': 'ShelfReach-v0',
    'cliffpusher': 'CliffPusher-v0',
    'reacher': 'DVRKReacher-v0',
    'car': 'Car-v0',
    'minitaur': 'Minitaur-v0',
    'cartpole': 'CartPoleLength-v0',
    "HalfCheetah-v2": "HalfCheetah-v2",
    "HalfCheetah-Disabled": "HalfCheetah-Disabled-v0",
    "Ant-Disabled": "Ant-Disabled-v0",
    "Push-v0": "Push-v0",
    "Ant-v2": "Ant-v2",
}
def npy_to_gif(im_list, filename, fps=20):
    """Write a list of image arrays to ``<filename>.gif`` at ``fps`` frames/sec."""
    clip = mpy.ImageSequenceClip(im_list, fps=fps)
    clip.write_gif(filename + '.gif')
def get_constraint_demos(env, args):
    """Load or generate constraint-violation demonstration transitions.

    Dispatches on ``args.env_name`` / ``args.task_demos`` and returns
    ``(constraint_demo_data, task_demo_data, obs_seqs, ac_seqs,
    constraint_seqs)``. ``task_demo_data`` is None unless task demos were
    requested; the sequence lists are only populated for image-sequence data.
    """
    # Get demonstrations
    task_demo_data = None
    obs_seqs = []
    ac_seqs = []
    constraint_seqs = []
    if not args.task_demos:
        if args.env_name == 'reacher':
            constraint_demo_data = pickle.load(
                open(
                    osp.join("demos", "dvrk_reach", "constraint_demos.pkl"),
                    "rb"))
            if args.cnn:
                constraint_demo_data = constraint_demo_data['images']
            else:
                constraint_demo_data = constraint_demo_data['lowdim']
        elif 'maze' in args.env_name:
            if args.env_name == 'maze':
                constraint_demo_data = pickle.load(
                    open(
                        osp.join("demos", args.env_name,
                                 "constraint_demos.pkl"), "rb"))
            else:
                # constraint_demo_data, obs_seqs, ac_seqs, constraint_seqs = env.transition_function(args.num_constraint_transitions)
                demo_data = pickle.load(
                    open(osp.join("demos", args.env_name, "demos.pkl"), "rb"))
                constraint_demo_data = demo_data['constraint_demo_data']
                obs_seqs = demo_data['obs_seqs']
                ac_seqs = demo_data['ac_seqs']
                constraint_seqs = demo_data['constraint_seqs']
        elif args.env_name == 'minitaur':
            # Merge three demo sets, then put all violations first so they
            # survive any later truncation.
            constraint_demo_data = pickle.load(
                open(
                    osp.join("demos", args.env_name, "constraint_demos.pkl"),
                    "rb"))
            constraint_demo_data_random = pickle.load(
                open(
                    osp.join("demos", args.env_name,
                             "constraint_demos_random.pkl"), "rb"))
            constraint_demo_data_kinda_random = pickle.load(
                open(
                    osp.join("demos", args.env_name,
                             "constraint_demos_kinda_random.pkl"), "rb"))
            constraint_demo_data_total = constraint_demo_data + constraint_demo_data_random + constraint_demo_data_kinda_random
            constraint_demo_data_list_safe = []
            constraint_demo_data_list_viol = []
            # NOTE(review): these two passes could be a single loop; a
            # transition with constraint flag not in {0, 1} would be dropped.
            for i in range(len(constraint_demo_data_total)):
                if constraint_demo_data_total[i][2] == 1:
                    constraint_demo_data_list_viol.append(
                        constraint_demo_data_total[i])
            for i in range(len(constraint_demo_data_total)):
                if constraint_demo_data_total[i][2] == 0:
                    constraint_demo_data_list_safe.append(
                        constraint_demo_data_total[i])
            import random
            random.shuffle(constraint_demo_data_list_safe)
            constraint_demo_data = constraint_demo_data_list_viol + constraint_demo_data_list_safe
        elif 'shelf' in args.env_name:
            folder_name = args.env_name.split('_env')[0]
            # if not args.vismpc_recovery:
            if not args.cnn:
                constraint_demo_data = pickle.load(
                    open(
                        osp.join("demos", folder_name, "constraint_demos.pkl"),
                        "rb"))
            else:
                constraint_demo_data = pickle.load(
                    open(
                        osp.join("demos", folder_name,
                                 "constraint_demos_images.pkl"), "rb"))
        else:
            # Pointbot / cartpole / cheetah / ant: multitask loads per-task
            # pickles, meta takes a small subset, otherwise generate or load
            # a test set.
            if args.env_name =='simplepointbot0' and args.multitask:
                constraint_demo_data = []
                for i in range(24):
                    data = pickle.load(open("demos/pointbot0_dynamics/constraint_demos_" + str(i) + ".pkl", "rb"))
                    constraint_demo_data.extend(data)
            elif args.env_name =='simplepointbot0' and args.meta:
                constraint_demo_data = get_random_transitions_pointbot0(w1=0.0, w2=0.0, discount=args.gamma_safe, num_transitions=args.num_constraint_transitions)[:200]
            elif args.env_name =='simplepointbot0':
                constraint_demo_data = get_random_transitions_pointbot0(w1=0.0, w2=0.0, discount=args.gamma_safe, num_transitions=args.num_constraint_transitions)
            elif args.env_name =='simplepointbot1' and args.multitask:
                constraint_demo_data = []
                for i in range(25):
                    data = pickle.load(open("demos/pointbot1_dynamics/constraint_demos_" + str(i) + ".pkl", "rb"))
                    constraint_demo_data.extend(data)
            elif args.env_name =='simplepointbot1' and args.meta:
                constraint_demo_data = get_random_transitions_pointbot1(w1=0.0, w2=0.0, discount=args.gamma_safe, num_transitions=args.num_constraint_transitions)[:200]
            elif args.env_name =='simplepointbot1':
                constraint_demo_data = get_random_transitions_pointbot1(w1=0.0, w2=0.0, discount=args.gamma_safe, num_transitions=args.num_constraint_transitions)
            elif args.env_name =='cartpole' and args.multitask:
                constraint_demo_data = []
                for i in range(20):
                    data = pickle.load(open("demos/cartpole_no_task/constraint_demos_" + str(i) + ".pkl", "rb"))
                    constraint_demo_data.extend(data)
            elif args.env_name == 'cartpole':
                constraint_demo_data = []
                data = pickle.load(open("demos/cartpole_no_task/constraint_demos_" + "test" + ".pkl", "rb"))
                import random
                data = random.sample(data, args.test_size)
                constraint_demo_data.extend(data)
            elif args.env_name == "HalfCheetah-Disabled" and args.multitask:
                constraint_demo_data = []
                for i in range(1, 5):
                    data = pickle.load(open("demos/halfcheetah_disabled_no_task/constraint_demos_" + str(i) + ".pkl", "rb"))
                    constraint_demo_data.extend(data)
            elif args.env_name == "HalfCheetah-Disabled":
                # Loading Test Set Data for MESA or for RRL Baseline
                constraint_demo_data = []
                data = pickle.load(open("demos/halfcheetah_disabled_no_task/constraint_demos_" + "5" + ".pkl", "rb"))
                import random
                data = random.sample(data, args.test_size)
                constraint_demo_data.extend(data)
            elif args.env_name == "Ant-Disabled" and args.multitask:
                constraint_demo_data = []
                for i in range(0, 3):
                    data = pickle.load(open("demos/ant_disabled_no_task/constraint_demos_" + str(i) + ".pkl", "rb"))
                    constraint_demo_data.extend(data)
            elif args.env_name == "Ant-Disabled":
                # Loading Test Set Data for MESA or for RRL Baseline
                constraint_demo_data = []
                data = pickle.load(open("demos/ant_disabled_no_task/constraint_demos_" + "3" + ".pkl", "rb"))
                import random
                data = random.sample(data, args.test_size)
                constraint_demo_data.extend(data)
            else:
                constraint_demo_data = env.transition_function(
                    args.num_constraint_transitions)
    else:
        # Task-demo branch: also load/generate task demonstrations.
        if args.cnn and args.env_name == 'maze':
            constraint_demo_data, task_demo_data_images = env.transition_function(
                args.num_constraint_transitions,
                task_demos=args.task_demos,
                images=True)
            constraint_demo_data = pickle.load(
                open(osp.join("demos", "maze", "constraint_demos.pkl"), "rb"))
        elif 'shelf' in args.env_name:
            folder_name = args.env_name.split('_env')[0]
            if args.cnn:
                task_demo_data = pickle.load(
                    open(
                        osp.join("demos", folder_name,
                                 "task_demos_images.pkl"), "rb"))
            else:
                task_demo_data = pickle.load(
                    open(
                        osp.join("demos", folder_name, "task_demos.pkl"),
                        "rb"))
            if not args.vismpc_recovery:
                if args.cnn:
                    constraint_demo_data = pickle.load(
                        open(
                            osp.join("demos", folder_name,
                                     "constraint_demos_images.pkl"), "rb"))
                else:
                    constraint_demo_data = pickle.load(
                        open(
                            osp.join("demos", folder_name,
                                     "constraint_demos.pkl"), "rb"))
                # Get all violations in front to get as many violations as possible
                constraint_demo_data_list_safe = []
                constraint_demo_data_list_viol = []
                for i in range(len(constraint_demo_data)):
                    if constraint_demo_data[i][2] == 1:
                        constraint_demo_data_list_viol.append(
                            constraint_demo_data[i])
                    else:
                        constraint_demo_data_list_safe.append(
                            constraint_demo_data[i])
                constraint_demo_data = constraint_demo_data_list_viol[:int(
                    0.5 * args.num_constraint_transitions
                )] + constraint_demo_data_list_safe
            else:
                # Visual-MPC recovery: build transitions from image sequences
                # (25 steps per sequence).
                constraint_demo_data = []
                data = pickle.load(
                    open(
                        osp.join("demos", folder_name,
                                 "constraint_demos_images_seqs.pkl"), "rb"))
                obs_seqs = data['obs'][:args.num_constraint_transitions // 25]
                ac_seqs = data['ac'][:args.num_constraint_transitions // 25]
                constraint_seqs = data[
                    'constraint'][:args.num_constraint_transitions // 25]
                for i in range(len(ac_seqs)):
                    ac_seqs[i] = np.array(ac_seqs[i])
                for i in range(len(obs_seqs)):
                    obs_seqs[i] = np.array(obs_seqs[i])
                for i in range(len(constraint_seqs)):
                    constraint_seqs[i] = np.array(constraint_seqs[i])
                ac_seqs = np.array(ac_seqs)
                obs_seqs = np.array(obs_seqs)
                constraint_seqs = np.array(constraint_seqs)
                # Unroll sequences into (s, a, c, s', done) tuples.
                for i in range(obs_seqs.shape[0]):
                    for j in range(obs_seqs.shape[1] - 1):
                        constraint_demo_data.append(
                            (obs_seqs[i, j], ac_seqs[i, j],
                             constraint_seqs[i, j], obs_seqs[i, j + 1], False))
        else:
            constraint_demo_data, task_demo_data = env.transition_function(
                args.num_constraint_transitions, task_demos=args.task_demos)
    return constraint_demo_data, task_demo_data, obs_seqs, ac_seqs, constraint_seqs
def train_recovery(states, actions, next_states=None, epochs=50):
    """Train the recovery policy on visited transitions.

    NOTE(review): relies on the module-level ``recovery_policy`` rather than
    a parameter; when ``next_states`` is None the ``epochs`` argument is
    ignored.
    """
    if next_states is not None:
        recovery_policy.train(
            states, actions, random=True, next_obs=next_states, epochs=epochs)
    else:
        recovery_policy.train(states, actions)
# TODO: fix this for shelf env...
def process_obs(obs, env_name):
    """Convert an environment observation image to channel-first layout.

    For shelf environments the frame is first downsampled to 64x48.

    Fix: the function previously ignored its ``env_name`` parameter and read
    the module-global ``args.env_name`` instead; all call sites already pass
    ``args.env_name``, so using the parameter is behavior-preserving while
    making the function self-contained.
    """
    if 'shelf' in env_name:
        obs = cv2.resize(obs, (64, 48), interpolation=cv2.INTER_AREA)
    # HWC -> CHW for the CNN policy input.
    im = np.transpose(obs, (2, 0, 1))
    return im
# Command-line interface for the SAC / Recovery-RL training script.
# Fix: several help strings quoted defaults that no longer matched the
# actual default= values (gamma_safe, eps_safe, t_safe, tau_safe,
# start_steps, replay sizes); they now agree with the code.
parser = argparse.ArgumentParser(description='PyTorch Soft Actor-Critic Args')
parser.add_argument(
    '--env-name',
    default="HalfCheetah-v2",
    help='Mujoco Gym environment (default: HalfCheetah-v2)')
parser.add_argument('--logdir', default="runs", help='exterior log directory')
parser.add_argument('--logdir_suffix', default="", help='log directory suffix')
parser.add_argument(
    '--policy',
    default="Gaussian",
    help='Policy Type: Gaussian | Deterministic (default: Gaussian)')
# NOTE(review): type=bool is an argparse footgun (any non-empty string parses
# as True); kept as-is for CLI compatibility.
parser.add_argument(
    '--eval',
    type=bool,
    default=True,
    help='Evaluate the policy every 10 episodes (default: True)')
parser.add_argument(
    '--gamma',
    type=float,
    default=0.99,
    metavar='G',
    help='discount factor for reward (default: 0.99)')
parser.add_argument(
    '--pos_fraction',
    type=float,
    default=-1,
    metavar='G',
    help='fraction of positive examples for critic training')
parser.add_argument(
    '--gamma_safe',
    type=float,
    default=0.5,
    metavar='G',
    help='discount factor for constraints (default: 0.5)')
parser.add_argument(
    '--eps_safe',
    type=float,
    default=0.1,
    metavar='G',
    help='threshold constraints (default: 0.1)')
parser.add_argument(
    '--t_safe',
    type=float,
    default=80,
    metavar='G',
    help='threshold constraints (default: 80)')
parser.add_argument(
    '--tau',
    type=float,
    default=0.005,
    metavar='G',  # TODO: idk if this should be 0.005 or 0.0002...
    help='target smoothing coefficient(τ) (default: 0.005)')
parser.add_argument(
    '--tau_safe',
    type=float,
    default=0.0002,
    metavar='G',
    help='target smoothing coefficient(τ) for V safe (default: 0.0002)')
parser.add_argument(
    '--lr',
    type=float,
    default=0.0003,
    metavar='G',
    help='learning rate (default: 0.0003)')
parser.add_argument(
    '--alpha',
    type=float,
    default=0.2,
    metavar='G',
    help=
    'Temperature parameter α determines the relative importance of the entropy\
            term against the reward (default: 0.2)')
parser.add_argument(
    '--automatic_entropy_tuning',
    type=bool,
    default=False,
    metavar='G',
    help='Automatically adjust α (default: False)')
parser.add_argument(
    '--seed',
    type=int,
    default=123456,
    metavar='N',
    help='random seed (default: 123456)')
parser.add_argument(
    '--batch_size',
    type=int,
    default=256,
    metavar='N',
    help='batch size (default: 256)')
parser.add_argument(
    '--num_steps',
    type=int,
    default=1000000,
    metavar='N',
    help='maximum number of steps (default: 1000000)')
parser.add_argument(
    '--num_eps',
    type=int,
    default=1000000,
    metavar='N',
    help='maximum number of episodes (default: 1000000)')
parser.add_argument(
    '--hidden_size',
    type=int,
    default=256,
    metavar='N',
    help='hidden size (default: 256)')
parser.add_argument(
    '--updates_per_step',
    type=int,
    default=1,
    metavar='N',
    help='model updates per simulator step (default: 1)')
parser.add_argument(
    '--start_steps',
    type=int,
    default=100,
    metavar='N',
    help='Steps sampling random actions (default: 100)')
parser.add_argument(
    '--target_update_interval',
    type=int,
    default=1,
    metavar='N',
    help='Value target update per no. of updates per step (default: 1)')
parser.add_argument(
    '--replay_size',
    type=int,
    default=1000000,
    metavar='N',
    help='size of replay buffer (default: 1000000)')
parser.add_argument(
    '--safe_replay_size',
    type=int,
    default=2000000,
    metavar='N',
    help='size of replay buffer for V safe (default: 2000000)')
parser.add_argument(
    '--cuda', action="store_true", help='run on CUDA (default: False)')
parser.add_argument(
    '--cnn', action="store_true", help='visual observations (default: False)')
parser.add_argument('--critic_pretraining_steps', type=int, default=3000)
parser.add_argument('--critic_safe_pretraining_steps', type=int, default=10000)
parser.add_argument('--constraint_reward_penalty', type=float, default=0)
parser.add_argument('--safety_critic_penalty', type=float, default=-1)
# For recovery policy
# Flags selecting the recovery mechanism and its update cadence.
parser.add_argument('--use_target_safe', action="store_true")
parser.add_argument('--disable_learned_recovery', action="store_true")
parser.add_argument('--use_recovery', action="store_true")
parser.add_argument('--ddpg_recovery', action="store_true")
parser.add_argument('--Q_sampling_recovery', action="store_true")
parser.add_argument('--reachability_test', action="store_true")
parser.add_argument('--lookahead_test', action="store_true")
parser.add_argument('--SAC_recovery', action="store_true")
parser.add_argument('--recovery_policy_update_freq', type=int, default=1)
parser.add_argument('--critic_safe_update_freq', type=int, default=1)
parser.add_argument('--task_demos', action="store_true")
parser.add_argument('--filter', action="store_true")
parser.add_argument('--num_filter_samples', type=int, default=100)
parser.add_argument('--max_filter_iters', type=int, default=5)
parser.add_argument('--Q_safe_start_ep', type=int, default=10)
parser.add_argument('--use_value', action="store_true")
parser.add_argument('--use_qvalue', action="store_true")
parser.add_argument('--pred_time', action="store_true")
parser.add_argument('--opt_value', action="store_true")
parser.add_argument('--lagrangian_recovery', action="store_true")
parser.add_argument(
    '--recovery_lambda', type=float, default=0.01, metavar='G',
    help='todo') # TODO: needs some tuning
parser.add_argument('--num_task_transitions', type=int, default=10000000)
parser.add_argument(
    '--num_constraint_transitions', type=int, default=10000
) # Make this 20K+ for original shelf env stuff, trying with fewer rn
parser.add_argument('--reachability_hor', type=int, default=2)
# Ablations
parser.add_argument('--disable_offline_updates', action="store_true")
parser.add_argument('--disable_online_updates', action="store_true")
parser.add_argument('--disable_action_relabeling', action="store_true")
parser.add_argument('--add_both_transitions', action="store_true")
# Lagrangian, RSPO
parser.add_argument('--DGD_constraints', action="store_true")
parser.add_argument('--use_constraint_sampling', action="store_true")
parser.add_argument(
    '--nu', type=float, default=0.01, metavar='G',
    help='todo') # TODO: needs some tuning
parser.add_argument('--update_nu', action="store_true")
# When --nu_schedule is set, nu is annealed linearly from nu_start to nu_end
# over args.num_eps episodes (see the nu_schedule setup after parse_args).
parser.add_argument('--nu_schedule', action="store_true")
parser.add_argument(
    '--nu_start',
    type=float,
    default=1e3,
    metavar='G',
    help='start value for nu (high)')
parser.add_argument(
    '--nu_end',
    type=float,
    default=0,
    metavar='G',
    help='end value for nu (low)')
# RCPO
parser.add_argument('--RCPO', action="store_true")
parser.add_argument(
    '--lambda_RCPO', type=float, default=0.01, metavar='G',
    help='todo') # TODO: needs some tuning
# PLaNet recovery (visual MPC)
parser.add_argument('--beta', type=float, default=10)
parser.add_argument('--vismpc_recovery', action="store_true")
parser.add_argument('--load_vismpc', action="store_true")
parser.add_argument('--model_fname', default='model1')
# Reward Conditioning
parser.add_argument('--eps_condition', type=float, default=0.3)
parser.add_argument('--conditional', action="store_true")
# Goal-based RL
parser.add_argument('--goal', action="store_true")
parser.add_argument(
    '-ca',
    '--ctrl_arg',
    action='append',
    nargs=2,
    default=[],
    help=
    'Controller arguments, see https://github.com/kchua/handful-of-trials#controller-arguments'
)
parser.add_argument(
    '-o',
    '--override',
    action='append',
    nargs=2,
    default=[],
    help=
    'Override default parameters, see https://github.com/kchua/handful-of-trials#overrides'
)
# MESA Arguments
parser.add_argument("--meta", action="store_true")
# Multitask Benchmark
parser.add_argument("--multitask", action="store_true")
# Save Replay Buffer (Data Generation for Training and Testing Datasets)
parser.add_argument('--save_replay', action="store_true")
# Iterations to adapt offline-trained agent to test set data (See Phase 2: MESA)
parser.add_argument(
    '--online_iters', type=int, default=500
)
# Size of Test Set (10K for HalfCheetah-Disabled)
parser.add_argument(
    '--test_size', type=int, default=10000
)
args = parser.parse_args()

# Schedule for the safety weight nu: anneal linearly from nu_start to nu_end
# over all episodes, or hold it constant at args.nu.
if args.nu_schedule:
    nu_schedule = linear_schedule(args.nu_start, args.nu_end, args.num_eps)
else:
    nu_schedule = linear_schedule(args.nu, args.nu, 0)

# TODO: clean this up later
# Shelf envs need more constraint data; only bump the untouched default.
if 'shelf' in args.env_name and args.num_constraint_transitions == 10000:
    args.num_constraint_transitions = 20000

# Fix: exist_ok=True replaces the racy exists()+makedirs() pattern.
os.makedirs(args.logdir, exist_ok=True)
logdir = os.path.join(
    args.logdir, '{}_SAC_{}_{}_{}'.format(
        datetime.datetime.now().strftime("%Y-%m-%d_%H-%M-%S"), args.env_name,
        args.policy, args.logdir_suffix))
print("LOGDIR: ", logdir)
writer = SummaryWriter(logdir=logdir)
# Persist the full argument namespace for reproducibility.
pickle.dump(args, open(os.path.join(logdir, "args.pkl"), "wb"))

agent, recovery_policy, env = experiment_setup(logdir, args)

# Memory
memory = ReplayMemory(args.replay_size)
recovery_memory = ConstraintReplayMemory(args.safe_replay_size)

# Training Loop
total_numsteps = 0
updates = 0
conditional_penalty = 0
task_demos = args.task_demos

constraint_demo_data, task_demo_data, obs_seqs, ac_seqs, constraint_seqs = get_constraint_demos(
    env, args)
# Phase 1 MESA: Load in multiple training datasets (N Datasets) into N Replay Buffers
if args.meta:
    # (num_datasets, demo directory, file-index offset) per environment.
    # The HalfCheetah demos are numbered from 1, hence its offset of 1.
    # This table replaces six copy-pasted if/elif branches with identical logic.
    _META_DEMO_SOURCES = {
        'maze': (100, "demos/maze_goals", 0),
        'simplepointbot0': (24, "demos/pointbot0_dynamics", 0),
        'simplepointbot1': (25, "demos/pointbot1_dynamics", 0),
        'cartpole': (20, "demos/cartpole_no_task", 0),
        'HalfCheetah-Disabled': (4, "demos/halfcheetah_disabled_no_task", 1),
        'Ant-Disabled': (3, "demos/ant_disabled_no_task", 0),
    }
    _spec = _META_DEMO_SOURCES.get(args.env_name)
    # Mirror the original behavior: unknown env names fall through silently.
    if _spec is not None:
        _num_datasets, _demo_dir, _offset = _spec
        inner_replay = [
            ConstraintReplayMemory(args.safe_replay_size)
            for _ in range(_num_datasets)
        ]
        # Inner/outer meta-learning loops share the same buffers.
        outer_replay = inner_replay
        for i in range(_num_datasets):
            data = pickle.load(
                open(
                    _demo_dir + "/constraint_demos_" + str(i + _offset) +
                    ".pkl", "rb"))
            for transition in data:
                inner_replay[i].push(*transition)
# Phase 1: MESA, Offline Training
num_constraint_violations = 0
# Train recovery policy and associated value function on demos
if not args.disable_offline_updates:
    if (args.use_recovery and not args.disable_learned_recovery
        ) or args.DGD_constraints or args.RCPO:
        if not args.vismpc_recovery:
            # Low-dimensional (non-visual) recovery: extract (s, a, s') arrays
            # from the demo tuples (index 0=state, 1=action, 3=next_state).
            demo_data_states = np.array([
                d[0]
                for d in constraint_demo_data[:args.num_constraint_transitions]
            ])
            demo_data_actions = np.array([
                d[1]
                for d in constraint_demo_data[:args.num_constraint_transitions]
            ])
            demo_data_next_states = np.array([
                d[3]
                for d in constraint_demo_data[:args.num_constraint_transitions]
            ])
            num_constraint_transitions = 0
            # Push ALL demo transitions into the safety-critic replay buffer;
            # index 2 of each tuple is the constraint-violation indicator.
            for transition in constraint_demo_data:
                recovery_memory.push(*transition)
                num_constraint_violations += int(transition[2])
                num_constraint_transitions += 1
                #if num_constraint_transitions == args.num_constraint_transitions:
                #break
            print("Number of Constraint Transitions: ",
                  num_constraint_transitions)
            print("Number of Constraint Violations: ",
                  num_constraint_violations)
            if args.env_name in [
                    'simplepointbot0', 'simplepointbot1', 'maze', 'image_maze'
            ]:
                plot = True
            else:
                plot = False
            if args.use_qvalue:
                # Pretrain the safety Q-critic; MESA uses the meta update
                # across the per-dataset buffers loaded above.
                for i in range(args.critic_safe_pretraining_steps):
                    if i % 100 == 0:
                        print("CRITIC SAFE UPDATE STEP: ", i)
                    if args.meta:
                        agent.safety_critic.meta_update_parameters(
                            inner_buffers = inner_replay,
                            outer_buffers = outer_replay,
                            memory=recovery_memory,
                            policy=agent.policy,
                            critic=agent.critic,
                            batch_size=min(args.batch_size,
                                           len(constraint_demo_data)))
                    else:
                        agent.safety_critic.update_parameters(
                            memory=recovery_memory,
                            policy=agent.policy,
                            critic=agent.critic,
                            batch_size=min(args.batch_size,
                                           len(constraint_demo_data)))
                # Goal-based runs swap in a fresh buffer seeded from a
                # goal-specific demo file after pretraining.
                if args.goal:
                    recovery_memory = ConstraintReplayMemory(args.safe_replay_size)
                    constraint_demo_data = pickle.load(open("demos/maze_goals/constraint_demos_-0.2_0.15.pkl", "rb"))[:10000]
                    for transition in constraint_demo_data:
                        recovery_memory.push(*transition)
            else:
                agent.train_safety_critic(
                    0, recovery_memory, agent.policy_sample, plot=0)
            # Only the model-based recovery variant needs a separately
            # trained recovery policy on the raw demo arrays.
            if not (args.ddpg_recovery or args.Q_sampling_recovery
                    or args.DGD_constraints or args.RCPO):
                train_recovery(
                    demo_data_states,
                    demo_data_actions,
                    demo_data_next_states,
                    epochs=50)
        else:
            # Pre-train vis dynamics model if needed
            if not args.load_vismpc:
                recovery_policy.train(
                    obs_seqs,
                    ac_seqs,
                    constraint_seqs,
                    recovery_memory,
                    num_train_steps=20000
                    if "maze" in args.env_name else 200000)
            # Process everything in recovery_memory to be encoded in order to train safety critic
            num_constraint_transitions = 0
            for transition in constraint_demo_data:
                recovery_memory.push(*transition)
                num_constraint_violations += int(transition[2])
                num_constraint_transitions += 1
                if num_constraint_transitions == args.num_constraint_transitions:
                    break
            print("Number of Constraint Transitions: ",
                  num_constraint_transitions)
            print("Number of Constraint Violations: ",
                  num_constraint_violations)
            if args.use_qvalue:
                # Pass encoding function to safety critic:
                agent.safety_critic.encoder = recovery_policy.get_encoding
                # Train safety critic using the encoder
                for i in range(args.critic_safe_pretraining_steps):
                    if i % 100 == 0:
                        print("CRITIC SAFE UPDATE STEP: ", i)
                    agent.safety_critic.update_parameters(
                        memory=recovery_memory,
                        policy=agent.policy,
                        critic=agent.critic,
                        batch_size=min(args.batch_size,
                                       len(constraint_demo_data)))
    # If use task demos, add them to memory and train agent
    if task_demos:
        num_task_transitions = 0
        for transition in task_demo_data:
            memory.push(*transition)
            num_task_transitions += 1
            if num_task_transitions == args.num_task_transitions:
                break
        print("Number of Task Transitions: ", num_task_transitions)
        # Pretrain the task critic on the demo transitions.
        for i in range(args.critic_pretraining_steps):
            if i % 100 == 0:
                print("Update: ", i)
            agent.update_parameters(
                memory,
                min(args.batch_size, num_task_transitions),
                updates,
                safety_critic=agent.safety_critic)
            updates += 1
# Bookkeeping for rollouts and safety statistics across training.
test_rollouts = []
train_rollouts = []
all_ep_data = []
num_viols = 0
num_successes = 0
viol_and_recovery = 0
viol_and_no_recovery = 0
total_viols = 0
# Phase 2: MESA
# Build the test-task constraint buffer used to adapt the offline-trained
# safety critic to the held-out task.
if args.multitask:
    recovery_memory = ConstraintReplayMemory(args.safe_replay_size)
    if args.env_name =='simplepointbot0':
        constraint_demo_data = get_random_transitions_pointbot0(w1=0.0, w2=0.0, discount=args.gamma_safe, num_transitions=args.num_constraint_transitions)[:200]
    elif args.env_name =='simplepointbot1':
        constraint_demo_data = get_random_transitions_pointbot1(w1=0.0, w2=0.0, discount=args.gamma_safe, num_transitions=args.num_constraint_transitions)[:200]
    elif args.env_name == 'maze':
        constraint_demo_data = pickle.load(open("demos/maze_goals/constraint_demos_test.pkl", "rb"))[:1000]
    elif args.env_name == "cartpole":
        data = pickle.load(open("demos/cartpole_no_task/constraint_demos_test.pkl", "rb"))
        import random
        data = random.sample(data, args.test_size)
        constraint_demo_data.extend(data)
    elif args.env_name == "HalfCheetah-Disabled":
        data = pickle.load(open("demos/halfcheetah_disabled_no_task/constraint_demos_5.pkl", "rb"))
        import random
        data = random.sample(data, args.test_size)
        constraint_demo_data.extend(data)
    elif args.env_name == "Ant-Disabled":
        data = pickle.load(open("demos/ant_disabled_no_task/constraint_demos_3.pkl", "rb"))
        import random
        data = random.sample(data, args.test_size)
        constraint_demo_data.extend(data)
    for transition in constraint_demo_data:
        recovery_memory.push(*transition)
# Data-generation mode starts from an empty buffer to record fresh rollouts.
if args.save_replay:
    recovery_memory = ConstraintReplayMemory(args.safe_replay_size)
# Adapt the (meta-)pretrained safety critic on the test-task buffer.
if args.meta or args.multitask:
    for i in range(args.online_iters):
        agent.safety_critic.update_parameters(
            memory=recovery_memory,
            policy=agent.policy,
            critic=agent.critic,
            batch_size=args.batch_size,
            plot=1)
# Phase 3: MESA (Rest is standard Recovery RL)
for i_episode in itertools.count(1):
    episode_reward = 0
    episode_steps = 0
    done = False
    state = env.reset()
    if args.env_name == 'reacher':
        recorder = VideoRecorder(
            env, osp.join(logdir, 'video_{}.mp4'.format(i_episode)))
    if args.cnn:
        state = process_obs(state, args.env_name)
    train_rollouts.append([])
    ep_states = [state]
    ep_actions = []
    ep_constraints = []
    rollouts = []
    while not done:
        if args.env_name == 'reacher':
            recorder.capture_frame()
        if len(memory) > args.batch_size:
            # Number of updates per step in environment
            for i in range(args.updates_per_step):
                # Update parameters of all the networks
                critic_1_loss, critic_2_loss, policy_loss, ent_loss, alpha = agent.update_parameters(
                    memory,
                    min(args.batch_size, len(memory)),
                    updates,
                    safety_critic=agent.safety_critic,
                    nu=nu_schedule(i_episode))
                # Online safety-critic updates, gated by pos_fraction so the
                # batch has enough violation examples.
                if args.use_qvalue and not args.disable_online_updates and len(
                        recovery_memory) > args.batch_size and (
                            num_viols + num_constraint_violations
                        ) / args.batch_size > args.pos_fraction:
                    agent.safety_critic.update_parameters(
                        memory=recovery_memory,
                        policy=agent.policy,
                        critic=agent.critic,
                        batch_size=args.batch_size,
                        plot=1)
                writer.add_scalar('loss/critic_1', critic_1_loss, updates)
                writer.add_scalar('loss/critic_2', critic_2_loss, updates)
                writer.add_scalar('loss/policy', policy_loss, updates)
                writer.add_scalar('loss/entropy_loss', ent_loss, updates)
                writer.add_scalar('entropy_temprature/alpha', alpha, updates)
                updates += 1
        # `action` is the task policy's proposal; `real_action` is what is
        # actually executed (possibly the recovery policy's override).
        action, real_action, recovery_used = get_action(
            state, env, agent, recovery_policy, args)
        next_state, reward, done, info = env.step(real_action)  # Step
        if 'constraint' not in info:
            info['reward'] = reward
            info['state'] = state
            info['next_state'] = next_state
            info["action"] = action
            info['constraint'] = 0
        info['recovery'] = recovery_used
        total_viols+= info['constraint']
        #print(reward)
        if args.cnn:
            next_state = process_obs(next_state, args.env_name)
        train_rollouts[-1].append(info)
        episode_steps += 1
        total_numsteps += 1
        episode_reward += reward
        # Optional reward-shaping baselines: constant penalty on violation,
        # or a penalty proportional to the safety critic's value.
        if args.constraint_reward_penalty != 0 and info['constraint']:
            reward -= args.constraint_reward_penalty
        if args.safety_critic_penalty > 0:
            critic_val = agent.safety_critic.get_value(
                torchify(state).unsqueeze(0),
                torchify(action).unsqueeze(0)).detach().cpu().numpy()[0, 0]
            reward -= args.safety_critic_penalty * critic_val
        # mask reflects env termination only; the horizon cut below is not
        # treated as a true terminal for bootstrapping.
        mask = float(not done)
        done = done or episode_steps == env._max_episode_steps
        if args.conditional:
            critic_val = agent.safety_critic.get_value(
                torchify(state).unsqueeze(0),
                torchify(action).unsqueeze(0)).detach().cpu().numpy()[0, 0]
            if not abs(critic_val - args.eps_condition) < 0.07:
                reward -= 0.5
        # Action relabeling: store the proposed action (not the executed
        # recovery action) unless explicitly disabled.
        if not args.disable_action_relabeling:
            memory.push(state, action, reward, next_state,
                        mask)  # Append transition to memory
        else:
            memory.push(state, real_action, reward, next_state,
                        mask)  # Append transition to memory
        rollouts.append([state, real_action, info['constraint'], next_state, mask])
        if args.use_recovery or args.DGD_constraints or args.RCPO:
            #recovery_memory.push(state, real_action, info['constraint'],
            #next_state, mask)
            if recovery_used and args.add_both_transitions:
                memory.push(state, real_action, reward, next_state,
                            mask)  # Append transition to memory
        state = next_state
        ep_states.append(state)
        ep_actions.append(real_action)
        ep_constraints.append([info['constraint']])
    # Episode finished: back-propagate a discounted Monte-Carlo constraint
    # return through the episode and bank it in the safety buffer.
    if args.use_recovery or args.save_replay:
        mc_reward =0
        discount=args.gamma_safe
        for transition in rollouts[::-1]:
            mc_reward = transition[2] + discount * mc_reward
            transition.append(mc_reward)
            recovery_memory.push(*transition)
    if args.env_name == 'reacher':
        recorder.capture_frame()
        recorder.close()
    if info['constraint']:
        num_viols += 1
        if info['recovery']:
            viol_and_recovery += 1
        else:
            viol_and_no_recovery += 1
    # Per-environment success heuristics based on the final-step reward.
    if "shelf" in args.env_name and info['reward'] > -0.5:
        num_successes += 1
    elif "point" in args.env_name and info['reward'] > -4:
        num_successes += 1
    elif "maze" in args.env_name and -info['reward'] < 0.03:
        num_successes += 1
    elif "cartpole" in args.env_name and episode_reward > 160:
        num_successes += 1
    if (args.use_recovery and not args.disable_learned_recovery
        ) and not args.disable_online_updates:
        all_ep_data.append({
            'obs': np.array(ep_states),
            'ac': np.array(ep_actions),
            'constraint': np.array(ep_constraints)
        })
        if i_episode % args.recovery_policy_update_freq == 0 and not (
                args.ddpg_recovery or args.Q_sampling_recovery
                or args.DGD_constraints):
            if not args.vismpc_recovery:
                train_recovery([ep_data['obs'] for ep_data in all_ep_data],
                               [ep_data['ac'] for ep_data in all_ep_data])
                all_ep_data = []
            else:
                recovery_policy.train_dynamics(
                    i_episode, recovery_memory
                )  # Tbh we could train this on everything collected, but are not right now
        if i_episode % args.critic_safe_update_freq == 0 and args.use_recovery:
            if args.env_name in [
                    'simplepointbot0', 'simplepointbot1', 'maze', 'image_maze'
            ]:
                plot = 0
            else:
                plot = False
            if args.use_value:
                agent.train_safety_critic(
                    i_episode,
                    recovery_memory,
                    agent.policy_sample,
                    training_iterations=50,
                    batch_size=100,
                    plot=plot)
    writer.add_scalar('reward/train', episode_reward, i_episode)
    writer.add_scalar('total_violations', total_viols, i_episode)
    print("Episode: {}, total numsteps: {}, episode steps: {}, reward: {}".
          format(i_episode, total_numsteps, episode_steps,
                 round(episode_reward, 2)))
    print_episode_info(train_rollouts[-1])
    print("Num Violations So Far: %d" % num_viols)
    print("Violations with Recovery: %d" % viol_and_recovery)
    print("Violations with No Recovery: %d" % viol_and_no_recovery)
    print("Num Successes So Far: %d" % num_successes)
    if total_numsteps > args.num_steps or i_episode > args.num_eps:
        break
    # Periodic evaluation: deterministic rollout, optional gif rendering.
    if i_episode % 10 == 0 and args.eval is True:
        avg_reward = 0.
        episodes = 1
        for j in range(episodes):
            test_rollouts.append([])
            state = env.reset()
            # TODO; clean up the following code
            if 'maze' in args.env_name:
                im_list = [env._get_obs(images=True)]
            elif 'shelf' in args.env_name:
                im_list = [env.render().squeeze()]
            elif 'cartpole' in args.env_name:
                im_list = [env.get_image()]
            if args.cnn:
                state = process_obs(state, args.env_name)
            episode_reward = 0
            episode_steps = 0
            done = False
            while not done:
                action, real_action, recovery_used = get_action(
                    state, env, agent, recovery_policy, args, train=False)
                next_state, reward, done, info = env.step(real_action)  # Step
                info['recovery'] = recovery_used
                done = done or episode_steps == env._max_episode_steps
                # TODO: clean up the following code
                if 'maze' in args.env_name:
                    im_list.append(env._get_obs(images=True))
                elif 'shelf' in args.env_name:
                    im_list.append(env.render().squeeze())
                elif 'cartpole' in args.env_name:
                    im_list.append(env.get_image())
                if args.cnn:
                    next_state = process_obs(next_state, args.env_name)
                test_rollouts[-1].append(info)
                episode_reward += reward
                episode_steps += 1
                state = next_state
            print_episode_info(test_rollouts[-1])
            avg_reward += episode_reward
            if 'maze' in args.env_name or 'shelf' in args.env_name or 'cartpole' in args.env_name:
                npy_to_gif(
                    im_list,
                    osp.join(logdir, "test_" + str(i_episode) + "_" + str(j)))
        # Save Replay Buffer
        if "HalfCheetah" in args.env_name and args.save_replay:
            with open("demos/halfcheetah_disabled_no_task/constraint_demos_5" + ".pkl", 'wb') as handle:
                pickle.dump(recovery_memory.buffer, handle)
        elif "cartpole" in args.env_name and args.save_replay:
            with open("demos/cartpole_no_task/constraint_demos_test" + ".pkl", 'wb') as handle:
                pickle.dump(recovery_memory.buffer, handle)
        elif "Ant-Disabled" in args.env_name and args.save_replay:
            with open("demos/ant_disabled_no_task/constraint_demos_3" + ".pkl", 'wb') as handle:
                pickle.dump(recovery_memory.buffer, handle)
        avg_reward /= episodes
        writer.add_scalar('avg_reward/test', avg_reward, i_episode)
        print("----------------------------------------")
        print("Test Episodes: {}, Avg. Reward: {}".format(
            episodes, round(avg_reward, 2)))
        print("----------------------------------------")
    dump_logs(test_rollouts, train_rollouts, logdir)
env.close()
|
{"/supplement_plots.py": ["/plotting_utils.py"], "/analyze_runs_brijen.py": ["/plotting_utils.py"], "/gen_maze_demos.py": ["/env/maze.py", "/env/mazes.py"], "/analyze_runs_ashwin.py": ["/plotting_utils.py"], "/main.py": ["/sac.py", "/gen_pointbot0_demos.py", "/env/cartpole.py", "/env/half_cheetah_disabled.py", "/env/ant_disabled.py"], "/env/image_maze.py": ["/env/maze_const_images.py"], "/constraint.py": ["/utils.py"], "/analyze_runs_michael.py": ["/plotting_utils.py"], "/env/maze.py": ["/env/maze_const.py"], "/sac.py": ["/utils.py", "/constraint.py", "/run_multitask.py"], "/gen_pointbot_demos.py": ["/env/simplepointbot1.py"], "/env/mazes.py": ["/env/maze_const.py", "/env/maze.py"], "/gen_cartpole_demos.py": ["/env/cartpole.py"]}
|
29,157,558
|
JiahaoYao/mesa-safe-rl
|
refs/heads/main
|
/env/ant_disabled.py
|
import numpy as np
from learning_to_adapt.utils.serializable import Serializable
from learning_to_adapt.envs.mujoco_env import MujocoEnv
import os
from gym.utils import seeding
HORIZON = 1000
def transition_function(num_transition, discount=0.99):
    """Collect random-action transitions from AntEnv for offline training.

    Rolls out a uniform-random policy and records
    [state, action, constraint, next_state, mask] per step, where mask is
    ``not constraint``. When an episode ends, a Monte-Carlo discounted
    constraint return is appended to each of its transitions before they
    are banked. Collection stops once more than ``num_transition``
    transitions have been gathered.

    Fix: the episode cutoff used a hardcoded 1000, duplicating the module
    constant HORIZON that AntEnv._max_episode_steps already uses.
    """
    env = AntEnv()
    transitions = []
    rollouts = []
    done = True  # force a reset on the first loop iteration
    steps = 0
    while True:
        if done:
            steps = 0
            if len(rollouts):
                # Propagate the discounted constraint signal backwards
                # through the finished episode.
                mc_reward = 0
                for transition in rollouts[::-1]:
                    mc_reward = transition[2] + discount * mc_reward
                    transition.append(mc_reward)
                transitions.extend(rollouts)
                if len(transitions) > num_transition:
                    break
            # Reset
            state = env.reset()
            rollouts = []
        action = env.action_space.sample()
        next_state, reward, _, info = env.step(action)
        steps += 1
        constraint = info['constraint']
        done = steps == HORIZON
        rollouts.append([state, action, constraint, next_state,
                         not constraint])
        state = next_state
    return transitions
class AntEnv(MujocoEnv, Serializable):
    """Ant locomotion environment with an optionally crippled leg.

    With ``task='cripple'`` one leg is disabled on every reset: its actuators
    are zeroed via ``cripple_mask``, its geoms are shrunk/recolored, and a
    contact between the torso and the floor counts as a constraint violation
    (catastrophe).
    """

    def __init__(self, task='cripple', reset_every_episode=True):
        """Build the MuJoCo ant and snapshot initial geom properties.

        Args:
            task: 'cripple' to disable one leg per reset, or None/'None'.
            reset_every_episode: re-apply the crippling on every reset.
        """
        Serializable.quick_init(self, locals())
        self.cripple_mask = None
        self.reset_every_episode = reset_every_episode
        self._max_episode_steps = HORIZON
        self.first = True
        self.task = task
        # Expose the module-level random data collector on the instance.
        self.transition_function = transition_function
        MujocoEnv.__init__(self, os.path.join(os.path.abspath(os.path.dirname(__file__)), "assets", "ant.xml"))
        task = None if task == 'None' else task
        self.cripple_mask = np.ones(self.action_space.shape)
        # Snapshots of the pristine model so reset_task can restore/modify it.
        self._init_geom_rgba = self.model.geom_rgba.copy()
        self._init_geom_contype = self.model.geom_contype.copy()
        self._init_geom_size = self.model.geom_size.copy()
        self._init_geom_pos = self.model.geom_pos.copy()
        self.dt = self.model.opt.timestep
        assert task in [None, 'cripple']
        self.crippled_leg = 0
        self.steps = 0

    def seed(self, seed=None):
        """Seed the environment's numpy RNG; returns the seed in a list."""
        self.np_random, seed = seeding.np_random(seed)
        return [seed]

    def get_current_obs(self):
        """Return the flat observation vector.

        Concatenates qpos, qvel, torso rotation matrix and torso COM, then
        appends the shaped forward reward term and the catastrophe flag.
        """
        obs = np.concatenate([
            self.model.data.qpos.flat,
            self.model.data.qvel.flat,
            self.get_body_xmat("torso").flat,
            self.get_body_com("torso"),
        ]).reshape(-1)
        obs = np.concatenate([obs,  # Reward
                              [self.get_body_comvel("torso")[0] + 0.05],
                              # Catastrophe
                              self.check_catastrophe(),])
        return obs

    def check_catastrophe(self):
        """Return [1] if the torso touches the floor this step, else [0]."""
        for i in range(self.data.ncon):
            contact = self.data.contact[i]
            name_set = set()
            name_set.add(self.model.geom_names[contact.geom1])
            name_set.add(self.model.geom_names[contact.geom2])
            if 'floor' in name_set and 'torso_geom' in name_set:
                return [1]
        return [0]

    def step(self, action, early_stop=True):
        """Apply (masked) action, advance physics, return (obs, r, done, info).

        Reward = forward COM velocity - control cost + survive bonus; the
        episode terminates on catastrophe when early_stop is True. info
        carries the constraint flag plus the full transition for logging.
        """
        obs = self.get_current_obs()
        # Zero out actuators of the crippled leg.
        if self.cripple_mask is not None:
            action = self.cripple_mask * action
        self.forward_dynamics(action)
        comvel = self.get_body_comvel("torso")
        forward_reward = comvel[0]
        lb, ub = self.action_space.low[0], self.action_space.high[0]
        scaling = (ub - lb) * 0.5
        ctrl_cost = 0.5 * 1e-2 * np.sum(np.square(action / scaling))  # 0
        self.steps += 1
        contact_cost = 0
        survive_reward = 0.05
        reward = forward_reward - ctrl_cost - contact_cost + survive_reward
        done = False
        next_obs = self.get_current_obs()
        catastrophe = self.check_catastrophe()[0]
        if early_stop:
            done = catastrophe
        info = {
            "constraint": catastrophe,
            "reward": reward,
            "action": action,
            "state": obs,
            "next_state": next_obs,
        }
        return next_obs, reward, done, info

    def reset_mujoco(self, init_state=None):
        """Reset the simulator; re-cripple the leg on every episode but the first."""
        super(AntEnv, self).reset_mujoco(init_state=init_state)
        if self.reset_every_episode and not self.first:
            self.reset_task()
        if self.first:
            self.first = False

    def reset(self, mode='train'):
        """Reset step counter and simulator state; returns the initial obs."""
        self.steps = 0
        self.mode = mode
        return MujocoEnv.reset(self)

    '''
    our "front" is in +x direction, to the right side of screen

    LEG 4 (they call this back R)
    action0: front-right leg, top joint
    action1: front-right leg, bottom joint

    LEG 1 (they call this front L)
    action2: front-left leg, top joint
    action3: front-left leg, bottom joint

    LEG 2 (they call this front R)
    action4: back-left leg, top joint
    action5: back-left leg, bottom joint

    LEG 3 (they call this back L)
    action6: back-right leg, top joint
    action7: back-right leg, bottom joint

    geom_names has
    ['floor','torso_geom',
    'aux_1_geom','left_leg_geom','left_ankle_geom', --1
    'aux_2_geom','right_leg_geom','right_ankle_geom', --2
    'aux_3_geom','back_leg_geom','third_ankle_geom', --3
    'aux_4_geom','rightback_leg_geom','fourth_ankle_geom'] --4
    '''

    def reset_task(self, value=None):
        """Cripple one leg: zero its actuators, recolor it red, shrink its geoms.

        See the leg/actuator map in the string above for index meanings.
        """
        if self.task == 'cripple':
            # Pick which leg to remove (0 1 2 are train... 3 is test)
            value = 3
            self.crippled_leg = value if value is not None else np.random.randint(0, 3)

            # Pick which actuators to disable
            self.cripple_mask = np.ones(self.action_space.shape)
            if self.crippled_leg == 0:
                self.cripple_mask[2] = 0
                self.cripple_mask[3] = 0
            elif self.crippled_leg == 1:
                self.cripple_mask[4] = 0
                self.cripple_mask[5] = 0
            elif self.crippled_leg == 2:
                self.cripple_mask[6] = 0
                self.cripple_mask[7] = 0
            elif self.crippled_leg == 3:
                self.cripple_mask[0] = 0
                self.cripple_mask[1] = 0

            # Make the removed leg look red
            geom_rgba = self._init_geom_rgba.copy()
            if self.crippled_leg == 0:
                geom_rgba[3, :3] = np.array([1, 0, 0])
                geom_rgba[4, :3] = np.array([1, 0, 0])
            elif self.crippled_leg == 1:
                geom_rgba[6, :3] = np.array([1, 0, 0])
                geom_rgba[7, :3] = np.array([1, 0, 0])
            elif self.crippled_leg == 2:
                geom_rgba[9, :3] = np.array([1, 0, 0])
                geom_rgba[10, :3] = np.array([1, 0, 0])
            elif self.crippled_leg == 3:
                geom_rgba[12, :3] = np.array([1, 0, 0])
                geom_rgba[13, :3] = np.array([1, 0, 0])
            self.model.geom_rgba = geom_rgba

            # Make the removed leg not affect anything
            temp_size = self._init_geom_size.copy()
            temp_pos = self._init_geom_pos.copy()

            if self.crippled_leg == 0:
                # Top half
                temp_size[3, 0] = temp_size[3, 0]/2
                temp_size[3, 1] = temp_size[3, 1]/2
                # Bottom half
                temp_size[4, 0] = temp_size[4, 0]/2
                temp_size[4, 1] = temp_size[4, 1]/2
                temp_pos[4, :] = temp_pos[3, :]

            elif self.crippled_leg == 1:
                # Top half
                temp_size[6, 0] = temp_size[6, 0]/2
                temp_size[6, 1] = temp_size[6, 1]/2
                # Bottom half
                temp_size[7, 0] = temp_size[7, 0]/2
                temp_size[7, 1] = temp_size[7, 1]/2
                temp_pos[7, :] = temp_pos[6, :]

            elif self.crippled_leg == 2:
                # Top half
                temp_size[9, 0] = temp_size[9, 0]/2
                temp_size[9, 1] = temp_size[9, 1]/2
                # Bottom half
                temp_size[10, 0] = temp_size[10, 0]/2
                temp_size[10, 1] = temp_size[10, 1]/2
                temp_pos[10, :] = temp_pos[9, :]

            elif self.crippled_leg == 3:
                # Top half
                temp_size[12, 0] = temp_size[12, 0]/2
                temp_size[12, 1] = temp_size[12, 1]/2
                # Bottom half
                temp_size[13, 0] = temp_size[13, 0]/2
                temp_size[13, 1] = temp_size[13, 1]/2
                temp_pos[13, :] = temp_pos[12, :]

            self.model.geom_size = temp_size
            self.model.geom_pos = temp_pos

        elif self.task is None:
            pass

        else:
            raise NotImplementedError

        self.model.forward()
"""
if __name__ == '__main__':
env = AntEnv(task='cripple')
while True:
env.reset()
for _ in range(1000):
env.step(env.action_space.sample())
env.render()
"""
|
{"/supplement_plots.py": ["/plotting_utils.py"], "/analyze_runs_brijen.py": ["/plotting_utils.py"], "/gen_maze_demos.py": ["/env/maze.py", "/env/mazes.py"], "/analyze_runs_ashwin.py": ["/plotting_utils.py"], "/main.py": ["/sac.py", "/gen_pointbot0_demos.py", "/env/cartpole.py", "/env/half_cheetah_disabled.py", "/env/ant_disabled.py"], "/env/image_maze.py": ["/env/maze_const_images.py"], "/constraint.py": ["/utils.py"], "/analyze_runs_michael.py": ["/plotting_utils.py"], "/env/maze.py": ["/env/maze_const.py"], "/sac.py": ["/utils.py", "/constraint.py", "/run_multitask.py"], "/gen_pointbot_demos.py": ["/env/simplepointbot1.py"], "/env/mazes.py": ["/env/maze_const.py", "/env/maze.py"], "/gen_cartpole_demos.py": ["/env/cartpole.py"]}
|
29,157,559
|
JiahaoYao/mesa-safe-rl
|
refs/heads/main
|
/gen_dynamic_shelf_demos.py
|
import argparse
import datetime
import gym
import numpy as np
import itertools
import torch
from tensorboardX import SummaryWriter
import cv2
import os
import moviepy.editor as mpy
from env.shelf_dynamic_env import ShelfDynamicEnv
import pickle
import time
# Demo-collection settings shared across this script.
HYPERPARAMS = {
    'T': 25,  # length of each episode
    'image_height': 48,  # agent-side observation height
    'image_width': 64,  # agent-side observation width
}
def npy_to_gif(im_list, filename, fps=4):
    """Encode a list of image frames as a GIF written to `filename + '.gif'`."""
    mpy.ImageSequenceClip(im_list, fps=fps).write_gif(filename + '.gif')
def process_obs(obs):
    """Resize a raw HWC observation to the agent's resolution and return it CHW."""
    target_size = (HYPERPARAMS['image_width'], HYPERPARAMS['image_height'])
    resized = cv2.resize(obs, target_size, interpolation=cv2.INTER_AREA)
    return np.transpose(resized, (2, 0, 1))
# ---------------------------------------------------------------------------
# Command-line interface for demo collection.
# NOTE(review): some help strings disagree with the actual defaults
# (start_steps default is 5000 but help says 10000; constraint_penalty
# default is 1 but help says 10) — confirm which values are intended.
# ---------------------------------------------------------------------------
parser = argparse.ArgumentParser(description='PyTorch Soft Actor-Critic Args')
parser.add_argument(
    '--env-name',
    default="ShelfEnv",
    help='Mujoco Gym environment (default: ShelfEnv')
parser.add_argument(
    '--start_steps',
    type=int,
    default=5000,
    metavar='N',  # TODO: think about what this is approperiate to be...maybe even lower, or make it higher
    # so can explore sufficiently after the demos are over??
    help='Steps sampling random actions (default: 10000)')
parser.add_argument(
    '--num_demos',
    type=int,
    default=250,
    metavar='N',
    help='num demos (default: 250)')
parser.add_argument(
    '--seed',
    type=int,
    default=123456,
    metavar='N',
    help='random seed (default: 123456)')
parser.add_argument(
    '--cnn', action="store_true", help='visual observations (default: False)')
parser.add_argument(
    '--cuda', action="store_true", help='run on CUDA (default: False)')
parser.add_argument(
    '--demo_filter_constraints',
    action="store_true",
    help='make sure all demos satisfy constraints (default: False)')
parser.add_argument('--demo_quality', default='high')
parser.add_argument('--dense_reward', action="store_true")
parser.add_argument('--fixed_env', action="store_true")
# gt_state: use ground-truth state observations instead of rendered images.
parser.add_argument('--gt_state', action="store_true")
parser.add_argument('--early_termination', action="store_true")
parser.add_argument('--early_termination_success', action="store_true")
parser.add_argument(
    '--use_constraint_penalty',
    action="store_true",
    help='use constraints penalty (default: False)')
parser.add_argument(
    '--constraint_penalty',
    type=int,
    default=1,
    metavar='N',
    help='constraint penalty (default: 10)')
# constraint_demos: collect constraint-violation data instead of task demos.
parser.add_argument('--constraint_demos', action="store_true")
# save_rollouts: save per-episode rollout lists instead of a flat transition list.
parser.add_argument('--save_rollouts', action="store_true")
args = parser.parse_args()
# Environment
# Build the dynamic shelf environment and seed all RNGs so demo collection
# is reproducible.
env = gym.make('ShelfDynamic-v0')
torch.manual_seed(args.seed)
np.random.seed(args.seed)
env.seed(args.seed)
print("ENV STUFF")
print("OBSERVATION SPACE", env.observation_space)
print("ACTION SPACE", env.action_space.low)
print("ACTION SPACE", env.action_space.high)
# Training Loop
total_numsteps = 0
updates = 0
demo_transitions = []  # flat list of (s, a, r_or_constraint, s', mask) tuples
demo_rollouts = []  # one list of transition tuples per episode
i_demos = 0
start = time.time()
# Demo-collection loop: roll out the scripted expert until `num_demos`
# acceptable episodes have been gathered.
while i_demos < args.num_demos:
    state = env.reset()
    demo_rollouts.append([])
    if not args.gt_state:
        state = process_obs(state)
    episode_steps = 0
    episode_reward = 0
    episode_constraints = 0
    done = False
    t = 0
    im_list = [env.render().squeeze()]
    while not done:
        if args.constraint_demos:
            # For constraint demos: with prob 0.6 use the expert action for a
            # fixed early timestep (idx 2) with larger noise, otherwise the
            # current timestep's expert action.
            time_seed = np.random.random()
            if time_seed < 0.6:
                idx = 2
            else:
                idx = t
            action = env.expert_action(idx, noise_std=0.05)
        else:
            action = env.expert_action(t, noise_std=0.005)
        next_state, reward, done, info = env.step(action)  # Step
        im_list.append(env.render().squeeze())
        if episode_steps == env._max_episode_steps:
            done = True
        # if done and reward > 0:
        #     reward = 5
        #     info['reward'] = 5
        constraint = info['constraint']
        if args.use_constraint_penalty and constraint:
            reward += args.constraint_penalty * (-int(constraint))
        episode_steps += 1
        total_numsteps += 1
        episode_reward += reward
        episode_constraints += constraint
        mask = float(not done)  # 0.0 at terminal transitions, else 1.0
        if not args.gt_state:
            next_state = process_obs(next_state)
        if args.constraint_demos:
            # if constraint:
            demo_transitions.append((state, action, constraint, next_state,
                                     mask))
            demo_rollouts[-1].append((state, action, constraint, next_state,
                                      mask))
            # else:
            #     if np.random.random() < 0.1:
            #         demo_transitions.append( (state, action, constraint, next_state, mask) )
            #         demo_rollouts[-1].append( (state, action, constraint, next_state, mask) )
        else:
            # NOTE(review): the flat list stores the reward while the rollout
            # list stores the constraint — confirm this asymmetry is intended.
            demo_transitions.append((state, action, reward, next_state, mask))
            demo_rollouts[-1].append((state, action, constraint, next_state,
                                      mask))
        state = next_state
        t += 1
    # if i_demos % 100 == 0:
    print("Demo #: ", i_demos)
    print("TIME: ", time.time() - start)
    print("DEMO EPISODE REWARD", episode_reward)
    print("DEMO EPISODE CONSTRAINTS", episode_constraints)
    print("DEMO EPISODE STEPS", episode_steps)
    if not args.constraint_demos:
        # Keep only successful, constraint-free task demos.
        if episode_reward > -25 and episode_constraints == 0:
            # npy_to_gif(im_list, "out_{}".format(i_demos))
            i_demos += 1
        else:
            # Remove last rollout if it doesn't do the task...
            # NOTE(review): if t were 0 here, `[:-0]` would clear the whole
            # list — relies on every episode taking at least one step.
            demo_transitions = demo_transitions[:-t]
            demo_rollouts.pop()
    else:
        i_demos += 1
# Serialize the collected demos under demos/shelf_dynamic/. The filename
# encodes the demo type (constraint vs task), whether data is stored as
# rollouts, and whether observations are images.
if args.constraint_demos:
    f_name = "constraint_demos"
    if args.save_rollouts:
        f_name += "_rollouts"
    if not args.gt_state:
        f_name += "_images"
    f_name += ".pkl"
    if not args.save_rollouts:
        pickle.dump(demo_transitions,
                    open(os.path.join("demos/shelf_dynamic", f_name), "wb"))
    else:
        pickle.dump(demo_rollouts,
                    open(os.path.join("demos/shelf_dynamic", f_name), "wb"))
else:
    f_name = "task_demos"
    if args.save_rollouts:
        f_name += "_rollouts"
    if not args.gt_state:
        f_name += "_images"
    f_name += ".pkl"
    if not args.save_rollouts:
        pickle.dump(demo_transitions,
                    open(os.path.join("demos/shelf_dynamic", f_name), "wb"))
    else:
        pickle.dump(demo_rollouts,
                    open(os.path.join("demos/shelf_dynamic", f_name), "wb"))
|
{"/supplement_plots.py": ["/plotting_utils.py"], "/analyze_runs_brijen.py": ["/plotting_utils.py"], "/gen_maze_demos.py": ["/env/maze.py", "/env/mazes.py"], "/analyze_runs_ashwin.py": ["/plotting_utils.py"], "/main.py": ["/sac.py", "/gen_pointbot0_demos.py", "/env/cartpole.py", "/env/half_cheetah_disabled.py", "/env/ant_disabled.py"], "/env/image_maze.py": ["/env/maze_const_images.py"], "/constraint.py": ["/utils.py"], "/analyze_runs_michael.py": ["/plotting_utils.py"], "/env/maze.py": ["/env/maze_const.py"], "/sac.py": ["/utils.py", "/constraint.py", "/run_multitask.py"], "/gen_pointbot_demos.py": ["/env/simplepointbot1.py"], "/env/mazes.py": ["/env/maze_const.py", "/env/maze.py"], "/gen_cartpole_demos.py": ["/env/cartpole.py"]}
|
29,157,560
|
JiahaoYao/mesa-safe-rl
|
refs/heads/main
|
/env/image_maze.py
|
import os
import pickle
import matplotlib.pyplot as plt
import os.path as osp
import numpy as np
from gym import Env
from gym import utils
from gym.spaces import Box
from mujoco_py import load_model_from_path, MjSim
import moviepy.editor as mpy
from .maze_const_images import *
import cv2
def process_action(a):
    """Clamp an action to the allowed force range [-MAX_FORCE, MAX_FORCE]."""
    return np.clip(a, a_min=-MAX_FORCE, a_max=MAX_FORCE)
def process_obs(obs):
    """Convert an HWC image observation to CHW layout."""
    chw = np.transpose(obs, (2, 0, 1))
    return chw
def npy_to_gif(im_list, filename, fps=4):
    """Write the frames in `im_list` to `filename + '.gif'` at `fps` frames/s."""
    mpy.ImageSequenceClip(im_list, fps=fps).write_gif(filename + '.gif')
def get_random_transitions(num_transitions,
                           images=False,
                           save_rollouts=False,
                           task_demos=False):
    """Collect offline transitions from a fresh MazeImageNavigation env.

    70% of the steps use uniformly random actions and 30% use the scripted
    expert. A fresh episode starts every 20 steps; the reset difficulty is
    'e' (easy) with probability 0.4, otherwise 'm' (medium).

    Args:
        num_transitions: total number of env steps to record.
        images: accepted for interface compatibility; unused here.
        save_rollouts: if True, return per-episode rollout lists instead of
            the flat tuple of arrays.
        task_demos: accepted for interface compatibility; unused here.

    Returns:
        `rollouts` (object array of per-episode transition lists) when
        `save_rollouts` is True, otherwise
        (transitions, obs_seqs, ac_seqs, constraint_seqs).

    BUG FIX: the original body contained `if images: im_state = im_next_state`
    in both loops, referencing an undefined name `im_next_state`; that would
    raise NameError whenever images=True. The dead branches are removed.
    """
    env = MazeImageNavigation()
    transitions = []
    num_constraints = 0
    total = 0
    rollouts = []
    obs_seqs = []
    ac_seqs = []
    constraint_seqs = []
    # Phase 1: random actions (70% of the budget).
    for i in range(int(0.7 * num_transitions)):
        if i % 500 == 0:
            print("DEMO: ", i)
        if i % 20 == 0:
            sample = np.random.uniform(0, 1, 1)[0]
            if sample < 0.4:  # maybe make 0.2 to 0.3
                mode = 'e'
            else:
                mode = 'm'
            state = env.reset(mode, check_constraint=False)
            if not GT_STATE:
                state = process_obs(state)
            rollouts.append([])
            obs_seqs.append([state])
            ac_seqs.append([])
            constraint_seqs.append([])
        action = env.action_space.sample()
        next_state, reward, done, info = env.step(action)
        if not GT_STATE:
            next_state = process_obs(next_state)
        constraint = info['constraint']
        rollouts[-1].append((state, action, constraint, next_state, not done))
        obs_seqs[-1].append(next_state)
        constraint_seqs[-1].append(constraint)
        ac_seqs[-1].append(action)
        transitions.append((state, action, constraint, next_state, not done))
        total += 1
        num_constraints += int(constraint)
        state = next_state
    # Phase 2: scripted expert actions (remaining 30%).
    for i in range(int(0.3 * num_transitions)):
        if i % 500 == 0:
            print("DEMO: ", i)
        if i % 20 == 0:
            sample = np.random.uniform(0, 1, 1)[0]
            if sample < 0.4:  # maybe make 0.2 to 0.3
                mode = 'e'
            else:
                mode = 'm'
            state = env.reset(mode, check_constraint=False)
            if not GT_STATE:
                state = process_obs(state)
            rollouts.append([])
            obs_seqs.append([state])
            ac_seqs.append([])
            constraint_seqs.append([])
        action = env.expert_action()
        next_state, reward, done, info = env.step(action)
        if not GT_STATE:
            next_state = process_obs(next_state)
        constraint = info['constraint']
        rollouts[-1].append((state, action, constraint, next_state, not done))
        obs_seqs[-1].append(next_state)
        constraint_seqs[-1].append(constraint)
        ac_seqs[-1].append(action)
        transitions.append((state, action, constraint, next_state, not done))
        total += 1
        num_constraints += int(constraint)
        state = next_state
    print("data dist", total, num_constraints)
    rollouts = np.array(rollouts)
    for i in range(len(ac_seqs)):
        ac_seqs[i] = np.array(ac_seqs[i])
    for i in range(len(obs_seqs)):
        obs_seqs[i] = np.array(obs_seqs[i])
    for i in range(len(constraint_seqs)):
        constraint_seqs[i] = np.array(constraint_seqs[i])
    ac_seqs = np.array(ac_seqs)
    obs_seqs = np.array(obs_seqs)
    constraint_seqs = np.array(constraint_seqs)
    print("ACS SHAPE", ac_seqs.shape)
    print("OBS SHAPE", obs_seqs.shape)
    print("CONSTRAINT SHAPE", constraint_seqs.shape)
    if save_rollouts:
        return rollouts
    else:
        return transitions, obs_seqs, ac_seqs, constraint_seqs
class MazeImageNavigation(Env, utils.EzPickle):
    """MuJoCo point-mass maze navigation task.

    The agent applies 2-D forces to a point mass that must reach a fixed
    goal at (0.25, 0.25). Observations are either the (x, y) position or a
    64x64 rendered image, depending on the GT_STATE constant. A constraint
    violation is flagged when the simulator reports more than 3 contacts.
    """

    def __init__(self):
        utils.EzPickle.__init__(self)
        self.hist = self.cost = self.done = self.time = self.state = None
        dirname = os.path.dirname(__file__)
        filename = os.path.join(dirname, 'simple_maze_images.xml')
        self.sim = MjSim(load_model_from_path(filename))
        self.horizon = HORIZON
        self._max_episode_steps = self.horizon
        self.transition_function = get_random_transitions
        self.steps = 0
        self.images = not GT_STATE
        self.action_space = Box(-MAX_FORCE * np.ones(2),
                                MAX_FORCE * np.ones(2))
        # NOTE(review): duplicate assignment — transition_function was
        # already set a few lines above.
        self.transition_function = get_random_transitions
        obs = self._get_obs()
        # print("OBS", obs.shape)
        # print("OBS", np.max(obs), np.min(obs))
        # cv2.imwrite('maze.jpg', 255*obs)
        # assert(False)
        self.dense_reward = DENSE_REWARD
        if self.images:
            # NOTE(review): for image observations this stores a plain shape
            # tuple rather than a gym Space — confirm downstream code expects
            # that.
            self.observation_space = obs.shape
        else:
            self.observation_space = Box(-0.3, 0.3, shape=obs.shape)
        self.gain = 5  # proportional gain for the scripted expert controller
        self.goal = np.zeros((2, ))
        # self.goal[0] = np.random.uniform(0.15, 0.27)
        # self.goal[1] = np.random.uniform(-0.27, 0.27)
        self.goal[0] = 0.25
        self.goal[1] = 0.25

    def step(self, action):
        """Apply `action` (clipped to the force limits) and advance the sim.

        If the pre-step contact check already flags a constraint, the
        simulation is not stepped. Returns (obs, reward, done, info) where
        info carries the constraint flag plus the raw transition pieces.
        """
        action = process_action(action)
        self.sim.data.qvel[:] = 0  # velocities are zeroed: quasi-static control
        self.sim.data.ctrl[:] = action
        cur_obs = self._get_obs()
        constraint = int(self.sim.data.ncon > 3)
        if not constraint:
            for _ in range(500):
                self.sim.step()
        obs = self._get_obs()
        self.sim.data.qvel[:] = 0
        self.steps += 1
        constraint = int(self.sim.data.ncon > 3)
        # Episode ends on horizon, reaching the goal, or a constraint violation.
        self.done = self.steps >= self.horizon or (self.get_distance_score() <
                                                   GOAL_THRESH) or constraint
        if not self.dense_reward:
            # Sparse: -1 until within GOAL_THRESH of the goal.
            reward = -(self.get_distance_score() > GOAL_THRESH).astype(float)
        else:
            reward = -self.get_distance_score()
        # if self.get_distance_score() < GOAL_THRESH:
        #     reward += 10
        info = {
            "constraint": constraint,
            "reward": reward,
            "state": cur_obs,
            "next_state": obs,
            "action": action
        }
        return obs, reward, self.done, info

    def _get_obs(self, images=False):
        """Return the current observation.

        With images=True (or when self.images is set) returns a 64x64 resize
        of a cropped "cam0" render; otherwise the (x, y) position.
        """
        if images:
            return cv2.resize(
                self.sim.render(64, 64, camera_name="cam0")[20:64, 20:64],
                (64, 64),
                interpolation=cv2.INTER_AREA)
        #joint poisitions and velocities
        state = np.concatenate(
            [self.sim.data.qpos[:].copy(), self.sim.data.qvel[:].copy()])
        if not self.images:
            return state[:2]  # State is just (x, y) now
        #get images
        ims = cv2.resize(
            self.sim.render(64, 64, camera_name="cam0")[20:64, 20:64],
            (64, 64),
            interpolation=cv2.INTER_AREA)
        return ims

    def reset(self, difficulty='m', check_constraint=True, pos=()):
        """Reset the point mass and wall layout.

        difficulty 'e' samples an easy start (x in [0.15, 0.22]); 'm' samples
        a medium start near x=0. An explicit `pos` overrides sampling. If the
        sampled start is already in collision and check_constraint is True,
        reset recursively retries.
        """
        if len(pos):
            self.sim.data.qpos[0] = pos[0]
            self.sim.data.qpos[1] = pos[1]
        else:
            if difficulty == 'e':
                self.sim.data.qpos[0] = np.random.uniform(0.15, 0.22)
            elif difficulty == 'm':
                self.sim.data.qpos[0] = np.random.uniform(-0.04, 0.04)
            self.sim.data.qpos[1] = np.random.uniform(0.0, 0.22)
        self.steps = 0
        # self.sim.data.qpos[0] = 0.25
        # self.sim.data.qpos[1] = 0
        # print(self._get_obs())
        # print("GOT HERE")
        # assert(False)
        # Randomize wal positions
        # w1 = -0#np.random.uniform(-0.2, 0.2)
        # w2 = 0 #np.random.uniform(-0.2, 0.2)
        # # print(self.sim.model.geom_pos[:])
        # # print(self.sim.model.geom_pos[:].shape)
        # self.sim.model.geom_pos[5, 1] = 0.4 + w1
        # self.sim.model.geom_pos[7, 1] = -0.25 + w1
        # self.sim.model.geom_pos[6, 1] = 0.4 + w2
        # self.sim.model.geom_pos[8, 1] = -0.25 + w2
        w1 = -0  #np.random.uniform(-0.2, 0.2)
        w2 = 0.08  #np.random.uniform(-0.2, 0.2)
        # print(self.sim.model.geom_pos[:])
        # print(self.sim.model.geom_pos[:].shape)
        self.sim.model.geom_pos[5, 1] = 0.25 + w1
        self.sim.model.geom_pos[7, 1] = -0.25 + w1
        self.sim.model.geom_pos[6, 1] = 0.35 + w2
        self.sim.model.geom_pos[8, 1] = -0.25 + w2
        self.sim.forward()
        # print("RESET!", self._get_obs())
        constraint = int(self.sim.data.ncon > 3)
        if constraint and check_constraint:
            if not len(pos):
                self.reset(difficulty, pos=pos)
        # # self.render()
        # im = self.sim.render(64, 64, camera_name= "cam0")
        # print('aaa',self.sim.data.ncon, self.sim.data.qpos, im.sum())
        # plt.imshow(im)
        # plt.show()
        # plt.pause(0.1)
        # assert 0
        return self._get_obs()

    def get_distance_score(self):
        """
        :return: mean of the distances between all objects and goals
        """
        d = np.sqrt(np.mean((self.goal - self.sim.data.qpos[:])**2))
        return d

    # TODO: implement noise_std, demo_quality, right now these are ignored
    def expert_action(self, noise_std=0, demo_quality='high'):
        """Proportional controller: head for a fixed waypoint until past the
        maze opening (x > 0.149), then head straight for the goal."""
        st = self.sim.data.qpos[:]
        # print("STATE", st)
        if st[0] <= 0.149:
            delt = (np.array([0.15, 0.125]) - st)
        else:
            delt = (np.array([self.goal[0], self.goal[1]]) - st)
        act = self.gain * delt
        return act
class MazeImageTeacher(object):
    """Scripted demonstrator for MazeImageNavigation that rolls out the
    expert controller with optional exploration noise injected early in
    each episode."""

    def __init__(self):
        self.env = MazeImageNavigation()
        self.demonstrations = []
        self.default_noise = 0  # baseline random-action probability

    # all get_rollout functions for all envs should have a noise parameter
    def get_rollout(self, noise_param_in=None, mode="eps_greedy"):
        """Run one episode with the expert controller.

        For the first `noise_idx` steps (random cutoff within the first half
        of the horizon), "eps_greedy" replaces the expert action with a random
        one with probability `noise_param`, while "gaussian_noise" adds
        Gaussian noise to it. Aborts (assert) if the episode never reaches
        the goal, dumping a debug GIF first.

        Returns a dict of observations, actions, and negated-cost rewards,
        reward_sum and cost-to-go values.
        """
        if mode == "eps_greedy":
            if noise_param_in is None:
                noise_param = 0
            else:
                noise_param = noise_param_in
        elif mode == "gaussian_noise":
            if noise_param_in is None:
                noise_param = 0
            else:
                noise_param = noise_param_in
        obs = self.env.reset(difficulty='m')
        O, A, cost_sum, costs = [obs], [], 0, []
        constraints_violated = 0
        im_list = [self.env._get_obs(images=True)]
        # Noise only applies before this random cutoff step.
        noise_idx = np.random.randint(int(2 * HORIZON / 4))
        for i in range(HORIZON):
            action = self.env.expert_action()
            if i < noise_idx:
                if mode == "eps_greedy":
                    assert (noise_param <= 1)
                    if np.random.random() < noise_param:
                        action = self.env.action_space.sample()
                    else:
                        if np.random.random() < self.default_noise:
                            action = self.env.action_space.sample()
                elif mode == "gaussian_noise":
                    action = (np.array(action) + np.random.normal(
                        0, noise_param + self.default_noise,
                        self.env.action_space.shape[0])).tolist()
                else:
                    print("Invalid Mode!")
                    assert (False)
            A.append(action)
            obs, cost, done, info = self.env.step(action)
            print("CON", info['constraint'])
            # print("STATE", obs)
            # print("DONE", done)
            constraints_violated += info['constraint']
            O.append(obs)
            im_list.append(self.env._get_obs(images=True))
            cost_sum += cost
            costs.append(cost)
            if done:
                break
        # Cost-to-go at each step (reverse cumulative sums).
        values = np.cumsum(costs[::-1])[::-1]
        print(cost_sum)
        print(len(O))
        print("CONSTRAINTS: ", constraints_violated)
        print("FINAL COST: ", cost)
        if int(cost_sum) == -HORIZON:
            # Goal never reached within the horizon: save a GIF and abort.
            print("FAILED")
            # return self.get_rollout(noise_param_in)
            npy_to_gif(im_list, 'image_maze')
            assert (False)
        print("obs", O)
        return {
            "obs": np.array(O),
            "noise": noise_param,
            "actions": np.array(A),
            "reward_sum": -cost_sum,
            "rewards": -np.array(costs),
            "values": -np.array(values)
        }
if __name__ == "__main__":
    # Roll out the scripted teacher 1000 times and report reward statistics,
    # extrapolating truncated episodes with their last per-step reward.
    teacher = MazeImageTeacher()
    reward_sum_completed = []
    constraint_sat = 0  # episodes that ran the full horizon
    for i in range(1000):
        rollout_stats = teacher.get_rollout()
        print("Iter: ", i)
        print(rollout_stats['reward_sum'])
        print(len(rollout_stats['rewards']))
        ep_len = len(rollout_stats['rewards'])
        diff = HORIZON - ep_len
        if ep_len == HORIZON:
            constraint_sat += 1
        reward_sum_completed.append(rollout_stats['reward_sum'] +
                                    diff * rollout_stats['rewards'][-1])
    print("completed reward sum", np.mean(reward_sum_completed),
          np.std(reward_sum_completed), constraint_sat)
|
{"/supplement_plots.py": ["/plotting_utils.py"], "/analyze_runs_brijen.py": ["/plotting_utils.py"], "/gen_maze_demos.py": ["/env/maze.py", "/env/mazes.py"], "/analyze_runs_ashwin.py": ["/plotting_utils.py"], "/main.py": ["/sac.py", "/gen_pointbot0_demos.py", "/env/cartpole.py", "/env/half_cheetah_disabled.py", "/env/ant_disabled.py"], "/env/image_maze.py": ["/env/maze_const_images.py"], "/constraint.py": ["/utils.py"], "/analyze_runs_michael.py": ["/plotting_utils.py"], "/env/maze.py": ["/env/maze_const.py"], "/sac.py": ["/utils.py", "/constraint.py", "/run_multitask.py"], "/gen_pointbot_demos.py": ["/env/simplepointbot1.py"], "/env/mazes.py": ["/env/maze_const.py", "/env/maze.py"], "/gen_cartpole_demos.py": ["/env/cartpole.py"]}
|
29,157,561
|
JiahaoYao/mesa-safe-rl
|
refs/heads/main
|
/run_multitask.py
|
import argparse
from copy import deepcopy
from typing import List, Optional
import os
import itertools
import math
import random
import time
import json
import pickle
from collections import defaultdict
import warnings
import matplotlib.pyplot as plt
from matplotlib.patches import Rectangle
from PIL import Image
import os.path as osp
import numpy as np
import higher
import numpy as np
import torch
import torch.autograd as A
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as O
import torch.distributions as D
import cv2
from torch.distributions import Normal
from random import choices
class FreezeParameters:
    """Context manager that disables gradients for the given parameters on
    entry and restores each parameter's original requires_grad flag on exit."""

    def __init__(self, parameters):
        self.parameters = parameters
        # Remember each parameter's original requires_grad state.
        self.param_states = [p.requires_grad for p in self.parameters]

    def __enter__(self):
        for p in self.parameters:
            p.requires_grad = False

    def __exit__(self, exc_type, exc_val, exc_tb):
        for p, was_enabled in zip(self.parameters, self.param_states):
            p.requires_grad = was_enabled
class WLinear(nn.Module):
    """Hypernetwork-parameterized linear layer: a learned latent `z` is mapped
    through `fc` to the flattened weight matrix and bias of the effective
    linear transform applied to the input."""

    def __init__(self, in_features: int, out_features: int, bias_size=None, paaa=None):
        super().__init__()
        self.pa = paaa
        if bias_size is None:
            bias_size = out_features
        dim = 100  # latent dimensionality of the hypernetwork input
        self.z = nn.Parameter(torch.empty(dim).normal_(0, 1. / out_features))
        self.fc = nn.Linear(dim, in_features * out_features + out_features)
        self.seq = self.fc
        # Split point between the flattened weight and the bias.
        self.w_idx = in_features * out_features
        self.weight = self.fc.weight
        self._linear = self.fc
        self.out_f = out_features

    def adaptation_parameters(self):
        # All parameters (z and the hypernetwork) are adapted.
        return self.parameters()

    def forward(self, x: torch.tensor):
        flat_params = self.fc(self.z)
        weight = flat_params[:self.w_idx].view(x.shape[-1], -1)
        bias = flat_params[self.w_idx:]
        return x @ weight + bias
class Linear(nn.Linear):
    """Plain nn.Linear that exposes its parameters for inner-loop adaptation."""

    def adaptation_parameters(self):
        return [p for p in self.parameters()]
class MLP(nn.Module):
    """Fully connected network built from `layer_widths`, optionally using the
    hypernetwork layer (WLinear) and optionally carrying an extra head fed by
    the penultimate features concatenated with actions (used to predict
    advantages)."""

    def __init__(self, layer_widths, final_activation=lambda x: x, extra_head_layers=None, w_linear: bool = False, scale=1.0):
        super().__init__()
        if len(layer_widths) < 2:
            raise ValueError('Layer widths needs at least an in-dimension and out-dimension')
        self._final_activation = final_activation
        self.seq = nn.Sequential()
        self._head = extra_head_layers is not None
        self.scale = scale  # multiplicative output scaling (e.g. action range)
        if not w_linear:
            linear = Linear
        else:
            linear = WLinear
        self.aparams = []
        for idx in range(len(layer_widths) - 1):
            w = linear(layer_widths[idx], layer_widths[idx + 1])
            self.aparams.extend(w.adaptation_parameters())
            self.seq.add_module(f'fc_{idx}', w)
            if idx < len(layer_widths) - 2:
                self.seq.add_module(f'relu_{idx}', nn.ReLU())
        if extra_head_layers is not None:
            # pre_seq: everything up to the penultimate activation;
            # post_seq: the final (relu, fc) pair producing the main output.
            self.pre_seq = self.seq[:-2]
            self.post_seq = self.seq[-2:]
            self.head_seq = nn.Sequential()
            extra_head_layers = [layer_widths[-2] + layer_widths[-1]] + extra_head_layers
            for idx, (infc, outfc) in enumerate(zip(extra_head_layers[:-1], extra_head_layers[1:])):
                # NOTE(review): infc/outfc equal extra_head_layers[idx] and
                # [idx + 1], which are what is actually used below.
                self.head_seq.add_module(f'relu_{idx}', nn.ReLU())
                w = linear(extra_head_layers[idx], extra_head_layers[idx + 1])
                self.aparams.extend(w.adaptation_parameters())
                self.head_seq.add_module(f'fc_{idx}', w)

    def bias_parameters(self):
        # Bias of the first layer only.
        return [self.seq[0].bias]

    def adaptation_parameters(self):
        return self.parameters()
        #return self.aparams

    def forward(self, x: torch.tensor, acts: Optional[torch.tensor] = None):
        """Return the scaled network output; when the extra head exists and
        `acts` is given, also return the head output computed from the
        penultimate features concatenated with `acts`."""
        if self._head and acts is not None:
            h = self.pre_seq(x)
            head_input = torch.cat((h, acts), -1)
            return self._final_activation(self.post_seq(h))*self.scale, self.head_seq(head_input)
        else:
            return self._final_activation(self.seq(x))*self.scale
def weights_init_(m):
    """Xavier-initialize Linear weights and zero their biases.

    Intended for use with nn.Module.apply; modules that are not nn.Linear
    are left untouched.
    """
    if not isinstance(m, nn.Linear):
        return
    nn.init.xavier_uniform_(m.weight, gain=1)
    nn.init.constant_(m.bias, 0)
class StochasticPolicy(nn.Module):
    """Gaussian policy with a state-dependent tanh-squashed mean and a single
    learned, state-independent log-std vector."""

    def __init__(self, num_inputs, num_actions, hidden_dim, action_space=None):
        super(StochasticPolicy, self).__init__()
        self.linear1 = nn.Linear(num_inputs, hidden_dim)
        self.linear2 = nn.Linear(hidden_dim, hidden_dim)
        self.mean = nn.Linear(hidden_dim, num_actions)
        self.log_std = torch.nn.Parameter(
            torch.as_tensor([np.log(0.1)] * num_actions))
        self.min_log_std = np.log(1e-6)  # floor keeps std strictly positive
        self.apply(weights_init_)
        # NOTE(review): assigning the nn.Parameter attribute above already
        # registers it; this explicit re-registration under the same name is
        # redundant.
        self.register_parameter(name='log_std', param=self.log_std)
        # action rescaling
        if action_space is None:
            # NOTE(review): plain floats here would break self.to(), which
            # calls .to(device) on these attributes — confirm this branch is
            # never used without an action_space.
            self.action_scale = 1.
            self.action_bias = 0.
        else:
            self.action_scale = torch.FloatTensor(
                (action_space.high - action_space.low) / 2.)
            self.action_bias = torch.FloatTensor(
                (action_space.high + action_space.low) / 2.)

    def forward(self, state):
        """Return a Normal distribution over (rescaled) actions for `state`."""
        x = F.relu(self.linear1(state))
        x = F.relu(self.linear2(x))
        mean = torch.tanh(self.mean(x)) * self.action_scale + self.action_bias
        #print(self.log_std)
        log_std = torch.clamp(self.log_std, min=self.min_log_std)
        log_std = log_std.unsqueeze(0).repeat([len(mean), 1])
        std = torch.exp(log_std)
        return Normal(mean, std)

    def adaptation_parameters(self):
        return self.parameters()

    def sample(self, state):
        """Return (reparameterized sample, summed log-prob, distribution mean)."""
        dist = self.forward(state)
        action = dist.rsample()
        return action, dist.log_prob(action).sum(-1), dist.mean

    def to(self, device):
        # Move the rescaling tensors along with the module parameters.
        self.action_scale = self.action_scale.to(device)
        self.action_bias = self.action_bias.to(device)
        return super(StochasticPolicy, self).to(device)
class MAMLRAWR(object):
def __init__(self, obs_space, ac_space, hidden_size, logdir, action_space, args, tmp_env):
    """Meta-learned safety critic / adaptation policy trainer.

    Args:
        obs_space / ac_space: gym-style spaces; only .shape (and
            ac_space.high) are read here.
        hidden_size, action_space, tmp_env: kept for interface compatibility;
            tmp_env is stored, the other two are not used in this method.
        logdir: directory under which per-task subfolders are created.
        args: parsed CLI namespace (env_name, cuda, gamma_safe,
            pos_fraction, tau_safe, ...).
    """
    self.env_name = args.env_name
    self.device = torch.device("cuda" if args.cuda else "cpu")
    self.logdir = logdir
    self._args = args
    self.tmp_env = tmp_env
    self.gamma_safe = args.gamma_safe
    self.obs_space = obs_space
    self.ac_space = ac_space
    # Negative pos_fraction disables positive-example oversampling.
    self.pos_fraction = args.pos_fraction if args.pos_fraction >= 0 else None
    self.batch_size = 256
    self.inner_batch_size = 256
    self._observation_dim = obs_space.shape[0]
    self._action_dim = ac_space.shape[0]
    self.policy_head = [32, 1]
    self.net_width = 100  # 256#100
    self.net_depth = 3  # 2#3
    self.outer_value_lr = 0.00001
    self.outer_policy_lr = 0.0001
    self.lrlr = 0.001  # learning rate for the learned inner-loop lrs
    self.inner_policy_lr = 0.001  # 0.001#0.0003#0.001
    self.inner_value_lr = 0.001  # 0.001#0.0003#0.001
    self.task_batch_size = 5
    self.use_og_policy = False
    self.advantage_head_coef = 0.01
    self._adaptation_temperature = 1.0
    self._gradient_steps_per_iteration = 1
    self._advantage_clamp = np.log(20.0)
    self._action_sigma = 0.01
    self._grad_clip = 40.0
    self._env_seeds = np.random.randint(1e10, size=(int(1e7),))
    self._rollout_counter = 0
    self._maml_steps = 1
    self.updates = 0
    self.value_target = None
    # Value Function doesn't work anymore, Q_value should be true (DDPG Loss)
    self.q_value = True
    # BUG FIX: the original wrapped os.makedirs in try/except and compared
    # e.errno against `errno.EEXIST`, but the errno module was never
    # imported, so any OSError raised a NameError instead of being handled.
    # exist_ok=True has the intended "ignore already-exists" semantics.
    for sub in ("1", "5", "10", "20", "right", "left", "up", "down"):
        os.makedirs(os.path.join(logdir, sub), exist_ok=True)
    if self.use_og_policy:
        self._adaptation_policy = StochasticPolicy(self._observation_dim, self._action_dim, 256, ac_space).to(self.device)
    else:
        self._adaptation_policy = MLP([self._observation_dim] +
                                      [self.net_width] * self.net_depth +
                                      [self._action_dim],
                                      final_activation=torch.tanh,
                                      w_linear=False,
                                      scale=ac_space.high[0]).to(self.device)
    if self.q_value:
        self._value_function = MLP([self._observation_dim + self._action_dim] +
                                   [self.net_width] * self.net_depth +
                                   [1],
                                   final_activation=torch.sigmoid,
                                   w_linear=True).to(self.device)
    else:
        self._value_function = MLP([self._observation_dim] +
                                   [self.net_width] * self.net_depth +
                                   [1],
                                   final_activation=torch.sigmoid,
                                   w_linear=True).to(self.device)
    # For Meta Update
    self._adaptation_policy_optimizer = O.Adam(self._adaptation_policy.parameters(), lr=self.outer_policy_lr)
    self._value_function_optimizer = O.Adam(self._value_function.parameters(), lr=self.outer_value_lr)
    self.torchify = lambda x: torch.FloatTensor(x).to(self.device)
    self._policy_lrs = None
    self._value_lrs = None
    self._adv_coef = None
    # Buffer probably declared in main.py
    self._inner_buffers = None
    self._outer_buffers = None
    # Per-parameter learnable inner-loop learning rates, stored as log-lr.
    # NOTE(review): the value lrs are initialized from inner_policy_lr, not
    # inner_value_lr — the two constants are equal here, but confirm intent.
    self._policy_lrs = [torch.nn.Parameter(torch.tensor(float(np.log(self.inner_policy_lr))).to(self.device))
                        for p in self._adaptation_policy.adaptation_parameters()]
    self._value_lrs = [torch.nn.Parameter(torch.tensor(float(np.log(self.inner_policy_lr))).to(self.device))
                       for p in self._value_function.adaptation_parameters()]
    self._adv_coef = torch.nn.Parameter(torch.tensor(float(np.log(self.advantage_head_coef))).to(self.device))
    self._policy_lr_optimizer = O.Adam(self._policy_lrs, lr=self.lrlr)
    self._value_lr_optimizer = O.Adam(self._value_lrs, lr=self.lrlr)
    self._adv_coef_optimizer = O.Adam([self._adv_coef], lr=self.lrlr)
    self.online_adapt_policy_opt = None
    self.online_adapt_value_opt = None
def select_action(self, state, eval=False, policy=None):
    """Compute an action for `state` and return it as a numpy array.

    Uses `self._adaptation_policy` unless an explicit `policy` is given.
    With eval, the deterministic mean action is returned; otherwise
    exploration noise (sigma = self._action_sigma) is added.
    """
    chosen = self._adaptation_policy if policy is None else policy
    obs = torch.FloatTensor(state).to(self.device).unsqueeze(0)
    if self.use_og_policy:
        sampled, _log_prob, mean = chosen.sample(obs)
        picked = mean if eval else sampled
        return picked.detach().cpu().numpy()[0]
    mu = chosen(obs)
    if eval is True:
        out = mu
    else:
        out = mu + self._action_sigma * torch.empty_like(mu).normal_()
    return out.detach().cpu().numpy()[0]
def get_value(self, states, actions):
    """Critic estimate: Q(s, a) when self.q_value is set, otherwise V(s)."""
    if not self.q_value:
        return self._value_function(states)
    return self._value_function(torch.cat([states, actions], 1))
def __call__(self, states, actions):
    """Return the critic estimate twice (interface expects a value pair)."""
    if self.q_value:
        estimate = self._value_function(torch.cat([states, actions], 1))
    else:
        estimate = self._value_function(states)
    return estimate, estimate
def policy_output(self, policy, state_batch):
    """Batch of exploratory actions from `policy` for `state_batch`."""
    if self.use_og_policy:
        sampled, _, _ = policy.sample(state_batch)
        return sampled
    mean = policy(state_batch)
    return mean + self._action_sigma * torch.empty_like(mean).normal_()
def value_function_loss_on_batch(self, value_function, action_function, task_policy, state_batch, next_state_batch, action_batch, mc_reward_batch, reward_batch, mask_batch, inner: bool = False, target=None):
    """Critic loss: a TD (DDPG-style) MSE against a bootstrapped target when
    self.q_value is set, otherwise a Monte-Carlo regression for V(s).

    Returns (loss, value_mean, mc_mean, mc_std); the last three are None on
    the q_value path.
    NOTE(review): `action_function` is never used — next actions come from
    `task_policy.sample`. `inner` is only meaningful on the MC path, where it
    currently does nothing.
    """
    if self.q_value:
        with torch.no_grad():
            # Bootstrap from the target network when one is supplied.
            actions_next, _, _ = task_policy.sample(next_state_batch)
            if target is None:
                qvalue_next = value_function(torch.cat([next_state_batch, actions_next], 1))
            else:
                qvalue_next = target(torch.cat([next_state_batch, actions_next], 1))
            targets = reward_batch + mask_batch * self.gamma_safe * qvalue_next
        qvalue_estimates = value_function(torch.cat([state_batch, action_batch], 1))
        losses = torch.nn.functional.mse_loss(qvalue_estimates, targets)
        return losses, None, None, None
    else:
        value_estimates = value_function(state_batch)
        with torch.no_grad():
            mc_value_estimates = mc_reward_batch
            targets = mc_value_estimates
        if inner:
            pass
        factor = 1  # NOTE(review): unused
        losses = torch.nn.functional.mse_loss(value_estimates, targets)
        return losses, value_estimates.mean(), mc_value_estimates.mean(), mc_value_estimates.std()
def adaptation_policy_loss_on_batch(self, policy, value_function, state_batch, action_batch, mc_reward_batch, inner: bool = False):
    """Policy loss: the mean critic value on the q_value path, otherwise an
    advantage-weighted regression (AWR) loss with an optional auxiliary
    advantage-prediction term on inner-loop steps.

    Returns (loss, advantages_mean, weights, adv_prediction_loss); the last
    three are None on the q_value path.
    NOTE(review): on the q_value path the loss is +Q.mean() (not negated) —
    consistent with minimizing a safety/constraint critic, confirm intent.
    """
    if self.q_value:
        actions = self.policy_output(policy, state_batch)
        q_value_estimate = value_function(torch.cat([state_batch, actions], 1))
        losses = q_value_estimate.mean()
        return losses, None, None, None
    else:
        with torch.no_grad():
            value_estimates = value_function(state_batch)
            action_value_estimates = mc_reward_batch
            advantages = (action_value_estimates - value_estimates).squeeze(-1)
            # Temperature-scaled, normalized, sign-flipped advantages turned
            # into clamped exponential weights.
            normalized_advantages = (1 / self._adaptation_temperature) * (advantages - advantages.mean()) / advantages.std()
            normalized_advantages = -normalized_advantages
            weights = normalized_advantages.clamp(max=self._advantage_clamp).exp()
        action_mu, advantage_prediction = policy(state_batch, action_batch)
        action_sigma = torch.empty_like(action_mu).fill_(self._action_sigma)
        action_distribution = D.Normal(action_mu, action_sigma)
        action_log_probs = action_distribution.log_prob(action_batch).sum(-1)
        losses = -(action_log_probs * weights)
        adv_prediction_loss = None
        if inner:
            if self.q_value:
                pass
            else:
                # Auxiliary head: regress the predicted advantage onto the
                # empirical one, weighted by softplus(_adv_coef).
                adv_prediction_loss = F.softplus(self._adv_coef) * (advantage_prediction.squeeze() - advantages) ** 2
                losses = losses + adv_prediction_loss
                adv_prediction_loss = adv_prediction_loss.mean()
        return losses.mean(), advantages.mean(), weights, adv_prediction_loss
def update_model(self, model: nn.Module, optimizer: torch.optim.Optimizer, clip: float = None, extra_grad: list = None):
    """Take one optimizer step (optionally clipping gradients first), then
    zero the gradients.

    Returns the pre-clip gradient norm when `clip` is given, else None.
    `extra_grad` is accepted for interface compatibility but unused.
    """
    grad_norm = None
    if clip is not None:
        grad_norm = torch.nn.utils.clip_grad_norm_(model.parameters(), clip)
    optimizer.step()
    optimizer.zero_grad()
    return grad_norm
def update_params(self, params: list, optimizer: torch.optim.Optimizer, clip: float = None, extra_grad: list = None):
    """Step then clear the optimizer for `params`.

    NOTE(review): `params`, `clip` and `extra_grad` are accepted for
    interface symmetry with update_model but are unused here.
    """
    optimizer.step()
    optimizer.zero_grad()
def soft_update(self, source, target):
    """Polyak-average `source` parameters into `target` with rate
    self._args.tau_safe (names are asserted to match pairwise)."""
    tau = self._args.tau_safe
    pairs = zip(source.named_parameters(), target.named_parameters())
    for (src_name, src_p), (tgt_name, tgt_p) in pairs:
        assert src_name == tgt_name
        tgt_p.data = (1 - tau) * tgt_p.data + tau * src_p.data
def meta_update_parameters(self, inner_buffers, outer_buffers, writer=None, ep=None, memory=None, policy=None, critic=None, lr=None, batch_size=None, training_iterations=None, plot=None):
meta_value_grads = []
meta_policy_grads = []
train_rewards = []
rollouts = []
successes = []
train_step_index = self.updates
self.num_tasks = len(inner_buffers)
tasks = choices(range(self.num_tasks), k=self.task_batch_size)#random.sample(range(self.num_tasks), self.task_batch_size)
for i, (train_task_idx, inner_buffer, outer_buffer) in enumerate(zip(range(self.num_tasks), inner_buffers, outer_buffers)):
# Only train on the randomly selected tasks for this iteration
if train_task_idx not in tasks:
continue
# Data for Inner Adaptation
self.maml_steps = self._maml_steps
state_batch, action_batch, constraint_batch, next_state_batch, mask_batch, mc_reward_batch = inner_buffer.sample(
batch_size=self.inner_batch_size * self.maml_steps,
pos_fraction=self.pos_fraction)
state_batch = torch.FloatTensor(state_batch).to(self.device)
next_state_batch = torch.FloatTensor(next_state_batch).to(self.device)
action_batch = torch.FloatTensor(action_batch).to(self.device)
mask_batch = torch.FloatTensor(mask_batch).to(self.device).unsqueeze(1)
constraint_batch = torch.FloatTensor(constraint_batch).to(
self.device).unsqueeze(1)
mc_reward_batch = torch.FloatTensor(mc_reward_batch).to(
self.device).unsqueeze(1)
state_batch = state_batch.view(self.maml_steps, state_batch.shape[0] // self.maml_steps, *state_batch.shape[1:])
next_state_batch = next_state_batch.view(self.maml_steps, next_state_batch.shape[0] // self.maml_steps, *next_state_batch.shape[1:])
action_batch = action_batch.view(self.maml_steps, action_batch.shape[0] // self.maml_steps, *action_batch.shape[1:])
mask_batch = mask_batch.view(self.maml_steps, mask_batch.shape[0] // self.maml_steps, *mask_batch.shape[1:])
constraint_batch = constraint_batch.view(self.maml_steps, constraint_batch.shape[0] // self.maml_steps, *constraint_batch.shape[1:])
mc_reward_batch = mc_reward_batch.view(self.maml_steps, mc_reward_batch.shape[0] // self.maml_steps, *mc_reward_batch.shape[1:])
# Data for Outer Adaptation
meta_state_batch, meta_action_batch, meta_constraint_batch, meta_next_state_batch, meta_mask_batch, meta_mc_reward_batch = outer_buffer.sample(
batch_size=self.batch_size,
pos_fraction=self.pos_fraction)
meta_state_batch = torch.FloatTensor(meta_state_batch).to(self.device)
meta_next_state_batch = torch.FloatTensor(meta_next_state_batch).to(self.device)
meta_action_batch = torch.FloatTensor(meta_action_batch).to(self.device)
meta_mask_batch = torch.FloatTensor(meta_mask_batch).to(self.device).unsqueeze(1)
meta_constraint_batch = torch.FloatTensor(meta_constraint_batch).to(
self.device).unsqueeze(1)
meta_mc_reward_batch = torch.FloatTensor(meta_mc_reward_batch).to(
self.device).unsqueeze(1)
inner_value_losses = []
meta_value_losses = []
inner_policy_losses = []
adv_policy_losses = []
meta_policy_losses = []
value_lr_grads = []
policy_lr_grads = []
#inner_mc_means, inner_mc_stds = [], []
#outer_mc_means, outer_mc_stds = [], []
#inner_values, outer_values = [], []
#inner_weights, outer_weights = [], []
#inner_advantages, outer_advantages = [], []
##################################################################################################
# Adapt value function and collect meta-gradients
##################################################################################################
vf = self._value_function
vf.train()
vf_target = deepcopy(vf)
opt = O.SGD([{'params': p, 'lr': None} for p in vf.adaptation_parameters()])
with higher.innerloop_ctx(vf, opt, override={'lr': [F.softplus(l) for l in self._value_lrs]}, copy_initial_weights=False) as (f_value_function, diff_value_opt):
for step in range(self._maml_steps):
state = state_batch[step]
next_state = next_state_batch[step]
action = action_batch[step]
mask = mask_batch[step]
constraint = constraint_batch[step]
mc_reward = mc_reward_batch[step]
loss, value_inner, mc_inner, mc_std_inner = self.value_function_loss_on_batch(f_value_function, self._adaptation_policy, policy, state, next_state, action, mc_reward, constraint, mask, inner=True, target = vf_target)
#inner_values.append(value_inner.item())
#inner_mc_means.append(mc_inner.item())
#inner_mc_stds.append(mc_std_inner.item())
diff_value_opt.step(loss)
inner_value_losses.append(loss.item())
self.soft_update(f_value_function, vf_target)
#Soft Update the Target Network
# Collect grads for the value function update in the outer loop [L14],
# which is not actually performed here
meta_value_function_loss, value, mc, mc_std = self.value_function_loss_on_batch(f_value_function, self._adaptation_policy, policy, meta_state_batch, meta_next_state_batch, meta_action_batch, meta_mc_reward_batch, meta_constraint_batch, meta_mask_batch, inner = False, target = vf_target)
total_vf_loss = meta_value_function_loss / self.num_tasks
total_vf_loss.backward()
#outer_values.append(value.item())
#outer_mc_means.append(mc.item())
#outer_mc_stds.append(mc_std.item())
'''
meta_value_losses.append(meta_value_function_loss.item())
##################################################################################################
# Adapt policy and collect meta-gradients
##################################################################################################
adapted_value_function = f_value_function
opt = O.SGD([{'params': p, 'lr': None} for p in self._adaptation_policy.adaptation_parameters()])
self._adaptation_policy.train()
with higher.innerloop_ctx(self._adaptation_policy, opt, override={'lr': [F.softplus(l) for l in self._policy_lrs]}, copy_initial_weights=False) as (f_adaptation_policy, diff_policy_opt):
with FreezeParameters(adapted_value_function.parameters()):
for step in range(self._maml_steps):
loss, adv, weights, adv_loss = self.adaptation_policy_loss_on_batch(f_adaptation_policy,
adapted_value_function, state_batch, action_batch, mc_reward_batch, inner=True)
diff_policy_opt.step(loss)
inner_policy_losses.append(loss.item())
#adv_policy_losses.append(adv_loss.item())
#inner_advantages.append(adv.item())
#inner_weights.append(weights.mean().item())
meta_policy_loss, outer_adv, outer_weights_, _ = self.adaptation_policy_loss_on_batch(f_adaptation_policy, adapted_value_function, meta_state_batch, meta_action_batch, meta_mc_reward_batch, inner=False)
(meta_policy_loss / self.num_tasks).backward()
#outer_weights.append(outer_weights_.mean().item())
#outer_advantages.append(outer_adv.item())
meta_policy_losses.append(meta_policy_loss.item())
##################################################################################################
'''
# Meta-update value function [L14]
grad = self.update_model(self._value_function, self._value_function_optimizer, clip=self._grad_clip)
# Meta-update adaptation policy [L15] (Not really metaupdated)
ap_opt = self._adaptation_policy_optimizer
ap_opt.zero_grad()
state_batch, action_batch, constraint_batch, next_state_batch, mask_batch, mc_reward_batch = memory.sample(
batch_size=min(batch_size, len(memory)),
pos_fraction=self.pos_fraction)
state_batch = torch.FloatTensor(state_batch).to(self.device)
next_state_batch = torch.FloatTensor(next_state_batch).to(self.device)
action_batch = torch.FloatTensor(action_batch).to(self.device)
mask_batch = torch.FloatTensor(mask_batch).to(self.device).unsqueeze(1)
constraint_batch = torch.FloatTensor(constraint_batch).to(
self.device).unsqueeze(1)
mc_reward_batch = torch.FloatTensor(mc_reward_batch).to(
self.device).unsqueeze(1)
ap_loss, _, _, _ = self.adaptation_policy_loss_on_batch(self._adaptation_policy, self._value_function, state_batch, action_batch, mc_reward_batch, inner=True)
ap_opt.zero_grad()
ap_loss.backward()
ap_opt.step()
self._value_function_optimizer.zero_grad()
#grad = self.update_model(self._adaptation_policy, self._adaptation_policy_optimizer, clip=self._grad_clip)
if self.lrlr > 0:
self.update_params(self._value_lrs, self._value_lr_optimizer)
#self.update_params(self._policy_lrs, self._policy_lr_optimizer)
#self.update_params([self._adv_coef], self._adv_coef_optimizer)
self.updates+=1
if self.updates%100==0:
if self._args.env_name=='cartpole':
return
if self._args.env_name=='Ant-Disabled':
return
if self._args.env_name=='HalfCheetah-Disabled':
return
# For Maze
self.plot(policy, self.updates, [.1, 0], "right", folder_prefix="/right/")
self.plot(policy, self.updates, [-.1, 0], "left", folder_prefix="/left/")
self.plot(policy, self.updates, [0, .1], "down", folder_prefix="/down/")
self.plot(policy, self.updates, [0, -.1], "up", folder_prefix="/up/")
self.eval_adaptation(policy, memory)
def eval_adaptation(self, policy, memory):
    """Evaluate few-shot adaptation quality of the current meta-learned networks.

    Works on throwaway deep copies of the value function and adaptation policy
    so the meta-parameters are untouched: runs up to 20 gradient steps of
    value-function and policy adaptation on batches drawn from ``memory``, and
    renders a value-landscape plot at selected step counts (maze envs only).

    Args:
        policy: behaviour policy forwarded to the value-function loss and to
            ``self.plot``.
        memory: replay buffer exposing ``sample(batch_size, pos_fraction)``.
    """
    # Adapt deep copies so the meta-trained networks are not modified.
    vf = deepcopy(self._value_function)
    ap = deepcopy(self._adaptation_policy)
    opt = O.Adam(vf.parameters(), lr=self.inner_value_lr)
    ap_opt = O.Adam(ap.parameters(), lr=self.inner_policy_lr)
    vf_target = deepcopy(self._value_function)  # target net for the value loss
    log_steps = [1, 5, 10, 20]  # adaptation-step counts at which to plot
    for step in range(20):
        state_batch, action_batch, constraint_batch, next_state_batch, mask_batch, mc_reward_batch = memory.sample(
            batch_size=min(self.batch_size, len(memory)),
            pos_fraction=self.pos_fraction)
        state_batch = torch.FloatTensor(state_batch).to(self.device)
        next_state_batch = torch.FloatTensor(next_state_batch).to(self.device)
        action_batch = torch.FloatTensor(action_batch).to(self.device)
        mask_batch = torch.FloatTensor(mask_batch).to(self.device).unsqueeze(1)
        constraint_batch = torch.FloatTensor(constraint_batch).to(
            self.device).unsqueeze(1)
        mc_reward_batch = torch.FloatTensor(mc_reward_batch).to(
            self.device).unsqueeze(1)
        # One value-function gradient step, then soft-update its target.
        vf_loss, _, _, _ = self.value_function_loss_on_batch(vf, ap, policy, state_batch, next_state_batch, action_batch, mc_reward_batch, constraint_batch, mask_batch, inner=True, target=vf_target)
        opt.zero_grad()
        vf_loss.backward()
        opt.step()
        self.soft_update(vf, vf_target)
        # One adaptation-policy gradient step against the adapted value fn.
        ap_loss, _, _, _ = self.adaptation_policy_loss_on_batch(ap, vf, state_batch, action_batch, mc_reward_batch, inner=True)
        ap_opt.zero_grad()
        ap_loss.backward()
        ap_opt.step()
        if step + 1 in log_steps:
            # NOTE(review): for the env names below the method returns at the
            # first logged step (step 1), aborting the remaining adaptation
            # steps — presumably plotting is only supported for the maze
            # environments; confirm this early-exit is intentional.
            if self._args.env_name == 'cartpole':
                return
            if self._args.env_name == 'Ant-Disabled':
                return
            if self._args.env_name == 'HalfCheetah-Disabled':
                return
            # For Maze
            self.plot(policy, self.updates, [.1, 0], "right", folder_prefix="/" + str(step + 1) + "/", critic=vf)
def update_parameters(self, ep=None, memory=None, policy=None, critic=None, lr=None, batch_size=None, training_iterations=None, plot=None):
    """Single online-adaptation update of the value function and adaptation policy.

    Unlike the meta-training path, this directly optimises
    ``self._value_function`` / ``self._adaptation_policy`` in place with Adam
    optimisers that are lazily created on first call. Every 100th update a
    value-landscape plot is written (maze-style envs only).

    Args:
        ep, critic, lr, training_iterations, plot: accepted for interface
            compatibility; not read in this body.
        memory: replay buffer exposing ``sample(batch_size, pos_fraction)``.
        policy: behaviour policy forwarded to the value-function loss.
        batch_size: upper bound on the sampled batch size.
    """
    # Lazily create the online-adaptation optimisers and the target network.
    if self.online_adapt_value_opt is None and self.online_adapt_policy_opt is None:
        self.online_adapt_value_opt = O.Adam(self._value_function.parameters(), lr=self.inner_value_lr)
        self.online_adapt_policy_opt = O.Adam(self._adaptation_policy.parameters(), lr=self.inner_policy_lr)
    if self.value_target is None:
        self.value_target = deepcopy(self._value_function)
    # Data for Inner Adaptation
    state_batch, action_batch, constraint_batch, next_state_batch, mask_batch, mc_reward_batch = memory.sample(
        batch_size=min(batch_size, len(memory)),
        pos_fraction=self.pos_fraction)
    state_batch = torch.FloatTensor(state_batch).to(self.device)
    next_state_batch = torch.FloatTensor(next_state_batch).to(self.device)
    action_batch = torch.FloatTensor(action_batch).to(self.device)
    mask_batch = torch.FloatTensor(mask_batch).to(self.device).unsqueeze(1)
    constraint_batch = torch.FloatTensor(constraint_batch).to(
        self.device).unsqueeze(1)
    mc_reward_batch = torch.FloatTensor(mc_reward_batch).to(
        self.device).unsqueeze(1)
    vf = self._value_function
    vf.train()
    vf_loss, _, _, _ = self.value_function_loss_on_batch(vf, self._adaptation_policy, policy, state_batch, next_state_batch, action_batch, mc_reward_batch, constraint_batch, mask_batch, inner=True, target=self.value_target)
    # NOTE(review): the target soft-update happens *before* the gradient step
    # here, while eval_adaptation updates the target after stepping — confirm
    # this ordering difference is intentional.
    self.soft_update(self._value_function, self.value_target)
    self.online_adapt_value_opt.zero_grad()
    vf_loss.backward()
    self.online_adapt_value_opt.step()
    self._adaptation_policy.train()
    actor_loss, _, _, _ = self.adaptation_policy_loss_on_batch(self._adaptation_policy,
        self._value_function, state_batch, action_batch, mc_reward_batch, inner=True)
    self.online_adapt_policy_opt.zero_grad()
    actor_loss.backward()
    self.online_adapt_policy_opt.step()
    self.updates += 1
    if self.updates % 100 == 0:
        # Plotting is only implemented for maze-style envs; bail out for others.
        if self._args.env_name == 'cartpole':
            return
        if self._args.env_name == 'Ant-Disabled':
            return
        if self._args.env_name == 'HalfCheetah-Disabled':
            return
        # For Maze
        if self.q_value:
            # Q-function critic: plot the value of each unit action direction.
            self.plot(policy, self.updates, [.1, 0], "right", folder_prefix="/right/")
            self.plot(policy, self.updates, [-.1, 0], "left", folder_prefix="/left/")
            self.plot(policy, self.updates, [0, .1], "down", folder_prefix="/down/")
            self.plot(policy, self.updates, [0, -.1], "up", folder_prefix="/up/")
        else:
            self.plot(policy, self.updates)
def plot(self, pi, ep, action=None, suffix="", folder_prefix="", critic=None):
    """Render and save a heatmap of the critic's value over a 2-D position grid.

    Builds a grid of (x, y) states spanning the environment's bounds,
    evaluates ``critic`` (or ``self._value_function``) on them — concatenated
    with a tiled ``action`` when ``self.q_value`` — and writes the heatmap to
    ``self.logdir + folder_prefix``.

    Args:
        pi: unused here; kept for interface compatibility.
        ep: update/episode index used in the output filename.
        action: action vector tiled over the grid when plotting a Q-function.
        suffix: appended to the output filename.
        folder_prefix: sub-path under the log directory.
        critic: network to evaluate; defaults to ``self._value_function``.
    """
    env = self.tmp_env
    # NOTE(review): env names outside these branches leave x_bounds/y_bounds
    # unbound and would raise NameError below — confirm callers only pass
    # maze/simplepointbot envs.
    if self.env_name in ['maze', 'maze_1', 'maze_2', 'maze_3', 'maze_4', 'maze_5', 'maze_6']:
        x_bounds = [-0.3, 0.3]
        y_bounds = [-0.3, 0.3]
    elif self.env_name == 'simplepointbot0':
        x_bounds = [-80, 20]
        y_bounds = [-10, 10]
    elif self.env_name == 'simplepointbot1':
        x_bounds = [-75, 25]
        y_bounds = [-20, 20]
    states = []
    x_pts = 100
    # Grid resolution along the second axis, scaled by the bounds aspect ratio.
    y_pts = int(
        x_pts * (x_bounds[1] - x_bounds[0]) / (y_bounds[1] - y_bounds[0]))
    for x in np.linspace(x_bounds[0], x_bounds[1], y_pts):
        for y in np.linspace(y_bounds[0], y_bounds[1], x_pts):
            if self.env_name == 'image_maze':
                # Image observations: reset the env at (x, y) and use the
                # processed image as the state (process_obs defined elsewhere).
                env.reset(pos=(x, y))
                obs = process_obs(env._get_obs(images=True))
                states.append(obs)
            else:
                states.append([x, y])
    if self._args.env_name == 'maze':
        # Goal-conditioned maze: append the (tiled) goal to every state.
        states = np.array(states)
        goal_state = self.tmp_env.get_goal()
        batch_size = states.shape[0]
        goal_states = np.tile(goal_state, (batch_size, 1))
        states = np.concatenate([states, goal_states], axis=1)
        states = self.torchify(states)
    else:
        states = self.torchify(np.array(states))
    if critic is None:
        critic = self._value_function
    critic.eval()
    if self.q_value:
        # Q-function critic: evaluate Q(s, a) with the given action everywhere.
        actions = self.torchify(np.tile(action, (len(states), 1)))
        max_qf = critic(torch.cat([states, actions], 1))
    else:
        max_qf = critic(states)
    grid = max_qf.detach().cpu().numpy()
    grid = grid.reshape(y_pts, x_pts)
    # Overlay the environment's obstacle rectangle where applicable.
    if self.env_name == 'simplepointbot0':
        plt.gca().add_patch(
            Rectangle(
                (0, 25),
                500,
                50,
                linewidth=1,
                edgecolor='r',
                facecolor='none'))
    elif self.env_name == 'simplepointbot1':
        plt.gca().add_patch(
            Rectangle(
                (112.5, 31.25),
                10 * 2.5,
                15 * 2.5,
                linewidth=1,
                edgecolor='r',
                facecolor='none'))
    if self.env_name in ['maze', 'maze_1', 'maze_2', 'maze_3', 'maze_4', 'maze_5', 'maze_6']:
        # Maze: draw the rendered maze as background with the value overlaid.
        fig, ax = plt.subplots()
        cmap = plt.get_cmap('jet', 10)
        background = cv2.resize(env._get_obs(images=True), (x_pts, y_pts))
        plt.imshow(background)
        im = ax.imshow(grid.T, alpha=0.6, cmap=cmap, vmin=0.0, vmax=1.0)
        cbar = fig.colorbar(im, ax=ax)
    else:
        plt.imshow(grid.T)
    log_string = self.logdir + "/" + folder_prefix + "value_" + str(ep) + suffix
    plt.savefig(
        log_string,
        bbox_inches='tight')
|
{"/supplement_plots.py": ["/plotting_utils.py"], "/analyze_runs_brijen.py": ["/plotting_utils.py"], "/gen_maze_demos.py": ["/env/maze.py", "/env/mazes.py"], "/analyze_runs_ashwin.py": ["/plotting_utils.py"], "/main.py": ["/sac.py", "/gen_pointbot0_demos.py", "/env/cartpole.py", "/env/half_cheetah_disabled.py", "/env/ant_disabled.py"], "/env/image_maze.py": ["/env/maze_const_images.py"], "/constraint.py": ["/utils.py"], "/analyze_runs_michael.py": ["/plotting_utils.py"], "/env/maze.py": ["/env/maze_const.py"], "/sac.py": ["/utils.py", "/constraint.py", "/run_multitask.py"], "/gen_pointbot_demos.py": ["/env/simplepointbot1.py"], "/env/mazes.py": ["/env/maze_const.py", "/env/maze.py"], "/gen_cartpole_demos.py": ["/env/cartpole.py"]}
|
29,157,562
|
JiahaoYao/mesa-safe-rl
|
refs/heads/main
|
/constraint.py
|
import matplotlib.pyplot as plt
import numpy as np
import torch
import torch.nn.functional as F
from torch.optim import Adam
from matplotlib.patches import Rectangle
from PIL import Image
from model import ValueNetwork, QNetworkConstraint, hard_update, soft_update
from replay_memory import ReplayMemory
from utils import soft_update
import os.path as osp
class ValueFunction:
    """Safety value function V(s) trained on constraint-violation signals.

    Depending on ``pred_time``, the network regresses either a discounted
    time-until-violation estimate or a discounted violation indicator, using
    a (optionally soft-updated) target network, and can plot its value
    landscape for the supported 2-D environments.
    """

    def __init__(self, params):
        self.gamma_safe = params.gamma_safe  # discount for the safety signal
        self.device = params.device
        self.torchify = lambda x: torch.FloatTensor(x).to(self.device)
        self.model = ValueNetwork(params.state_dim, params.hidden_size,
                                  params.pred_time).to(self.device)
        self.target = ValueNetwork(params.state_dim, params.hidden_size,
                                   params.pred_time).to(self.device)
        self.tau = params.tau_safe  # Polyak averaging coefficient
        self.logdir = params.logdir
        self.pred_time = params.pred_time  # True: predict time-to-violation
        self.env_name = params.env_name
        self.opt = params.opt
        # Without a target network, make the "soft" update a full copy.
        if not params.use_target:
            self.tau = 1.
        hard_update(self.target, self.model)

    def train(self,
              ep,
              memory,
              pi=None,
              lr=0.0003,
              batch_size=1000,
              training_iterations=3000,
              plot=False):
        """Fit the value network by TD regression on batches from ``memory``.

        Args:
            ep: episode index, used only for naming the saved plot.
            memory: replay buffer exposing ``sample(batch_size)``.
            pi: unused here; kept for interface compatibility.
            lr, batch_size, training_iterations: optimisation settings.
            plot: if True, save a value-landscape plot after training.
        """
        optim = Adam(self.model.parameters(), lr=lr)
        for j in range(training_iterations):
            state_batch, action_batch, constraint_batch, next_state_batch, _ = memory.sample(
                batch_size=batch_size)
            with torch.no_grad():
                if self.pred_time:
                    # Time-to-violation target:
                    # (gamma * V(s') + 1) while safe, 0 at a violation.
                    target = (self.gamma_safe * self.target(
                        self.torchify(next_state_batch))[:, 0] + 1) * (
                            1 - self.torchify(constraint_batch))
                else:
                    # Violation target: c + gamma * V(s') * (1 - c).
                    target = self.torchify(
                        constraint_batch) + self.gamma_safe * self.target(
                            self.torchify(next_state_batch))[:, 0] * (
                                1 - self.torchify(constraint_batch))
            preds = self.model(self.torchify(state_batch))[:, 0]
            optim.zero_grad()
            loss = F.mse_loss(preds, target)
            loss.backward()
            optim.step()
            loss = loss.detach().cpu().numpy()
            if j % 100 == 0:
                with torch.no_grad():
                    print(
                        "Value Training Iteration %d Loss: %f" % (j, loss))
            # Soft-update the target every iteration (tau == 1 copies fully).
            soft_update(self.target, self.model, self.tau)
        if plot:
            self.plot(ep)

    def plot(self, ep):
        """Save a heatmap of V over a 2-D state grid for the supported envs.

        Args:
            ep: episode index used in the output filename.

        Raises:
            NotImplementedError: for unsupported env names, or when
                ``self.opt`` is set (opt branch not implemented).
        """
        if self.env_name == 'maze' or self.env_name == 'image_maze':
            x_bounds = [-0.3, 0.3]
            y_bounds = [-0.3, 0.3]
        elif self.env_name == 'simplepointbot0':
            x_bounds = [-80, 20]
            y_bounds = [-10, 10]
        elif self.env_name == 'simplepointbot1':
            x_bounds = [-75, 25]
            y_bounds = [-75, 25]
        elif self.env_name == 'car':
            x_bounds = [0, 20]
            y_bounds = [-5, 5]
        else:
            raise NotImplementedError("Plotting unsupported for this env")
        states = []
        x_pts = 100
        # Resolution along the second axis, scaled by the bounds aspect ratio.
        y_pts = int(
            x_pts * (x_bounds[1] - x_bounds[0]) / (y_bounds[1] - y_bounds[0]))
        for x in np.linspace(x_bounds[0], x_bounds[1], y_pts):
            for y in np.linspace(y_bounds[0], y_bounds[1], x_pts):
                if self.env_name != 'car':
                    states.append([x, y])
                else:
                    # Car state includes velocity: sample 100 random v per cell
                    # and average the values below.
                    for i in range(100):
                        v = np.random.random(
                        ) * 2 - 1  # random velocities on [-1, 1]
                        states.append([x, y, v])
        if not self.opt:
            if self.env_name != 'car':
                grid = self.model(self.torchify(
                    np.array(states))).detach().cpu().numpy()
                grid = grid.reshape(y_pts, x_pts)
            else:
                # Evaluate in chunks of the 100 velocity samples per cell,
                # then average over the velocity dimension.
                grid = []
                for i in range(x_pts * y_pts):
                    grid.append(
                        self.model(self.torchify(np.array(
                            states[i:i + 100]))).detach().cpu().numpy())
                grid = np.array(grid)
                grid = grid.squeeze()
                grid = np.mean(grid, axis=-1)
                grid = grid.reshape((y_pts, x_pts))
        else:
            raise (NotImplementedError("Need to implement opt"))
        # Overlay the environment's obstacle rectangle where applicable.
        if self.env_name == 'simplepointbot0':
            plt.gca().add_patch(
                Rectangle(
                    (0, 25),
                    500,
                    50,
                    linewidth=1,
                    edgecolor='r',
                    facecolor='none'))
        elif self.env_name == 'simplepointbot1':
            plt.gca().add_patch(
                Rectangle(
                    (45, 65),
                    10,
                    20,
                    linewidth=1,
                    edgecolor='r',
                    facecolor='none'))
        plt.imshow(grid.T)
        plt.savefig(osp.join(self.logdir, "value_" + str(ep)))

    def get_value(self, states, actions=None):
        """Return V(states); ``actions`` is accepted for API symmetry and ignored."""
        return self.model(states)
|
{"/supplement_plots.py": ["/plotting_utils.py"], "/analyze_runs_brijen.py": ["/plotting_utils.py"], "/gen_maze_demos.py": ["/env/maze.py", "/env/mazes.py"], "/analyze_runs_ashwin.py": ["/plotting_utils.py"], "/main.py": ["/sac.py", "/gen_pointbot0_demos.py", "/env/cartpole.py", "/env/half_cheetah_disabled.py", "/env/ant_disabled.py"], "/env/image_maze.py": ["/env/maze_const_images.py"], "/constraint.py": ["/utils.py"], "/analyze_runs_michael.py": ["/plotting_utils.py"], "/env/maze.py": ["/env/maze_const.py"], "/sac.py": ["/utils.py", "/constraint.py", "/run_multitask.py"], "/gen_pointbot_demos.py": ["/env/simplepointbot1.py"], "/env/mazes.py": ["/env/maze_const.py", "/env/maze.py"], "/gen_cartpole_demos.py": ["/env/cartpole.py"]}
|
29,157,563
|
JiahaoYao/mesa-safe-rl
|
refs/heads/main
|
/learning_to_adapt/envs/__init__.py
|
#from .ant_env import AntEnv
#from .half_cheetah_env import HalfCheetahEnv
#from .arm_7dof_env import Arm7DofEnv
#from .half_cheetah_blocks_env import HalfCheetahBlocksEnv
#from .half_cheetah_hfield_env import HalfCheetahHFieldEnv
|
{"/supplement_plots.py": ["/plotting_utils.py"], "/analyze_runs_brijen.py": ["/plotting_utils.py"], "/gen_maze_demos.py": ["/env/maze.py", "/env/mazes.py"], "/analyze_runs_ashwin.py": ["/plotting_utils.py"], "/main.py": ["/sac.py", "/gen_pointbot0_demos.py", "/env/cartpole.py", "/env/half_cheetah_disabled.py", "/env/ant_disabled.py"], "/env/image_maze.py": ["/env/maze_const_images.py"], "/constraint.py": ["/utils.py"], "/analyze_runs_michael.py": ["/plotting_utils.py"], "/env/maze.py": ["/env/maze_const.py"], "/sac.py": ["/utils.py", "/constraint.py", "/run_multitask.py"], "/gen_pointbot_demos.py": ["/env/simplepointbot1.py"], "/env/mazes.py": ["/env/maze_const.py", "/env/maze.py"], "/gen_cartpole_demos.py": ["/env/cartpole.py"]}
|
29,157,564
|
JiahaoYao/mesa-safe-rl
|
refs/heads/main
|
/analyze_runs_michael.py
|
import os.path as osp
import os
import numpy as np
import pickle
import matplotlib.pyplot as plt
from scipy.interpolate import make_interp_spline, BSpline
import glob
import pandas as pd
import seaborn as sns
from plotting_utils import get_color, get_legend_name
def get_directory(dirname, suffix, parent="/Users/michaelluo/Documents/recovery-rl"):
    """List full paths of entries under ``parent/dirname`` whose names end in ``suffix``.

    Args:
        dirname: experiment sub-directory under ``parent``.
        suffix: required filename suffix of matching entries.
        parent: root directory holding all experiment folders.

    Returns:
        list[str]: matching paths, in ``os.listdir`` order.
    """
    base = osp.join(parent, dirname)
    return [osp.join(base, entry)
            for entry in os.listdir(base)
            if entry.endswith(suffix)]
# Maps experiment name -> {"algs": {legend_key: [run directories]},
#                          "outfile": plot filename}.
# Most entries are disabled; uncomment the algorithms to include in a plot.
experiment_map = {
    "maze": {
        "algs": {
            #"multitask": get_directory("maze", "multi-maze"),
            #"meta": get_directory("maze", "meta-maze"),
            #"sac_norecovery": get_directory("maze", "vanilla"),
            # "sac_penalty1": get_directory("maze", "reward_1"),
            # "sac_penalty10": get_directory("maze", "reward_10"),
            #"sac_penalty100": get_directory("maze", "reward_100"),
            # "sac_lagrangian_1": get_directory("maze", "nu_1_update"),
            # "sac_lagrangian_10": get_directory("maze", "nu_10_update"),
            #"sac_lagrangian_100": get_directory("maze", "nu_100_update"),
            # "lookahead": get_directory("maze", "lookahead"),
            #"recovery": get_directory("maze", "recovery"),
            #"test": get_directory("temp", "test"),
        },
        "outfile": "maze_plot.png"
    },
    "cartpole": {
        "algs": {
            #"sac_vanilla": get_directory("nav1_recovery_no_task", "sac_base"),
            # "sac_penalty1": get_directory("maze", "reward_1"),
            #"reward_10": get_directory("cartpole_runs", "penalty_10_cartpole"),
            #"reward_50": get_directory("cartpole_runs", "penalty_100_cartpole"),
            # "sac_lagrangian_1": get_directory("maze", "nu_1_update"),
            # "sac_lagrangian_10": get_directory("maze", "nu_10_update"),
            #"sac_lagrangian_100": get_directory("maze", "nu_100_update"),
            # "lookahead": get_directory("maze", "lookahead"),
            #"sac_recovery_ddpg": get_directory("cartpole_runs", "2020-12-23_06-43-25_SAC_cartpole_Gaussian_recovery_0.15_0.8"),
            #"test": get_directory("temp", "test"),
            #"meta": get_directory("nav1_recovery_no_task", "meta"),
            #"multitask": get_directory("cartpole_meta", "2021-01-08_08-08-39_SAC_cartpole_Gaussian_recovery_0.15_0.8"),
        },
        "outfile": "cartpole_plot.png"
    },
    "pointbot0": {
        "algs": {
            "sac_vanilla": get_directory("nav2_recovery_notask", "sac_baseline"),
            "sac_recovery_ddpg": get_directory("nav2_recovery_notask", "recovery"),
            "meta": get_directory("nav2_recovery_notask", "meta"),
            "multitask": get_directory("nav2_recovery_notask", "multitask"),
            #"multitask": get_directory("pointbot0", "meta-nav1"),
            #"meta": get_directory("pointbot0", "multi-nav1"),
            #"sac_vanilla": get_directory("pointbot0", "vanilla"),
            # "sac_penalty1": get_directory("pointbot0", "reward_1"),
            # "sac_penalty10": get_directory("pointbot0", "reward_10"),
            # "sac_penalty100": get_directory("pointbot0", "reward_100"),
            #"sac_penalty": get_directory("pointbot0", "reward_1000"),
            # "sac_penalty3000": get_directory("pointbot0", "reward_3000"),
            # "sac_lagrangian_1": get_directory("pointbot0", "nu_1"),
            # "sac_lagrangian_10": get_directory("pointbot0", "nu_10"),
            # "sac_lagrangian_100": get_directory("pointbot0", "nu_100"),
            # "sac_lagrangian_1000": get_directory("pointbot0", "nu_1000"),
            #"sac_lagrangian": get_directory("pointbot0", "nu_5000"),
            # "sac_lagrangian_3000": get_directory("pointbot0", "nu_3000"),
            # "rcpo_1": get_directory("pointbot0", "rcpo_1"),
            # "rcpo_10": get_directory("pointbot0", "rcpo_10"),
            # "rcpo_100": get_directory("pointbot0", "rcpo_100"),
            # "rcpo_1000": get_directory("pointbot0", "rcpo_1000"),
            # "rcpo_5000": get_directory("pointbot0", "rcpo_5000"),
            #"sac_rcpo": get_directory("pointbot0", "rcpo_1000"),
            # "lookahead": get_directory("pointbot0", "lookahead"),
            #"sac_recovery_pets": get_directory("pointbot0", "pets"),
            #"sac_recovery_ddpg": get_directory("pointbot0", "ddpg"),
        },
        "outfile": "pointbot0.png"
    },
    "pointbot1": {
        "algs": {
            #"multitask": get_directory("pointbot1", "multi-nav2"),
            #"meta": get_directory("pointbot1", "meta-nav2"),
            #"sac_vanilla": get_directory("pointbot1", "vanilla"),
            # "sac_penalty1": get_directory("pointbot1", "reward_1"),
            # "sac_penalty10": get_directory("pointbot1", "reward_10"),
            # "sac_penalty100": get_directory("pointbot1", "reward_100"),
            # "sac_penalty1000": get_directory("pointbot1", "reward_1000"),
            #"sac_penalty": get_directory("pointbot1", "reward_3000"),
            # "sac_lagrangian_1": get_directory("pointbot1", "nu_1"),
            # "sac_lagrangian_10": get_directory("pointbot1", "nu_10"),
            # "sac_lagrangian_100": get_directory("pointbot1", "nu_100_update"),
            # "sac_lagrangian_500": get_directory("pointbot1", "nu_500_update"), # rerun this
            #"sac_lagrangian": get_directory("pointbot1", "nu_1000"),
            # "sac_lagrangian_5000": get_directory("pointbot1", "nu_5000"),
            # "rcpo_1": get_directory("pointbot1", "rcpo_1"),
            # "rcpo_10": get_directory("pointbot1", "rcpo_10"),
            # "rcpo_100": get_directory("pointbot1", "rcpo_100"),
            # "rcpo_1000": get_directory("pointbot1", "rcpo_1000"),
            #"sac_rcpo": get_directory("pointbot1", "rcpo_5000"),
            #"sac_recovery_pets": get_directory("pointbot1", "pets"),
            #"sac_recovery_ddpg": get_directory("pointbot1", "ddpg"),
        },
        "outfile": "pointbot1.png"
    },
}
def get_stats(data):
    """Aggregate multiple runs into mean and standard-error bands.

    Runs are truncated to the shortest common length before aggregating.

    Args:
        data: iterable of per-run sequences (possibly different lengths).

    Returns:
        (mu, lb, ub): per-step mean, mean minus SEM, mean plus SEM.
    """
    shortest = min(len(run) for run in data)
    trimmed = np.asarray([run[:shortest] for run in data])
    mu = trimmed.mean(axis=0)
    sem = trimmed.std(axis=0) / np.sqrt(len(trimmed))
    return mu, mu - sem, mu + sem
def moving_average(x, N):
    """NaN-aware sliding-window mean of width ``N``.

    Each window averages only its non-NaN entries; a window that is entirely
    NaN yields NaN.

    Args:
        x: 1-D sequence of numbers (may contain NaN).
        N: window width.

    Returns:
        list of ``len(x) - N + 1`` window means.
    """
    out = []
    for start in range(len(x) - N + 1):
        window = x[start:start + N]
        nan_count = np.count_nonzero(np.isnan(window))
        if nan_count < N:
            out.append(np.nansum(window) / (N - nan_count))
        else:
            out.append(np.nan)
    return out
# Per-environment plotting constants:
#   eps      — number of training episodes to include per run
#   envname  — display name used in plot titles
#   yscaling — scales the violations-subplot y-limit relative to eps
eps = {
    "maze": 1500,
    "pointbot0": 500,
    "pointbot1": 500,
    "shelf": 4000,
    "shelf_dynamic": 4000,
    "cartpole": 470,
}
envname = {
    "maze": "Maze",
    "pointbot0": "Navigation 1",
    "pointbot1": "Navigation 2",
    "shelf": "Shelf",
    "shelf_dynamic": "Dynamic Shelf",
    "cartpole": "Cartpole Length",
}
yscaling = {
    "maze": 0.25,
    "pointbot0": 0.5/5.0,
    "pointbot1": 0.3/5.0,
    "shelf": 0.15,
    "shelf_dynamic": 0.2,
    "cartpole": 0.15,
}
def plot_experiment(experiment):
    """Aggregate run statistics for ``experiment`` and draw a seaborn returns plot.

    For every algorithm configured in ``experiment_map[experiment]`` this loads
    each run's ``run_stats.pkl``, derives per-episode violation / success /
    return series, prints summary statistics, and finally plots the smoothed
    returns of all algorithms with seaborn.

    Fixes applied in review: removed a leftover ``pdb.set_trace()`` debug
    breakpoint that halted every invocation, dead list assignments that were
    immediately overwritten, a duplicated ``pointbot0`` branch identical to the
    ``else`` branch, and large disabled (triple-quoted) plotting blocks.

    Args:
        experiment: key into ``experiment_map`` / ``eps`` (e.g. "pointbot0").
    """
    max_eps = eps[experiment]  # truncate every run to this many episodes
    final_ratios_dict = {}
    final_successes_means = []
    final_violations_means = []
    listDF = []  # one DataFrame of returns per algorithm
    for alg in experiment_map[experiment]["algs"]:
        print(alg)
        exp_dirs = experiment_map[experiment]["algs"][alg]
        exp_dirs = glob.glob(exp_dirs[0] + "/*/")
        fnames = [osp.join(exp_dir, "run_stats.pkl") for exp_dir in exp_dirs]
        task_successes_list = []
        train_rewards_safe_list = []
        train_violations_list = []
        recovery_called_list = []
        recovery_called_constraint_list = []
        prop_viol_recovery_list = []
        for fname in fnames:
            with open(fname, "rb") as f:
                data = pickle.load(f)
            train_stats = data['train_stats']
            train_violations = []
            train_rewards = []
            last_rewards = []
            recovery_called = []
            # Violations while the recovery policy was / was not active
            # (only tracked for recovery-based algorithms).
            num_viols_recovery = 0
            num_viols_no_recovery = 0
            for traj_stats in train_stats:
                train_violations.append([])
                recovery_called.append([])
                train_rewards.append(0)
                last_reward = 0
                for step_stats in traj_stats:
                    train_violations[-1].append(step_stats['constraint'])
                    if "recovery" in alg:
                        recovery_viol = int(step_stats['recovery'] and step_stats['constraint'])
                        no_recovery_viol = int((not step_stats['recovery']) and step_stats['constraint'])
                        num_viols_recovery += recovery_viol
                        num_viols_no_recovery += no_recovery_viol
                    train_rewards[-1] += step_stats['reward']
                    last_reward = step_stats['reward']
                last_rewards.append(last_reward)
            # Collapse per-step records into per-episode indicators,
            # truncated to max_eps episodes.
            recovery_called = np.array([np.sum(t) > 0 for t in recovery_called])[:max_eps].astype(int)
            train_violations = np.array([np.sum(t) > 0 for t in train_violations])[:max_eps]
            train_rewards = np.array(train_rewards)[:max_eps]
            # Mask out returns of violating episodes; the NaN-aware smoothing
            # below skips them. (Writes through to train_rewards — same array.)
            train_rewards_safe = train_rewards
            train_rewards_safe[train_violations > 0] = np.nan
            recovery_called_constraint = np.bitwise_and(recovery_called, train_violations)
            recovery_called = np.cumsum(recovery_called)
            train_violations = np.cumsum(train_violations)
            recovery_called_constraint = np.cumsum(recovery_called_constraint)
            last_rewards = np.array(last_rewards)[:max_eps]
            # Success criterion depends on the environment's reward convention.
            if 'maze' in experiment:
                task_successes = (-last_rewards < 0.03).astype(int)
            elif 'shelf' in experiment:
                task_successes = (last_rewards == 0).astype(int)
            else:
                task_successes = (last_rewards > -4).astype(int)
            task_successes = np.cumsum(task_successes)
            task_successes_list.append(task_successes)
            train_rewards_safe_list.append(train_rewards_safe)
            train_violations_list.append(train_violations)
            recovery_called_list.append(recovery_called)
            recovery_called_constraint_list.append(recovery_called_constraint)
            total_viols = num_viols_no_recovery + num_viols_recovery
            if total_viols != 0:
                prop_viol_recovery_list.append(float(num_viols_recovery) / float(total_viols))
            else:
                prop_viol_recovery_list.append(-1)  # sentinel: no violations seen
        task_successes_list = np.array(task_successes_list)
        train_violations_list = np.array(train_violations_list)
        # Smooth out train rewards (window 1 is currently a no-op).
        for i in range(len(train_rewards_safe_list)):
            train_rewards_safe_list[i] = moving_average(train_rewards_safe_list[i], 1)
        train_rewards_safe_list = np.array(train_rewards_safe_list)
        recovery_called_list = np.array(recovery_called_list)
        recovery_called_constraint_list = np.array(recovery_called_constraint_list)
        print("TASK SUCCESSES", task_successes_list.shape)
        print("TRAIN VIOLS", train_violations_list.shape)
        print("TRAIN RECOVERY", recovery_called_list.shape)
        print("TRAIN RECOVERY CONSTRAINT", recovery_called_constraint_list.shape)
        print("TRAIN REWARDS", train_rewards_safe_list.shape)
        # Success/violation ratio; +1 on both sides avoids division by zero.
        safe_ratios = (task_successes_list + 1) / (train_violations_list + 1)
        final_ratio = safe_ratios.mean(axis=0)[-1]
        final_successes = task_successes_list[:, -1]
        final_violations = train_violations_list[:, -1]
        final_success_mean = np.mean(final_successes)
        final_violation_mean = np.mean(final_violations)
        final_successes_means.append(final_success_mean)
        final_violations_means.append(final_violation_mean)
        print("FINAL SUCCESSES", final_success_mean)
        print("FINAL VIOLATIONS", final_violation_mean)
        print("FINAL RATIO: ", final_ratio)
        print("PROP VIOLS", experiment, prop_viol_recovery_list)
        final_ratios_dict[alg] = final_ratio
        color = get_color(alg)  # kept for the (currently disabled) matplotlib plots
        name_dict = {'multitask': "Multi-Task", 'meta': "MESA", 'sac_recovery_ddpg': "RRL", "sac_vanilla": "Unconstrained"}
        num_runs = train_rewards_safe_list.shape[0]
        len_episode = train_rewards_safe_list.shape[1]
        run_dict = {'Episode': np.array(list(range(len_episode)) * num_runs),
                    'Returns': train_rewards_safe_list.flatten(),
                    'Name': [name_dict[alg]] * len_episode * num_runs}
        listDF.append(pd.DataFrame(run_dict))
    sns.set_theme()
    final_df = pd.concat(listDF)
    plt.ylim(-5, 60)
    ax = sns.lineplot(data=final_df, x='Episode', y="Returns", hue="Name")
    ax.set_xlabel("Episode", fontsize=14)
    ax.set_ylabel("Returns", fontsize=14)
    # NOTE(review): the title is hard-coded to "Navigation 2" regardless of
    # `experiment` — consider envname[experiment].
    ax.set_title("Navigation 2", fontsize=16)
    ax.get_legend().remove()
    plt.show()
if __name__ == '__main__':
    # Plot results for a single experiment key (see experiment_map above).
    experiment = "pointbot0"
    plot_experiment(experiment)
|
{"/supplement_plots.py": ["/plotting_utils.py"], "/analyze_runs_brijen.py": ["/plotting_utils.py"], "/gen_maze_demos.py": ["/env/maze.py", "/env/mazes.py"], "/analyze_runs_ashwin.py": ["/plotting_utils.py"], "/main.py": ["/sac.py", "/gen_pointbot0_demos.py", "/env/cartpole.py", "/env/half_cheetah_disabled.py", "/env/ant_disabled.py"], "/env/image_maze.py": ["/env/maze_const_images.py"], "/constraint.py": ["/utils.py"], "/analyze_runs_michael.py": ["/plotting_utils.py"], "/env/maze.py": ["/env/maze_const.py"], "/sac.py": ["/utils.py", "/constraint.py", "/run_multitask.py"], "/gen_pointbot_demos.py": ["/env/simplepointbot1.py"], "/env/mazes.py": ["/env/maze_const.py", "/env/maze.py"], "/gen_cartpole_demos.py": ["/env/cartpole.py"]}
|
29,157,565
|
JiahaoYao/mesa-safe-rl
|
refs/heads/main
|
/make_legend.py
|
import scipy.io as sio
import matplotlib.pyplot as plt
import matplotlib.patches as mpatches
from matplotlib.colors import colorConverter as cc
import pylab
import numpy as np
class LegendObject(object):
    """Custom matplotlib legend handler that renders an entry as a filled
    rectangle, optionally with a centered dash overlay."""

    def __init__(self, facecolor='red', edgecolor='white', dashed=False):
        self.facecolor = facecolor
        self.edgecolor = edgecolor
        self.dashed = dashed

    def legend_artist(self, legend, orig_handle, fontsize, handlebox):
        """Draw this entry's artist into *handlebox* and return it."""
        left, bottom = handlebox.xdescent, handlebox.ydescent
        w, h = handlebox.width, handlebox.height
        # Main swatch: filled with the primary color, edged in the faded
        # companion color.
        swatch = mpatches.Rectangle(
            [left, bottom], w, h,
            facecolor=self.facecolor, edgecolor=self.edgecolor, lw=3)
        handlebox.add_artist(swatch)
        if self.dashed:
            # Overlay a narrow edge-colored bar in the middle to mimic a
            # dashed line in the legend.
            dash = mpatches.Rectangle(
                [left + 2 * w / 5, bottom], w / 5, h,
                facecolor=self.edgecolor,
                transform=handlebox.get_transform())
            handlebox.add_artist(dash)
        return swatch
# Standalone figure that contains only the legend for the six baselines.
figlegend = pylab.figure(figsize=(15.5,0.85))
bg = np.array([1, 1, 1])  # background of the legend is white
# colors = ["#7776bc", "#aef78e", "#8ff499", "#66a182", "#b7c335", "#be8d39"]
colors = ["#AA5D1F", "#BA2DC1", "#6C2896", "#D43827", "#4899C5", "#34539C"]
# Faded variants (averaged with white) used as rectangle edges.
colors_faded = [(np.array(cc.to_rgb(color)) + bg) / 2.0 for color in colors]
# The integer "handles" are only keys into handler_map; LegendObject does
# the actual drawing.
figlegend.legend([0, 1, 2, 3, 4, 5],
                 ['Unconstrained', 'LR', 'RSPO', 'SQRL', 'RP', 'RCPO'],
                 handler_map={
                     0: LegendObject(colors[0], colors_faded[0]),
                     1: LegendObject(colors[1], colors_faded[1]),
                     2: LegendObject(colors[2], colors_faded[2]),
                     3: LegendObject(colors[3], colors_faded[3]),
                     4: LegendObject(colors[4], colors_faded[4]),
                     5: LegendObject(colors[5], colors_faded[5]),
                     # 6: LegendObject(colors[6], colors_faded[6]),
                     # 7: LegendObject(colors[7], colors_faded[7]),
                 }, loc='lower right', fontsize=24, ncol=6)
figlegend.savefig('legend.png')

# Second legend-only figure for the two Recovery RL variants.
figlegend = pylab.figure(figsize=(14.4,0.85))
bg = np.array([1, 1, 1])  # background of the legend is white
# colors = ["#f88585", "#830404"]
colors = ["#60CC38", "#349C26"]
colors_faded = [(np.array(cc.to_rgb(color)) + bg) / 2.0 for color in colors]
figlegend.legend([0, 1],
                 ['Ours: Recovery RL (MF Recovery)', 'Ours: Recovery RL (MB Recovery)'],
                 handler_map={
                     0: LegendObject(colors[0], colors_faded[0]),
                     1: LegendObject(colors[1], colors_faded[1]),
                     # 6: LegendObject(colors[6], colors_faded[6]),
                     # 7: LegendObject(colors[7], colors_faded[7]),
                 }, loc='lower right', fontsize=24, ncol=2)
figlegend.savefig('legend_ours.png')
|
{"/supplement_plots.py": ["/plotting_utils.py"], "/analyze_runs_brijen.py": ["/plotting_utils.py"], "/gen_maze_demos.py": ["/env/maze.py", "/env/mazes.py"], "/analyze_runs_ashwin.py": ["/plotting_utils.py"], "/main.py": ["/sac.py", "/gen_pointbot0_demos.py", "/env/cartpole.py", "/env/half_cheetah_disabled.py", "/env/ant_disabled.py"], "/env/image_maze.py": ["/env/maze_const_images.py"], "/constraint.py": ["/utils.py"], "/analyze_runs_michael.py": ["/plotting_utils.py"], "/env/maze.py": ["/env/maze_const.py"], "/sac.py": ["/utils.py", "/constraint.py", "/run_multitask.py"], "/gen_pointbot_demos.py": ["/env/simplepointbot1.py"], "/env/mazes.py": ["/env/maze_const.py", "/env/maze.py"], "/gen_cartpole_demos.py": ["/env/cartpole.py"]}
|
29,157,566
|
JiahaoYao/mesa-safe-rl
|
refs/heads/main
|
/env/reacher.py
|
from __future__ import division
from __future__ import print_function
from __future__ import absolute_import
import os
import numpy as np
from gym import utils
from gym.envs.mujoco import mujoco_env
# TARGET = np.array([0.13345871, 0.21923056, -0.10861196])
TARGET = np.array([0., 0., -0.])
THRESH = 0.07
HORIZON = 150
class ReacherSparse3DEnv(mujoco_env.MujocoEnv, utils.EzPickle):
    """7-DoF MuJoCo reacher with a sparse goal bonus and a spherical
    end-effector obstacle whose penetration is reported as the safety
    constraint in each step's info dict."""

    def __init__(self):
        self.viewer, self.time = None, 0
        utils.EzPickle.__init__(self)
        dir_path = os.path.dirname(os.path.realpath(__file__))
        self.goal = np.copy(TARGET)
        self._max_episode_steps = HORIZON
        # self.obstacle = ReacherObstacle(np.array([0.5, 0.2, 0]), 0.15)
        self.obstacle = ReacherEEObstacle(np.array([0.5, 0.2, 0]), 0.15)
        self.transition_function = get_random_transitions
        mujoco_env.MujocoEnv.__init__(
            self, os.path.join(dir_path, 'assets/reacher3d.xml'), 2)

    def step(self, a):
        """Advance one frame-skip step.

        Returns (obs, -cost, done, info) where cost is squared EE-to-goal
        distance plus a small control penalty; reaching within THRESH
        replaces the cost with a large negative bonus.
        """
        # a = self.process_action(a)
        old_state = self._get_obs().copy()
        # if not self.obstacle(self.get_EE_pos(old_state[None])):
        self.do_simulation(a, self.frame_skip)
        self.time += 1
        ob = self._get_obs().copy()
        obs_cost = np.sum(np.square(self.get_EE_pos(ob[None]) - self.goal))
        ctrl_cost = 0.001 * np.square(a).sum()
        cost = obs_cost + ctrl_cost
        if obs_cost < THRESH:
            # Sparse goal bonus dominates the shaped cost.
            cost = -10000 + (1e-5) * np.square(a).sum()
        # NOTE(review): duplicated condition -- could be folded into the
        # branch above; kept as-is to preserve behavior byte-for-byte.
        if obs_cost < THRESH:
            print("goal", ctrl_cost, obs_cost, self.time)
        done = HORIZON <= self.time
        return ob, -cost, done, {
            "constraint": self.obstacle(self.get_EE_pos(ob[None])),
            "reward": -cost,
            "state": old_state,
            "next_state": ob,
            "action": a
        }

    def process_action(self, action):
        """Identity hook; subclasses/ablations may clip or rescale here."""
        return action

    def viewer_setup(self):
        # Camera framing for rendering only; no effect on dynamics.
        self.viewer.cam.trackbodyid = 1
        self.viewer.cam.distance = 2.5
        self.viewer.cam.elevation = -30
        self.viewer.cam.azimuth = 270

    def reset_model(self):
        """Reset joints to the initial pose, zero goal-joint velocities,
        and pin the goal marker to the fixed TARGET."""
        qpos, qvel = np.copy(self.init_qpos), np.copy(self.init_qvel)
        # qpos[-3:] += np.random.normal(loc=0, scale=0.1, size=[3])
        qvel[-3:] = 0
        self.time = 0
        # self.goal = qpos[-3:]
        qpos[-3:] = self.goal = np.copy(TARGET)
        self.set_state(qpos, qvel)
        return self._get_obs()

    def _get_obs(self):
        # Joint positions plus velocities, excluding the 3 goal-marker
        # velocity entries.
        return np.concatenate([
            self.sim.data.qpos.flat,
            self.sim.data.qvel.flat[:-3],
        ])

    def get_EE_pos(self, states):
        """Forward kinematics: map batched joint states (N, >=7) to the
        end-effector's 3D position (N, 3)."""
        theta1, theta2, theta3, theta4, theta5, theta6, theta7 = \
            states[:, :1], states[:, 1:2], states[:, 2:3], states[:, 3:4], states[:, 4:5], states[:, 5:6], states[:, 6:]
        rot_axis = np.concatenate(
            [
                np.cos(theta2) * np.cos(theta1),
                np.cos(theta2) * np.sin(theta1), -np.sin(theta2)
            ],
            axis=1)
        rot_perp_axis = np.concatenate(
            [-np.sin(theta1),
             np.cos(theta1),
             np.zeros(theta1.shape)], axis=1)
        cur_end = np.concatenate(
            [
                0.1 * np.cos(theta1) + 0.4 * np.cos(theta1) * np.cos(theta2),
                0.1 * np.sin(theta1) + 0.4 * np.sin(theta1) * np.cos(theta2) -
                0.188, -0.4 * np.sin(theta2)
            ],
            axis=1)
        # Propagate the rotation frame through the remaining two links.
        for length, hinge, roll in [(0.321, theta4, theta3), (0.16828, theta6,
                                                              theta5)]:
            perp_all_axis = np.cross(rot_axis, rot_perp_axis)
            x = np.cos(hinge) * rot_axis
            y = np.sin(hinge) * np.sin(roll) * rot_perp_axis
            z = -np.sin(hinge) * np.cos(roll) * perp_all_axis
            new_rot_axis = x + y + z
            new_rot_perp_axis = np.cross(new_rot_axis, rot_axis)
            # Guard against degenerate (near-parallel) axes.
            new_rot_perp_axis[np.linalg.norm(new_rot_perp_axis, axis=1) < 1e-30] = \
                rot_perp_axis[np.linalg.norm(new_rot_perp_axis, axis=1) < 1e-30]
            new_rot_perp_axis /= np.linalg.norm(
                new_rot_perp_axis, axis=1, keepdims=True)
            rot_axis, rot_perp_axis, cur_end = new_rot_axis, new_rot_perp_axis, cur_end + length * new_rot_axis
        return cur_end

    def is_stable(self, ob):
        # NOTE(review): compares the *squared* distance against THRESH,
        # matching the cost check in step(); confirm this is intended
        # rather than comparing the distance itself.
        return (np.sum(np.square(self.get_EE_pos(ob[None]) - self.goal)) <
                THRESH).astype(bool)
def get_random_transitions(num_transitions, task_demos=False):
    """Collect `num_transitions` single-step transitions from freshly
    reset states under unit-Gaussian random actions.

    Returns a list of (state, action, reward, next_state, done) tuples;
    with task_demos=True also returns `task_transitions`, which is never
    populated here (always empty).
    """
    env = ReacherSparse3DEnv()
    transitions = []
    task_transitions = []
    done = False
    for i in range(num_transitions):
        # Each transition starts from a fresh reset, so these are i.i.d.
        # one-step samples, not rollouts.
        state = env.reset()
        action = np.random.randn(7)
        next_state, reward, done, info = env.step(action)
        # NOTE(review): the constraint flag is read but not stored in the
        # tuple below -- verify whether it should be part of the demo data.
        constraint = info['constraint']
        transitions.append((state, action, reward, next_state, done))
    if not task_demos:
        return transitions
    else:
        return transitions, task_transitions
class ReacherEEObstacle:
    """Spherical obstacle tested against the reacher end-effector position.

    Calling the instance with a 3D point returns True when the point lies
    inside (or on the boundary of) the sphere of `radius` around `center`.
    """

    def __init__(self, center=(0., 0, 0), radius=0.1):
        # Tuple default replaces the original mutable list default
        # (shared-mutable-default pitfall); any sequence is accepted and
        # copied into a fresh ndarray.
        self.center = np.array(center)
        self.radius = radius

    def __call__(self, x):
        # Euclidean distance from the sphere center; the boundary counts
        # as a collision.
        return np.linalg.norm(x - self.center) <= self.radius
class ReacherObstacle:
    """Spherical obstacle tested against the full arm (all link segments),
    not just the end effector.

    NOTE(review): this class appears to be dead/broken code in this file:
    it uses `tf` (TensorFlow) which is never imported here, and `__call__`
    references an undefined name `shape` (line `tf.zeros(shape[:1], ...)`)
    -- it would raise NameError if invoked. The active obstacle in
    ReacherSparse3DEnv is ReacherEEObstacle.
    """

    def __init__(self, center=[0., 0, 0], radius=0.1, arm_size=0.09,
                 penalty=1):
        # spherical obstacle
        # self.center = tf.convert_to_tensor(center, dtype=tf.dtypes.float32)
        self.center = np.array(center)
        self.radius = radius
        # Inflate the sphere by the arm thickness so segment-to-center
        # distance checks capture any part of the arm touching the sphere.
        self.collision_radius = radius + arm_size

    def __call__(self, x):
        """Batched collision test: for each state, check whether any arm
        segment comes within collision_radius of the sphere center."""
        x = x[:, :, :, :7]
        x_reshaped = tf.reshape(x, (-1, 7))
        # BUG(review): `shape` is undefined here -- presumably meant
        # tf.shape(x_reshaped); confirm before reviving this code path.
        bools = tf.zeros(shape[:1], dtype=tf.dtypes.bool)
        points = self.reacher_points(x_reshaped)
        for i in range(1, len(points)):
            # Segment endpoints relative to the sphere center.
            v1 = (points[i] - points[i - 1])[:, :3]
            v2 = points[i - 1][:, :3] - self.center
            v2_other = points[i][:, :3] - self.center
            # Parameter of the closest point on the infinite line.
            lambda_num = -tf.reduce_sum(tf.multiply(v1, v2), axis=1)
            lambda_denom = tf.multiply(
                tf.norm(v1, axis=1), tf.norm(v1, axis=1))
            v3 = tf.cross(v1, v2)
            shortest_dists = tf.norm(v3, axis=1) / tf.norm(v1, axis=1)
            # Use the perpendicular distance only when the foot of the
            # perpendicular falls within the segment; otherwise use the
            # nearer endpoint distance.
            shortest_in_segment = tf.logical_and(lambda_num > 0,
                                                 lambda_num < lambda_denom)
            actual_dist = tf.multiply(
                tf.dtypes.cast(shortest_in_segment, tf.dtypes.float32),
                shortest_dists) + tf.multiply(
                    tf.dtypes.cast(
                        tf.logical_not(shortest_in_segment),
                        tf.dtypes.float32),
                    tf.minimum(tf.norm(v2, axis=1), tf.norm(v2_other, axis=1)))
            #bools = tf.logical_or(bools, curr_bools)
            bools = tf.logical_or(bools, actual_dist < self.collision_radius)
            #print(bools.numpy())
        bools_reshaped = tf.dtypes.cast(
            tf.reshape(bools,
                       tf.shape(x)[:3]), tf.dtypes.float32)
        return bools_reshaped

    @staticmethod
    def reacher_points(state):
        """Forward kinematics via homogeneous transforms: return the five
        key points along the arm for a batch of joint states."""
        # state.shape should equal (-1, 7)
        #import ipdb; ipdb.set_trace()
        points = [[0, -0.188, 0, 1], [0.1, -0.188, 0, 1], [0.5, -0.188, 0, 1],
                  [0.821, -0.188, 0, 1], [1.021, -0.188, 0, 1]]
        # NOTE(review): the list above is immediately overwritten with
        # zero points below -- the first assignment is dead.
        points = [[0, 0., 0, 1], [0, 0., 0, 1], [0, 0., 0, 1], [0, 0., 0, 1],
                  [0, 0., 0, 1]]
        with tf.name_scope('p0'):
            transform = TF_FK.translate(state[:, 0],
                                        [0, -0.188, 0])  # (-1, 4, 4)
            points[0] = tf.tensordot(
                transform, points[0], axes=[[2], [0]])  # (-1, 4, 4) @ (4,)
        with tf.name_scope('p1'):
            transform = transform @ TF_FK.rot_z(state[:, 0])
            transform = transform @ TF_FK.translate(state[:, 0], [0.1, 0, 0])
            points[1] = tf.tensordot(
                transform, points[1], axes=[[2], [0]])  # (-1, 4, 4) @ (4,)
        with tf.name_scope('p2'):
            transform = transform @ TF_FK.rot_y(state[:, 1])
            transform = transform @ TF_FK.rot_x(state[:, 2])
            transform = transform @ TF_FK.translate(state[:, 0], [0.4, 0, 0])
            points[2] = tf.tensordot(
                transform, points[2], axes=[[2], [0]])  # (-1, 4, 4) @ (4,)
        with tf.name_scope('p3'):
            transform = transform @ TF_FK.rot_y(state[:, 3])
            transform = transform @ TF_FK.rot_x(state[:, 4])
            transform = transform @ TF_FK.translate(state[:, 0], [0.321, 0, 0])
            points[3] = tf.tensordot(
                transform, points[3], axes=[[2], [0]])  # (-1, 4, 4) @ (4,)
        with tf.name_scope('p4'):
            transform = transform @ TF_FK.rot_y(state[:, 5])
            transform = transform @ TF_FK.rot_x(state[:, 6])
            transform = transform @ TF_FK.translate(state[:, 0], [0.2, 0, 0])
            points[4] = tf.tensordot(
                transform, points[4], axes=[[2], [0]])  # (-1, 4, 4) @ (4,)
        return points
class TF_FK():
    """Batched homogeneous-transform builders (4x4 rotation/translation
    matrices) for TensorFlow forward kinematics.

    NOTE(review): `tf` is not imported in this file; these helpers are
    only reachable from the (also broken) ReacherObstacle path.
    """

    @staticmethod
    def _transpose_correct(mat):
        # turn (4x4x10000) to (10000x4x4)
        return tf.transpose(mat, perm=[2, 0, 1])

    @staticmethod
    def rot_x(theta):
        """Batch of 4x4 rotations about the x-axis by `theta`."""
        return TF_FK._transpose_correct(
            tf.convert_to_tensor([[
                tf.ones_like(theta),
                tf.zeros_like(theta),
                tf.zeros_like(theta),
                tf.zeros_like(theta)
            ], [
                tf.zeros_like(theta),
                tf.cos(theta), -tf.sin(theta),
                tf.zeros_like(theta)
            ], [
                tf.zeros_like(theta),
                tf.sin(theta),
                tf.cos(theta),
                tf.zeros_like(theta)
            ], [
                tf.zeros_like(theta),
                tf.zeros_like(theta),
                tf.zeros_like(theta),
                tf.ones_like(theta)
            ]]))

    @staticmethod
    def rot_y(theta):
        """Batch of 4x4 rotations about the y-axis by `theta`."""
        return TF_FK._transpose_correct(
            tf.convert_to_tensor([[
                tf.cos(theta),
                tf.zeros_like(theta),
                tf.sin(theta),
                tf.zeros_like(theta)
            ], [
                tf.zeros_like(theta),
                tf.ones_like(theta),
                tf.zeros_like(theta),
                tf.zeros_like(theta)
            ], [
                -tf.sin(theta),
                tf.zeros_like(theta),
                tf.cos(theta),
                tf.zeros_like(theta)
            ], [
                tf.zeros_like(theta),
                tf.zeros_like(theta),
                tf.zeros_like(theta),
                tf.ones_like(theta)
            ]]))

    @staticmethod
    def rot_z(theta):
        """Batch of 4x4 rotations about the z-axis by `theta`."""
        return TF_FK._transpose_correct(
            tf.convert_to_tensor([[
                tf.cos(theta), -tf.sin(theta),
                tf.zeros_like(theta),
                tf.zeros_like(theta)
            ], [
                tf.sin(theta),
                tf.cos(theta),
                tf.zeros_like(theta),
                tf.zeros_like(theta)
            ], [
                tf.zeros_like(theta),
                tf.zeros_like(theta),
                tf.ones_like(theta),
                tf.zeros_like(theta)
            ], [
                tf.zeros_like(theta),
                tf.zeros_like(theta),
                tf.zeros_like(theta),
                tf.ones_like(theta)
            ]]))

    @staticmethod
    def translate(theta, amount):
        """Batch of 4x4 translations by the fixed 3-vector `amount`;
        `theta` only supplies the batch shape via ones_like/zeros_like."""
        return TF_FK._transpose_correct(
            tf.convert_to_tensor([[
                tf.ones_like(theta),
                tf.zeros_like(theta),
                tf.zeros_like(theta), amount[0] * tf.ones_like(theta)
            ], [
                tf.zeros_like(theta),
                tf.ones_like(theta),
                tf.zeros_like(theta), amount[1] * tf.ones_like(theta)
            ], [
                tf.zeros_like(theta),
                tf.zeros_like(theta),
                tf.ones_like(theta), amount[2] * tf.ones_like(theta)
            ], [
                tf.zeros_like(theta),
                tf.zeros_like(theta),
                tf.zeros_like(theta),
                tf.ones_like(theta)
            ]]))

    @staticmethod
    def translate_meta(amount):
        """Curried form of translate: fixes `amount`, defers `theta`."""
        return lambda theta: TF_FK.translate(theta, amount)
if __name__ == '__main__':
    # Smoke test: random rollout with rendering, printing the constraint
    # flag at each step.
    import time
    env = ReacherSparse3DEnv()
    env.reset()
    # env.render()
    for i in range(100):
        state, rewards, done, info = env.step(np.random.randn(7))
        env.render()
        print(info['constraint'])
        time.sleep(0.2)
|
{"/supplement_plots.py": ["/plotting_utils.py"], "/analyze_runs_brijen.py": ["/plotting_utils.py"], "/gen_maze_demos.py": ["/env/maze.py", "/env/mazes.py"], "/analyze_runs_ashwin.py": ["/plotting_utils.py"], "/main.py": ["/sac.py", "/gen_pointbot0_demos.py", "/env/cartpole.py", "/env/half_cheetah_disabled.py", "/env/ant_disabled.py"], "/env/image_maze.py": ["/env/maze_const_images.py"], "/constraint.py": ["/utils.py"], "/analyze_runs_michael.py": ["/plotting_utils.py"], "/env/maze.py": ["/env/maze_const.py"], "/sac.py": ["/utils.py", "/constraint.py", "/run_multitask.py"], "/gen_pointbot_demos.py": ["/env/simplepointbot1.py"], "/env/mazes.py": ["/env/maze_const.py", "/env/maze.py"], "/gen_cartpole_demos.py": ["/env/cartpole.py"]}
|
29,157,567
|
JiahaoYao/mesa-safe-rl
|
refs/heads/main
|
/env/push_env/push.py
|
import numpy as np
import os
from gym import utils
from gym.envs.robotics import fetch_env
# Ensure we get the path separator correct on windows
MODEL_XML_PATH = os.path.join(os.getcwd(), "assets", 'fetch', 'push.xml')
class FetchPushEnv(fetch_env.FetchEnv, utils.EzPickle):
    """Fetch push task loaded from a local XML, mirroring gym's
    FetchPush-v1 but with a repo-local model path.

    reward_type: 'dense' (negative distance) or 'sparse', forwarded to
    fetch_env.FetchEnv.
    """

    def __init__(self, reward_type='dense'):
        initial_qpos = {
            'robot0:slide0': 0.405,
            'robot0:slide1': 0.48,
            'robot0:slide2': 0.0,
            'object0:joint': [1.25, 0.53, 0.4, 1., 0., 0., 0.],
        }
        # Gripper is blocked (pure pushing); target stays on the table.
        fetch_env.FetchEnv.__init__(
            self, MODEL_XML_PATH, has_object=True, block_gripper=True, n_substeps=20,
            gripper_extra_height=0.0, target_in_the_air=False, target_offset=0.0,
            obj_range=0.15, target_range=0.15, distance_threshold=0.05,
            initial_qpos=initial_qpos, reward_type=reward_type)
        utils.EzPickle.__init__(self)
# Smoke-test script: render one frame to disk, then apply a constant
# action for 1000 steps.
# env = gym.make('FetchPush-v1')
env = FetchPushEnv()
env.seed(9)
env.reset()
obs = env.render(mode='rgb_array')
# BUG(review): `cv2` is used here but never imported in this file --
# this line raises NameError at module load; add `import cv2` at the top.
cv2.imwrite('temp.jpg', 255*obs)
# import IPython; IPython.embed()
for _ in range(1000):
    #env.render()
    env.step(np.array([.1, 0, 0, 0]))  # take a random action
env.close()
|
{"/supplement_plots.py": ["/plotting_utils.py"], "/analyze_runs_brijen.py": ["/plotting_utils.py"], "/gen_maze_demos.py": ["/env/maze.py", "/env/mazes.py"], "/analyze_runs_ashwin.py": ["/plotting_utils.py"], "/main.py": ["/sac.py", "/gen_pointbot0_demos.py", "/env/cartpole.py", "/env/half_cheetah_disabled.py", "/env/ant_disabled.py"], "/env/image_maze.py": ["/env/maze_const_images.py"], "/constraint.py": ["/utils.py"], "/analyze_runs_michael.py": ["/plotting_utils.py"], "/env/maze.py": ["/env/maze_const.py"], "/sac.py": ["/utils.py", "/constraint.py", "/run_multitask.py"], "/gen_pointbot_demos.py": ["/env/simplepointbot1.py"], "/env/mazes.py": ["/env/maze_const.py", "/env/maze.py"], "/gen_cartpole_demos.py": ["/env/cartpole.py"]}
|
29,157,568
|
JiahaoYao/mesa-safe-rl
|
refs/heads/main
|
/env/maze.py
|
import os
import pickle
import matplotlib.pyplot as plt
import os.path as osp
import numpy as np
from gym import Env
from gym import utils
from gym.spaces import Box
from mujoco_py import load_model_from_path, MjSim
from .maze_const import *
import cv2
def process_action(a):
    """Clamp a raw action into the legal force range [-MAX_FORCE, MAX_FORCE]."""
    lo, hi = -MAX_FORCE, MAX_FORCE
    return np.clip(a, lo, hi)
def process_obs(obs):
    """Convert an HWC image array to CHW layout (channels first)."""
    return np.transpose(obs, (2, 0, 1))
def get_random_transitions(num_transitions,
                           images=False,
                           save_rollouts=False,
                           task_demos=False,
                           env_cls = None):
    """Collect maze transitions: first half under random actions, second
    half under the env's scripted expert.

    Episodes are re-seeded every 20 steps with a start-position difficulty
    sampled from {'e','m','h'}. Each transition tuple is
    (state, action, constraint, next_state, not done). Returns either the
    flat transition list or, with save_rollouts=True, the per-episode
    rollout lists. `task_demos` is currently unused here.
    """
    env = env_cls()
    transitions = []
    num_constraints = 0
    total = 0
    rollouts = []
    # Phase 1: uniformly random actions.
    for i in range(1 * num_transitions // 2):
        if i % 20 == 0:
            # Resample a start difficulty and reset without constraint
            # checking so violating starts can appear in the data.
            sample = np.random.uniform(0, 1, 1)[0]
            if sample < 0.3:  # maybe make 0.2 to 0.3
                mode = 'e'
            elif sample < 0.6:
                mode = 'm'
            else:
                mode = 'h'
            state = env.reset(mode, check_constraint=False, demos=True)
            rollouts.append([])
            if images:
                im_state = env.sim.render(64, 64, camera_name="cam0")
                im_state = process_obs(im_state)
        action = env.action_space.sample()
        next_state, reward, done, info = env.step(action)
        if images:
            im_next_state = env.sim.render(64, 64, camera_name="cam0")
            im_next_state = process_obs(im_next_state)
        constraint = info['constraint']
        rollouts[-1].append((state, action, constraint, next_state, not done))
        transitions.append((state, action, constraint, next_state, not done))
        total += 1
        num_constraints += int(constraint)
        state = next_state
        if images:
            im_state = im_next_state
        # if done:
        #     sample = np.random.uniform(0, 1, 1)[0]
        #     if sample < 0.2:  # maybe make 0.2 to 0.3
        #         mode = 'e'
        #     elif sample < 0.4:
        #         mode = 'm'
        #     else:
        #         mode = 'h'
        #     state = env.reset(mode, check_constraint=False)
        #     rollouts.append([])
    # Phase 2: identical loop but actions come from the scripted expert.
    for i in range(1 * num_transitions // 2):
        if i % 20 == 0:
            sample = np.random.uniform(0, 1, 1)[0]
            if sample < 0.3:  # maybe make 0.2 to 0.3
                mode = 'e'
            elif sample < 0.6:
                mode = 'm'
            else:
                mode = 'h'
            state = env.reset(mode, check_constraint=False, demos=True)
            rollouts.append([])
            if images:
                im_state = env.sim.render(64, 64, camera_name="cam0")
                im_state = process_obs(im_state)
        action = env.expert_action()
        next_state, reward, done, info = env.step(action)
        if images:
            im_next_state = env.sim.render(64, 64, camera_name="cam0")
            im_next_state = process_obs(im_next_state)
        constraint = info['constraint']
        rollouts[-1].append((state, action, constraint, next_state, not done))
        transitions.append((state, action, constraint, next_state, not done))
        total += 1
        num_constraints += int(constraint)
        state = next_state
        if images:
            im_state = im_next_state
        # if done:
        #     sample = np.random.uniform(0, 1, 1)[0]
        #     if sample < 0.2:  # maybe make 0.2 to 0.3
        #         mode = 'e'
        #     elif sample < 0.4:
        #         mode = 'm'
        #     else:
        #         mode = 'h'
        #     state = env.reset(mode, check_constraint=False)
        #     rollouts.append([])
    print("data dist", total, num_constraints)
    if save_rollouts:
        return rollouts
    else:
        return transitions
class MazeNavigation(Env, utils.EzPickle):
    """2D MuJoCo point-mass maze: the agent must pass through wall gaps
    (positions parameterized by w1/w2) to reach a fixed goal at (0.25, 0).

    A contact count above 3 is treated as a constraint violation; with
    goal_cond=True the observation is augmented with (w1, w2).
    """

    def __init__(self, goal_cond=True, w1 = -0.2, w2 = 0.15):
        utils.EzPickle.__init__(self)
        self.hist = self.cost = self.done = self.time = self.state = None
        dirname = os.path.dirname(__file__)
        filename = os.path.join(dirname, 'simple_maze.xml')
        self.sim = MjSim(load_model_from_path(filename))
        self.horizon = HORIZON
        self._max_episode_steps = self.horizon
        self.transition_function = get_random_transitions
        self.steps = 0
        self.images = not GT_STATE
        self.action_space = Box(-MAX_FORCE * np.ones(2),
                                MAX_FORCE * np.ones(2))
        self.w1 = w1
        self.w2 = w2
        # NOTE(review): transition_function is assigned twice (also a few
        # lines above); the second assignment is redundant.
        self.transition_function = get_random_transitions
        self.goal_cond = goal_cond
        self.reset()
        obs = self._get_obs()
        #obs = self._get_obs(images=True)
        # print("OBS", obs.shape)
        # print("OBS", np.max(obs), np.min(obs))
        #cv2.imwrite('runs/maze.jpg', 255*obs)
        #exit()
        # assert(False)
        self.dense_reward = DENSE_REWARD
        if self.images:
            # NOTE(review): for image observations this stores a plain
            # shape tuple, not a gym Space -- confirm consumers expect it.
            self.observation_space = obs.shape
        else:
            self.observation_space = Box(-0.3, 0.3, shape=obs.shape)
        self.gain = 1.05  # proportional gain for the scripted expert
        self.goal = np.zeros((2, ))
        # self.goal[0] = np.random.uniform(0.15, 0.27)
        # self.goal[1] = np.random.uniform(-0.27, 0.27)
        self.goal[0] = 0.25
        self.goal[1] = 0

    def step(self, action):
        """Apply a clipped force; episode ends on horizon, violation, or
        reaching the goal. Reward is negative distance (dense) or a
        0/-1 goal indicator (sparse)."""
        action = process_action(action)
        self.sim.data.qvel[:] = 0
        self.sim.data.ctrl[:] = action
        cur_obs = self._get_obs()
        constraint = int(self.sim.data.ncon > 3)
        # Only simulate when not already in contact violation.
        if not constraint:
            for _ in range(500):
                self.sim.step()
        obs = self._get_obs()
        self.sim.data.qvel[:] = 0
        self.steps += 1
        constraint = int(self.sim.data.ncon > 3)
        self.done = self.steps >= self.horizon or constraint or (
            self.get_distance_score() < GOAL_THRESH)
        if not self.dense_reward:
            reward = -(self.get_distance_score() > GOAL_THRESH).astype(float)
        else:
            reward = -self.get_distance_score()
        # if self.get_distance_score() < GOAL_THRESH:
        #     reward += 10
        info = {
            "constraint": constraint,
            "reward": reward,
            "state": cur_obs,
            "next_state": obs,
            "action": action
        }
        return obs, reward, self.done, info

    def _get_obs(self, images=False):
        """Return the observation: rendered image when self.images (or
        images=True), else (x, y) optionally concatenated with (w1, w2)."""
        if images:
            return self.sim.render(64, 64, camera_name="cam0")
        #joint poisitions and velocities
        state = np.concatenate(
            [self.sim.data.qpos[:].copy(), self.sim.data.qvel[:].copy()])
        if not self.images and not images:
            if self.goal_cond:
                return np.concatenate([state[:2], self.get_goal()], axis=0)
            return state[:2]  # State is just (x, y) now
        #get images
        ims = self.sim.render(64, 64, camera_name="cam0")
        return ims / 255

    def reset(self, difficulty='h', check_constraint=True, demos=False,
              pos=()):
        """Reset the agent: explicit `pos` overrides difficulty-based
        random start bands ('e'/'m'/'h' = easy/medium/hard x-range).
        Retries recursively if the sampled start violates the constraint
        (unless check_constraint=False)."""
        if len(pos):
            self.sim.data.qpos[0] = pos[0]
            self.sim.data.qpos[1] = pos[1]
        else:
            if difficulty is None:
                self.sim.data.qpos[0] = np.random.uniform(-0.27, 0.27)
            elif difficulty == 'e':
                self.sim.data.qpos[0] = np.random.uniform(0.14, 0.22)
            elif difficulty == 'm':
                self.sim.data.qpos[0] = np.random.uniform(-0.04, 0.04)
            elif difficulty == 'h':
                self.sim.data.qpos[0] = np.random.uniform(-0.22, -0.13)
            self.sim.data.qpos[1] = np.random.uniform(-0.22, 0.22)
        self.steps = 0
        # self.sim.data.qpos[0] = 0.25
        # self.sim.data.qpos[1] = 0
        # print(self._get_obs())
        # print("GOT HERE")
        # assert(False)
        # Randomize wal positions
        #w1 = -0.08#-0.2#-0.08 #np.random.uniform(-0.1, 0.1)
        #w2 = 0.08#0.15#0.08 #np.random.uniform(-0.1, 0.1)
        #self.w1 = w1
        #self.w2 = w2
        # print(self.sim.model.geom_pos[:])
        # print(self.sim.model.geom_pos[:].shape)
        # Place the two wall pairs according to the gap offsets w1/w2.
        self.sim.model.geom_pos[5, 1] = 0.5 + self.w1
        self.sim.model.geom_pos[7, 1] = -0.25 + self.w1
        self.sim.model.geom_pos[6, 1] = 0.4 + self.w2
        self.sim.model.geom_pos[8, 1] = -0.25 + self.w2
        self.sim.forward()
        # print("RESET!", self._get_obs())
        constraint = int(self.sim.data.ncon > 3)
        if constraint and check_constraint:
            if not len(pos):
                self.reset(difficulty)
        # # self.render()
        # im = self.sim.render(64, 64, camera_name= "cam0")
        # print('aaa',self.sim.data.ncon, self.sim.data.qpos, im.sum())
        # plt.imshow(im)
        # plt.show()
        # plt.pause(0.1)
        # assert 0
        return self._get_obs()

    def get_distance_score(self):
        """
        :return: mean of the distances between all objects and goals
        """
        d = np.sqrt(np.mean((self.goal - self.sim.data.qpos[:])**2))
        return d

    # TODO: implement noise_std, demo_quality, right now these are ignored
    def expert_action(self, noise_std=0, demo_quality='high'):
        """Scripted waypoint controller: steer toward the next gap, then
        the goal, using a simple proportional rule."""
        st = self.sim.data.qpos[:]
        # print(st)
        if st[0] <= -0.151:
            delt = (np.array([-0.15, -0.125]) - st)
        elif st[0] <= 0.149:
            delt = (np.array([0.15, 0.125]) - st)
        # elif st[1] < 0.25:
        #     delt = (np.array([0.25, 0]) - st)
        else:
            delt = (np.array([self.goal[0], self.goal[1]]) - st)
        act = self.gain * delt
        return act

    def get_goal(self):
        # Goal-conditioning vector is the wall-gap parameters, not the
        # spatial goal position.
        return np.array([self.w1, self.w2])
class MazeTeacher(object):
    """Demonstration generator that rolls out MazeNavigation's scripted
    expert, optionally perturbed by epsilon-greedy or Gaussian noise."""

    def __init__(self):
        self.env = MazeNavigation()
        self.demonstrations = []
        self.default_noise = 0

    # all get_rollout functions for all envs should have a noise parameter
    def get_rollout(self, noise_param_in=None, mode="eps_greedy"):
        """Run one episode from a hard start.

        Noise is only injected before a random cutoff step (noise_idx).
        Returns a dict of observations, actions, rewards, reward_sum and
        per-step values (reverse cumulative costs, negated).
        Asserts (crashes) if the episode never progresses (cost == -HORIZON).
        """
        if mode == "eps_greedy":
            if noise_param_in is None:
                noise_param = 0
            else:
                noise_param = noise_param_in
        elif mode == "gaussian_noise":
            if noise_param_in is None:
                noise_param = 0
            else:
                noise_param = noise_param_in
        obs = self.env.reset(difficulty='h')
        O, A, cost_sum, costs = [obs], [], 0, []
        constraints_violated = 0
        # Noise applies only for the first noise_idx steps of the episode.
        noise_idx = np.random.randint(int(2 * HORIZON / 4))
        for i in range(HORIZON):
            action = self.env.expert_action()
            if i < noise_idx:
                if mode == "eps_greedy":
                    assert (noise_param <= 1)
                    if np.random.random() < noise_param:
                        action = self.env.action_space.sample()
                    else:
                        if np.random.random() < self.default_noise:
                            action = self.env.action_space.sample()
                elif mode == "gaussian_noise":
                    action = (np.array(action) + np.random.normal(
                        0, noise_param + self.default_noise,
                        self.env.action_space.shape[0])).tolist()
                else:
                    print("Invalid Mode!")
                    assert (False)
            A.append(action)
            obs, cost, done, info = self.env.step(action)
            print("CON", info['constraint'])
            print("STATE", obs)
            print("DONE", done)
            constraints_violated += info['constraint']
            O.append(obs)
            cost_sum += cost
            costs.append(cost)
            if done:
                break
        # Reverse cumulative sum = cost-to-go at each step.
        values = np.cumsum(costs[::-1])[::-1]
        print(cost_sum)
        print(len(O))
        print("CONSTRAINTS: ", constraints_violated)
        if int(cost_sum) == -HORIZON:
            print("FAILED")
            # return self.get_rollout(noise_param_in)
            cv2.imwrite('maze.jpg', 255 * obs)
            assert (False)
        print("obs", O)
        return {
            "obs": np.array(O),
            "noise": noise_param,
            "actions": np.array(A),
            "reward_sum": -cost_sum,
            "rewards": -np.array(costs),
            "values": -np.array(values)
        }
if __name__ == "__main__":
teacher = MazeTeacher()
reward_sum_completed = []
constraint_sat = 0
for i in range(1000):
rollout_stats = teacher.get_rollout()
print("Iter: ", i)
print(rollout_stats['reward_sum'])
print(len(rollout_stats['rewards']))
ep_len = len(rollout_stats['rewards'])
diff = HORIZON - ep_len
if ep_len == HORIZON:
constraint_sat += 1
reward_sum_completed.append(rollout_stats['reward_sum'] +
diff * rollout_stats['rewards'][-1])
print("completed reward sum", np.mean(reward_sum_completed),
np.std(reward_sum_completed), constraint_sat)
|
{"/supplement_plots.py": ["/plotting_utils.py"], "/analyze_runs_brijen.py": ["/plotting_utils.py"], "/gen_maze_demos.py": ["/env/maze.py", "/env/mazes.py"], "/analyze_runs_ashwin.py": ["/plotting_utils.py"], "/main.py": ["/sac.py", "/gen_pointbot0_demos.py", "/env/cartpole.py", "/env/half_cheetah_disabled.py", "/env/ant_disabled.py"], "/env/image_maze.py": ["/env/maze_const_images.py"], "/constraint.py": ["/utils.py"], "/analyze_runs_michael.py": ["/plotting_utils.py"], "/env/maze.py": ["/env/maze_const.py"], "/sac.py": ["/utils.py", "/constraint.py", "/run_multitask.py"], "/gen_pointbot_demos.py": ["/env/simplepointbot1.py"], "/env/mazes.py": ["/env/maze_const.py", "/env/maze.py"], "/gen_cartpole_demos.py": ["/env/cartpole.py"]}
|
29,157,569
|
JiahaoYao/mesa-safe-rl
|
refs/heads/main
|
/gen_pointbot0_demos.py
|
from env.simplepointbot0 import SimplePointBot, SimplePointBotTeacher
import numpy as np
import pickle
def get_random_transitions_pointbot0(w1,
                                     w2,
                                     discount,
                                     num_transitions,
                                     task_demos=False,
                                     save_rollouts=False):
    """Generate random pointbot transitions in <=10-step episodes that
    start near the obstacle band and end on violation or length cap.

    Each transition is [state, action, constraint, next_state,
    not constraint, mc_value] where mc_value is the discounted
    constraint-indicator return computed backwards per episode.
    `task_demos` and `save_rollouts` are accepted but unused here.
    """
    env = SimplePointBot(w1 = w1, w2 = w2)
    transitions = []
    rollouts = []
    step = 0
    done = True
    while True:
        if done:
            if len(rollouts):
                # Backward pass: discounted Monte-Carlo value of the
                # episode's element at index 2, appended to each tuple.
                # NOTE(review): index 2 holds the constraint flag, not
                # `reward` below -- confirm the MC target is meant to be
                # the constraint indicator (reward is computed but never
                # stored).
                mc_reward =0
                for transition in rollouts[::-1]:
                    mc_reward = transition[2] + discount * mc_reward
                    transition.append(mc_reward)
                transitions.extend(rollouts)
            if len(transitions) > num_transitions:
                break
            # Reset
            # Start either just below or just above the obstacle band.
            if np.random.uniform(0, 1) < 0.5:
                state = np.array(
                    [np.random.uniform(-80, 50),
                     np.random.uniform(-5, -2)])
            else:
                state = np.array(
                    [np.random.uniform(-80, 50),
                     np.random.uniform(2, 5)])
            rollouts = []
        action = np.clip(np.random.randn(2), -1, 1)
        next_state = env._next_state(state, action, override=True)
        constraint = env.obstacle(next_state)
        # Episode ends after 10 steps or on first violation.
        done = len(rollouts)==10 or constraint
        reward = env.step_cost(state, action)
        rollouts.append([state, action, constraint, next_state,
                         not constraint])
        state = next_state
    return transitions
if __name__ == '__main__':
    # Generate one constraint-demo pickle per (i, j) grid cell (skipping
    # the origin), each targeting the same number of transitions.
    counter = 0
    # Fixed per-batch target; must not be mutated inside the loop.
    num_constraint_transitions = 30000
    for i in range(-2, 3):
        for j in range(-2, 3):
            if i == 0 and j == 0:
                continue
            # NOTE(review): w1/w2 are fixed at 0 rather than derived from
            # (i, j) -- confirm this is intentional.
            constraint_demo_data = get_random_transitions_pointbot0(w1=0, w2=0, discount=0.8, num_transitions=num_constraint_transitions)
            # Count with a separate variable: previously the 30000 target
            # above was overwritten by this counter, so every batch after
            # the first used the prior batch's actual length as its target.
            num_generated_transitions = 0
            num_constraint_violations = 0
            for transition in constraint_demo_data:
                num_constraint_violations += int(transition[2])
                num_generated_transitions += 1
            print("Number of Constraint Transitions: ",
                  num_generated_transitions)
            print("Number of Constraint Violations: ",
                  num_constraint_violations)
            with open("demos/pointbot0_dynamics/constraint_demos_" + str(counter) + ".pkl", 'wb') as handle:
                pickle.dump(constraint_demo_data, handle)
            print(counter)
            counter += 1
|
{"/supplement_plots.py": ["/plotting_utils.py"], "/analyze_runs_brijen.py": ["/plotting_utils.py"], "/gen_maze_demos.py": ["/env/maze.py", "/env/mazes.py"], "/analyze_runs_ashwin.py": ["/plotting_utils.py"], "/main.py": ["/sac.py", "/gen_pointbot0_demos.py", "/env/cartpole.py", "/env/half_cheetah_disabled.py", "/env/ant_disabled.py"], "/env/image_maze.py": ["/env/maze_const_images.py"], "/constraint.py": ["/utils.py"], "/analyze_runs_michael.py": ["/plotting_utils.py"], "/env/maze.py": ["/env/maze_const.py"], "/sac.py": ["/utils.py", "/constraint.py", "/run_multitask.py"], "/gen_pointbot_demos.py": ["/env/simplepointbot1.py"], "/env/mazes.py": ["/env/maze_const.py", "/env/maze.py"], "/gen_cartpole_demos.py": ["/env/cartpole.py"]}
|
29,157,570
|
JiahaoYao/mesa-safe-rl
|
refs/heads/main
|
/sac.py
|
'''
Built on on SAC implementation from
https://github.com/pranz24/pytorch-soft-actor-critic
'''
import os
import matplotlib.pyplot as plt
from matplotlib.patches import Rectangle
from PIL import Image
import os.path as osp
import numpy as np
import torch
import torch.nn.functional as F
from torch.optim import Adam
from utils import soft_update, hard_update
from model import GaussianPolicy, QNetwork, DeterministicPolicy, QNetworkCNN, GaussianPolicyCNN, QNetworkConstraint, QNetworkConstraintCNN, DeterministicPolicyCNN, StochasticPolicy
from dotmap import DotMap
from constraint import ValueFunction
import cv2
from run_multitask import MAMLRAWR
def process_obs(obs):
    """Rearrange an image observation from HWC to CHW (channels-first)."""
    channels_first = np.transpose(obs, (2, 0, 1))
    return channels_first
class QSafeWrapper:
def __init__(self, obs_space, ac_space, hidden_size, logdir, action_space,
args, tmp_env):
self.env_name = args.env_name
self.goal = args.goal
self.logdir = logdir
self.device = torch.device("cuda" if args.cuda else "cpu")
self.ac_space = ac_space
self.images = args.cnn
self.encoding = args.vismpc_recovery
if not self.images:
self.safety_critic = QNetworkConstraint(
obs_space.shape[0], ac_space.shape[0],
hidden_size).to(device=self.device)
self.safety_critic_target = QNetworkConstraint(
obs_space.shape[0], ac_space.shape[0],
args.hidden_size).to(device=self.device)
else:
if self.encoding:
self.safety_critic = QNetworkConstraint(
hidden_size, ac_space.shape[0],
hidden_size).to(device=self.device)
self.safety_critic_target = QNetworkConstraint(
hidden_size, ac_space.shape[0],
args.hidden_size).to(device=self.device)
else:
self.safety_critic = QNetworkConstraintCNN(
obs_space, ac_space.shape[0], hidden_size,
args.env_name).to(self.device)
self.safety_critic_target = QNetworkConstraintCNN(
obs_space, ac_space.shape[0], hidden_size,
args.env_name).to(self.device)
self.awr = False
import os
try:
os.makedirs(logdir + "/right")
os.makedirs(logdir + "/left")
os.makedirs(logdir + "/up")
os.makedirs(logdir + "/down")
except OSError as e:
if e.errno != errno.EEXIST:
raise
self.lr = args.lr
self.safety_critic_optim = Adam(
self.safety_critic.parameters(), lr=args.lr)
hard_update(self.safety_critic_target, self.safety_critic)
self.tau = args.tau_safe
self.gamma_safe = args.gamma_safe
self.updates = 0
self.target_update_interval = args.target_update_interval
self.torchify = lambda x: torch.FloatTensor(x).to(self.device)
if not self.images:
self.policy = StochasticPolicy(obs_space.shape[0],
ac_space.shape[0], hidden_size,
action_space).to(self.device)
else:
self.policy = DeterministicPolicyCNN(obs_space, ac_space.shape[0],
hidden_size, args.env_name,
action_space).to(self.device)
self.policy_optim = Adam(self.policy.parameters(), lr=args.lr)
self.pos_fraction = args.pos_fraction if args.pos_fraction >= 0 else None
self.ddpg_recovery = args.ddpg_recovery
self.Q_sampling_recovery = args.Q_sampling_recovery
self.tmp_env = tmp_env
self.lagrangian_recovery = args.lagrangian_recovery
self.recovery_lambda = args.recovery_lambda
self.eps_safe = args.eps_safe
self.alpha = args.alpha
if args.env_name in ['maze', 'maze_1', 'maze_2', 'maze_3', 'maze_4', 'maze_5', 'maze_6']:
self.tmp_env.reset(pos=(12, 12))
    def update_parameters(self,
                          ep=None,
                          memory=None,
                          policy=None,
                          critic=None,
                          lr=None,
                          batch_size=None,
                          training_iterations=3000,
                          plot=1):
        """One gradient update of the safety critic (and, for DDPG-style
        recovery, the recovery policy).

        Args:
            ep: unused here.
            memory: replay buffer providing
                (state, action, constraint, next_state, mask, mc_reward).
            policy: task policy used to sample next-state actions / recovery
                actions.
            critic: task critic; required when `self.lagrangian_recovery`.
            lr, training_iterations: unused here (kept for interface parity).
            batch_size: upper bound on the sampled minibatch size.
            plot: truthy to periodically dump Q-value heatmaps.
        """
        # TODO: cleanup this is hardcoded for maze
        #state_batch, action_batch, constraint_batch, next_state_batch, mask_batch, mc_reward_batch = memory.sample(
        #batch_size=min(batch_size, len(memory)),
        #pos_fraction=self.pos_fraction)
        state_batch, action_batch, constraint_batch, next_state_batch, mask_batch, mc_reward_batch = memory.sample(
            batch_size=min(batch_size, len(memory)),
            pos_fraction=self.pos_fraction)
        # Move the minibatch to the training device; scalars get a trailing
        # singleton dim so they broadcast against (B, 1) critic outputs.
        state_batch = torch.FloatTensor(state_batch).to(self.device)
        next_state_batch = torch.FloatTensor(next_state_batch).to(self.device)
        action_batch = torch.FloatTensor(action_batch).to(self.device)
        mask_batch = torch.FloatTensor(mask_batch).to(self.device).unsqueeze(1)
        constraint_batch = torch.FloatTensor(constraint_batch).to(
            self.device).unsqueeze(1)
        mc_reward_batch = torch.FloatTensor(mc_reward_batch).to(
            self.device).unsqueeze(1)
        if self.encoding:
            state_batch_enc = self.encoder(state_batch)
            next_state_batch_enc = self.encoder(next_state_batch)
        if not self.awr:
            # TD-style target: discounted *pessimistic* (max) safety value of
            # the next state plus the immediate constraint indicator.
            with torch.no_grad():
                next_state_action, next_state_log_pi, _ = policy.sample(
                    next_state_batch)
                if self.encoding:
                    qf1_next_target, qf2_next_target = self.safety_critic_target(
                        next_state_batch_enc, next_state_action)
                else:
                    # NOTE(review): this branch queries the *online*
                    # safety_critic while the encoding branch queries
                    # safety_critic_target — looks like a bug; confirm whether
                    # safety_critic_target was intended here too.
                    qf1_next_target, qf2_next_target = self.safety_critic(
                        next_state_batch, next_state_action)
                min_qf_next_target = torch.max(qf1_next_target, qf2_next_target)
                next_q_value = constraint_batch + mask_batch * self.gamma_safe * (
                    min_qf_next_target)
            # qf1, qf2 = self.safety_critic(state_batch, policy.sample(state_batch)[0]) # Two Q-functions to mitigate positive bias in the policy improvement step
            if self.encoding:
                qf1, qf2 = self.safety_critic(
                    state_batch_enc, action_batch
                )  # Two Q-functions to mitigate positive bias in the policy improvement step
            else:
                qf1, qf2 = self.safety_critic(
                    state_batch, action_batch
                )  # Two Q-functions to mitigate positive bias in the policy improvement step
            qf1_loss = F.mse_loss(
                qf1, next_q_value
            )  # JQ = 𝔼(st,at)~D[0.5(Q1(st,at) - r(st,at) - γ(𝔼st+1~p[V(st+1)]))^2]
            qf2_loss = F.mse_loss(
                qf2, next_q_value
            )  # JQ = 𝔼(st,at)~D[0.5(Q1(st,at) - r(st,at) - γ(𝔼st+1~p[V(st+1)]))^2]
            self.safety_critic_optim.zero_grad()
            (qf1_loss + qf2_loss).backward()
            self.safety_critic_optim.step()
        else:
            # AWR mode: regress both Q-heads directly onto the Monte-Carlo
            # constraint return.
            qf1, qf2 = self.safety_critic(
                state_batch, action_batch
            )
            qf_loss = F.mse_loss(qf1, mc_reward_batch) + F.mse_loss(qf2, mc_reward_batch)
            self.safety_critic_optim.zero_grad()
            qf_loss.backward()
            self.safety_critic_optim.step()
        if self.ddpg_recovery:
            # Train the recovery policy to minimize the pessimistic safety Q.
            pi, log_pi, _ = self.policy.sample(state_batch)
            qf1_pi, qf2_pi = self.safety_critic(state_batch, pi)
            max_sqf_pi = torch.max(qf1_pi, qf2_pi)
            if self.lagrangian_recovery:
                assert critic is not None
                # Lagrangian objective: trade off task value against the
                # safety constraint violation (max_sqf_pi - eps_safe).
                pi, log_pi, _ = policy.sample(state_batch)
                qf1_pi, qf2_pi = critic(state_batch, pi)
                min_qf_pi = torch.min(qf1_pi, qf2_pi)
                policy_loss = (
                    self.recovery_lambda * (max_sqf_pi - self.eps_safe) -
                    min_qf_pi
                ).mean(
                )  # Jπ = 𝔼st∼D,εt∼N[α * logπ(f(εt;st)|st) − Q(st,f(εt;st))]
            else:
                # Ignore AWR doesn't work with Recovery RL
                if self.awr:
                    with torch.no_grad():
                        advantages = (mc_reward_batch - qf1).squeeze(-1)
                        # NOTE(review): normalized_advantages is computed but
                        # never used below (weights use raw advantages) —
                        # confirm intent.
                        normalized_advantages = (1/0.333333)*(advantages - advantages.mean())/advantages.std()
                        normalized_advantages = - normalized_advantages
                    weights = advantages.clamp(max=np.log(20.0)).exp()
                    cur_dist = self.policy(state_batch)
                    action_log_probs = cur_dist.log_prob(action_batch).sum(-1)
                    policy_loss = -(action_log_probs * weights).mean()
                else:
                    policy_loss = max_sqf_pi.mean()
            self.policy_optim.zero_grad()
            policy_loss.backward()
            self.policy_optim.step()
        # Polyak-average the target safety critic every few updates.
        if self.updates % self.target_update_interval == 0:
            soft_update(self.safety_critic_target, self.safety_critic,
                        self.tau)
        self.updates += 1
        plot_interval = 100
        if self.env_name == 'image_maze':
            plot_interval = 29000
        if plot and self.updates % plot_interval == 0:
            if self.env_name in ['simplepointbot0', 'simplepointbot1', 'maze', 'maze_1', 'maze_2', 'maze_3', 'maze_4', 'maze_5', 'maze_6']:
                self.plot(policy, self.updates, [.1, 0], "right", folder_prefix="/right/")
                self.plot(policy, self.updates, [-.1, 0], "left", folder_prefix="/left/")
                self.plot(policy, self.updates, [0, .1], "down", folder_prefix="/down/")
                self.plot(policy, self.updates, [0, -.1], "up", folder_prefix="/up/")
            elif self.env_name == 'image_maze':
                self.plot(policy, self.updates, [.3, 0], "right")
                self.plot(policy, self.updates, [-.3, 0], "left")
                self.plot(policy, self.updates, [0, .3], "up")
                self.plot(policy, self.updates, [0, -.3], "down")
            else:
                return
                # NOTE(review): unreachable — the `return` above disables
                # this raise; left as-is per original.
                raise NotImplementedError("Unsupported environment for plotting")
def get_value(self, states, actions, encoded=False):
with torch.no_grad():
if self.encoding and not encoded:
q1, q2 = self.safety_critic(self.encoder(states), actions)
else:
q1, q2 = self.safety_critic(states, actions)
return torch.max(q1, q2)
def select_action(self, state, eval=False):
state = torch.FloatTensor(state).to(self.device).unsqueeze(0)
if self.ddpg_recovery:
if eval is False:
action, _, _ = self.policy.sample(state)
else:
_, _, action = self.policy.sample(state)
return action.detach().cpu().numpy()[0]
elif self.Q_sampling_recovery:
if not self.images:
state_batch = state.repeat(1000, 1)
else:
state_batch = state.repeat(1000, 1, 1, 1)
sampled_actions = torch.FloatTensor(
np.array([self.ac_space.sample() for _ in range(1000)])).to(
self.device)
q_vals = self.get_value(state_batch, sampled_actions)
min_q_value_idx = torch.argmin(q_vals)
action = sampled_actions[min_q_value_idx]
return action.detach().cpu().numpy()
else:
assert False
    def plot(self, pi, ep, action=None, suffix="", folder_prefix="", critic=None):
        """Save a heatmap of safety-critic Q-values over a 2-D grid of states.

        Args:
            pi: policy (unused below — actions come from `action`).
            ep: update counter embedded in the output filename.
            action: fixed action tiled across every grid state.
            suffix: tag appended to the output filename.
            folder_prefix: subdirectory inside self.logdir.
            critic: optional external critic. NOTE(review): when critic is
                not None, no branch below assigns qf1/qf2, so `max_qf` would
                raise NameError — confirm callers always pass critic=None.
        """
        env = self.tmp_env
        # Plotting bounds per environment.
        if self.env_name in ['maze', 'maze_1', 'maze_2', 'maze_3', 'maze_4', 'maze_5', 'maze_6']:
            x_bounds = [-0.3, 0.3]
            y_bounds = [-0.3, 0.3]
        elif self.env_name == 'simplepointbot0':
            x_bounds = [-80, 20]
            y_bounds = [-10, 10]
        elif self.env_name == 'simplepointbot1':
            x_bounds = [-75, 25]
            y_bounds = [-20, 20]
        elif self.env_name == 'image_maze':
            x_bounds = [-0.05, 0.25]
            y_bounds = [-0.05, 0.25]
        else:
            raise NotImplementedError("Plotting unsupported for this env")
        # Build the grid of evaluation states.
        states = []
        x_pts = 100
        y_pts = int(
            x_pts * (x_bounds[1] - x_bounds[0]) / (y_bounds[1] - y_bounds[0]))
        for x in np.linspace(x_bounds[0], x_bounds[1], y_pts):
            for y in np.linspace(y_bounds[0], y_bounds[1], x_pts):
                if self.env_name == 'image_maze':
                    # Image env: reset to the grid point and grab a rendered obs.
                    env.reset(pos=(x, y))
                    obs = process_obs(env._get_obs(images=True))
                    states.append(obs)
                else:
                    states.append([x, y])
        num_states = len(states)  # unused; kept for parity with original
        if not self.encoding and self.env_name == 'maze':
            # Goal-conditioned variant: concatenate the goal onto each state.
            states = np.array(states)
            goal_state = self.tmp_env.get_goal()
            batch_size = states.shape[0]
            goal_states = np.tile(goal_state, (batch_size, 1))
            states = np.concatenate([states, goal_states], axis=1)
            states = self.torchify(states)
        else:
            states = self.torchify(np.array(states))
        actions = self.torchify(np.tile(action, (len(states), 1)))
        # if ep > 0:
        #     actions = pi(states)
        # else:
        #     actions = self.torchify(np.array([self.action_space.sample() for _ in range(num_states)]))
        if critic is None:
            if self.encoding:
                qf1, qf2 = self.safety_critic(self.encoder(states), actions)
            else:
                qf1, qf2 = self.safety_critic(states, actions)
        # Pessimistic value: elementwise max over the two heads.
        max_qf = torch.max(qf1, qf2)
        grid = max_qf.detach().cpu().numpy()
        grid = grid.reshape(y_pts, x_pts)
        # Overlay the known obstacle rectangle for the pointbot envs.
        if self.env_name == 'simplepointbot0':
            plt.gca().add_patch(
                Rectangle(
                    (0, 25),
                    500,
                    50,
                    linewidth=1,
                    edgecolor='r',
                    facecolor='none'))
        elif self.env_name == 'simplepointbot1':
            plt.gca().add_patch(
                Rectangle(
                    (112.5, 31.25),
                    10*2.5,
                    15*2.5,
                    linewidth=1,
                    edgecolor='r',
                    facecolor='none'))
        if self.env_name in ['maze', 'maze_1', 'maze_2', 'maze_3', 'maze_4', 'maze_5', 'maze_6']:
            # Render the maze image underneath a semi-transparent Q heatmap.
            fig, ax = plt.subplots()
            cmap = plt.get_cmap('jet', 10)
            background = cv2.resize(env._get_obs(images=True), (x_pts, y_pts))
            plt.imshow(background)
            im = ax.imshow(grid.T, alpha=0.6, cmap=cmap, vmin=0.0, vmax=1.0)
            cbar = fig.colorbar(im, ax=ax)
        else:
            plt.imshow(grid.T)
        log_string = self.logdir + "/" + folder_prefix + "qvalue_" + str(ep) + suffix
        plt.savefig(
            log_string,
            bbox_inches='tight')
def __call__(self, states, actions):
if self.encoding:
return self.safety_critic(self.encoder(states), actions)
else:
return self.safety_critic(states, actions)
class SAC(object):
    """Soft Actor-Critic agent with an attached safety critic.

    Builds task critic + target, the (Gaussian or deterministic) policy, and
    one of several safety critics (value function, MAML wrapper, or the
    Q-safe wrapper), all configured from the parsed CLI `args`.
    """

    def __init__(self,
                 observation_space,
                 action_space,
                 args,
                 logdir,
                 im_shape=None,
                 tmp_env=None):
        # Core SAC hyperparameters, straight from args.
        self.gamma = args.gamma
        self.tau = args.tau
        self.alpha = args.alpha
        self.env_name = args.env_name
        self.logdir = logdir
        self.gamma_safe = args.gamma_safe
        self.policy_type = args.policy
        self.target_update_interval = args.target_update_interval
        self.automatic_entropy_tuning = args.automatic_entropy_tuning
        # Lambda is lazy, so referencing self.device here is fine even though
        # it is assigned on the next line.
        self.torchify = lambda x: torch.FloatTensor(x).to(self.device)
        self.device = torch.device("cuda" if args.cuda else "cpu")
        if not args.cnn:
            # State-based safety value function (only built in the non-CNN case).
            self.V_safe = ValueFunction(
                DotMap(
                    gamma_safe=self.gamma_safe,
                    device=self.device,
                    state_dim=observation_space.shape[0],
                    hidden_size=200,
                    tau_safe=args.tau_safe,
                    use_target=args.use_target_safe,
                    logdir=logdir,
                    env_name=args.env_name,
                    opt=args.opt_value,
                    pred_time=args.pred_time))
        self.cnn = args.cnn
        # self.Q_safe = QFunction(DotMap(gamma_safe=self.gamma_safe,
        #                                device=self.device,
        #                                state_dim=observation_space.shape[0],
        #                                ac_space=action_space,
        #                                hidden_size=200,
        #                                logdir=logdir,
        #                                env_name=args.env_name,
        #                                opt=args.opt_value,
        #                                tau=args.tau_safe))
        # TODO; cleanup for now this is hard-coded for maze
        if im_shape:
            observation_space = im_shape
        # Task critic (and its Polyak-averaged target), CNN or MLP variant.
        if args.cnn:
            self.critic = QNetworkCNN(observation_space, action_space.shape[0],
                                      args.hidden_size,
                                      args.env_name).to(device=self.device)
        else:
            self.critic = QNetwork(observation_space.shape[0],
                                   action_space.shape[0],
                                   args.hidden_size).to(device=self.device)
        self.critic_optim = Adam(self.critic.parameters(), lr=args.lr)
        if args.cnn:
            self.critic_target = QNetworkCNN(
                observation_space, action_space.shape[0], args.hidden_size,
                args.env_name).to(device=self.device)
        else:
            self.critic_target = QNetwork(
                observation_space.shape[0], action_space.shape[0],
                args.hidden_size).to(device=self.device)
        # Constrained-optimization knobs (dual variable nu, RCPO lambda).
        self.DGD_constraints = args.DGD_constraints
        self.nu = args.nu
        self.update_nu = args.update_nu
        self.cnn = args.cnn
        self.eps_safe = args.eps_safe
        self.use_constraint_sampling = args.use_constraint_sampling
        # Optimize log(nu) so nu stays positive.
        self.log_nu = torch.tensor(
            np.log(self.nu), requires_grad=True, device=self.device)
        self.nu_optim = Adam([self.log_nu], lr=0.1 * args.lr)
        self.RCPO = args.RCPO
        self.lambda_RCPO = args.lambda_RCPO
        self.log_lambda_RCPO = torch.tensor(
            np.log(self.lambda_RCPO), requires_grad=True, device=self.device)
        self.lambda_RCPO_optim = Adam(
            [self.log_lambda_RCPO],
            lr=0.1 * args.lr)  # Make lambda updated slower than other things
        hard_update(self.critic_target, self.critic)
        if self.policy_type == "Gaussian":
            # Target Entropy = −dim(A) (e.g. , -6 for HalfCheetah-v2) as given in the paper
            if self.automatic_entropy_tuning is True:
                self.target_entropy = -torch.prod(
                    torch.Tensor(action_space.shape).to(self.device)).item()
                self.log_alpha = torch.zeros(
                    1, requires_grad=True, device=self.device)
                self.alpha_optim = Adam([self.log_alpha], lr=args.lr)
            if args.cnn:
                self.policy = GaussianPolicyCNN(
                    observation_space, action_space.shape[0], args.hidden_size,
                    args.env_name, action_space).to(self.device)
            else:
                self.policy = GaussianPolicy(
                    observation_space.shape[0], action_space.shape[0],
                    args.hidden_size, action_space).to(self.device)
            self.policy_optim = Adam(self.policy.parameters(), lr=args.lr)
        else:
            # Deterministic policy: no entropy term, no auto-tuning, MLP only.
            self.alpha = 0
            self.automatic_entropy_tuning = False
            assert not args.cnn
            self.policy = DeterministicPolicy(
                observation_space.shape[0], action_space.shape[0],
                args.hidden_size, action_space).to(self.device)
            self.policy_optim = Adam(self.policy.parameters(), lr=args.lr)
        # Pick the safety critic implementation.
        if args.use_value:
            self.safety_critic = self.V_safe
        else:
            # NOTE(review): action_space is passed twice positionally to both
            # wrappers below — confirm against their signatures.
            if args.meta:
                self.Q_safe = MAMLRAWR(
                    observation_space,
                    action_space,
                    args.hidden_size,
                    logdir,
                    action_space,
                    args,
                    tmp_env=tmp_env)
            else:
                self.Q_safe = QSafeWrapper(
                    observation_space,
                    action_space,
                    args.hidden_size,
                    logdir,
                    action_space,
                    args,
                    tmp_env=tmp_env)
            self.safety_critic = self.Q_safe
    def plot(self, ep, action, suffix):
        """Save a heatmap of the task critic's value over a 2-D grid.

        Only the 'reacher' environment defines bounds here; callers gate on
        env_name == 'reacher' (any other env would hit a NameError on
        x_bounds).

        Args:
            ep: update counter embedded in the filename.
            action: fixed action tiled over every grid state.
            suffix: filename tag.
        """
        if self.env_name == 'reacher':
            x_bounds = np.array([0.03, 0.13]) * 100
            y_bounds = np.array([0.03, 0.13]) * 100
        states = []
        x_pts = 100
        y_pts = int(x_pts * (x_bounds[1] - x_bounds[0]) /
                    (y_bounds[1] - y_bounds[0]))
        for x in np.linspace(x_bounds[0], x_bounds[1], y_pts):
            for y in np.linspace(y_bounds[0], y_bounds[1], x_pts):
                # Third coordinate is held fixed for the sweep.
                states.append([x, y, -0.13 * 100])
        num_states = len(states)  # unused; kept for parity with original
        states = self.torchify(np.array(states))
        actions = self.torchify(np.tile(action, (len(states), 1)))
        # if ep > 0:
        #     actions = pi(states)
        # else:
        #     actions = self.torchify(np.array([self.action_space.sample() for _ in range(num_states)]))
        qf1, qf2 = self.critic(states, actions)
        # NOTE(review): named max_qf but computes the elementwise *min* of
        # the two heads (the usual SAC pessimistic value) — confirm intent.
        max_qf = torch.min(qf1, qf2)
        grid = max_qf.detach().cpu().numpy()
        grid = grid.reshape(y_pts, x_pts)
        plt.imshow(grid.T)
        plt.savefig(osp.join(self.logdir, "qvalue_" + str(ep) + suffix))
    def select_action(self, state, eval=False):
        """Select an action for one state, optionally via constraint sampling.

        With `use_constraint_sampling`, 100 candidate actions are drawn from
        the policy; one is sampled (weighted by policy probability) from the
        subset whose safety value is within eps_safe, falling back to the
        single safest candidate when none qualify. Otherwise a plain SAC
        action is returned (policy mean when `eval` is truthy).

        Returns:
            The chosen action as a numpy array (batch dim stripped).
        """
        state = torch.FloatTensor(state).to(self.device).unsqueeze(0)
        # Number of candidate actions for constraint sampling.
        self.safe_samples = 100
        if self.use_constraint_sampling:
            if not self.cnn:
                state_batch = state.repeat(self.safe_samples, 1)
            else:
                state_batch = state.repeat(self.safe_samples, 1, 1, 1)
            pi, log_pi, _ = self.policy.sample(state_batch)
            max_qf_constraint_pi = self.safety_critic.get_value(
                state_batch, pi)
            # Threshold with epsilon safe and get idxs and apply to both pi and max_qf_constraint_pi, if empty state
            thresh_idxs = (max_qf_constraint_pi <=
                           self.eps_safe).nonzero()[:, 0]
            # Note: these are auto-normalized
            thresh_probs = torch.exp(log_pi[thresh_idxs])
            thresh_probs = thresh_probs.flatten()
            if list(thresh_probs.size())[0] == 0:
                # No candidate satisfies the safety threshold: take the one
                # with the lowest safety value.
                min_q_value_idx = torch.argmin(max_qf_constraint_pi)
                action = pi[min_q_value_idx, :].unsqueeze(0)
            else:
                # Sample among the safe candidates, weighted by policy prob.
                prob_dist = torch.distributions.Categorical(thresh_probs)
                sampled_idx = prob_dist.sample()
                action = pi[sampled_idx, :].unsqueeze(0)
        else:
            if eval is False:
                action, _, _ = self.policy.sample(state)
            else:
                _, _, action = self.policy.sample(state)
        return action.detach().cpu().numpy()[0]
def train_safety_critic(self,
ep,
memory,
pi,
lr=0.0003,
batch_size=1000,
training_iterations=3000,
plot=False):
# TODO: cleanup this is hardcoded for maze
if self.env_name in ['maze', 'maze_1', 'maze_2', 'maze_3', 'maze_4', 'maze_5', 'maze_6']:
lr = 1e-3
self.safety_critic.train(ep, memory, pi, lr, batch_size,
training_iterations, plot)
def policy_sample(self, states):
actions, _, _ = self.policy.sample(states)
return actions
def get_critic_value(self, states, actions):
with torch.no_grad():
q1, q2 = self.critic(states, actions)
return torch.max(q1, q2).detach().cpu().numpy()
def update_parameters(self,
memory,
batch_size,
updates,
nu=None,
safety_critic=None):
if nu is None:
nu = self.nu
# Sample a batch from memory
state_batch, action_batch, reward_batch, next_state_batch, mask_batch = memory.sample(
batch_size=batch_size)
state_batch = torch.FloatTensor(state_batch).to(self.device)
next_state_batch = torch.FloatTensor(next_state_batch).to(self.device)
action_batch = torch.FloatTensor(action_batch).to(self.device)
reward_batch = torch.FloatTensor(reward_batch).to(
self.device).unsqueeze(1)
mask_batch = torch.FloatTensor(mask_batch).to(self.device).unsqueeze(1)
with torch.no_grad():
next_state_action, next_state_log_pi, _ = self.policy.sample(
next_state_batch)
qf1_next_target, qf2_next_target = self.critic_target(
next_state_batch, next_state_action)
min_qf_next_target = torch.min(
qf1_next_target,
qf2_next_target) - self.alpha * next_state_log_pi
next_q_value = reward_batch + mask_batch * self.gamma * (
min_qf_next_target)
if self.RCPO:
qsafe_batch = torch.max(
*safety_critic(state_batch, action_batch))
assert safety_critic is not None
next_q_value -= self.lambda_RCPO * qsafe_batch
qf1, qf2 = self.critic(
state_batch, action_batch
) # Two Q-functions to mitigate positive bias in the policy improvement step
qf1_loss = F.mse_loss(
qf1, next_q_value
) # JQ = 𝔼(st,at)~D[0.5(Q1(st,at) - r(st,at) - γ(𝔼st+1~p[V(st+1)]))^2]
qf2_loss = F.mse_loss(
qf2, next_q_value
) # JQ = 𝔼(st,at)~D[0.5(Q1(st,at) - r(st,at) - γ(𝔼st+1~p[V(st+1)]))^2]
pi, log_pi, _ = self.policy.sample(state_batch)
qf1_pi, qf2_pi = self.critic(state_batch, pi)
min_qf_pi = torch.min(qf1_pi, qf2_pi)
sqf1_pi, sqf2_pi = self.safety_critic(state_batch, pi)
max_sqf_pi = torch.max(sqf1_pi, sqf2_pi)
if self.DGD_constraints:
policy_loss = (
(self.alpha * log_pi) + nu * (max_sqf_pi - self.eps_safe) -
1. * min_qf_pi
).mean() # Jπ = 𝔼st∼D,εt∼N[α * logπ(f(εt;st)|st) − Q(st,f(εt;st))]
else:
policy_loss = ((self.alpha * log_pi) - min_qf_pi).mean(
) # Jπ = 𝔼st∼D,εt∼N[α * logπ(f(εt;st)|st) − Q(st,f(εt;st))]
self.critic_optim.zero_grad()
(qf1_loss + qf2_loss).backward()
self.critic_optim.step()
self.policy_optim.zero_grad()
policy_loss.backward()
self.policy_optim.step()
if self.automatic_entropy_tuning:
alpha_loss = -(self.log_alpha *
(log_pi + self.target_entropy).detach()).mean()
self.alpha_optim.zero_grad()
alpha_loss.backward()
self.alpha_optim.step()
self.alpha = self.log_alpha.exp()
alpha_tlogs = self.alpha.clone() # For TensorboardX logs
else:
alpha_loss = torch.tensor(0.).to(self.device)
alpha_tlogs = torch.tensor(self.alpha) # For TensorboardX logs
# Optimize nu
if self.update_nu:
nu_loss = (
self.log_nu * (self.eps_safe - max_sqf_pi).detach()
).mean(
) # TODO: used log trick here too, just like alpha case, need to understand why this is done.
self.nu_optim.zero_grad()
nu_loss.backward()
self.nu_optim.step()
self.nu = self.log_nu.exp()
# Optimize lambda
if self.RCPO:
lambda_RCPO_loss = (
self.log_lambda_RCPO * (self.eps_safe - qsafe_batch).detach()
).mean(
) # TODO: used log trick here too, just like alpha case, need to understand why this is done.
self.lambda_RCPO_optim.zero_grad()
lambda_RCPO_loss.backward()
self.lambda_RCPO_optim.step()
self.lambda_RCPO = self.log_lambda_RCPO.exp()
if updates % self.target_update_interval == 0:
soft_update(self.critic_target, self.critic, self.tau)
if self.env_name == 'reacher' and updates % 50 == 0 and not self.cnn:
self.plot(updates, [0.005, 0, 0], "right")
self.plot(updates, [-0.005, 0, 0], "left")
self.plot(updates, [0, 0.005, 0], "up")
self.plot(updates, [0, -0.005, 0], "down")
return qf1_loss.item(), qf2_loss.item(), policy_loss.item(
), alpha_loss.item(), alpha_tlogs.item()
# Save model parameters
def save_model(self,
env_name,
suffix="",
actor_path=None,
critic_path=None):
if not os.path.exists('models/'):
os.makedirs('models/')
if actor_path is None:
actor_path = "models/sac_actor_{}_{}".format(env_name, suffix)
if critic_path is None:
critic_path = "models/sac_critic_{}_{}".format(env_name, suffix)
print('Saving models to {} and {}'.format(actor_path, critic_path))
torch.save(self.policy.state_dict(), actor_path)
torch.save(self.critic.state_dict(), critic_path)
# Load model parameters
def load_model(self, actor_path, critic_path):
print('Loading models from {} and {}'.format(actor_path, critic_path))
if actor_path is not None:
self.policy.load_state_dict(torch.load(actor_path))
if critic_path is not None:
self.critic.load_state_dict(torch.load(critic_path))
|
{"/supplement_plots.py": ["/plotting_utils.py"], "/analyze_runs_brijen.py": ["/plotting_utils.py"], "/gen_maze_demos.py": ["/env/maze.py", "/env/mazes.py"], "/analyze_runs_ashwin.py": ["/plotting_utils.py"], "/main.py": ["/sac.py", "/gen_pointbot0_demos.py", "/env/cartpole.py", "/env/half_cheetah_disabled.py", "/env/ant_disabled.py"], "/env/image_maze.py": ["/env/maze_const_images.py"], "/constraint.py": ["/utils.py"], "/analyze_runs_michael.py": ["/plotting_utils.py"], "/env/maze.py": ["/env/maze_const.py"], "/sac.py": ["/utils.py", "/constraint.py", "/run_multitask.py"], "/gen_pointbot_demos.py": ["/env/simplepointbot1.py"], "/env/mazes.py": ["/env/maze_const.py", "/env/maze.py"], "/gen_cartpole_demos.py": ["/env/cartpole.py"]}
|
29,157,571
|
JiahaoYao/mesa-safe-rl
|
refs/heads/main
|
/gen_pointbot_demos.py
|
from env.simplepointbot1 import SimplePointBot, SimplePointBotTeacher
import numpy as np
import pickle
def get_random_transitions_pointbot1(w1,
                                     w2,
                                     discount,
                                     num_transitions,
                                     task_demos=False,
                                     save_rollouts=False):
    """Generate random constraint-demonstration transitions for SimplePointBot.

    Runs short random-action episodes (at most 10 steps, ending early on a
    constraint violation) from five start-state regions, and labels every
    transition with the discounted Monte-Carlo constraint return. The five
    near-identical collection loops of the original are factored into one
    `_collect` helper parameterized by budget and state/action samplers.

    Args:
        w1, w2: wall offsets forwarded to SimplePointBot.
        discount: discount factor for the Monte-Carlo constraint return.
        num_transitions: approximate total transition budget.
        task_demos, save_rollouts: unused; kept for interface compatibility.

    Returns:
        List of [state, action, constraint, next_state, not constraint,
        mc_constraint_return] transitions.
    """
    env = SimplePointBot(w1=w1, w2=w2)
    transitions = []

    def _finalize(rollout):
        # Back up the discounted Monte-Carlo constraint cost through the
        # episode, append it to each transition, and store the episode.
        mc_reward = 0
        for transition in rollout[::-1]:
            mc_reward = transition[2] + discount * mc_reward
            transition.append(mc_reward)
        transitions.extend(rollout)

    def _collect(budget, sample_state, sample_action):
        # Roll out random episodes until more than `budget` transitions have
        # been gathered. An episode ends on a constraint violation or after
        # 10 steps (`rollout` has 9 entries before the final append).
        # NOTE: the original also computed env.step_cost(state, action) and
        # discarded it; that dead call is dropped here.
        rollout = []
        done = True
        total = 0
        state = None
        while True:
            if done:
                if len(rollout):
                    _finalize(rollout)
                if total > budget:
                    break
                state = sample_state()
                rollout = []
            action = sample_action()
            next_state = env._next_state(state, action, override=True)
            constraint = env.obstacle(next_state)
            done = constraint or len(rollout) == 9
            rollout.append(
                [state, action, constraint, next_state, not constraint])
            state = next_state
            total += 1

    def _free_space_state():
        # Rejection-sample a start state that is not inside the obstacle.
        state = np.array(
            [np.random.uniform(-50, 10),
             np.random.uniform(-25, 25)])
        while env.obstacle(state):
            state = np.array(
                [np.random.uniform(-50, 10),
                 np.random.uniform(-25, 25)])
        return state

    # 1) Starts anywhere in free space; clipped isotropic Gaussian actions.
    _collect(num_transitions / 3, _free_space_state,
             lambda: np.clip(np.random.randn(2), -1, 1))
    # 2) Starts in a band at x in [-35-w1, -30-w1]; actions with x-component
    #    drawn from [0.5, 1].
    _collect(
        num_transitions / 4,
        lambda: np.array([np.random.uniform(-35 - w1, -30 - w1),
                          np.random.uniform(-12, 12)]),
        lambda: np.clip(
            np.array([np.random.uniform(0.5, 1, 1),
                      np.random.randn(1)]), -1, 1).ravel())
    # 3) Starts in a band at x in [-20+w1, -15+w1]; actions with x-component
    #    drawn from [-1, -0.5].
    _collect(
        num_transitions / 4,
        lambda: np.array([np.random.uniform(-20 + w1, -15 + w1),
                          np.random.uniform(-12, 12)]),
        lambda: np.clip(
            np.array([np.random.uniform(-1, -0.5, 1),
                      np.random.randn(1)]), -1, 1).ravel())
    # 4) Starts in a band at y in [10+w2, 15+w2]; actions with y-component
    #    drawn from [-1, -0.5].
    _collect(
        num_transitions / 4,
        lambda: np.array([np.random.uniform(-30 - w1, -20 - w1),
                          np.random.uniform(10 + w2, 15 + w2)]),
        lambda: np.clip(
            np.array([np.random.randn(1),
                      np.random.uniform(-1, -0.5, 1)]), -1, 1).ravel())
    # 5) Starts in a band at y in [-15-w2, -10-w2]; actions with y-component
    #    drawn from [0.5, 1].
    _collect(
        num_transitions / 4,
        lambda: np.array([np.random.uniform(-30 - w1, -20 - w1),
                          np.random.uniform(-15 - w2, -10 - w2)]),
        lambda: np.clip(
            np.array([np.random.randn(1),
                      np.random.uniform(0.5, 1, 1)]), -1, 1).ravel())
    return transitions
if __name__ == '__main__':
    # Transition budget per task variant. Fixes in this block:
    #  - np.uniform does not exist -> np.random.uniform;
    #  - the sampled w1/w2 were discarded and undefined names (i, j) passed;
    #  - get_random_transitions_pointbot0 does not exist in this module ->
    #    get_random_transitions_pointbot1;
    #  - num_constraint_transitions was overwritten with the collected count
    #    each iteration, corrupting the budget for the next task.
    num_constraint_transitions = 30000
    for counter in range(25):
        print(counter)
        # Sample random wall offsets for this task variant.
        w1 = np.random.uniform(low=-5.0, high=5.0)
        w2 = np.random.uniform(low=-5.0, high=5.0)
        constraint_demo_data = get_random_transitions_pointbot1(
            w1=w1, w2=w2, discount=0.65,
            num_transitions=num_constraint_transitions)
        num_collected = len(constraint_demo_data)
        num_constraint_violations = sum(
            int(transition[2]) for transition in constraint_demo_data)
        print("Number of Constraint Transitions: ",
              num_collected)
        print("Number of Constraint Violations: ",
              num_constraint_violations)
        with open("demos/pointbot_1/constraint_demos_" + str(counter) + ".pkl",
                  'wb') as handle:
            pickle.dump(constraint_demo_data, handle)
|
{"/supplement_plots.py": ["/plotting_utils.py"], "/analyze_runs_brijen.py": ["/plotting_utils.py"], "/gen_maze_demos.py": ["/env/maze.py", "/env/mazes.py"], "/analyze_runs_ashwin.py": ["/plotting_utils.py"], "/main.py": ["/sac.py", "/gen_pointbot0_demos.py", "/env/cartpole.py", "/env/half_cheetah_disabled.py", "/env/ant_disabled.py"], "/env/image_maze.py": ["/env/maze_const_images.py"], "/constraint.py": ["/utils.py"], "/analyze_runs_michael.py": ["/plotting_utils.py"], "/env/maze.py": ["/env/maze_const.py"], "/sac.py": ["/utils.py", "/constraint.py", "/run_multitask.py"], "/gen_pointbot_demos.py": ["/env/simplepointbot1.py"], "/env/mazes.py": ["/env/maze_const.py", "/env/maze.py"], "/gen_cartpole_demos.py": ["/env/cartpole.py"]}
|
29,157,572
|
JiahaoYao/mesa-safe-rl
|
refs/heads/main
|
/env/mazes.py
|
import os
import pickle
import matplotlib.pyplot as plt
import os.path as osp
import numpy as np
from gym import Env
from gym import utils
from gym.spaces import Box
from mujoco_py import load_model_from_path, MjSim
from .maze_const import *
from .maze import MazeNavigation, get_random_transitions
import cv2
class Maze1Navigation(MazeNavigation):
    """Maze navigation variant 1 (geometry loaded from simple_maze_1.xml)."""

    def __init__(self):
        utils.EzPickle.__init__(self)
        self.hist = self.cost = self.done = self.time = self.state = None
        dirname = os.path.dirname(__file__)
        filename = os.path.join(dirname, 'simple_maze_1.xml')
        self.sim = MjSim(load_model_from_path(filename))
        self.horizon = HORIZON
        self._max_episode_steps = self.horizon
        self.transition_function = get_random_transitions
        self.steps = 0
        # Image observations unless ground-truth state is configured.
        self.images = not GT_STATE
        self.action_space = Box(-MAX_FORCE * np.ones(2),
                                MAX_FORCE * np.ones(2))
        # NOTE(review): duplicate assignment (already set above).
        self.transition_function = get_random_transitions
        obs = self._get_obs()
        if False:
            # Debug snippet (disabled): render one observation to disk, quit.
            self.reset()
            ob = self._get_obs(images=True)
            cv2.imwrite('runs/maze.jpg', 255*ob)
            exit()
        self.dense_reward = DENSE_REWARD
        if self.images:
            # With image observations the "space" is just the obs shape.
            self.observation_space = obs.shape
        else:
            self.observation_space = Box(-0.3, 0.3, shape=obs.shape)
        self.gain = 1.05
        # Fixed goal position.
        self.goal = np.zeros((2, ))
        self.goal[0] = 0.25
        self.goal[1] = 0

    def reset(self, difficulty='h', check_constraint=True, demos=False,
              pos=()):
        """Reset the agent position and return the initial observation.

        Args:
            difficulty: 'e'/'m'/'h' selects the x start band; None allows the
                full x range.
            check_constraint: retry the random spawn if it starts in contact.
            demos: unused here.
            pos: explicit (x, y) start overriding the random spawn.
        """
        if len(pos):
            self.sim.data.qpos[0] = pos[0]
            self.sim.data.qpos[1] = pos[1]
        else:
            if difficulty is None:
                self.sim.data.qpos[0] = np.random.uniform(-0.27, 0.27)
            elif difficulty == 'e':
                self.sim.data.qpos[0] = np.random.uniform(0.14, 0.22)
            elif difficulty == 'm':
                self.sim.data.qpos[0] = np.random.uniform(-0.04, 0.04)
            elif difficulty == 'h':
                self.sim.data.qpos[0] = np.random.uniform(-0.22, -0.13)
            self.sim.data.qpos[1] = np.random.uniform(-0.22, 0.22)
        self.steps = 0
        self.sim.forward()
        # print("RESET!", self._get_obs())
        # More than 3 contacts is treated as spawning against a wall.
        constraint = int(self.sim.data.ncon > 3)
        if constraint and check_constraint:
            if not len(pos):
                # Retry random spawns until collision-free.
                self.reset(difficulty)
        return self._get_obs()
class Maze2Navigation(Maze1Navigation):
    """Maze variant 2: same behavior as Maze1Navigation but loads
    simple_maze_2.xml."""

    def __init__(self):
        utils.EzPickle.__init__(self)
        self.hist = self.cost = self.done = self.time = self.state = None
        dirname = os.path.dirname(__file__)
        filename = os.path.join(dirname, 'simple_maze_2.xml')
        self.sim = MjSim(load_model_from_path(filename))
        self.horizon = HORIZON
        self._max_episode_steps = self.horizon
        self.transition_function = get_random_transitions
        self.steps = 0
        self.images = not GT_STATE
        self.action_space = Box(-MAX_FORCE * np.ones(2),
                                MAX_FORCE * np.ones(2))
        # NOTE(review): duplicate assignment (already set above).
        self.transition_function = get_random_transitions
        obs = self._get_obs()
        if False:
            # Debug snippet (disabled): render one observation to disk, quit.
            self.reset()
            ob = self._get_obs(images=True)
            cv2.imwrite('runs/maze.jpg', 255*ob)
            exit()
        self.dense_reward = DENSE_REWARD
        if self.images:
            self.observation_space = obs.shape
        else:
            self.observation_space = Box(-0.3, 0.3, shape=obs.shape)
        self.gain = 1.05
        # Fixed goal position.
        self.goal = np.zeros((2, ))
        self.goal[0] = 0.25
        self.goal[1] = 0
class Maze3Navigation(Maze1Navigation):
    """Maze variant 3: same behavior as Maze1Navigation but loads
    simple_maze_3.xml."""

    def __init__(self):
        utils.EzPickle.__init__(self)
        self.hist = self.cost = self.done = self.time = self.state = None
        dirname = os.path.dirname(__file__)
        filename = os.path.join(dirname, 'simple_maze_3.xml')
        self.sim = MjSim(load_model_from_path(filename))
        self.horizon = HORIZON
        self._max_episode_steps = self.horizon
        self.transition_function = get_random_transitions
        self.steps = 0
        self.images = not GT_STATE
        self.action_space = Box(-MAX_FORCE * np.ones(2),
                                MAX_FORCE * np.ones(2))
        # NOTE(review): duplicate assignment (already set above).
        self.transition_function = get_random_transitions
        obs = self._get_obs()
        if False:
            # Debug snippet (disabled): render one observation to disk, quit.
            self.reset()
            ob = self._get_obs(images=True)
            cv2.imwrite('runs/maze.jpg', 255*ob)
            exit()
        self.dense_reward = DENSE_REWARD
        if self.images:
            self.observation_space = obs.shape
        else:
            self.observation_space = Box(-0.3, 0.3, shape=obs.shape)
        self.gain = 1.05
        # Fixed goal position.
        self.goal = np.zeros((2, ))
        self.goal[0] = 0.25
        self.goal[1] = 0
class Maze4Navigation(Maze1Navigation):
    """Maze variant 4: loads simple_maze_4.xml and repositions specific wall
    geoms on every reset."""

    def __init__(self):
        utils.EzPickle.__init__(self)
        self.hist = self.cost = self.done = self.time = self.state = None
        dirname = os.path.dirname(__file__)
        filename = os.path.join(dirname, 'simple_maze_4.xml')
        self.sim = MjSim(load_model_from_path(filename))
        self.horizon = HORIZON
        self._max_episode_steps = self.horizon
        self.transition_function = get_random_transitions
        self.steps = 0
        self.images = not GT_STATE
        self.action_space = Box(-MAX_FORCE * np.ones(2),
                                MAX_FORCE * np.ones(2))
        # NOTE(review): duplicate assignment (already set above).
        self.transition_function = get_random_transitions
        obs = self._get_obs()
        if False:
            # Debug snippet (disabled): render one observation to disk, quit.
            self.reset()
            ob = self._get_obs(images=True)
            cv2.imwrite('runs/maze.jpg', 255*ob)
            exit()
        self.dense_reward = DENSE_REWARD
        if self.images:
            self.observation_space = obs.shape
        else:
            self.observation_space = Box(-0.3, 0.3, shape=obs.shape)
        self.gain = 1.05
        # Fixed goal position.
        self.goal = np.zeros((2, ))
        self.goal[0] = 0.25
        self.goal[1] = 0

    def reset(self, difficulty='h', check_constraint=True, demos=False,
              pos=()):
        """Reset the agent position, reposition the wall geoms, and return
        the initial observation. Arguments as in Maze1Navigation.reset."""
        if len(pos):
            self.sim.data.qpos[0] = pos[0]
            self.sim.data.qpos[1] = pos[1]
        else:
            if difficulty is None:
                self.sim.data.qpos[0] = np.random.uniform(-0.27, 0.27)
            elif difficulty == 'e':
                self.sim.data.qpos[0] = np.random.uniform(0.14, 0.22)
            elif difficulty == 'm':
                self.sim.data.qpos[0] = np.random.uniform(-0.04, 0.04)
            elif difficulty == 'h':
                self.sim.data.qpos[0] = np.random.uniform(-0.22, -0.13)
            self.sim.data.qpos[1] = np.random.uniform(-0.22, 0.22)
        self.steps = 0
        # Wall offsets — currently fixed; the commented-out expressions show
        # the intended randomization range.
        w1 = -0.08  #np.random.uniform(-0.2, 0.2)
        w2 = 0.08  #np.random.uniform(-0.2, 0.2)
        # print(self.sim.model.geom_pos[:])
        # print(self.sim.model.geom_pos[:].shape)
        # Shift the y-positions of wall geoms 5-10 by the offsets above.
        self.sim.model.geom_pos[5, 1] = 0.4 + w2
        self.sim.model.geom_pos[7, 1] = -0.25 + w2
        self.sim.model.geom_pos[6, 1] = 0.5 + w1
        self.sim.model.geom_pos[8, 1] = -0.25 + w1
        self.sim.model.geom_pos[9, 1] = 0.45
        self.sim.model.geom_pos[10, 1] = -0.25
        self.sim.forward()
        # print("RESET!", self._get_obs())
        # More than 3 contacts is treated as spawning against a wall.
        constraint = int(self.sim.data.ncon > 3)
        if constraint and check_constraint:
            if not len(pos):
                # Retry random spawns until collision-free.
                self.reset(difficulty)
        return self._get_obs()
class Maze5Navigation(Maze1Navigation):
    """Maze variant 5: loads simple_maze_5.xml and repositions wall geoms on
    reset (reset defined below)."""

    def __init__(self):
        utils.EzPickle.__init__(self)
        self.hist = self.cost = self.done = self.time = self.state = None
        dirname = os.path.dirname(__file__)
        filename = os.path.join(dirname, 'simple_maze_5.xml')
        self.sim = MjSim(load_model_from_path(filename))
        self.horizon = HORIZON
        self._max_episode_steps = self.horizon
        self.transition_function = get_random_transitions
        self.steps = 0
        self.images = not GT_STATE
        self.action_space = Box(-MAX_FORCE * np.ones(2),
                                MAX_FORCE * np.ones(2))
        # NOTE(review): duplicate assignment (already set above).
        self.transition_function = get_random_transitions
        obs = self._get_obs()
        if False:
            # Debug snippet (disabled): render one observation to disk, quit.
            self.reset()
            ob = self._get_obs(images=True)
            cv2.imwrite('runs/maze.jpg', 255*ob)
            exit()
        self.dense_reward = DENSE_REWARD
        if self.images:
            self.observation_space = obs.shape
        else:
            self.observation_space = Box(-0.3, 0.3, shape=obs.shape)
        self.gain = 1.05
        # Fixed goal position.
        self.goal = np.zeros((2, ))
        self.goal[0] = 0.25
        self.goal[1] = 0
def reset(self, difficulty='h', check_constraint=True, demos=False,
pos=()):
if len(pos):
self.sim.data.qpos[0] = pos[0]
self.sim.data.qpos[1] = pos[1]
else:
if difficulty is None:
self.sim.data.qpos[0] = np.random.uniform(-0.27, 0.27)
elif difficulty == 'e':
self.sim.data.qpos[0] = np.random.uniform(0.14, 0.22)
elif difficulty == 'm':
self.sim.data.qpos[0] = np.random.uniform(-0.04, 0.04)
elif difficulty == 'h':
self.sim.data.qpos[0] = np.random.uniform(-0.22, -0.13)
self.sim.data.qpos[1] = np.random.uniform(-0.22, 0.22)
self.steps = 0
# Randomize wal positions
w1 = -0.08 #np.random.uniform(-0.2, 0.2)
w2 = 0.08 #np.random.uniform(-0.2, 0.2)
# print(self.sim.model.geom_pos[:])
# print(self.sim.model.geom_pos[:].shape)
self.sim.model.geom_pos[5, 1] = 0.4
self.sim.model.geom_pos[6, 1] = 0.4
self.sim.model.geom_pos[7, 1] = -0.25
self.sim.model.geom_pos[8, 1] = -0.25
self.sim.model.geom_pos[9, 1] = 0.45
self.sim.model.geom_pos[10, 1] = -0.20
self.sim.forward()
# print("RESET!", self._get_obs())
constraint = int(self.sim.data.ncon > 3)
if constraint and check_constraint:
if not len(pos):
self.reset(difficulty)
return self._get_obs()
class Maze6Navigation(Maze1Navigation):
    """Maze variant 6: navigation task from Maze1Navigation with the wall
    layout from simple_maze_6.xml; reset() is inherited from the parent.

    Removed from the original: a dead `if False:` debug branch and a
    duplicate `transition_function` assignment.
    """

    def __init__(self):
        utils.EzPickle.__init__(self)
        self.hist = self.cost = self.done = self.time = self.state = None
        dirname = os.path.dirname(__file__)
        filename = os.path.join(dirname, 'simple_maze_6.xml')
        self.sim = MjSim(load_model_from_path(filename))
        self.horizon = HORIZON
        self._max_episode_steps = self.horizon
        self.transition_function = get_random_transitions
        self.steps = 0
        self.images = not GT_STATE
        self.action_space = Box(-MAX_FORCE * np.ones(2),
                                MAX_FORCE * np.ones(2))
        obs = self._get_obs()
        self.dense_reward = DENSE_REWARD
        if self.images:
            # Image observations: only the shape is recorded (no Box bounds).
            self.observation_space = obs.shape
        else:
            self.observation_space = Box(-0.3, 0.3, shape=obs.shape)
        self.gain = 1.05
        # Goal is fixed at (0.25, 0).
        self.goal = np.zeros((2, ))
        self.goal[0] = 0.25
        self.goal[1] = 0
|
{"/supplement_plots.py": ["/plotting_utils.py"], "/analyze_runs_brijen.py": ["/plotting_utils.py"], "/gen_maze_demos.py": ["/env/maze.py", "/env/mazes.py"], "/analyze_runs_ashwin.py": ["/plotting_utils.py"], "/main.py": ["/sac.py", "/gen_pointbot0_demos.py", "/env/cartpole.py", "/env/half_cheetah_disabled.py", "/env/ant_disabled.py"], "/env/image_maze.py": ["/env/maze_const_images.py"], "/constraint.py": ["/utils.py"], "/analyze_runs_michael.py": ["/plotting_utils.py"], "/env/maze.py": ["/env/maze_const.py"], "/sac.py": ["/utils.py", "/constraint.py", "/run_multitask.py"], "/gen_pointbot_demos.py": ["/env/simplepointbot1.py"], "/env/mazes.py": ["/env/maze_const.py", "/env/maze.py"], "/gen_cartpole_demos.py": ["/env/cartpole.py"]}
|
29,157,573
|
JiahaoYao/mesa-safe-rl
|
refs/heads/main
|
/env/maze_const.py
|
"""
Constants associated with the Maze env.
"""
HORIZON = 100
MAX_FORCE = 0.1
FAILURE_COST = 0
GOAL_THRESH = 3e-2
GT_STATE = True
# GT_STATE = False
DENSE_REWARD = True
# DENSE_REWARD = False
|
{"/supplement_plots.py": ["/plotting_utils.py"], "/analyze_runs_brijen.py": ["/plotting_utils.py"], "/gen_maze_demos.py": ["/env/maze.py", "/env/mazes.py"], "/analyze_runs_ashwin.py": ["/plotting_utils.py"], "/main.py": ["/sac.py", "/gen_pointbot0_demos.py", "/env/cartpole.py", "/env/half_cheetah_disabled.py", "/env/ant_disabled.py"], "/env/image_maze.py": ["/env/maze_const_images.py"], "/constraint.py": ["/utils.py"], "/analyze_runs_michael.py": ["/plotting_utils.py"], "/env/maze.py": ["/env/maze_const.py"], "/sac.py": ["/utils.py", "/constraint.py", "/run_multitask.py"], "/gen_pointbot_demos.py": ["/env/simplepointbot1.py"], "/env/mazes.py": ["/env/maze_const.py", "/env/maze.py"], "/gen_cartpole_demos.py": ["/env/cartpole.py"]}
|
29,157,574
|
JiahaoYao/mesa-safe-rl
|
refs/heads/main
|
/plotting_utils.py
|
import numpy as np
# colors = {
# "sac_recovery_pets": "red",
# "sac_recovery_ddpg": "blue",
# "sac_penalty": "green",
# "sac_lagrangian": "black",
# "sac_rcpo": "purple",
# "sac_rspo": "orange",
# "sac_vanilla": "olive",
# "sac_sqrl": "magenta",
# "sac_recovery_disable_relabel": "blue",
# "sac_recovery_pets_1k": "green",
# "sac_recovery_pets_5k": "purple"
# }
# Canonical plot colors keyed by algorithm / ablation name. The original
# literal repeated the sac_recovery_pets_{100,500,1k,5k,20k} group twice;
# dict literals keep only the last occurrence, so the duplicates were
# removed with no behavior change.
colors = {
    "sac_vanilla": "#AA5D1F",
    "sac_lagrangian": "#BA2DC1",
    "sac_rspo": "#6C2896",
    "sac_sqrl": "#D43827",
    "sac_penalty": "#4899C5",
    "sac_rcpo": "#34539C",
    "sac_recovery_ddpg": "red",
    "sac_recovery_pets": "#349C26",
    "sac_recovery_pets_100": "#AA5D1F",
    "sac_recovery_pets_500": "#34539C",
    "sac_recovery_pets_1k": "#4899C5",
    "sac_recovery_pets_5k": "#D43827",
    "sac_recovery_pets_20k": "#349C26",
    "reward_5": "#AA5D1F",
    "reward_10": "#34539C",
    "reward_15": "#4899C5",
    "reward_25": "#D43827",
    "reward_50": "#349C26",
    "nu_5": "#AA5D1F",
    "nu_10": "#34539C",
    "nu_15": "#4899C5",
    "nu_25": "#D43827",
    "nu_50": "#349C26",
    "lambda_5": "#AA5D1F",
    "lambda_10": "#34539C",
    "lambda_15": "#4899C5",
    "lambda_25": "#D43827",
    "lambda_50": "#349C26",
    "eps_0.15": "#AA5D1F",
    "eps_0.25": "#34539C",
    "eps_0.35": "#4899C5",
    "eps_0.45": "#D43827",
    "eps_0.55": "#349C26",
    "sac_recovery_pets_ablations": "#349C26",
    "sac_recovery_pets_disable_relabel": "#34539C",
    "sac_recovery_pets_disable_offline": "#AA5D1F",
    "sac_recovery_pets_disable_online": "#D43827",
    "multitask": "#AA5D1F",
    "meta": "#BA2DC1",
}
# Human-readable legend labels. LaTeX labels use raw strings so "\l" / "\e"
# are not treated as (invalid) escape sequences; the resulting string values
# are byte-identical to the originals.
names = {
    "sac_recovery_pets": "SAC + Model-Based Recovery",
    "sac_recovery_pets_ablations": "Ours: Recovery RL (MB Recovery)",
    "sac_recovery_ddpg": "SAC + Model-Free Recovery",
    "sac_penalty": "SAC + Reward Penalty (RCPO)",
    "sac_lagrangian": "SAC + Lagrangian",
    "sac_rcpo": "SAC + Critic Penalty (RCPO)",
    "sac_rspo": "SAC + RSPO",
    "sac_vanilla": "SAC",
    "sac_sqrl": "SQRL",
    "sac_recovery_pets_100": "100",
    "sac_recovery_pets_500": "500",
    "sac_recovery_pets_1k": "1K",
    "sac_recovery_pets_5k": "5K",
    "sac_recovery_pets_20k": "20K",
    "reward_5": r"$\lambda = 5$",
    "reward_10": r"$\lambda = 10$",
    "reward_15": r"$\lambda = 15$",
    "reward_25": r"$\lambda = 25$",
    "reward_50": r"$\lambda = 50$",
    "nu_5": r"$\lambda = 5$",
    "nu_10": r"$\lambda = 10$",
    "nu_15": r"$\lambda = 15$",
    "nu_25": r"$\lambda = 25$",
    "nu_50": r"$\lambda = 50$",
    "lambda_5": r"$\lambda = 5$",
    "lambda_10": r"$\lambda = 10$",
    "lambda_15": r"$\lambda = 15$",
    "lambda_25": r"$\lambda = 25$",
    "lambda_50": r"$\lambda = 50$",
    "eps_0.15": r"$\epsilon_{risk} = 0.15$",
    "eps_0.25": r"$\epsilon_{risk} = 0.25$",
    "eps_0.35": r"$\epsilon_{risk} = 0.35$",
    "eps_0.45": r"$\epsilon_{risk} = 0.45$",
    "eps_0.55": r"$\epsilon_{risk} = 0.55$",
    "sac_recovery_pets_disable_relabel": "Ours - Action Relabeling",
    "sac_recovery_pets_disable_offline": "Ours - Offline Training",
    "sac_recovery_pets_disable_online": "Ours - Online Training",
    "multitask": "Multitask",
    "meta": "Metalearning",
}


def get_color(algname, alt_color_map=None):
    """Color for `algname`: project palette first, then the caller-supplied
    fallback map, then a random RGB triple. (Mutable default argument
    replaced with None sentinel; call-compatible with the original.)"""
    if algname in colors:
        return colors[algname]
    alt_color_map = alt_color_map or {}
    if algname in alt_color_map:
        return alt_color_map[algname]
    return np.random.rand(3, )


def get_legend_name(algname, alt_name_map=None):
    """Legend label for `algname`, falling back to the raw key itself."""
    if algname in names:
        return names[algname]
    alt_name_map = alt_name_map or {}
    if algname in alt_name_map:
        return alt_name_map[algname]
    return algname
|
{"/supplement_plots.py": ["/plotting_utils.py"], "/analyze_runs_brijen.py": ["/plotting_utils.py"], "/gen_maze_demos.py": ["/env/maze.py", "/env/mazes.py"], "/analyze_runs_ashwin.py": ["/plotting_utils.py"], "/main.py": ["/sac.py", "/gen_pointbot0_demos.py", "/env/cartpole.py", "/env/half_cheetah_disabled.py", "/env/ant_disabled.py"], "/env/image_maze.py": ["/env/maze_const_images.py"], "/constraint.py": ["/utils.py"], "/analyze_runs_michael.py": ["/plotting_utils.py"], "/env/maze.py": ["/env/maze_const.py"], "/sac.py": ["/utils.py", "/constraint.py", "/run_multitask.py"], "/gen_pointbot_demos.py": ["/env/simplepointbot1.py"], "/env/mazes.py": ["/env/maze_const.py", "/env/maze.py"], "/gen_cartpole_demos.py": ["/env/cartpole.py"]}
|
29,157,575
|
JiahaoYao/mesa-safe-rl
|
refs/heads/main
|
/env/maze_const_images.py
|
"""
Constants associated with the Maze env.
"""
HORIZON = 50
MAX_FORCE = 0.3
FAILURE_COST = 0
GOAL_THRESH = 3e-2
# GT_STATE = True
GT_STATE = False
DENSE_REWARD = True
# DENSE_REWARD = False
|
{"/supplement_plots.py": ["/plotting_utils.py"], "/analyze_runs_brijen.py": ["/plotting_utils.py"], "/gen_maze_demos.py": ["/env/maze.py", "/env/mazes.py"], "/analyze_runs_ashwin.py": ["/plotting_utils.py"], "/main.py": ["/sac.py", "/gen_pointbot0_demos.py", "/env/cartpole.py", "/env/half_cheetah_disabled.py", "/env/ant_disabled.py"], "/env/image_maze.py": ["/env/maze_const_images.py"], "/constraint.py": ["/utils.py"], "/analyze_runs_michael.py": ["/plotting_utils.py"], "/env/maze.py": ["/env/maze_const.py"], "/sac.py": ["/utils.py", "/constraint.py", "/run_multitask.py"], "/gen_pointbot_demos.py": ["/env/simplepointbot1.py"], "/env/mazes.py": ["/env/maze_const.py", "/env/maze.py"], "/gen_cartpole_demos.py": ["/env/cartpole.py"]}
|
29,157,576
|
JiahaoYao/mesa-safe-rl
|
refs/heads/main
|
/env/simplepointbot1.py
|
"""
A robot that can exert force in cardinal directions. The robot's goal is to
reach the origin and it experiences zero-mean Gaussian Noise. State
representation is (x, y). Action representation is (dx, dy).
"""
import os
import pickle
import os.path as osp
import numpy as np
# import matplotlib.pyplot as plt
from gym import Env
from gym import utils
from gym.spaces import Box
from obstacle import Obstacle, ComplexObstacle
import matplotlib.pyplot as plt
import matplotlib.patches as patches
import numpy as np
import io
import cv2
"""
Constants associated with the PointBot env.
"""
START_POS = [-50, 0]  # robot spawn location (x, y)
END_POS = [0, 0]  # goal location (x, y)
GOAL_THRESH = 1.  # distance-to-goal threshold used by is_stable()
START_STATE = START_POS  # alias used by reset()
GOAL_STATE = END_POS  # alias used by step_cost() and the teachers
MAX_FORCE = 1  # per-axis action bound
HORIZON = 100  # episode length in steps
NOISE_SCALE = 0.05  # std of Gaussian process noise added in _next_state()
AIR_RESIST = 0.2  # NOTE(review): unused in the visible code
HARD_MODE = False  # True -> sparse 0/1 reward in step_cost()
# Axis-aligned boxes given as [[x_lo, x_hi], [y_lo, y_hi]], then wrapped in
# ComplexObstacle which makes them callable membership tests.
OBSTACLE = [[[-30, -20], [-7.5, 7.5]]]
CAUTION_ZONE = [[[-32, -18], [-12, 12]]]
OBSTACLE = ComplexObstacle(OBSTACLE)
CAUTION_ZONE = ComplexObstacle(CAUTION_ZONE)
def process_action(a):
    """Clamp an action element-wise to the env's force limit."""
    limit = MAX_FORCE
    return np.minimum(np.maximum(a, -limit), limit)
def teacher_action(state, goal):
    """Proportional step from `state` toward `goal`, clipped to MAX_FORCE.

    NOTE(review): this definition is shadowed by a later `teacher_action`
    (with a default goal) further down in the file; only the later one is
    live at import time. The boolean-mask clamping was replaced with the
    equivalent np.clip.
    """
    return np.clip(np.subtract(goal, state), -MAX_FORCE, MAX_FORCE)
class SimplePointBot(Env, utils.EzPickle):
    """2-D point mass with per-axis force actions, Gaussian process noise,
    and a rectangular obstacle between start and goal.

    State is (x, y); actions are (dx, dy) clipped to MAX_FORCE. The step
    cost is the negative distance to GOAL_STATE (sparse 0/1 in HARD_MODE).
    """

    def __init__(self, w1=None, w2=None):
        """w1/w2, when both given, widen the obstacle box by w1 in x and
        w2 in y (per side)."""
        utils.EzPickle.__init__(self)
        self.hist = self.cost = self.done = self.time = self.state = None
        self.A = np.eye(2)
        # Random per-instance actuation gains in [0.5, 1.5] per axis.
        self.B = np.array([[np.random.uniform(0.5, 1.5), 0],
                           [0, np.random.uniform(0.5, 1.5)]])
        self.horizon = HORIZON
        self.action_space = Box(-np.ones(2) * MAX_FORCE,
                                np.ones(2) * MAX_FORCE)
        # np.float was removed in NumPy 1.24; the builtin float is identical
        # here and keeps this working on modern NumPy.
        self.observation_space = Box(-np.ones(2) * float('inf'),
                                     np.ones(2) * float('inf'))
        self._max_episode_steps = HORIZON
        self.obstacle = OBSTACLE
        if w1 is not None and w2 is not None:
            new_obstacle = [[[-30 - w1, -20 + w1], [-7.5 - w2, 7.5 + w2]]]
            self.obstacle = ComplexObstacle(new_obstacle)
        self.caution_zone = CAUTION_ZONE
        self.transition_function = get_random_transitions
        self.safe_action = lambda x: safe_action(x)
        self.goal = GOAL_STATE

    def step(self, a):
        """Advance one timestep. Returns (state, cost, done, info)."""
        a = process_action(a)
        old_state = self.state.copy()
        next_state = self._next_state(self.state, a)
        cur_cost = self.step_cost(self.state, a)
        self.cost.append(cur_cost)
        self.state = next_state
        self.time += 1
        self.hist.append(self.state)
        # Episode ends on reaching the goal (cost > -1, i.e. within unit
        # distance) or on hitting the obstacle.
        self.done = cur_cost > -1 or self.obstacle(next_state)
        return self.state, cur_cost, self.done, {
            "constraint": self.obstacle(next_state),
            "reward": cur_cost,
            "state": old_state,
            "next_state": next_state,
            "action": a
        }

    def reset(self):
        """Reset to START_STATE plus unit Gaussian noise."""
        self.state = START_STATE + np.random.randn(2)
        self.time = 0
        self.cost = []
        self.done = False
        self.hist = [self.state]
        return self.state

    def _next_state(self, s, a, override=False):
        # Once inside the obstacle the robot is stuck (absorbing state).
        if self.obstacle(s):
            return s
        return self.A.dot(s) + self.B.dot(a) + NOISE_SCALE * np.random.randn(
            len(s))

    def step_cost(self, s, a):
        """Negative distance to goal (dense), or a goal-reached indicator
        when HARD_MODE is set."""
        if HARD_MODE:
            return int(
                np.linalg.norm(np.subtract(GOAL_STATE, s)) < GOAL_THRESH)
        return -np.linalg.norm(np.subtract(GOAL_STATE,
                                           s)) - self.obstacle(s) * 0.

    def values(self):
        """Cost-to-go at each visited state (reverse cumulative cost sum)."""
        return np.cumsum(np.array(self.cost)[::-1])[::-1]

    def sample(self):
        """
        samples a random action from the action space.
        """
        return np.random.random(2) * 2 * MAX_FORCE - MAX_FORCE

    def plot_trajectory(self, states=None):
        """Scatter-plot a trajectory (defaults to the current history)."""
        if states is None:  # was `== None`, which misbehaves for arrays
            states = self.hist
        states = np.array(states)
        # States are 2-D (x, y); the original indexed column 2, which is
        # out of bounds and raised IndexError.
        plt.scatter(states[:, 0], states[:, 1])
        plt.show()

    # Returns whether a state is stable or not
    def is_stable(self, s):
        return np.linalg.norm(np.subtract(GOAL_STATE, s)) <= GOAL_THRESH

    def teacher(self, sess=None):
        return SimplePointBotTeacher()

    def expert_action(self, s):
        # `self.teacher` is a method; it must be called to get the teacher
        # instance. The original accessed `_expert_control` on the bound
        # method itself, which raised AttributeError.
        return self.teacher()._expert_control(s, 0)
def get_random_transitions(num_transitions,
                           task_demos=False,
                           save_rollouts=False):
    """Collect short random rollouts around (and through) the obstacle.

    Returns a flat list of (state, action, constraint, next_state,
    not constraint) tuples, or the per-rollout nested list when
    save_rollouts is True. `task_demos` is unused (kept for API
    compatibility). The five duplicated sampling loops of the original are
    factored into one helper; RNG call order is preserved exactly.
    """
    env = SimplePointBot()
    transitions = []
    rollouts = []

    def _uniform_state(x_lo, x_hi, y_lo, y_hi):
        return np.array([np.random.uniform(x_lo, x_hi),
                         np.random.uniform(y_lo, y_hi)])

    def _free_state():
        # Anywhere in the arena, rejection-sampled out of the obstacle.
        state = _uniform_state(-40, 10, -25, 25)
        while env.obstacle(state):
            state = _uniform_state(-40, 10, -25, 25)
        return state

    def _biased_action(x_sampler, y_sampler):
        # Per-axis samplers let each region bias motion toward the obstacle.
        return np.clip(np.array([x_sampler(), y_sampler()]), -1, 1).ravel()

    def _collect(n_rollouts, sample_state, sample_action):
        # Roll out up to 10 random steps from each sampled start state,
        # stopping a rollout early on a constraint violation.
        for _ in range(n_rollouts):
            rollouts.append([])
            state = sample_state()
            for _ in range(10):
                action = sample_action()
                next_state = env._next_state(state, action, override=True)
                constraint = env.obstacle(next_state)
                step = (state, action, constraint, next_state,
                        not constraint)
                transitions.append(step)
                rollouts[-1].append(step)
                state = next_state
                if constraint:
                    break

    # Free-space rollouts with isotropic random actions.
    _collect(num_transitions // 10 // 3, _free_state,
             lambda: np.clip(np.random.randn(2), -1, 1))
    # Rollouts that start beside the obstacle and push toward it from each
    # side, so constraint violations are well represented in the data.
    _collect(num_transitions // 10 // 4,
             lambda: _uniform_state(-35, -30, -12, 12),
             lambda: _biased_action(lambda: np.random.uniform(0.5, 1, 1),
                                    lambda: np.random.randn(1)))
    _collect(num_transitions // 10 // 4,
             lambda: _uniform_state(-20, -15, -12, 12),
             lambda: _biased_action(lambda: np.random.uniform(-1, -0.5, 1),
                                    lambda: np.random.randn(1)))
    _collect(num_transitions // 10 // 4,
             lambda: _uniform_state(-30, -20, 10, 15),
             lambda: _biased_action(lambda: np.random.randn(1),
                                    lambda: np.random.uniform(-1, -0.5, 1)))
    _collect(num_transitions // 10 // 4,
             lambda: _uniform_state(-30, -20, -15, -10),
             lambda: _biased_action(lambda: np.random.randn(1),
                                    lambda: np.random.uniform(0.5, 1, 1)))
    if save_rollouts:
        return rollouts
    else:
        return transitions
def render(loc):
    """Render the pointbot arena with the robot at `loc` and return it as an
    RGB uint8 image array.

    Removed from the original: a block of unused demo variables (data_set,
    categories, colormap, margin) and the commented-out scatter call — none
    of them affected the rendered output.
    """

    def get_img_from_fig(fig, dpi=180):
        # Rasterize a matplotlib figure to an RGB array via an in-memory PNG.
        buf = io.BytesIO()
        fig.savefig(buf, format="png", dpi=dpi)
        buf.seek(0)
        img_arr = np.frombuffer(buf.getvalue(), dtype=np.uint8)
        buf.close()
        img = cv2.imdecode(img_arr, 1)
        img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
        return img

    fig = plt.figure()
    ax = fig.add_subplot(111)
    # Obstacle rectangle (matches the OBSTACLE bounds).
    min_f0, max_f0 = -30, -20
    min_f1, max_f1 = -7.5, 7.5
    ax.add_patch(
        patches.Rectangle(
            xy=(min_f0, min_f1),  # point of origin.
            width=max_f0 - min_f0,
            height=max_f1 - min_f1,
            linewidth=1,
            color='red',
            fill=True
        )
    )
    # Current robot location (green) plus start and goal markers.
    ax.add_patch(plt.Circle(loc, radius=1, color='green'))
    ax.add_patch(plt.Circle((-50, 0), radius=1))
    ax.add_patch(plt.Circle((0, 0), radius=1))
    ax.annotate("start", xy=(-50, 3), fontsize=10, ha="center")
    ax.annotate("goal", xy=(0, 3), fontsize=10, ha="center")
    plt.xlim(-60, 10)
    plt.ylim(-30, 30)
    ax.set_aspect('equal')
    ax.autoscale_view()
    # plt.savefig("pointbot0_cartoon.png")
    return get_img_from_fig(fig)
def safe_action(state, goal=GOAL_STATE):
    """Recovery action that pushes the robot back toward the corridor around
    the obstacle band (goal is accepted for API symmetry but unused)."""
    x, y = state[0], state[1]
    dx = -1 if x < -30 else (1 if x > -20 else 0)
    dy = 1 if y > 10 else (-1 if y < -10 else 0)
    return np.array([dx, dy])
def teacher_action(state, goal=GOAL_STATE):
    """Proportional step from `state` toward `goal`, clipped to MAX_FORCE.

    NOTE(review): this re-defines the earlier `teacher_action` in this file
    (the earlier one had no default goal); this later definition is the one
    in effect at import time. Mask-based clamping replaced with np.clip.
    """
    return np.clip(np.subtract(goal, state), -MAX_FORCE, MAX_FORCE)
class SimplePointBotTeacher(object):
    """Scripted demonstrator that drives the point bot toward the goal with
    a clipped proportional controller and records value-annotated rollouts."""

    def __init__(self):
        self.env = SimplePointBot()
        self.demonstrations = []
        self.outdir = "data/simplepointbot"
        self.goal = GOAL_STATE

    def _generate_trajectory(self):
        """Roll out one full-horizon episode under the expert controller and
        append the value (reverse cumulative cost) to every transition.

        Asserts that the episode actually terminates in the goal set.
        """
        transitions = []
        state = self.env.reset()
        for i in range(HORIZON):
            # if i < HORIZON / 2:
            #     action = [0.1, 0.1]
            # else:
            action = self._expert_control(state, i)
            next_state, cost, done, _ = self.env.step(action)
            transitions.append([state, action, cost, next_state, done])
            state = next_state
        assert done, "Did not reach the goal set on task completion."
        V = self.env.values()
        for i, t in enumerate(transitions):
            t.append(V[i])
        # self.env.plot_trajectory()
        return transitions

    def generate_demonstrations(self, num_demos):
        """Pickle `num_demos` expert trajectories into self.outdir.

        Raises RuntimeError if the directory already exists, to avoid
        silently clobbering previously generated demonstrations.
        """
        if not os.path.exists(self.outdir):
            os.makedirs(self.outdir)
        else:
            raise RuntimeError("Directory %s already exists." % (self.outdir))
        for i in range(num_demos):
            if i % 100 == 0:
                print("Generating Demos: Iteration %d" % i)
            demo = self._generate_trajectory()
            with open(osp.join(self.outdir, "%d.pkl" % (i)), "wb") as f:
                pickle.dump(demo, f)
            self.demonstrations.append(demo)

    def _get_gain(self, t):
        # NOTE(review): self.Ks is never assigned anywhere in this file, so
        # calling this raises AttributeError. Looks like leftover from an
        # LQR-style controller — confirm before relying on it.
        return self.Ks[t]

    def _expert_control(self, s, t):
        # Clipped proportional control toward the goal (timestep t unused).
        return teacher_action(s, self.goal)
if __name__ == '__main__':
    # Smoke-test the env (one push then idle to the horizon), then generate
    # and save 1000 expert demonstrations.
    env = SimplePointBot()
    obs = env.reset()
    env.step([1, 1])
    for i in range(HORIZON - 1):
        env.step([0, 0])
    teacher = env.teacher()
    teacher.generate_demonstrations(1000)
    # env.plot_trajectory()
|
{"/supplement_plots.py": ["/plotting_utils.py"], "/analyze_runs_brijen.py": ["/plotting_utils.py"], "/gen_maze_demos.py": ["/env/maze.py", "/env/mazes.py"], "/analyze_runs_ashwin.py": ["/plotting_utils.py"], "/main.py": ["/sac.py", "/gen_pointbot0_demos.py", "/env/cartpole.py", "/env/half_cheetah_disabled.py", "/env/ant_disabled.py"], "/env/image_maze.py": ["/env/maze_const_images.py"], "/constraint.py": ["/utils.py"], "/analyze_runs_michael.py": ["/plotting_utils.py"], "/env/maze.py": ["/env/maze_const.py"], "/sac.py": ["/utils.py", "/constraint.py", "/run_multitask.py"], "/gen_pointbot_demos.py": ["/env/simplepointbot1.py"], "/env/mazes.py": ["/env/maze_const.py", "/env/maze.py"], "/gen_cartpole_demos.py": ["/env/cartpole.py"]}
|
29,157,577
|
JiahaoYao/mesa-safe-rl
|
refs/heads/main
|
/env/push.py
|
import numpy as np
import cv2
import os
from gym import utils
from gym.envs.robotics import fetch_env
# Ensure we get the path separator correct on windows
MODEL_XML_PATH = os.path.join(os.getcwd(), "push_env", "assets", 'fetch', 'push.xml')
class FetchPushEnv(fetch_env.FetchEnv, utils.EzPickle):
    """Fetch push task: a blocked-gripper arm pushes an object to a target
    on the table. Thin configuration wrapper around gym's fetch_env.FetchEnv
    using the local push.xml model."""

    def __init__(self, reward_type='sparse'):
        # Initial configuration: robot base slide joints plus the object's
        # free-joint pose (x, y, z, quaternion).
        initial_qpos = {
            'robot0:slide0': 0.405,
            'robot0:slide1': 0.48,
            'robot0:slide2': 0.0,
            'object0:joint': [1.25, 0.53, 0.4, 1., 0., 0., 0.],
        }
        fetch_env.FetchEnv.__init__(
            self, MODEL_XML_PATH, has_object=True, block_gripper=True, n_substeps=20,
            gripper_extra_height=0.0, target_in_the_air=False, target_offset=0.0,
            obj_range=0.15, target_range=0.15, distance_threshold=0.05,
            initial_qpos=initial_qpos, reward_type=reward_type)
        utils.EzPickle.__init__(self)
'''
env = FetchPushEnv()
env.seed(9)
env.reset()
obs = env.render(mode='rgb_array')
obs = ~(255*obs)
cv2.imwrite('temp.jpg', obs)
for _ in range(1000):
#env.render()
env.step(np.array([.1, 0, 0, 0])) # take a random action
env.close()
'''
|
{"/supplement_plots.py": ["/plotting_utils.py"], "/analyze_runs_brijen.py": ["/plotting_utils.py"], "/gen_maze_demos.py": ["/env/maze.py", "/env/mazes.py"], "/analyze_runs_ashwin.py": ["/plotting_utils.py"], "/main.py": ["/sac.py", "/gen_pointbot0_demos.py", "/env/cartpole.py", "/env/half_cheetah_disabled.py", "/env/ant_disabled.py"], "/env/image_maze.py": ["/env/maze_const_images.py"], "/constraint.py": ["/utils.py"], "/analyze_runs_michael.py": ["/plotting_utils.py"], "/env/maze.py": ["/env/maze_const.py"], "/sac.py": ["/utils.py", "/constraint.py", "/run_multitask.py"], "/gen_pointbot_demos.py": ["/env/simplepointbot1.py"], "/env/mazes.py": ["/env/maze_const.py", "/env/maze.py"], "/gen_cartpole_demos.py": ["/env/cartpole.py"]}
|
29,157,578
|
JiahaoYao/mesa-safe-rl
|
refs/heads/main
|
/env/__init__.py
|
from gym.envs.registration import register
# Gym registrations for every environment shipped in this package.
# Point-bot navigation tasks.
register(
    id='SimplePointBot-v0', entry_point='env.simplepointbot0:SimplePointBot')
register(
    id='SimplePointBot-v1', entry_point='env.simplepointbot1:SimplePointBot')
# 2-D maze navigation variants (state-based and image-based).
register(id='Maze-v0', entry_point='env.maze:MazeNavigation')
register(id='Maze1-v0', entry_point='env.mazes:Maze1Navigation')
register(id='Maze2-v0', entry_point='env.mazes:Maze2Navigation')
register(id='Maze3-v0', entry_point='env.mazes:Maze3Navigation')
register(id='Maze4-v0', entry_point='env.mazes:Maze4Navigation')
register(id='Maze5-v0', entry_point='env.mazes:Maze5Navigation')
register(id='Maze6-v0', entry_point='env.mazes:Maze6Navigation')
register(id='ImageMaze-v0', entry_point='env.image_maze:MazeImageNavigation')
# Locomotion and manipulation tasks.
register(id='CliffWalker-v0', entry_point='env.cliffwalker:CliffWalkerEnv')
register(id='CliffCheetah-v0', entry_point='env.cliffcheetah:CliffCheetahEnv')
register(id='Shelf-v0', entry_point='env.shelf_env:ShelfEnv')
register(
    id='ShelfDynamic-v0', entry_point='env.shelf_dynamic_env:ShelfDynamicEnv')
register(id='ShelfLong-v0', entry_point='env.shelf_long_env:ShelfLongEnv')
register(
    id='ShelfDynamicLong-v0',
    entry_point='env.shelf_dynamic_long_env:ShelfDynamicLongEnv')
register(id='ShelfReach-v0', entry_point='env.shelf_reach_env:ShelfRotEnv')
register(id='CliffPusher-v0', entry_point='env.cliffpusher:PusherEnv')
register(id='Reacher-v0', entry_point='env.reacher:ReacherSparse3DEnv')
register(id='Car-v0', entry_point='env.car:DubinsCar')
register(id='DVRKReacher-v0', entry_point='env.dvrk_reacher:DVRK_Reacher')
register(id='Minitaur-v0', entry_point='env.minitaur:MinitaurGoalVelocityEnv')
# Mujoco Envs
register(id='CartPoleLength-v0', entry_point='env.cartpole:CartPoleEnv', max_episode_steps=200)
register(id='Push-v0', entry_point='env.push:FetchPushEnv', max_episode_steps=50)
#register(id='HalfCheetah-Disabled-v0', entry_point='env.half_cheetah_disabled:HalfCheetahEnv')
# register(
# id='MBRLPusherSparse-v0',
# entry_point='dmbrl.env.pushersparse:PusherSparseEnv'
# )
# register(
# id='MBRL-PickAndPlace-v1',
# entry_point='dmbrl.env.pick_and_place:FetchPickAndPlaceEnv'
# )
# register(
# id='AutograspCartgripper-v0',
# entry_point='dmbrl.env.cartgripper:AutograspCartgripperEnv'
# )
# register(
# id='TallCartgripper-v0',
# entry_point='dmbrl.env.tall_cartgripper:TallCartgripperEnv'
# )
# register(
# id='CartgripperXZGrasp-v0',
# entry_point='dmbrl.env.cartgripper_xz_grasp:CartgripperXZGrasp'
# )
|
{"/supplement_plots.py": ["/plotting_utils.py"], "/analyze_runs_brijen.py": ["/plotting_utils.py"], "/gen_maze_demos.py": ["/env/maze.py", "/env/mazes.py"], "/analyze_runs_ashwin.py": ["/plotting_utils.py"], "/main.py": ["/sac.py", "/gen_pointbot0_demos.py", "/env/cartpole.py", "/env/half_cheetah_disabled.py", "/env/ant_disabled.py"], "/env/image_maze.py": ["/env/maze_const_images.py"], "/constraint.py": ["/utils.py"], "/analyze_runs_michael.py": ["/plotting_utils.py"], "/env/maze.py": ["/env/maze_const.py"], "/sac.py": ["/utils.py", "/constraint.py", "/run_multitask.py"], "/gen_pointbot_demos.py": ["/env/simplepointbot1.py"], "/env/mazes.py": ["/env/maze_const.py", "/env/maze.py"], "/gen_cartpole_demos.py": ["/env/cartpole.py"]}
|
29,157,579
|
JiahaoYao/mesa-safe-rl
|
refs/heads/main
|
/env/cartpole.py
|
import numpy as np
from gym import utils
from gym.envs.mujoco import mujoco_env
import mujoco_py
import os
from filelock import FileLock
import xml.etree.ElementTree
HORIZON = 200


def transition_function(num_transitions, length=None, discount=0.8):
    """Collect random-policy transitions from CartPoleEnv (optionally with a
    fixed pole `length`), in 30-step episodes, until more than
    `num_transitions` have been gathered. Each recorded step is
    [state, action, constraint, next_state, not constraint, constraint-to-go],
    where the last entry is the discounted future constraint signal.
    """
    env = CartPoleEnv(init_length=length)
    transitions = []
    episode = []
    done = True
    steps = 0
    while True:
        if done:
            steps = 0
            if episode:
                # Walk the finished episode backwards, tagging every step
                # with its discounted constraint-to-go before flushing it.
                mc_reward = 0
                for step in reversed(episode):
                    mc_reward = step[2] + discount * mc_reward
                    step.append(mc_reward)
                transitions.extend(episode)
                if len(transitions) > num_transitions:
                    break
            state = env.reset()
            episode = []
        action = env.action_space.sample()
        next_state, _, _, info = env.step(action)
        steps += 1
        violated = info['constraint']
        done = steps == 30
        episode.append([state, action, violated, next_state, not violated])
        state = next_state
    return transitions
class CartPoleEnv(mujoco_env.MujocoEnv, utils.EzPickle):
    """Mujoco cart-pole whose pole length is either fixed (init_length) or
    re-sampled uniformly from [domain_low, domain_high] on every training
    reset, for domain-randomization / adaptation experiments.

    A "catastrophe" (constraint violation) is the pole passing +/- pi/2 or
    the cart leaving +/- 2.4.
    """

    def __init__(self, init_length=None, no_task=True):
        """init_length: fix the pole length permanently; no_task: when False,
        observations are augmented with (length, violation flag)."""
        print("Cartpole Length: ", init_length)  # typo "Carpole" fixed
        self.no_task = no_task
        utils.EzPickle.__init__(self)
        dir_path = os.path.dirname(os.path.realpath(__file__))
        self.xml_location = '%s/assets/cartpole.xml' % dir_path
        self.mode = 'train'
        self.test_domain = 1.0
        self.domain_low = 0.4
        self.domain_high = 0.8
        self.fixed = False
        self.pendulum_length = 0.6
        if init_length:
            self.pendulum_length = init_length
            self.fixed = True
        self.set_length(self.pendulum_length)
        self.steps = 0
        self._max_episode_steps = HORIZON
        self.max_episode_steps = HORIZON
        self.transition_function = transition_function
        mujoco_env.MujocoEnv.__init__(self, self.xml_location, 2)

    def step(self, a, early_stop=True):
        """Simulate one action. Returns (obs, reward, done, info); info
        carries the constraint flag plus the raw transition."""
        self.steps += 1
        cur_ob = self._get_obs()
        self.do_simulation(a, self.frame_skip)
        ob = self._get_obs()
        # Reward is maximal when the pole tip sits at the upright target.
        reward = np.exp(
            -np.sum(np.square(self._get_ee_pos(ob) - np.array([0.0, self.pendulum_length]))) / (self.pendulum_length ** 2)
        )
        catastrophe = (np.abs(ob[1]) > np.pi / 2) or (np.abs(ob[0]) >= 2.4)
        constraint = False
        if catastrophe:
            if not self.no_task:
                ob[-1] = 1  # expose the violation in the task-augmented obs
            constraint = True
        notdone = np.isfinite(ob).all() and not catastrophe
        # The horizon was hard-coded as 200; use the configured value
        # (HORIZON == 200, so behavior is unchanged today).
        done = (not notdone) or self.steps >= self._max_episode_steps
        if not early_stop:
            done = False
        info = {
            "constraint": constraint,
            "reward": reward,
            "action": a,
            "state": cur_ob,
            "next_state": ob,
        }
        return ob, reward, done, info

    def reset_model(self):
        if not hasattr(self, "pendulum_length"):
            self.pendulum_length = self.np_random.uniform(self.domain_low, self.domain_high)
        qpos = self.init_qpos + self.np_random.uniform(size=self.model.nq, low=-0.01, high=0.01)
        qvel = self.init_qvel + self.np_random.uniform(size=self.model.nv, low=-0.01, high=0.01)
        self.set_state(qpos, qvel)
        return self._get_obs()

    def _get_obs(self):
        # (qpos, qvel) flattened; when task-conditioned, appended with the
        # pole length and a violation slot (set to 1 on catastrophe).
        original_obs = np.concatenate([self.data.qpos, self.data.qvel]).ravel()
        if self.no_task:
            return original_obs
        return np.concatenate([original_obs, [self.pendulum_length, 0]], axis=-1)

    def _get_ee_pos(self, x):
        """Pole-tip (x, y) from cart position x[0] and pole angle x[1]."""
        x0, theta = x[0], x[1]
        return np.array([
            x0 + self.pendulum_length * np.sin(theta),
            self.pendulum_length * np.cos(theta)
        ])

    def viewer_setup(self):
        v = self.viewer
        v.cam.trackbodyid = 0
        v.cam.distance = self.model.stat.extent

    def set_length(self, length):
        """Rewrite the pole geom in the asset XML to `length` and reload the
        model. File-locked because parallel workers share the asset file."""
        lock = FileLock(self.xml_location + '.lock')  # concurrency protection
        with lock:
            et = xml.etree.ElementTree.parse(self.xml_location)
            et.find('worldbody').find('body').find('body').find('geom').set(
                'fromto', "0 0 0 0.001 0 %0.3f" % length)  # resize the pole
            et.write(self.xml_location)
            self.model = mujoco_py.load_model_from_path(self.xml_location)
            self.sim = mujoco_py.MjSim(self.model)
            self.data = self.sim.data

    def reset(self, mode='train'):
        """Reset the episode. In 'train' mode (and not fixed) a new pole
        length is sampled; the first 'test' reset switches permanently to
        the held-out test length."""
        self.steps = 0
        if mode == 'train' and not self.fixed:
            self.pendulum_length = self.np_random.uniform(self.domain_low, self.domain_high)
            self.set_length(self.pendulum_length)
        elif self.mode != 'test' and mode == 'test' and not self.fixed:
            self.pendulum_length = self.test_domain
            self.mode = mode
            self.set_length(self.test_domain)
        mujoco_env.MujocoEnv.reset(self)
        return self._get_obs()

    def get_image(self):
        return self.render(mode='rgb_array', width=150, height=150)
'''
env = CartPoleEnv()
env.reset()
for i in range(1000):
print("fuckery")
env.step(0)
haha = env.render(mode='rgb_array', width=256, height=256)
from PIL import Image
im = Image.fromarray(haha)
im.save("your_file.jpeg")
env.reset()
'''
|
{"/supplement_plots.py": ["/plotting_utils.py"], "/analyze_runs_brijen.py": ["/plotting_utils.py"], "/gen_maze_demos.py": ["/env/maze.py", "/env/mazes.py"], "/analyze_runs_ashwin.py": ["/plotting_utils.py"], "/main.py": ["/sac.py", "/gen_pointbot0_demos.py", "/env/cartpole.py", "/env/half_cheetah_disabled.py", "/env/ant_disabled.py"], "/env/image_maze.py": ["/env/maze_const_images.py"], "/constraint.py": ["/utils.py"], "/analyze_runs_michael.py": ["/plotting_utils.py"], "/env/maze.py": ["/env/maze_const.py"], "/sac.py": ["/utils.py", "/constraint.py", "/run_multitask.py"], "/gen_pointbot_demos.py": ["/env/simplepointbot1.py"], "/env/mazes.py": ["/env/maze_const.py", "/env/maze.py"], "/gen_cartpole_demos.py": ["/env/cartpole.py"]}
|
29,157,580
|
JiahaoYao/mesa-safe-rl
|
refs/heads/main
|
/gen_cartpole_demos.py
|
from env.cartpole import CartPoleEnv, transition_function
import numpy as np
import pickle
if __name__ == '__main__':
    # Generate 20 batches of random-policy cartpole transitions, each with a
    # freshly sampled pole length, and pickle each batch to disk.
    num_transitions = 10000
    for run_idx in range(20):
        print(run_idx)
        pole_length = np.random.uniform(0.4, 0.8)
        constraint_demo_data = transition_function(num_transitions, pole_length, 0.8)
        # Tally how many of the collected transitions violate the constraint.
        num_constraint_transitions = len(constraint_demo_data)
        num_constraint_violations = sum(int(t[2]) for t in constraint_demo_data)
        print("Number of Constraint Transitions: ",
              num_constraint_transitions)
        print("Number of Constraint Violations: ",
              num_constraint_violations)
        out_path = "demos/cartpole/constraint_demos_%d.pkl" % run_idx
        with open(out_path, 'wb') as handle:
            pickle.dump(constraint_demo_data, handle)
|
{"/supplement_plots.py": ["/plotting_utils.py"], "/analyze_runs_brijen.py": ["/plotting_utils.py"], "/gen_maze_demos.py": ["/env/maze.py", "/env/mazes.py"], "/analyze_runs_ashwin.py": ["/plotting_utils.py"], "/main.py": ["/sac.py", "/gen_pointbot0_demos.py", "/env/cartpole.py", "/env/half_cheetah_disabled.py", "/env/ant_disabled.py"], "/env/image_maze.py": ["/env/maze_const_images.py"], "/constraint.py": ["/utils.py"], "/analyze_runs_michael.py": ["/plotting_utils.py"], "/env/maze.py": ["/env/maze_const.py"], "/sac.py": ["/utils.py", "/constraint.py", "/run_multitask.py"], "/gen_pointbot_demos.py": ["/env/simplepointbot1.py"], "/env/mazes.py": ["/env/maze_const.py", "/env/maze.py"], "/gen_cartpole_demos.py": ["/env/cartpole.py"]}
|
29,157,581
|
JiahaoYao/mesa-safe-rl
|
refs/heads/main
|
/env/half_cheetah_disabled.py
|
from __future__ import division
from __future__ import print_function
from __future__ import absolute_import
import os
import numpy as np
import gym
from gym import utils
from learning_to_adapt.envs.mujoco_env import MujocoEnv
from learning_to_adapt.utils.serializable import Serializable
from gym.utils import seeding
HORIZON = 1000
def transition_function(num_transition, discount=0.99):
    """Collect at least *num_transition* random-policy transitions.

    Each transition is [state, action, constraint, next_state, not constraint];
    at episode end a discounted constraint return (Monte-Carlo, over the
    constraint indicators) is appended to every transition of that episode.
    Episodes are truncated at 1000 steps.
    """
    env = HalfCheetahEnv()
    collected = []
    episode = []
    episode_over = True
    step_count = 0
    while True:
        if episode_over:
            step_count = 0
            if len(episode):
                # Back-propagate the discounted constraint return through
                # the finished episode, then bank it.
                running = 0
                for record in reversed(episode):
                    running = record[2] + discount * running
                    record.append(running)
                collected.extend(episode)
                if len(collected) > num_transition:
                    break
            obs = env.reset()
            episode = []
        act = env.action_space.sample()
        nxt_obs, _reward, _, step_info = env.step(act)
        step_count += 1
        violated = step_info['constraint']
        episode_over = step_count == 1000
        episode.append([obs, act, violated, nxt_obs,
                        not violated])
        obs = nxt_obs
    return collected
class HalfCheetahEnv(MujocoEnv, Serializable, utils.EzPickle):
    """Half-cheetah variant where one actuator can be 'crippled' (masked to zero).

    A constraint violation ("catastrophe") is defined as the head geom touching
    the floor; step() reports it via info['constraint'] and can end the episode
    early.
    """
    metadata = {
        'render.modes': ['human', 'rgb_array'],
        'video.frames_per_second': 50
    }

    def __init__(self, task='cripple', reset_every_episode=False):
        self.no_task = True
        self.task = None
        Serializable.quick_init(self, locals())
        self.cripple_mask = None
        self.first = True
        self._max_episode_steps = 1000
        self.task = task  # 'cripple' or None
        self.crippled_leg = 0
        self.prev_torso = None
        self.prev_qpos = None
        self.transition_function = transition_function
        MujocoEnv.__init__(self, os.path.join(os.path.abspath(os.path.dirname(__file__)), "assets", "half_cheetah_disabled.xml"))
        # Snapshot the pristine geom properties so reset_task can recolor/restore.
        self._init_geom_rgba = self.model.geom_rgba.copy()
        self._init_geom_contype = self.model.geom_contype.copy()
        self._init_geom_size = self.model.geom_size.copy()
        self._init_geom_pos = self.model.geom_pos.copy()
        self.dt = self.model.opt.timestep
        self.act_high = self.action_space.high[0]
        self.act_low = self.action_space.low[0]
        # All-ones mask means no actuator is crippled yet; reset_task zeroes one.
        self.cripple_mask = np.ones(self.action_space.shape)
        self.reward_range = (-np.inf, np.inf)
        utils.EzPickle.__init__(self, locals())

    def seed(self, seed=None):
        """Seed the env RNG; returns the seed list per the gym convention."""
        self.np_random, seed = seeding.np_random(seed)
        return [seed]

    def get_current_obs(self):
        """Return the observation vector (joint pos/vel plus torso COM terms)."""
        # BUG FIX: was `self.prev_qpos == None` — once prev_qpos holds a NumPy
        # array that performs an elementwise comparison; identity is intended.
        if self.prev_qpos is None:
            self.prev_qpos = self.get_body_com("torso")[:1]
        self.dt = self.model.opt.timestep
        if self.no_task:
            return np.concatenate([
                self.model.data.qpos.flatten()[1:],
                self.model.data.qvel.flat,
                self.get_body_com("torso").flat[1:],
            ])
        else:
            return np.concatenate([
                self.model.data.qpos.flatten()[1:],
                self.model.data.qvel.flat,
                self.get_body_com("torso").flat[1:],
                (self.get_body_com("torso")[:1] - self.prev_qpos)/self.dt,  # reward term
                self.check_catastrophe(),  # catastrophe indicator
            ])

    def check_catastrophe(self):
        """Return [1] if the head geom is in contact with the floor, else [0]."""
        for i in range(self.data.ncon):
            contact = self.data.contact[i]
            name_set = set()
            name_set.add(self.model.geom_names[contact.geom1])
            name_set.add(self.model.geom_names[contact.geom2])
            if 'floor' in name_set and 'head' in name_set:
                return [1]
        return [0]

    def get_body_xmat(self, body_name):
        """3x3 rotation matrix of the named body."""
        idx = self.model.body_names.index(body_name)
        return self.model.data.xmat[idx].reshape((3, 3))

    def get_body_com(self, body_name):
        """Center of mass of the subtree rooted at the named body."""
        idx = self.model.body_names.index(body_name)
        return self.model.data.com_subtree[idx]

    def step(self, action, early_stop = True):
        """Apply a clipped (and possibly crippled) action.

        Reward is forward torso velocity minus a control cost. If *early_stop*,
        a catastrophe terminates the episode immediately.

        NOTE(review): self.steps and self.mode are only initialized in reset();
        calling step() before reset() would raise AttributeError — confirm
        callers always reset first.
        """
        obs = self.get_current_obs()
        # Clip to the valid torque range, then zero out the crippled actuator.
        action = np.clip(action, -1.0, 1.0)
        action = self.cripple_mask * action
        self.prev_qpos = self.get_body_com("torso").flat[:1]
        self.forward_dynamics(action)
        next_obs = self.get_current_obs()
        ctrl_cost = 1e-1 * 0.5 * np.sum(np.square(action))
        forward_reward = self.get_body_comvel("torso")[0]
        reward = forward_reward - ctrl_cost
        self.steps += 1
        done = False
        catastrophe = self.check_catastrophe()[0]
        if early_stop:
            done = catastrophe
        info = {
            "constraint": catastrophe,
            "reward": reward,
            "action": action,
            "state": obs,
            "next_state": next_obs,
        }
        if catastrophe and self.mode == 'test':
            done = True
        return next_obs, reward, done, info

    def reward(self, obs, action, next_obs):
        """Batched reward: forward displacement per dt minus control cost."""
        assert obs.ndim == 2
        assert obs.shape == next_obs.shape
        assert obs.shape[0] == action.shape[0]
        ctrl_cost = 1e-1 * 0.5 * np.sum(np.square(action), axis=1)
        forward_reward = (next_obs[:, 0] - obs[:, 0])/self.dt
        reward = forward_reward - ctrl_cost
        return reward

    def reset_mujoco(self, init_state=None):
        super(HalfCheetahEnv, self).reset_mujoco(init_state=init_state)

    def reset_task(self, value=None):
        """Cripple one actuator (zero its mask entry) and recolor its geom red.

        NOTE(review): `value = 5` below overrides the caller-supplied joint
        index — this looks like a debugging leftover; confirm before removing.
        """
        value = 5
        if self.first:
            # Skip the very first call (it happens during construction).
            self.first = False
            return
        if self.task == 'cripple':
            crippled_joint = value if value is not None else np.random.randint(1, self.action_dim)
            self.cripple_mask = np.ones(self.action_space.shape)
            self.cripple_mask[crippled_joint] = 0
            geom_idx = self.model.geom_names.index(self.model.joint_names[crippled_joint+3])
            geom_rgba = self._init_geom_rgba.copy()
            geom_rgba[geom_idx, :3] = np.array([1, 0, 0])
            self.model.geom_rgba = geom_rgba
        elif self.task is None:
            pass
        else:
            raise NotImplementedError
        self.model.forward()

    def reset(self, mode='train'):
        """Reset; cripple a random joint in train mode, a fixed one in test mode."""
        self.steps = 0
        self.prev_qpos = None
        self.mode = mode
        if mode == 'train':
            self.reset_task(value=np.random.randint(1, self.action_dim - 1))
        else:
            self.reset_task(value=self.action_dim - 1)
        return MujocoEnv.reset(self)

    def close(self):
        self.stop_viewer()
|
{"/supplement_plots.py": ["/plotting_utils.py"], "/analyze_runs_brijen.py": ["/plotting_utils.py"], "/gen_maze_demos.py": ["/env/maze.py", "/env/mazes.py"], "/analyze_runs_ashwin.py": ["/plotting_utils.py"], "/main.py": ["/sac.py", "/gen_pointbot0_demos.py", "/env/cartpole.py", "/env/half_cheetah_disabled.py", "/env/ant_disabled.py"], "/env/image_maze.py": ["/env/maze_const_images.py"], "/constraint.py": ["/utils.py"], "/analyze_runs_michael.py": ["/plotting_utils.py"], "/env/maze.py": ["/env/maze_const.py"], "/sac.py": ["/utils.py", "/constraint.py", "/run_multitask.py"], "/gen_pointbot_demos.py": ["/env/simplepointbot1.py"], "/env/mazes.py": ["/env/maze_const.py", "/env/maze.py"], "/gen_cartpole_demos.py": ["/env/cartpole.py"]}
|
29,214,245
|
scapegoatjpg/ScanNET
|
refs/heads/master
|
/network_scanner.py
|
import time
import nmap
import who_is_on_my_wifi
from who_is_on_my_wifi import who
#pip3 install who-is-on-my-wifi (required pip install wmi AND MANUALY INSTALL NMAP [nmap.org/download.html] do nmap-7.91-setup.exe)
# Snapshot of the devices seen on the network at import time:
# maps each entry's IP address (index 1) to its MAC address (index 3).
WHO = who()
ipmacs = {entry[1]: entry[3] for entry in WHO}
def netting():
    """Continuously print the IP->MAC table every 3 seconds.

    NOTE(review): `ipmacs` is built once at import time and never refreshed
    here, so this loop prints a static snapshot — confirm whether a rescan
    per iteration was intended.
    """
    while True:
        print(ipmacs)  # print the ip->mac table on the terminal
        time.sleep(3)  # delay 3 seconds between prints
|
{"/sniffing.py": ["/DBconn.py"], "/scannetgui.py": ["/sniffing.py", "/DBconn.py", "/isMalSite.py", "/network_scanner.py"]}
|
29,214,246
|
scapegoatjpg/ScanNET
|
refs/heads/master
|
/scannetgui.py
|
import tkinter as tk
import tkinter.messagebox
from tkinter import ttk
import network_scanner
import threading
LARGE_FONT = ("Verdana", 22)
fakeloginsfornow = {
"laura115":"pass5",
"yeet":"whataburger_10"
}
class Pages(tk.Tk):
    """Root window that stacks the application pages and raises one at a time."""

    def __init__(self):
        # Start the app on the login page.
        tk.Tk.__init__(self)
        self.winfo_toplevel().title("ScanNET")
        # Lock the window to a fixed 800x600 size.
        self.wm_minsize(800, 600)
        self.wm_maxsize(800, 600)
        container = tk.Frame(self)
        container.grid(row=0, column=0)
        # Instantiate every page once, all stacked in the same grid cell.
        self.frames = {}
        for page_cls in (Loginpage, GUI):
            page = page_cls(container, self)
            self.frames[page_cls] = page
            page.grid(row=0, column=0, sticky='NESW')
        self.show_frame(Loginpage)

    def show_frame(self, cont):
        """Raise the page registered under *cont* to the top of the stack."""
        self.frames[cont].tkraise()
class Loginpage(tk.Frame):
    """Login page: credential entry with login/register/reset/exit buttons."""
    def __init__(self, parent, controller):
        #=====button callbacks (closures over this frame's widgets)
        def printlogins():
            # Debug helper: dump the in-memory credential store.
            print (fakeloginsfornow)
        def validation(self, controller):
            # Verify the entered credentials; on success open the main GUI and
            # start the background network scan thread.
            u = (self.username.get())
            p = (self.password.get())
            if u == '' and p == '':
                tkinter.messagebox.showerror("Error", "Please enter your credentials.")
            else: #try/except so an unknown username (KeyError) is handled cleanly
                try:
                    # BUG FIX: was `p in fakeloginsfornow[u]` — a substring test
                    # that accepted any fragment of the stored password
                    # (e.g. "pass" matched "pass5"). Exact equality is required.
                    if p == fakeloginsfornow[u]:
                        controller.show_frame(GUI)
                        # NOTE(review): scanthread is a module-level Thread; a
                        # second successful login would call start() twice and raise.
                        scanthread.start()
                    else:
                        # Known user but wrong password: previously this case was
                        # silently ignored; give the same feedback as KeyError.
                        tkinter.messagebox.showerror("Error", "Wrong credentials, please try again.")
                        resetting()
                except KeyError:
                    tkinter.messagebox.showerror("Error", "Wrong credentials, please try again.")
                    resetting()
        def registeruser(self, user, passw):
            # Add a new username/password pair, rejecting blanks and duplicates.
            print("registering this new user...")
            u = (user.get())
            p = (passw.get())
            if u == '' or p == '':
                tkinter.messagebox.showerror("Error", "Please enter your credentials.")
                printlogins()
            else:
                if u in fakeloginsfornow:
                    tkinter.messagebox.showerror("Error", "Username already taken, please use a different username.")
                    user.set('')
                    printlogins()
                else:
                    fakeloginsfornow[u] = p
                    tkinter.messagebox.showinfo("Register", "Register successful.")
                    printlogins()
        def registering():
            # Pop up a registration window with its own entry fields.
            regwin = new_window(self)
            regwin.title("Register for ScanNET")
            regwin.geometry("600x400")
            newuser = tk.StringVar()
            newpass = tk.StringVar()
            tk.Label(regwin, text = "Please enter your information.", font=LARGE_FONT).pack()
            tk.Label(regwin, text="").pack()
            tk.Label(regwin, text="Username").pack()
            userentry = tk.Entry(regwin, textvariable=newuser)
            userentry.pack()
            tk.Label(regwin, text="Password").pack()
            passentry = tk.Entry(regwin, show='*',textvariable=newpass)
            passentry.pack()
            tk.Label(regwin, text="").pack()
            tk.Button(regwin, text="Register", width=10, height=1, command=lambda: registeruser(self, newuser, newpass)).pack()
        def resetting():
            # Clear both entry fields and refocus the username box.
            self.username.set('')
            self.password.set('')
            usertext.focus()
        def exiting(self):
            # Confirm before quitting the application.
            self.exiting = tkinter.messagebox.askyesno("Exit?", "Are you sure you want to exit?")
            if self.exiting > 0:
                self.quit()
            else:
                usertext.focus()
        def new_window(self):
            return tk.Toplevel(self.master)
        tk.Frame.__init__(self,parent)
        tk.Frame.configure(self, bg='darkseagreen3')
        #=====username and password
        self.username = tk.StringVar()
        self.password = tk.StringVar()
        #=====login label
        loginlabel = tk.Label(self, text="ScanNET Login", bg='darkseagreen3', font=LARGE_FONT)
        loginlabel.grid(row=0, column=0, columnspan=2, pady=40)
        #=====frames
        loginframe1 = tk.LabelFrame(self, width=800, height=600, bd=20, bg='darkseagreen3')
        loginframe1.grid(row=1, column=0)
        loginframe2 = tk.LabelFrame(self, width=600, height=400, bd=20, bg='darkseagreen3')
        loginframe2.grid(row=2, column=0)
        #=====Label and Entry
        userlabel = tk.Label(loginframe1, text="Username", font=(20), bg='darkseagreen3')
        userlabel.grid(row=0, column=0)
        usertext = tk.Entry(loginframe1, font=(20), textvariable = self.username)
        usertext.grid(row=0, column=1)
        passlabel = tk.Label(loginframe1, text="Password", font=(20), bg='darkseagreen3')
        passlabel.grid(row=1, column=0)
        passtext = tk.Entry(loginframe1, font=(20), show='*',textvariable = self.password)
        passtext.grid(row=1, column=1)
        #=====Buttons
        loginbutton = tk.Button(loginframe2, text="Login", width=17, font=(20), bg='darkseagreen3', command=lambda: validation(self, controller))
        loginbutton.grid(row=3, column=0, pady=20, padx=8)
        registerbutton = tk.Button(loginframe2, text="Register", width=17, font=(20), bg='darkseagreen3', command=lambda: registering())
        registerbutton.grid(row=3, column=1, pady=20, padx=8)
        resetbutton = tk.Button(loginframe2, text="Reset", width=17, font=(20), bg='darkseagreen3', command=lambda: resetting())
        resetbutton.grid(row=3, column=2, pady=20, padx=8)
        closebutton = tk.Button(loginframe2, text="Exit", width=17, font=(20), bg='darkseagreen3', command=lambda: exiting(self))
        closebutton.grid(row=3, column=3, pady=20, padx=8)
class GUI(tk.Frame):
    """Main application frame: a notebook holding Devices and Reports tabs."""

    def __init__(self, parent, controller):
        # All widths/heights are provisional and likely to change.
        tk.Frame.__init__(self, parent)
        notebook = ttk.Notebook(self)
        notebook.grid()
        devices_tab = tk.Frame(notebook, width=800, height=600)
        reports_tab = tk.Frame(notebook, width=800, height=600)
        for tab in (devices_tab, reports_tab):
            tab.pack(fill='both', expand=1)
        notebook.add(devices_tab, text="Devices")
        notebook.add(reports_tab, text="Reports")
        # Devices tab: found-devices list on the left, activity feed on the right.
        found_frame = tk.LabelFrame(devices_tab, text="Devices found: ", padx=5, pady=5, width=500, height=600, bg='darkseagreen3')
        found_frame.grid(row=0, column=0)
        activity_frame = tk.LabelFrame(devices_tab, text="Activity Feed: ", padx=5, pady=5, width=300, height=600, bg='darkseagreen3')
        activity_frame.grid(row=0, column=1)
        # Reports tab: summaries on the left, charts on the right.
        summary_frame = tk.LabelFrame(reports_tab, text="Report Summaries: ", padx=5, pady=5, width=400, height=600, bg='darkseagreen3')
        summary_frame.grid(row=0, column=0)
        charts_frame = tk.LabelFrame(reports_tab, text="Charts and Diagrams: ", padx=5, pady=5, width=400, height=600, bg='darkseagreen3')
        charts_frame.grid(row=0, column=1)
# Two threads so the scanner and the GUI can run concurrently.
def backthread():
    # Background worker: loops forever printing the device table.
    network_scanner.netting()
def forethread():
    # Foreground worker: builds the Tk root window and enters its main loop.
    app = Pages()
    app.mainloop()
# scanthread is started later, from Loginpage's validation() on successful login.
scanthread = threading.Thread(target=backthread)
guithread = threading.Thread(target=forethread)
# NOTE(review): started at import time with no `if __name__ == "__main__"` guard,
# so merely importing this module launches the GUI — confirm this is intended.
guithread.start()
|
{"/sniffing.py": ["/DBconn.py"], "/scannetgui.py": ["/sniffing.py", "/DBconn.py", "/isMalSite.py", "/network_scanner.py"]}
|
29,239,099
|
blink07/shop_admin
|
refs/heads/main
|
/apps/goods/serializers.py
|
from rest_framework.serializers import ModelSerializer
from goods.models import GoodsCategory
class GoodsCategorySerializer2(ModelSerializer):
    """Deepest (third-level) goods category; serializes all model fields."""
    class Meta:
        model = GoodsCategory
        fields = "__all__"
class GoodsCategorySerializer1(ModelSerializer):
    """Second-level goods category; nests its children (third level)."""
    children = GoodsCategorySerializer2(many=True)
    class Meta:
        model = GoodsCategory
        fields = "__all__"
class GoodsCategorySerializer(ModelSerializer):
    """Top-level goods category; nests two levels of children below it."""
    children = GoodsCategorySerializer1(many=True)
    class Meta:
        model = GoodsCategory
        fields = "__all__"
|
{"/adminDemo/settings/dev_settings.py": ["/adminDemo/settings/com_settings.py"], "/apps/user_manage/serializers.py": ["/adminDemo/settings/__init__.py"], "/utils/response.py": ["/utils/error.py"], "/apps/goods/urls.py": ["/apps/goods/views.py"], "/utils/exception_handlers.py": ["/utils/error.py", "/utils/response.py"], "/apps/menu/urls.py": ["/apps/menu/views.py"], "/apps/user_manage/filters.py": ["/apps/user_manage/models.py"], "/utils/mixins.py": ["/utils/base.py"], "/apps/user_manage/views.py": ["/utils/error.py", "/apps/user_manage/models.py", "/apps/user_manage/utils.py", "/utils/mixins.py"], "/apps/menu/views.py": ["/utils/mixins.py"], "/adminDemo/settings/__init__.py": ["/adminDemo/settings/com_settings.py", "/adminDemo/settings/dev_settings.py"], "/utils/jwt_response_payload_handler.py": ["/utils/success.py"], "/apps/common/pagination.py": ["/utils/mixins.py"], "/apps/goods/views.py": ["/utils/mixins.py"]}
|
29,239,100
|
blink07/shop_admin
|
refs/heads/main
|
/apps/user_manage/serializers.py
|
import re
from rest_framework import serializers
from django.contrib.auth import get_user_model
from rest_framework.validators import UniqueValidator
from adminDemo.settings import REGEX_MOBILE
from user_manage.models import SysUser, Role, PermissionRole
User = get_user_model()
class PermissionRoleSerializer(serializers.ModelSerializer):
    """Serializes every field of the PermissionRole join model."""
    class Meta:
        model = PermissionRole
        fields = "__all__"
class RoleSerializer(serializers.ModelSerializer):
    """Serializes every field of Role."""
    # children = PermissionRoleSerializer
    class Meta:
        model = Role
        fields = "__all__"
class UserSerializer(serializers.ModelSerializer):
    """Read-only view of a user's profile, with the role serialized inline.

    FIX: the class docstring was a stray string literal placed after the
    `role` field (so it was never the class docstring); moved to the proper
    first-statement position.
    """
    role = RoleSerializer()

    class Meta:
        model = SysUser
        fields = ('id','username', 'email', "mobile", "role", "is_active", "state")
class UserRegSerializer(serializers.ModelSerializer):
    """
    User registration serializer.

    Fields redeclared on the serializer must also be listed in Meta.fields.
    Password hashing is handled elsewhere via a post_save signal, so create()
    is not overridden here.
    """
    username = serializers.CharField(label="用户名", required=True, help_text="用户名", allow_blank=False,
                                     validators=[UniqueValidator(queryset=User.objects.all(), message='用户已存在')])
    # Render as a password input so the value is never echoed in plain text.
    # BUG FIX: `label` was the boolean True; DRF expects the field's display
    # name string here (kept in Chinese for consistency with `username`).
    password = serializers.CharField(style={'input_type': 'password'}, label="密码", write_only=True)

    def validate(self, attrs):
        # When the username matches the mobile-number pattern, mirror it into
        # the mobile field so the account can also be looked up by phone number.
        if re.match(REGEX_MOBILE, attrs["username"]):
            attrs["mobile"] = attrs["username"]
        return attrs

    class Meta:
        model = User
        fields = ('username', 'mobile', 'email', 'password')
|
{"/adminDemo/settings/dev_settings.py": ["/adminDemo/settings/com_settings.py"], "/apps/user_manage/serializers.py": ["/adminDemo/settings/__init__.py"], "/utils/response.py": ["/utils/error.py"], "/apps/goods/urls.py": ["/apps/goods/views.py"], "/utils/exception_handlers.py": ["/utils/error.py", "/utils/response.py"], "/apps/menu/urls.py": ["/apps/menu/views.py"], "/apps/user_manage/filters.py": ["/apps/user_manage/models.py"], "/utils/mixins.py": ["/utils/base.py"], "/apps/user_manage/views.py": ["/utils/error.py", "/apps/user_manage/models.py", "/apps/user_manage/utils.py", "/utils/mixins.py"], "/apps/menu/views.py": ["/utils/mixins.py"], "/adminDemo/settings/__init__.py": ["/adminDemo/settings/com_settings.py", "/adminDemo/settings/dev_settings.py"], "/utils/jwt_response_payload_handler.py": ["/utils/success.py"], "/apps/common/pagination.py": ["/utils/mixins.py"], "/apps/goods/views.py": ["/utils/mixins.py"]}
|
29,239,101
|
blink07/shop_admin
|
refs/heads/main
|
/apps/dashboard/consumers.py
|
import json
import time
# from asgiref.sync import async_to_sync
from asgiref.sync import async_to_sync
from channels.generic.websocket import AsyncWebsocketConsumer
from channels.layers import get_channel_layer
class ChatConsumer(AsyncWebsocketConsumer):
    """Websocket consumer that fans messages out to everyone in a room group."""
    async def connect(self):
        # Room name comes from the URL route; the group is namespaced 'shop_*'.
        self.room_name = self.scope['url_route']['kwargs']['room_name']
        self.room_group_name = 'shop_%s' % self.room_name
        # Join room group
        await self.channel_layer.group_add(
            self.room_group_name,
            self.channel_name
        )
        await self.accept()
    async def disconnect(self, close_code):
        # Leave room group
        await self.channel_layer.group_discard(
            self.room_group_name,
            self.channel_name
        )
    # Receive message from WebSocket
    async def receive(self, text_data):
        text_data_json = json.loads(text_data)
        message = text_data_json['message']
        print("aaaa:", self.room_group_name)
        print(self.groups)
        print("message:",message)
        # Send message to room group
        await self.channel_layer.group_send(
            self.room_group_name,
            {
                'type': 'shop_message', # must match the handler method name below
                'message': message
            }
        )
    # Receive message from room group
    async def shop_message(self, event):
        message = event['message']
        # Send message to WebSocket
        await self.send(text_data=json.dumps({
            'message': message
        }))
def send_group_msg(room_name, message):
    # Push a message into a Channels group from outside any consumer.
    # Flow: Django view -> business logic / DB write -> group_send -> websocket push.
    """
    Send *message* to every consumer subscribed to room *room_name*.

    Examples:
        from dashboard import consumers
        consumers.send_group_msg('ITNest', {'content': '这台机器硬盘故障了', 'level': 1})
        consumers.send_group_msg('ITNest', {'content': '正在安装系统', 'level': 2})
        consumers.send_group_msg('AAA', {'message': '登录成功', 'level': 2})
    :param room_name: room suffix; the Channels group is 'shop_<room_name>'
    :param message: payload delivered to ChatConsumer.shop_message
    :return: None
    """
    channel_layer = get_channel_layer()
    async_to_sync(channel_layer.group_send)(
        'shop_{}'.format(room_name), # build the Channels group name
        {
            "type": "shop_message",
            "message": message,
        }
    )
|
{"/adminDemo/settings/dev_settings.py": ["/adminDemo/settings/com_settings.py"], "/apps/user_manage/serializers.py": ["/adminDemo/settings/__init__.py"], "/utils/response.py": ["/utils/error.py"], "/apps/goods/urls.py": ["/apps/goods/views.py"], "/utils/exception_handlers.py": ["/utils/error.py", "/utils/response.py"], "/apps/menu/urls.py": ["/apps/menu/views.py"], "/apps/user_manage/filters.py": ["/apps/user_manage/models.py"], "/utils/mixins.py": ["/utils/base.py"], "/apps/user_manage/views.py": ["/utils/error.py", "/apps/user_manage/models.py", "/apps/user_manage/utils.py", "/utils/mixins.py"], "/apps/menu/views.py": ["/utils/mixins.py"], "/adminDemo/settings/__init__.py": ["/adminDemo/settings/com_settings.py", "/adminDemo/settings/dev_settings.py"], "/utils/jwt_response_payload_handler.py": ["/utils/success.py"], "/apps/common/pagination.py": ["/utils/mixins.py"], "/apps/goods/views.py": ["/utils/mixins.py"]}
|
29,239,102
|
blink07/shop_admin
|
refs/heads/main
|
/utils/response.py
|
from rest_framework.response import Response
from utils.error import SUCCESS
def response(data=None, error=SUCCESS, **kwargs):
    """Wrap *data* in the project's standard envelope (payload/message/code).

    NOTE(review): **kwargs is accepted but currently ignored — confirm intent.
    """
    envelope = {
        "payload": data,
        "message": error.message,
        "code": error.status_code,
    }
    return Response(data=envelope)
|
{"/adminDemo/settings/dev_settings.py": ["/adminDemo/settings/com_settings.py"], "/apps/user_manage/serializers.py": ["/adminDemo/settings/__init__.py"], "/utils/response.py": ["/utils/error.py"], "/apps/goods/urls.py": ["/apps/goods/views.py"], "/utils/exception_handlers.py": ["/utils/error.py", "/utils/response.py"], "/apps/menu/urls.py": ["/apps/menu/views.py"], "/apps/user_manage/filters.py": ["/apps/user_manage/models.py"], "/utils/mixins.py": ["/utils/base.py"], "/apps/user_manage/views.py": ["/utils/error.py", "/apps/user_manage/models.py", "/apps/user_manage/utils.py", "/utils/mixins.py"], "/apps/menu/views.py": ["/utils/mixins.py"], "/adminDemo/settings/__init__.py": ["/adminDemo/settings/com_settings.py", "/adminDemo/settings/dev_settings.py"], "/utils/jwt_response_payload_handler.py": ["/utils/success.py"], "/apps/common/pagination.py": ["/utils/mixins.py"], "/apps/goods/views.py": ["/utils/mixins.py"]}
|
29,239,103
|
blink07/shop_admin
|
refs/heads/main
|
/utils/success.py
|
from utils import base
class LOGIN_SUCCESS(base.OK200):
    """HTTP-200 status object for a successful login."""
    # User-facing message (Chinese: "login successful"); runtime text, unchanged.
    message = u'登录成功~~'
|
{"/adminDemo/settings/dev_settings.py": ["/adminDemo/settings/com_settings.py"], "/apps/user_manage/serializers.py": ["/adminDemo/settings/__init__.py"], "/utils/response.py": ["/utils/error.py"], "/apps/goods/urls.py": ["/apps/goods/views.py"], "/utils/exception_handlers.py": ["/utils/error.py", "/utils/response.py"], "/apps/menu/urls.py": ["/apps/menu/views.py"], "/apps/user_manage/filters.py": ["/apps/user_manage/models.py"], "/utils/mixins.py": ["/utils/base.py"], "/apps/user_manage/views.py": ["/utils/error.py", "/apps/user_manage/models.py", "/apps/user_manage/utils.py", "/utils/mixins.py"], "/apps/menu/views.py": ["/utils/mixins.py"], "/adminDemo/settings/__init__.py": ["/adminDemo/settings/com_settings.py", "/adminDemo/settings/dev_settings.py"], "/utils/jwt_response_payload_handler.py": ["/utils/success.py"], "/apps/common/pagination.py": ["/utils/mixins.py"], "/apps/goods/views.py": ["/utils/mixins.py"]}
|
29,239,104
|
blink07/shop_admin
|
refs/heads/main
|
/apps/user_manage/models.py
|
import datetime
from django.db import models
from django.contrib.auth.models import AbstractUser
# Create your models here.
from menu.models import Permission
class Role(models.Model):
    """Role table: a named user role with display ordering and status."""
    role_no = models.IntegerField("角色编号",blank=False, null=False, default=3)
    role_name = models.CharField("角色名称",max_length=20, blank=False, null=False, default="访问角色")
    role_descripte = models.CharField('角色描述', max_length=100, blank=False, null=False, default='游客')
    status = models.IntegerField("状态", blank=False, null=False, default=1)
    show_no = models.IntegerField("角色显示顺序", blank=False, null=False, default=100)
    # Callable default so the timestamp is taken at save time, not import time.
    create_time = models.DateField("创建时间", blank=False, null=False, default=datetime.datetime.now)
    class Meta:
        db_table = "user_role"
        verbose_name = "用户角色"
        verbose_name_plural=verbose_name
    # def __str__(self):
    #     return self.role_name
class PermissionRole(models.Model):
    """Permission-role table: assigns a (possibly nested) permission to a role."""
    per_name = models.CharField('权限名称', max_length=255, blank=False, null=False)
    path = models.CharField('权限路径', max_length=255, blank=False, null=False)
    level = models.IntegerField('权限级别', blank=False, null=False, default=1)
    role = models.ForeignKey(Role, on_delete=models.CASCADE, related_name='permissions',blank=False, null=False, verbose_name="权限必须分配给角色")
    # Self-reference allows a permission entry to nest under a parent entry.
    children = models.ForeignKey('self', on_delete=models.CASCADE,related_name='permission_children',blank=True, null=True)
    permission = models.ForeignKey(Permission, on_delete=models.CASCADE, related_name='per_permission', blank=False, null=False, default=1)
    class Meta:
        db_table='permission_role'
        verbose_name = "权限角色列表"
        verbose_name_plural = verbose_name
    # def __str__(self):
    #     return self.per_name
class Department(models.Model):
    """Department table with an optional self-referencing parent department."""
    dept_no = models.IntegerField("部门编号",blank=False, null=False)
    dept_name = models.CharField("部门名称", max_length=100, blank=False, null=False, default='')
    charge_person = models.CharField("负责人", max_length=10, blank=False, null=False, default= '')
    email = models.EmailField("邮箱地址", blank=True)
    show_no = models.IntegerField("显示排序",blank=True, null=False,default=10)
    tel = models.CharField("联系电话", max_length=11, blank=True, null=False, default='')
    # NOTE(review): blank=True with neither null=True nor a default on an
    # IntegerField will fail when the form value is empty — confirm intended.
    status = models.IntegerField("部门状态", blank=True)
    parent_comment = models.ForeignKey('self', on_delete=models.CASCADE, verbose_name="上级部门", blank=True, null=True)
    class Meta:
        db_table = 'department'
        verbose_name = '部门信息'
        verbose_name_plural = verbose_name
    def __str__(self):
        return self.dept_name
class SysUser(AbstractUser):
    """System user: extends Django's AbstractUser with mobile/gender/role/dept."""
    GENDER_CHOICE = (
        ("male", u"男"),
        ("female", u"女")
    )
    mobile = models.CharField("手机号码", max_length=11, blank=False, null=False)
    gender = models.CharField("性别", max_length=6, choices=GENDER_CHOICE, default="male")
    role = models.ForeignKey(Role, on_delete=models.CASCADE, blank=False, null=False, default=1)
    avatar = models.CharField("头像", max_length=255, blank=True, null=True)
    dept = models.ForeignKey(Department, on_delete=models.CASCADE, blank=True, null=True)
    state = models.BooleanField(default=True, blank=True, null=True)
    # address = models.CharField("地址", max_length=255, blank=True, null=True)  # not yet synced to the database
    class Meta:
        db_table = 'sys_user'
        verbose_name = '系统用户'
        verbose_name_plural = verbose_name
    def __str__(self):
        return self.username
|
{"/adminDemo/settings/dev_settings.py": ["/adminDemo/settings/com_settings.py"], "/apps/user_manage/serializers.py": ["/adminDemo/settings/__init__.py"], "/utils/response.py": ["/utils/error.py"], "/apps/goods/urls.py": ["/apps/goods/views.py"], "/utils/exception_handlers.py": ["/utils/error.py", "/utils/response.py"], "/apps/menu/urls.py": ["/apps/menu/views.py"], "/apps/user_manage/filters.py": ["/apps/user_manage/models.py"], "/utils/mixins.py": ["/utils/base.py"], "/apps/user_manage/views.py": ["/utils/error.py", "/apps/user_manage/models.py", "/apps/user_manage/utils.py", "/utils/mixins.py"], "/apps/menu/views.py": ["/utils/mixins.py"], "/adminDemo/settings/__init__.py": ["/adminDemo/settings/com_settings.py", "/adminDemo/settings/dev_settings.py"], "/utils/jwt_response_payload_handler.py": ["/utils/success.py"], "/apps/common/pagination.py": ["/utils/mixins.py"], "/apps/goods/views.py": ["/utils/mixins.py"]}
|
29,239,105
|
blink07/shop_admin
|
refs/heads/main
|
/apps/goods/urls.py
|
from django.conf.urls import url
from .views import *
urlpatterns = [
    # Category listing endpoint (GET -> CategoryList.list).
    # NOTE(review): the pattern has no ^/$ anchors, so any path containing
    # "categoryList" matches — confirm whether r'^categoryList$' was intended.
    url(r'categoryList', CategoryList.as_view({'get':'list'}))
]
|
{"/adminDemo/settings/dev_settings.py": ["/adminDemo/settings/com_settings.py"], "/apps/user_manage/serializers.py": ["/adminDemo/settings/__init__.py"], "/utils/response.py": ["/utils/error.py"], "/apps/goods/urls.py": ["/apps/goods/views.py"], "/utils/exception_handlers.py": ["/utils/error.py", "/utils/response.py"], "/apps/menu/urls.py": ["/apps/menu/views.py"], "/apps/user_manage/filters.py": ["/apps/user_manage/models.py"], "/utils/mixins.py": ["/utils/base.py"], "/apps/user_manage/views.py": ["/utils/error.py", "/apps/user_manage/models.py", "/apps/user_manage/utils.py", "/utils/mixins.py"], "/apps/menu/views.py": ["/utils/mixins.py"], "/adminDemo/settings/__init__.py": ["/adminDemo/settings/com_settings.py", "/adminDemo/settings/dev_settings.py"], "/utils/jwt_response_payload_handler.py": ["/utils/success.py"], "/apps/common/pagination.py": ["/utils/mixins.py"], "/apps/goods/views.py": ["/utils/mixins.py"]}
|
29,239,106
|
blink07/shop_admin
|
refs/heads/main
|
/apps/user_manage/urls.py
|
from django.conf.urls import url
from . import views
urlpatterns = [
    # Sentry integration smoke-test endpoint.
    url(r'^testSentry$', views.TestSentry.as_view(),name="sentry测试"),
    # User registration / detail (retrieve, update, delete) / state toggle.
    url(r'^register/$', views.UserRegisterView.as_view({'post': 'create'}), name="自定义用户注册"),
    url(r'^userinfo/(?P<pk>\d+)$', views.UserRegisterView.as_view({'get': 'retrieve', 'put': 'update', 'delete': 'destroy'}), name="用户详细信息"),
    url(r'^statechange/(?P<pk>\d+)/$', views.ChangeUserState.as_view(), name="改变用户状态"),
    # Role module: list roles/permissions, detach a permission, attach permissions.
    url(r'^roleperlist/$', views.RolePermissionListView.as_view(), name="获取角色列表"),
    url(r'^roles/(?P<pk>\d+)/rights/(?P<pk1>\d+)$', views.RolePermissionListView.as_view(), name="删除角色的权限"),
    url(r'^(?P<pk>\d+)/rights/$', views.PermissionDistribution.as_view(), name="给角色添加权限"),
    # NOTE(review): the two patterns below lack the ^ anchor, unlike the rest —
    # confirm whether that is intentional.
    url(r'roleList/$', views.RoleView.as_view({'get':'list'}), name="角色列表" ),
    url(r'changeRole/(?P<pk>\d+)/role/(?P<pk1>\d+)/$', views.ChangeUserRole.as_view(), name="更新用户角色")
]
|
{"/adminDemo/settings/dev_settings.py": ["/adminDemo/settings/com_settings.py"], "/apps/user_manage/serializers.py": ["/adminDemo/settings/__init__.py"], "/utils/response.py": ["/utils/error.py"], "/apps/goods/urls.py": ["/apps/goods/views.py"], "/utils/exception_handlers.py": ["/utils/error.py", "/utils/response.py"], "/apps/menu/urls.py": ["/apps/menu/views.py"], "/apps/user_manage/filters.py": ["/apps/user_manage/models.py"], "/utils/mixins.py": ["/utils/base.py"], "/apps/user_manage/views.py": ["/utils/error.py", "/apps/user_manage/models.py", "/apps/user_manage/utils.py", "/utils/mixins.py"], "/apps/menu/views.py": ["/utils/mixins.py"], "/adminDemo/settings/__init__.py": ["/adminDemo/settings/com_settings.py", "/adminDemo/settings/dev_settings.py"], "/utils/jwt_response_payload_handler.py": ["/utils/success.py"], "/apps/common/pagination.py": ["/utils/mixins.py"], "/apps/goods/views.py": ["/utils/mixins.py"]}
|
29,239,107
|
blink07/shop_admin
|
refs/heads/main
|
/apps/user_manage/migrations/0001_initial.py
|
# Generated by Django 3.0.5 on 2020-07-04 09:25
import datetime
import django.contrib.auth.models
import django.contrib.auth.validators
from django.db import migrations, models
import django.db.models.deletion
import django.utils.timezone
class Migration(migrations.Migration):
    # Auto-generated by Django (see header); do not hand-edit — create a new
    # migration for schema changes instead.
    #
    # Initial schema for the user_manage app: Role, PermissionRole,
    # Permission, Department and the custom SysUser (extends Django's
    # auth user with mobile/gender/avatar/state/dept/role).

    initial = True

    dependencies = [
        ('auth', '0011_update_proxy_permissions'),
    ]

    operations = [
        # Role: maps to table `user_role`; default role is "访问角色" (visitor).
        migrations.CreateModel(
            name='Role',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('role_no', models.IntegerField(default=3, verbose_name='角色编号')),
                ('role_name', models.CharField(default='访问角色', max_length=20, verbose_name='角色名称')),
                ('role_descripte', models.CharField(default='游客', max_length=100, verbose_name='角色描述')),
                ('status', models.IntegerField(default=1, verbose_name='状态')),
                ('show_no', models.IntegerField(default=100, verbose_name='角色显示顺序')),
                # NOTE(review): datetime.datetime.now (naive, server-local time) —
                # confirm whether USE_TZ is set; django.utils.timezone.now is the
                # usual choice.
                ('create_time', models.DateField(default=datetime.datetime.now, verbose_name='创建时间')),
            ],
            options={
                'verbose_name': '用户角色',
                'verbose_name_plural': '用户角色',
                'db_table': 'user_role',
            },
        ),
        # PermissionRole: a role's assigned permission nodes; self-FK `children`
        # links a node to its parent row, `role` is required.
        migrations.CreateModel(
            name='PermissionRole',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('per_name', models.CharField(max_length=255, verbose_name='权限名称')),
                ('path', models.CharField(blank=True, max_length=255, verbose_name='权限路径')),
                ('level', models.IntegerField(default=1, verbose_name='权限级别')),
                ('children', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='user_manage.PermissionRole')),
                ('role', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='user_manage.Role', verbose_name='权限必须分配给角色')),
            ],
            options={
                'verbose_name': '权限角色列表',
                'verbose_name_plural': '权限角色列表',
                'db_table': 'permission_role',
            },
        ),
        # Permission: the master list of permission nodes (3-level tree via
        # the self-FK `children`).
        migrations.CreateModel(
            name='Permission',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('per_name', models.CharField(max_length=255, verbose_name='权限名称')),
                ('path', models.CharField(blank=True, max_length=255, null=True, verbose_name='权限路径')),
                ('level', models.IntegerField(default=1, verbose_name='权限级别')),
                ('children', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='user_manage.Permission')),
            ],
            options={
                'verbose_name': '权限基础信息列表',
                'verbose_name_plural': '权限基础信息列表',
                'db_table': 'permission',
            },
        ),
        # Department: org unit; `parent_comment` is the self-FK to the parent
        # department (NOTE(review): not null=True despite blank=True — confirm).
        migrations.CreateModel(
            name='Department',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('dept_no', models.IntegerField(verbose_name='部门编号')),
                ('dept_name', models.CharField(default='', max_length=100, verbose_name='部门名称')),
                ('charge_person', models.CharField(default='', max_length=10, verbose_name='负责人')),
                ('email', models.EmailField(blank=True, max_length=254, verbose_name='邮箱地址')),
                ('show_no', models.IntegerField(blank=True, default=10, verbose_name='显示排序')),
                ('tel', models.CharField(blank=True, default='', max_length=11, verbose_name='联系电话')),
                ('status', models.IntegerField(blank=True, verbose_name='部门状态')),
                ('parent_comment', models.ForeignKey(blank=True, on_delete=django.db.models.deletion.CASCADE, to='user_manage.Department', verbose_name='上级部门')),
            ],
            options={
                'verbose_name': '部门信息',
                'verbose_name_plural': '部门信息',
                'db_table': 'department',
            },
        ),
        # SysUser: custom user model (table `sys_user`); standard auth fields
        # plus mobile/gender/avatar/state and FKs to Department and Role
        # (role defaults to pk=1).
        migrations.CreateModel(
            name='SysUser',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('password', models.CharField(max_length=128, verbose_name='password')),
                ('last_login', models.DateTimeField(blank=True, null=True, verbose_name='last login')),
                ('is_superuser', models.BooleanField(default=False, help_text='Designates that this user has all permissions without explicitly assigning them.', verbose_name='superuser status')),
                ('username', models.CharField(error_messages={'unique': 'A user with that username already exists.'}, help_text='Required. 150 characters or fewer. Letters, digits and @/./+/-/_ only.', max_length=150, unique=True, validators=[django.contrib.auth.validators.UnicodeUsernameValidator()], verbose_name='username')),
                ('first_name', models.CharField(blank=True, max_length=30, verbose_name='first name')),
                ('last_name', models.CharField(blank=True, max_length=150, verbose_name='last name')),
                ('email', models.EmailField(blank=True, max_length=254, verbose_name='email address')),
                ('is_staff', models.BooleanField(default=False, help_text='Designates whether the user can log into this admin site.', verbose_name='staff status')),
                ('is_active', models.BooleanField(default=True, help_text='Designates whether this user should be treated as active. Unselect this instead of deleting accounts.', verbose_name='active')),
                ('date_joined', models.DateTimeField(default=django.utils.timezone.now, verbose_name='date joined')),
                ('mobile', models.CharField(max_length=11, verbose_name='手机号码')),
                ('gender', models.CharField(choices=[('male', '男'), ('female', '女')], default='male', max_length=6, verbose_name='性别')),
                ('avatar', models.CharField(blank=True, max_length=255, null=True, verbose_name='头像')),
                ('state', models.BooleanField(blank=True, default=True, null=True)),
                ('dept', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='user_manage.Department')),
                ('groups', models.ManyToManyField(blank=True, help_text='The groups this user belongs to. A user will get all permissions granted to each of their groups.', related_name='user_set', related_query_name='user', to='auth.Group', verbose_name='groups')),
                ('role', models.ForeignKey(default=1, on_delete=django.db.models.deletion.CASCADE, to='user_manage.Role')),
                ('user_permissions', models.ManyToManyField(blank=True, help_text='Specific permissions for this user.', related_name='user_set', related_query_name='user', to='auth.Permission', verbose_name='user permissions')),
            ],
            options={
                'verbose_name': '系统用户',
                'verbose_name_plural': '系统用户',
                'db_table': 'sys_user',
            },
            managers=[
                ('objects', django.contrib.auth.models.UserManager()),
            ],
        ),
    ]
|
{"/adminDemo/settings/dev_settings.py": ["/adminDemo/settings/com_settings.py"], "/apps/user_manage/serializers.py": ["/adminDemo/settings/__init__.py"], "/utils/response.py": ["/utils/error.py"], "/apps/goods/urls.py": ["/apps/goods/views.py"], "/utils/exception_handlers.py": ["/utils/error.py", "/utils/response.py"], "/apps/menu/urls.py": ["/apps/menu/views.py"], "/apps/user_manage/filters.py": ["/apps/user_manage/models.py"], "/utils/mixins.py": ["/utils/base.py"], "/apps/user_manage/views.py": ["/utils/error.py", "/apps/user_manage/models.py", "/apps/user_manage/utils.py", "/utils/mixins.py"], "/apps/menu/views.py": ["/utils/mixins.py"], "/adminDemo/settings/__init__.py": ["/adminDemo/settings/com_settings.py", "/adminDemo/settings/dev_settings.py"], "/utils/jwt_response_payload_handler.py": ["/utils/success.py"], "/apps/common/pagination.py": ["/utils/mixins.py"], "/apps/goods/views.py": ["/utils/mixins.py"]}
|
29,239,108
|
blink07/shop_admin
|
refs/heads/main
|
/utils/base.py
|
# -*- coding: utf-8 -*-
from abc import ABCMeta
class BaseReturn(Exception):
    """Root of the project's status-code exception hierarchy.

    Subclasses carry only a ``status_code`` class attribute; callers raise
    them (or read ``status_code``) to signal HTTP-style results.
    """
    # NOTE(review): ``__metaclass__`` is the Python 2 metaclass hook and has
    # no effect on Python 3 — the ABCMeta declaration is inert here.
    __metaclass__ = ABCMeta
# 1XX Informational
class Continue100(BaseReturn):
    status_code = 100
class SwitchingProtocols101(BaseReturn):
    status_code = 101
class Processing102(BaseReturn):
    status_code = 102
# 2XX Success
class OK200(BaseReturn):
    status_code = 200
class Created201(BaseReturn):
    status_code = 201
# 4XX Client Error
class BadRequest400(BaseReturn):
    status_code = 400
class Unauthorized401(BaseReturn):
    status_code = 401
class PaymentRequired402(BaseReturn):
    status_code = 402
class Forbidden403(BaseReturn):
    status_code = 403
class NotFound404(BaseReturn):
    status_code = 404
class MethodNotAllowed405(BaseReturn):
    status_code = 405
class PermissionDenied406(BaseReturn):
    # NOTE(review): HTTP 406 is "Not Acceptable"; this project repurposes it
    # as permission denied — keep consistent with the frontend's expectations.
    status_code = 406
class RequestTimeout408(BaseReturn):
    status_code = 408
# 5XX Server Error
class InternalServerError500(BaseReturn):
    status_code = 500
class CodeError555(BaseReturn):
    # Non-standard project code for generic code errors.
    status_code = 555
class NotImplemented501(BaseReturn):
    status_code = 501
class BadGateway502(BaseReturn):
    status_code = 502
class ServiceUnavailable503(BaseReturn):
    status_code = 503
class GatewayTimeout504(BaseReturn):
    status_code = 504
class HTTPVersionNotSupported505(BaseReturn):
    status_code = 505
class VariantAlsoNegotiates506(BaseReturn):
    status_code = 506
class InsufficientStorage507(BaseReturn):
    status_code = 507
class LoopDetected508(BaseReturn):
    status_code = 508
class NotExtended510(BaseReturn):
    status_code = 510
class NetworkAuthenticationRequired511(BaseReturn):
    status_code = 511
class NetworkConnectTimeoutError599(BaseReturn):
    status_code = 599
class SocketProcessError520(BaseReturn):
    status_code = 520
class ValidationError512(BaseReturn):
    # NOTE(review): class name says 512 but the code is 521 — mismatch;
    # confirm which one the frontend keys on before renaming either.
    status_code = 521
class UserOrPasswordError(BaseReturn):
    # Non-standard project code for failed login credentials.
    status_code=600
|
{"/adminDemo/settings/dev_settings.py": ["/adminDemo/settings/com_settings.py"], "/apps/user_manage/serializers.py": ["/adminDemo/settings/__init__.py"], "/utils/response.py": ["/utils/error.py"], "/apps/goods/urls.py": ["/apps/goods/views.py"], "/utils/exception_handlers.py": ["/utils/error.py", "/utils/response.py"], "/apps/menu/urls.py": ["/apps/menu/views.py"], "/apps/user_manage/filters.py": ["/apps/user_manage/models.py"], "/utils/mixins.py": ["/utils/base.py"], "/apps/user_manage/views.py": ["/utils/error.py", "/apps/user_manage/models.py", "/apps/user_manage/utils.py", "/utils/mixins.py"], "/apps/menu/views.py": ["/utils/mixins.py"], "/adminDemo/settings/__init__.py": ["/adminDemo/settings/com_settings.py", "/adminDemo/settings/dev_settings.py"], "/utils/jwt_response_payload_handler.py": ["/utils/success.py"], "/apps/common/pagination.py": ["/utils/mixins.py"], "/apps/goods/views.py": ["/utils/mixins.py"]}
|
29,239,109
|
blink07/shop_admin
|
refs/heads/main
|
/utils/exception_handlers.py
|
# from rest_framework.views import exception_handler
import traceback
from django.core.exceptions import PermissionDenied
# from rest_framework.compat import set_rollback
from rest_framework.exceptions import (AuthenticationFailed, MethodNotAllowed, NotAuthenticated,
PermissionDenied as RestPermissionDenied,
ValidationError,NotFound)
from rest_framework.views import set_rollback
from utils.error import ERROR_PermissionDenied, ERROR_ValidationError, ERROR_AuthenticationFailed, ERROR_FAULT, \
ERROR_NotFound, SUCCESS, ERROR_MethodNotAllowed
from utils.response import response
def exception_handler(exc, content):
    """Map a raised exception to a project error object and return a response.

    :param exc: the exception instance raised by the view.
    :param content: the DRF exception context dict (upstream calls it
        ``context``; the name is kept for backward compatibility).
    :return: the uniform ``response(error=...)`` envelope.

    Bug fix: the original checked AuthenticationFailed in a standalone ``if``
    whose result was then clobbered by the ``else`` branch of the following
    ``if`` — auth failures ended up as ERROR_FAULT.  The branches are now a
    single mutually-exclusive chain.
    """
    error = SUCCESS
    if isinstance(exc, (NotAuthenticated, AuthenticationFailed)):
        error = ERROR_AuthenticationFailed
    elif isinstance(exc, (PermissionDenied, RestPermissionDenied)):
        error = ERROR_PermissionDenied
        # NOTE(review): this mutates a shared module-level error object, so the
        # message can leak between requests — confirm response() copies it.
        error.message = exc.detail if hasattr(exc, 'detail') else u'该用户没有该权限功能'
    elif isinstance(exc, ValidationError):
        error = ERROR_ValidationError
        error.message = exc.detail if hasattr(exc, 'detail') else u'参数校验失败'
    elif isinstance(exc, MethodNotAllowed):
        error = ERROR_MethodNotAllowed
        error.message = exc.detail if hasattr(exc, 'detail') else u'请求方法不被允许'
    elif isinstance(exc, NotFound):
        error = ERROR_NotFound
    else:
        # Unclassified exception: fall back to the generic fault error.
        print(exc)  # TODO: replace with logger.exception once logging is wired up
        error = ERROR_FAULT
    # Roll back the current transaction, mirroring DRF's default handler.
    set_rollback()
    return response(error=error)
def custom_exception_handler(exc, context):
    """DRF entry point (EXCEPTION_HANDLER): delegate to the project handler."""
    return exception_handler(exc, context)
|
{"/adminDemo/settings/dev_settings.py": ["/adminDemo/settings/com_settings.py"], "/apps/user_manage/serializers.py": ["/adminDemo/settings/__init__.py"], "/utils/response.py": ["/utils/error.py"], "/apps/goods/urls.py": ["/apps/goods/views.py"], "/utils/exception_handlers.py": ["/utils/error.py", "/utils/response.py"], "/apps/menu/urls.py": ["/apps/menu/views.py"], "/apps/user_manage/filters.py": ["/apps/user_manage/models.py"], "/utils/mixins.py": ["/utils/base.py"], "/apps/user_manage/views.py": ["/utils/error.py", "/apps/user_manage/models.py", "/apps/user_manage/utils.py", "/utils/mixins.py"], "/apps/menu/views.py": ["/utils/mixins.py"], "/adminDemo/settings/__init__.py": ["/adminDemo/settings/com_settings.py", "/adminDemo/settings/dev_settings.py"], "/utils/jwt_response_payload_handler.py": ["/utils/success.py"], "/apps/common/pagination.py": ["/utils/mixins.py"], "/apps/goods/views.py": ["/utils/mixins.py"]}
|
29,239,110
|
blink07/shop_admin
|
refs/heads/main
|
/apps/goods/models.py
|
from datetime import datetime
from django.db import models
# Create your models here.
class GoodsCategory(models.Model):
    """
    商品类别表 — goods category, a 3-level tree via the `parent_category`
    self-FK.
    """
    CATEGORY_TYPE = (
        (1, "一级类目"),
        (2, "二级类目"),
        (3, "三级类目"),
    )
    cate_name = models.CharField('类别名称', max_length=100, blank=False, null=False)
    category_type = models.SmallIntegerField('商品分类级别', choices=CATEGORY_TYPE, blank=False, null=False)
    parent_category = models.ForeignKey('self', on_delete=models.CASCADE, related_name='children', blank=True, null=True, help_text='父级类目')
    is_active = models.BooleanField(default=True)
    # Bug fix: the original used `default=datetime.now()`, which is evaluated
    # once at import time so every row got the same server-start timestamp.
    # Passing the callable defers evaluation to each save.
    add_time = models.DateTimeField(default=datetime.now, help_text='添加时间')

    class Meta:
        db_table = 'goods_category'
        verbose_name = "商品分类表"
        verbose_name_plural = verbose_name

    def __str__(self):
        return self.cate_name
class Goods(models.Model):
    """
    商品表 — goods/product record; belongs to one GoodsCategory.
    """
    name = models.CharField('商品名称',max_length=100, blank=False, null=False)
    # Business key; NOTE(review): not unique=True despite "唯一编号" — confirm.
    goods_sn = models.CharField('商品唯一编号', max_length=50, blank=False, null=False)
    click_num = models.IntegerField("点击数", default=0)
    sold_num = models.IntegerField("商品销售量", default=0)
    fav_num = models.IntegerField("收藏数", default=0)
    goods_num = models.IntegerField("库存数", default=0)
    market_price = models.DecimalField("市场价格", default=0, decimal_places=3,max_digits=11)
    shop_price = models.DecimalField("本店价格", default=0, decimal_places=3,max_digits=11)
    descripte = models.CharField('商品描述', max_length=255, blank=True, null=True)
    goods_front_image = models.CharField(max_length=40, null=True, blank=True, verbose_name="封面图")
    is_hot = models.BooleanField("是否热销", default=False, help_text='是否热销')
    # Correctly passes the callable (contrast with GoodsCategory.add_time).
    add_time = models.DateTimeField("添加时间", default=datetime.now)
    category = models.ForeignKey(GoodsCategory, on_delete=models.CASCADE, verbose_name="商品类目")
    class Meta:
        # NOTE(review): no db_table here, unlike the other models — confirm
        # the default table name (goods_goods) is intended.
        verbose_name = '商品信息'
        verbose_name_plural = verbose_name
    def __str__(self):
        return self.name
|
{"/adminDemo/settings/dev_settings.py": ["/adminDemo/settings/com_settings.py"], "/apps/user_manage/serializers.py": ["/adminDemo/settings/__init__.py"], "/utils/response.py": ["/utils/error.py"], "/apps/goods/urls.py": ["/apps/goods/views.py"], "/utils/exception_handlers.py": ["/utils/error.py", "/utils/response.py"], "/apps/menu/urls.py": ["/apps/menu/views.py"], "/apps/user_manage/filters.py": ["/apps/user_manage/models.py"], "/utils/mixins.py": ["/utils/base.py"], "/apps/user_manage/views.py": ["/utils/error.py", "/apps/user_manage/models.py", "/apps/user_manage/utils.py", "/utils/mixins.py"], "/apps/menu/views.py": ["/utils/mixins.py"], "/adminDemo/settings/__init__.py": ["/adminDemo/settings/com_settings.py", "/adminDemo/settings/dev_settings.py"], "/utils/jwt_response_payload_handler.py": ["/utils/success.py"], "/apps/common/pagination.py": ["/utils/mixins.py"], "/apps/goods/views.py": ["/utils/mixins.py"]}
|
29,239,111
|
blink07/shop_admin
|
refs/heads/main
|
/adminDemo/urls.py
|
"""adminDemo URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/3.0/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.conf import settings
from django.conf.urls import url
from django.contrib import admin
from django.urls import path,include
from django.views.static import serve
from drf_yasg import openapi
from drf_yasg.views import get_schema_view
from django.conf.urls.static import static
from rest_framework import permissions
from rest_framework_jwt.views import obtain_jwt_token
import goods.urls
from user_manage.views import GetCodeInfo, UserList, CustomResponseObtainJSONWebToken
# from rest_framework.authtoken import views
# from user_manage import urls
# from menu import urls
import user_manage.urls
import menu.urls
from drf_yasg.views import get_schema_view
# drf_yasg schema view for the API docs; public, no auth required.
schema_view = get_schema_view(
    openapi.Info(
        title="Shop API",
        default_version='v1',
        description="Test description",
        terms_of_service="https://www.blink07.com/policies/terms/",
        contact=openapi.Contact(email="2954538230@qq.com"),
        license=openapi.License(name="BSD License"),
    ),
    public=True,
    permission_classes=(permissions.AllowAny,),
)
urlpatterns = [
    path('admin/', admin.site.urls),
    # swagger接口文档路由
    # NOTE(review): this passes the bare schema_view object rather than
    # schema_view.with_ui(...)/without_ui(...) like the routes below — confirm
    # it resolves; get_schema_view also appears imported twice above.
    url(r'^docs/', schema_view, name="docs"),
    url(r'getcodeinfo/$', GetCodeInfo.as_view()),
    # url(r'login/', views.obtain_auth_token),
    # path('o/', include('oauth2_provider.urls', namespace='oauth2_provider')),
    path('users/', UserList.as_view({'get': 'list'})),
    # NOTE(review): these prefixes are not anchored with '^', so they match
    # anywhere in the path — likely harmless but worth confirming.
    url(r'user/', include(user_manage.urls)),
    # jwt的认证接口, 自定义登录 (custom JWT login with custom failure payload)
    path('login/', CustomResponseObtainJSONWebToken.as_view()), # 写重写jwt认证类
    url(r'menu/', include(menu.urls)),
    url(r'goods/', include(goods.urls)),
    # swagger
    url(r'^swagger(?P<format>\.json|\.yaml)$', schema_view.without_ui(cache_timeout=0), name='schema-json'),
    url(r'^swagger/$', schema_view.with_ui('swagger', cache_timeout=0), name='schema-swagger-ui'),
    url(r'^redoc/$', schema_view.with_ui('redoc', cache_timeout=0), name='schema-redoc'),
    path('chat/', include('dashboard.urls')),
] + static(settings.STATIC_URL, serve, document_root = settings.STATIC_ROOT)
# handler404 = Response(data={}, status=status.HTTP_404_NOT_FOUND,)
# urlpatterns += [
#
# ]
|
{"/adminDemo/settings/dev_settings.py": ["/adminDemo/settings/com_settings.py"], "/apps/user_manage/serializers.py": ["/adminDemo/settings/__init__.py"], "/utils/response.py": ["/utils/error.py"], "/apps/goods/urls.py": ["/apps/goods/views.py"], "/utils/exception_handlers.py": ["/utils/error.py", "/utils/response.py"], "/apps/menu/urls.py": ["/apps/menu/views.py"], "/apps/user_manage/filters.py": ["/apps/user_manage/models.py"], "/utils/mixins.py": ["/utils/base.py"], "/apps/user_manage/views.py": ["/utils/error.py", "/apps/user_manage/models.py", "/apps/user_manage/utils.py", "/utils/mixins.py"], "/apps/menu/views.py": ["/utils/mixins.py"], "/adminDemo/settings/__init__.py": ["/adminDemo/settings/com_settings.py", "/adminDemo/settings/dev_settings.py"], "/utils/jwt_response_payload_handler.py": ["/utils/success.py"], "/apps/common/pagination.py": ["/utils/mixins.py"], "/apps/goods/views.py": ["/utils/mixins.py"]}
|
29,239,112
|
blink07/shop_admin
|
refs/heads/main
|
/tests/test_sentry.py
|
import sentry_sdk

# Sentry DSN for the local test instance.
# Fix: the original bound this to a typo'd, unused name (`sdn`) and then
# repeated the literal inline in init(); use the single constant.
DSN = "http://7596466671c94dbfaa436bbb3fea63b1@192.168.154.130:9000/2"
sentry_sdk.init(DSN)

# Deliberately raise ZeroDivisionError so Sentry captures a test event.
division_by_zero = 1 / 0
|
{"/adminDemo/settings/dev_settings.py": ["/adminDemo/settings/com_settings.py"], "/apps/user_manage/serializers.py": ["/adminDemo/settings/__init__.py"], "/utils/response.py": ["/utils/error.py"], "/apps/goods/urls.py": ["/apps/goods/views.py"], "/utils/exception_handlers.py": ["/utils/error.py", "/utils/response.py"], "/apps/menu/urls.py": ["/apps/menu/views.py"], "/apps/user_manage/filters.py": ["/apps/user_manage/models.py"], "/utils/mixins.py": ["/utils/base.py"], "/apps/user_manage/views.py": ["/utils/error.py", "/apps/user_manage/models.py", "/apps/user_manage/utils.py", "/utils/mixins.py"], "/apps/menu/views.py": ["/utils/mixins.py"], "/adminDemo/settings/__init__.py": ["/adminDemo/settings/com_settings.py", "/adminDemo/settings/dev_settings.py"], "/utils/jwt_response_payload_handler.py": ["/utils/success.py"], "/apps/common/pagination.py": ["/utils/mixins.py"], "/apps/goods/views.py": ["/utils/mixins.py"]}
|
29,239,113
|
blink07/shop_admin
|
refs/heads/main
|
/apps/menu/urls.py
|
from django.conf.urls import url
from .views import *
# Menu app routes (included under the site-wide 'menu/' prefix).
urlpatterns = [
    # Left-hand navigation tree; GET lists, POST creates a menu entry.
    url(r'^menus$', MenuViewSet.as_view({'get':'list', 'post':'create'}),name="获取左侧菜单导航栏"),
    # url(r'^rights/list$', RightsViewSet.as_view({'get':'list'}),name="获取权限列表"),
    # Flat permission list for the role-assignment UI.
    url(r'^rights$', RightsViewSet.as_view({'get':'list'}),name="获取权限列表"),
]
|
{"/adminDemo/settings/dev_settings.py": ["/adminDemo/settings/com_settings.py"], "/apps/user_manage/serializers.py": ["/adminDemo/settings/__init__.py"], "/utils/response.py": ["/utils/error.py"], "/apps/goods/urls.py": ["/apps/goods/views.py"], "/utils/exception_handlers.py": ["/utils/error.py", "/utils/response.py"], "/apps/menu/urls.py": ["/apps/menu/views.py"], "/apps/user_manage/filters.py": ["/apps/user_manage/models.py"], "/utils/mixins.py": ["/utils/base.py"], "/apps/user_manage/views.py": ["/utils/error.py", "/apps/user_manage/models.py", "/apps/user_manage/utils.py", "/utils/mixins.py"], "/apps/menu/views.py": ["/utils/mixins.py"], "/adminDemo/settings/__init__.py": ["/adminDemo/settings/com_settings.py", "/adminDemo/settings/dev_settings.py"], "/utils/jwt_response_payload_handler.py": ["/utils/success.py"], "/apps/common/pagination.py": ["/utils/mixins.py"], "/apps/goods/views.py": ["/utils/mixins.py"]}
|
29,239,114
|
blink07/shop_admin
|
refs/heads/main
|
/apps/user_manage/filters.py
|
import django_filters
from .models import SysUser
# class UserFilter(django_filters.rest_framework.FilterSet):
# role = django_filters.CharFilter(name="role__name")
#
# class Meta:
# model = SysUser
# fields = ['username', 'role']
|
{"/adminDemo/settings/dev_settings.py": ["/adminDemo/settings/com_settings.py"], "/apps/user_manage/serializers.py": ["/adminDemo/settings/__init__.py"], "/utils/response.py": ["/utils/error.py"], "/apps/goods/urls.py": ["/apps/goods/views.py"], "/utils/exception_handlers.py": ["/utils/error.py", "/utils/response.py"], "/apps/menu/urls.py": ["/apps/menu/views.py"], "/apps/user_manage/filters.py": ["/apps/user_manage/models.py"], "/utils/mixins.py": ["/utils/base.py"], "/apps/user_manage/views.py": ["/utils/error.py", "/apps/user_manage/models.py", "/apps/user_manage/utils.py", "/utils/mixins.py"], "/apps/menu/views.py": ["/utils/mixins.py"], "/adminDemo/settings/__init__.py": ["/adminDemo/settings/com_settings.py", "/adminDemo/settings/dev_settings.py"], "/utils/jwt_response_payload_handler.py": ["/utils/success.py"], "/apps/common/pagination.py": ["/utils/mixins.py"], "/apps/goods/views.py": ["/utils/mixins.py"]}
|
29,239,115
|
blink07/shop_admin
|
refs/heads/main
|
/utils/mixins.py
|
"""
Basic building blocks for generic class based views.
We don't bind behaviour to http method handlers yet,
which allows mixin classes to be composed in interesting ways.
"""
from __future__ import unicode_literals
# from rest_framework.mixins import CreateModelMixin
from rest_framework.response import Response
from .base import OK200
import pysnooper
def response_success(**kwargs):
    """Wrap ``kwargs['data']`` in the project's uniform success envelope.

    The status code always comes from OK200; a truthy ``message`` kwarg is
    forwarded, otherwise the message key is omitted entirely.
    """
    body = {"payload": kwargs["data"], "status": OK200.status_code}
    message = kwargs.get("message", None)
    if message:
        body["message"] = message
    return Response(body)
def response_error(errmsg):
    """Wrap an error message in the project's uniform failure envelope (status 0)."""
    body = {"status": 0, "message": errmsg}
    return Response(body)
class CreateModelMixin:
    """
    Create a model instance and return it in the project's uniform envelope.

    Fix: removed the debug ``print`` statements and the ``@pysnooper.snoop()``
    tracing decorator that were left in the production code path.
    """

    def create(self, request, *args, **kwargs):
        try:
            serializer = self.get_serializer(data=request.data)
            serializer.is_valid(raise_exception=True)
            self.perform_create(serializer)
            # Echo the serialized instance back on success.
            return response_success(data=serializer.data, message="提交成功")
        except Exception as e:
            # Broad catch keeps the uniform error envelope; the message
            # carries the detail (consistent with the other mixins here).
            return response_error(str(e))

    def perform_create(self, serializer):
        # Hook so subclasses can customize how the instance is saved.
        serializer.save()
class ListModelMixin:
    """
    List a queryset in the project's uniform envelope, honouring the view's
    filter backends and paginator.  (Commented-out try/except cruft removed.)
    """

    def list(self, request, *args, **kwargs):
        queryset = self.filter_queryset(self.get_queryset())
        page = self.paginate_queryset(queryset)
        if page is not None:
            # Paginated responses use the paginator's own envelope.
            serializer = self.get_serializer(page, many=True)
            return self.get_paginated_response(serializer.data)
        serializer = self.get_serializer(queryset, many=True)
        return response_success(data=serializer.data)
class RetrieveModelMixin:
    """
    Retrieve a single model instance in the project's uniform envelope.
    """

    def retrieve(self, request, *args, **kwargs):
        try:
            obj = self.get_object()
            serialized = self.get_serializer(obj)
            return response_success(data=serialized.data)
        except Exception as err:
            # Any lookup/serialization failure becomes a uniform error reply.
            return response_error(str(err))
class UpdateModelMixin:
    """
    Update a model instance (full or partial) in the project's uniform envelope.
    """

    def update(self, request, *args, **kwargs):
        try:
            partial = kwargs.pop('partial', False)
            target = self.get_object()
            serializer = self.get_serializer(target, data=request.data, partial=partial)
            serializer.is_valid(raise_exception=True)
            self.perform_update(serializer)
            if getattr(target, '_prefetched_objects_cache', None):
                # Drop stale prefetches so the serialized output reflects
                # the just-applied update.
                target._prefetched_objects_cache = {}
            return response_success(data=serializer.data, message="更新成功")
        except Exception as err:
            return response_error(str(err))

    def perform_update(self, serializer):
        # Hook so subclasses can customize how the update is persisted.
        serializer.save()

    def partial_update(self, request, *args, **kwargs):
        # PATCH is a PUT with partial validation.
        kwargs['partial'] = True
        return self.update(request, *args, **kwargs)
class DestroyModelMixin(object):
    """
    Delete a model instance and confirm in the project's uniform envelope.
    """

    def destroy(self, request, *args, **kwargs):
        try:
            target = self.get_object()
            self.perform_destroy(target)
            return response_success(data="", message="删除成功")
        except Exception as err:
            return response_error(str(err))

    def perform_destroy(self, instance):
        # Hook so subclasses can e.g. soft-delete instead.
        instance.delete()
|
{"/adminDemo/settings/dev_settings.py": ["/adminDemo/settings/com_settings.py"], "/apps/user_manage/serializers.py": ["/adminDemo/settings/__init__.py"], "/utils/response.py": ["/utils/error.py"], "/apps/goods/urls.py": ["/apps/goods/views.py"], "/utils/exception_handlers.py": ["/utils/error.py", "/utils/response.py"], "/apps/menu/urls.py": ["/apps/menu/views.py"], "/apps/user_manage/filters.py": ["/apps/user_manage/models.py"], "/utils/mixins.py": ["/utils/base.py"], "/apps/user_manage/views.py": ["/utils/error.py", "/apps/user_manage/models.py", "/apps/user_manage/utils.py", "/utils/mixins.py"], "/apps/menu/views.py": ["/utils/mixins.py"], "/adminDemo/settings/__init__.py": ["/adminDemo/settings/com_settings.py", "/adminDemo/settings/dev_settings.py"], "/utils/jwt_response_payload_handler.py": ["/utils/success.py"], "/apps/common/pagination.py": ["/utils/mixins.py"], "/apps/goods/views.py": ["/utils/mixins.py"]}
|
29,239,116
|
blink07/shop_admin
|
refs/heads/main
|
/apps/user_manage/views.py
|
from datetime import datetime
from django.contrib.auth import get_user_model
from django.contrib.auth.backends import ModelBackend
from django.db.models import Q
from django_filters.rest_framework import DjangoFilterBackend
from django.shortcuts import render
# Create your views here.
from oauth2_provider.contrib.rest_framework import TokenHasReadWriteScope
from rest_framework import generics, status, viewsets, filters
from rest_framework import permissions
from rest_framework.authentication import SessionAuthentication, BasicAuthentication
from rest_framework.views import APIView
from rest_framework.viewsets import GenericViewSet
from rest_framework_jwt.settings import api_settings
from rest_framework_jwt.views import ObtainJSONWebToken, jwt_response_payload_handler
from common.pagination import StandardResultsSetPagination
from dashboard.consumers import send_group_msg
from menu.models import Permission
from user_manage.serializers import UserSerializer, UserRegSerializer, RoleSerializer
from utils.error import ERROR_USER_RALATION
# from .filters import UserFilter
from .models import Role, PermissionRole
from .utils import Captcha
from utils.mixins import *
User = get_user_model()
class GetCodeInfo(APIView):
    """
    Return a freshly generated image-captcha code (获取图片验证码).

    Fix: removed the debug ``print(code)`` — it wrote the captcha secret to
    stdout/logs, defeating the captcha.
    """

    def get(self, request):
        # Captcha presumably stores its state on the session via `request`
        # — confirm against utils.Captcha.
        cap = Captcha(request)
        code = cap.getVerificationCode()
        return response_success(data={"code": code})
class UserList(ListModelMixin, viewsets.GenericViewSet):
    """
    Paginated user listing with search over username/email/role name.

    Fix: the ``list`` override duplicated ListModelMixin.list verbatim, so it
    has been removed — the inherited implementation already applies
    filtering, pagination and the uniform envelope.
    """
    queryset = User.objects.all()
    serializer_class = UserSerializer
    pagination_class = StandardResultsSetPagination
    filter_backends = (filters.SearchFilter,)
    search_fields = ('username', 'email', 'role__role_name')
class ChangeUserState(APIView):
    """
    Toggle a user's active state (PUT /user/statechange/<pk>/).

    Fix: the original let a bad pk raise DoesNotExist and a missing body
    field raise KeyError, both surfacing as 500s; they now return the
    uniform error envelope.
    """

    def put(self, request, pk):
        try:
            user = User.objects.get(id=pk)
            user.state = request.data["state"]
        except User.DoesNotExist:
            return response_error("用户不存在")
        except KeyError:
            return response_error("缺少state参数")
        user.save()
        return response_success(data="", message="更新成功~")
class ChangeUserRole(APIView):
    """
    Assign role <pk1> to user <pk> (PUT /user/changeRole/<pk>/role/<pk1>/).

    Fix: unknown user/role ids previously raised DoesNotExist (a 500); they
    now return the uniform error envelope.
    """

    def put(self, request, pk, pk1):
        try:
            user = User.objects.get(id=pk)
            role = Role.objects.get(id=pk1)
        except (User.DoesNotExist, Role.DoesNotExist):
            return response_error("用户或角色不存在")
        user.role = role
        user.save()
        return response_success(data="", message='更新角色成功')
class CustomBackend(ModelBackend):
    """
    Custom login backend: the submitted ``username`` may be either the
    username or the mobile number, e.g.::

        {
            "username": "15070070520",
            "password": "admin123"
        }
    """
    def authenticate(self, request, username=None, password=None, **kwargs):
        try:
            # Match on username OR mobile. NOTE(review): if a username equals
            # another user's mobile, get() raises MultipleObjectsReturned,
            # which the except below silently turns into a failed login.
            user = User.objects.get(Q(username=username) | Q(mobile=username))
            if user.check_password(password):
                return user
        except Exception as e:
            # Auth-backend contract: return None to signal "cannot
            # authenticate" so Django falls through to the next backend.
            # The broad except also hides DB errors — acceptable here but
            # worth logging.
            return None
class CustomResponseObtainJSONWebToken(ObtainJSONWebToken):
    """
    JWT login view with a custom payload on failure.

    On success: builds the standard jwt payload, pushes a websocket
    notification, and optionally sets the JWT cookie.  On failure: returns
    the project's ERROR_USER_RALATION code/message instead of DRF's 400.
    """
    def post(self, request, *args, **kwargs):
        serializer = self.get_serializer(data=request.data)
        if serializer.is_valid():
            # serializer.object is populated by rest_framework_jwt's
            # JSONWebTokenSerializer with the authenticated user + token.
            user = serializer.object.get('user') or request.user
            token = serializer.object.get('token')
            response_data = jwt_response_payload_handler(token, user, request)
            # `Response` comes in via the star import from utils.mixins.
            response = Response(response_data)
            # NOTE(review): hard-coded channels group "aaa" — confirm this
            # is a leftover from testing.
            send_group_msg("aaa", {"message":"登录成功"})
            if api_settings.JWT_AUTH_COOKIE:
                # Mirror rest_framework_jwt's cookie behaviour: httponly
                # cookie expiring with the token.
                expiration = (datetime.utcnow() +
                              api_settings.JWT_EXPIRATION_DELTA)
                response.set_cookie(api_settings.JWT_AUTH_COOKIE,
                                    token,
                                    expires=expiration,
                                    httponly=True)
            return response
        # return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
        # Invalid credentials: respond with the project's fixed error payload.
        return Response({"code": ERROR_USER_RALATION.status_code, "message": ERROR_USER_RALATION.message})
class UserRegisterView(CreateModelMixin, RetrieveModelMixin, UpdateModelMixin, DestroyModelMixin, GenericViewSet):
    """
    自定义用户注册 — user registration plus retrieve/update/destroy.

    create validates with UserRegSerializer; every other action serializes
    with UserSerializer.  Fix: ``update`` raised KeyError (a 500) when
    ``email``/``mobile`` were missing from the body; it now returns the
    uniform error envelope.
    """
    queryset = User.objects.all()

    def get_serializer_class(self):
        # Registration has its own write serializer; everything else reads.
        if self.action == 'create':
            return UserRegSerializer
        return UserSerializer

    def update(self, request, *args, **kwargs):
        # Only email/mobile are editable here; this deliberately bypasses
        # serializer validation (behaviour kept from the original).
        instance = self.get_object()
        try:
            instance.email = request.data["email"]
            instance.mobile = request.data["mobile"]
        except KeyError as missing:
            return response_error("缺少参数: %s" % missing)
        instance.save()
        return response_success(data="", message="更新成功~~")
class RolePermissionListView(APIView):
    """
    Role/permission endpoints.

    GET returns every role with its permission tree nested three levels deep
    (level-3 nodes carry no "children" key).  DELETE removes one
    PermissionRole row from a role and returns the role's reloaded tree.

    Fixes: the 3-level traversal was duplicated verbatim in get() and
    delete() (now a shared helper); the local named ``list`` shadowed the
    builtin; delete() computed the reloaded tree but then returned an empty
    dict, defeating its own "reload permissions after delete" step.
    """

    def _build_tree(self, permissions_):
        # Build the 3-level tree for one role's PermissionRole queryset.
        tree = []
        if not permissions_:
            return tree
        for permission in permissions_.filter(level=1).all():
            per_dict = self.handler(permission, 1)
            children_2 = permission.permission_children.all()
            if children_2:
                for per_2 in children_2.filter(level=2).all():
                    per_2_dict = self.handler(per_2, 1)
                    children_3 = per_2.permission_children.all()
                    if children_3:
                        for per_3 in children_3.filter(level=3).all():
                            per_2_dict["children"].append(self.handler(per_3, 0))
                    per_dict["children"].append(per_2_dict)
            tree.append(per_dict)
        return tree

    def get(self, request):
        results = []
        for role in Role.objects.all():
            role_dict = {
                "id": role.id,
                "role_name": role.role_name,
                "role_descripte": role.role_descripte,
                "children": self._build_tree(role.permissions.all()),
            }
            results.append(role_dict)
        return response_success(data=results)

    def handler(self, obj, flag):
        # Serialize one PermissionRole node; flag=1 adds an empty children list
        # (level-3 leaves are serialized with flag=0 and get none).
        tmp_dict = {
            "id": obj.id,
            "per_name": obj.per_name,
            "path": obj.path,
            "permission_id": obj.permission.id,
        }
        if flag:
            tmp_dict["children"] = []
        return tmp_dict

    def delete(self, request, pk, pk1):
        """
        Remove permission-role row ``pk1`` from role ``pk``, then return the
        role's reloaded permission tree.

        TODO (kept from original): when a higher-level permission loses all of
        its children it should be removed too — a parent node must not exist
        without any child permissions.

        :param pk: role id
        :param pk1: PermissionRole row id
        """
        PermissionRole.objects.get(role=pk, id=pk1).delete()
        role = Role.objects.get(id=pk)
        # Fix: actually return the reloaded tree instead of discarding it.
        return response_success(data=self._build_tree(role.permissions.all()))
class PermissionDistribution(APIView):
    """
    Assign permissions to a role (full replacement of existing assignments).
    """
    def post(self, request, pk):
        """
        Replace role pk's permissions with the submitted id list.

        :param request: body like {"keys": [permission ids]}
        :param pk: role id
        :return: empty success payload
        """
        permission_keys = request.data
        # Sort ids ascending so level-1 parents are created before level-2
        # children. NOTE(review): this assumes parent ids are always smaller
        # than child ids — relies on insertion order of the Permission table.
        permission_keys_sort = sorted(permission_keys['keys'])
        # Wipe the role's existing assignments, then rebuild from scratch.
        PermissionRole.objects.filter(role=pk).delete()
        role = Role.objects.get(id=pk)
        level_3 = []
        for key in permission_keys_sort:
            permission = Permission.objects.get(id=key)
            if permission.level == 1:
                PermissionRole.objects.create(per_name=permission.per_name, level=1, role=role, permission=permission)
            elif permission.level == 2:
                # NOTE(review): despite the name, `children` on Permission
                # appears to point at the PARENT permission — confirm model.
                parent_id = permission.children.id
                per_role_id = PermissionRole.objects.get(role=pk, permission=parent_id)
                PermissionRole.objects.create(per_name=permission.per_name, level=permission.level, role=role, children=per_role_id, permission=permission)
            else:
                # Level-3 rows are deferred until all level-2 parents exist.
                level_3.append(permission.id)
        for i in level_3:
            permission = Permission.objects.get(id=i)
            parent_id = permission.children.id
            per_role_id = PermissionRole.objects.get(role=pk, permission=parent_id)
            PermissionRole.objects.create(per_name=permission.per_name, level=permission.level, role=role,
                                          children=per_role_id, permission=permission)
        return response_success(data={})
class RoleView(ListModelMixin, GenericViewSet):
    """List endpoint for roles."""
    queryset = Role.objects.all()
    serializer_class = RoleSerializer
class TestSentry(APIView):
    """
    Sentry smoke-test endpoint: the loop below deliberately walks one index
    past the end of the list, raising IndexError so the error is reported to
    Sentry. The final return is therefore unreachable — do not "fix" this.
    """
    authentication_classes = ()
    permission_classes = ()
    def get(self, request):
        a = [1, 2, 3]
        for i in range(4):
            print(a[i])  # i == 3 raises IndexError (intentional)
        return response_success(data={"code": 1})
|
{"/adminDemo/settings/dev_settings.py": ["/adminDemo/settings/com_settings.py"], "/apps/user_manage/serializers.py": ["/adminDemo/settings/__init__.py"], "/utils/response.py": ["/utils/error.py"], "/apps/goods/urls.py": ["/apps/goods/views.py"], "/utils/exception_handlers.py": ["/utils/error.py", "/utils/response.py"], "/apps/menu/urls.py": ["/apps/menu/views.py"], "/apps/user_manage/filters.py": ["/apps/user_manage/models.py"], "/utils/mixins.py": ["/utils/base.py"], "/apps/user_manage/views.py": ["/utils/error.py", "/apps/user_manage/models.py", "/apps/user_manage/utils.py", "/utils/mixins.py"], "/apps/menu/views.py": ["/utils/mixins.py"], "/adminDemo/settings/__init__.py": ["/adminDemo/settings/com_settings.py", "/adminDemo/settings/dev_settings.py"], "/utils/jwt_response_payload_handler.py": ["/utils/success.py"], "/apps/common/pagination.py": ["/utils/mixins.py"], "/apps/goods/views.py": ["/utils/mixins.py"]}
|
29,239,117
|
blink07/shop_admin
|
refs/heads/main
|
/apps/menu/migrations/0001_initial.py
|
# Generated by Django 3.0.5 on 2020-07-04 09:25
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Initial schema for the menu app (auto-generated; do not hand-edit)."""
    initial = True
    dependencies = [
    ]
    operations = [
        migrations.CreateModel(
            name='Menu',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('authName', models.CharField(max_length=40, verbose_name='菜单名')),
                ('path', models.CharField(blank=True, default='', max_length=255, null=True, verbose_name='路径')),
                ('menu_type', models.IntegerField(choices=[(1, '一级类目'), (2, '二级类目')], default=1, help_text='类目级别')),
                # Self-referencing FK: two-level menu tree.
                ('parent', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='sub_cat', to='menu.Menu')),
            ],
            options={
                'verbose_name': '导航菜单表',
                'verbose_name_plural': '导航菜单表',
            },
        ),
    ]
|
{"/adminDemo/settings/dev_settings.py": ["/adminDemo/settings/com_settings.py"], "/apps/user_manage/serializers.py": ["/adminDemo/settings/__init__.py"], "/utils/response.py": ["/utils/error.py"], "/apps/goods/urls.py": ["/apps/goods/views.py"], "/utils/exception_handlers.py": ["/utils/error.py", "/utils/response.py"], "/apps/menu/urls.py": ["/apps/menu/views.py"], "/apps/user_manage/filters.py": ["/apps/user_manage/models.py"], "/utils/mixins.py": ["/utils/base.py"], "/apps/user_manage/views.py": ["/utils/error.py", "/apps/user_manage/models.py", "/apps/user_manage/utils.py", "/utils/mixins.py"], "/apps/menu/views.py": ["/utils/mixins.py"], "/adminDemo/settings/__init__.py": ["/adminDemo/settings/com_settings.py", "/adminDemo/settings/dev_settings.py"], "/utils/jwt_response_payload_handler.py": ["/utils/success.py"], "/apps/common/pagination.py": ["/utils/mixins.py"], "/apps/goods/views.py": ["/utils/mixins.py"]}
|
29,239,118
|
blink07/shop_admin
|
refs/heads/main
|
/adminDemo/settings/com_settings.py
|
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import sys
from datetime import datetime as d
# BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Project root: three levels up, since this file lives in adminDemo/settings/.
BASE_DIR = os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
# Make apps/ packages importable as top-level modules (e.g. `import menu`).
sys.path.insert(0, os.path.join(BASE_DIR, 'apps'))
ALLOWED_HOSTS = ["*"]  # NOTE(review): wide open — restrict in production
APPEND_SLASH=False
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.0/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
# NOTE(review): this key is committed to VCS — rotate it and load from the
# environment instead.
SECRET_KEY = '+p@6^55xm1&pu+g&7hv8r&&^rzg80m8&_m*d6!ry=e^(h(6rwv'
# Swap Django's default user model for the project's SysUser.
AUTH_USER_MODEL = 'user_manage.SysUser'
# Application definition
INSTALLED_APPS = [
    'channels',
    'dashboard',
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
    'rest_framework',
    'user_manage',
    # 'oauth2_provider',  # OAuth2-based login (disabled)
    'corsheaders',
    'menu',
    'goods',
    'drf_yasg',
]
MIDDLEWARE = [
    'corsheaders.middleware.CorsMiddleware',  # works with CORS_ORIGIN_ALLOW_ALL = True
    'django.middleware.security.SecurityMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    # 'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
    # 'apps.middlewares.exception_middlewares.ExceptionMiddleware',
]
CORS_ORIGIN_ALLOW_ALL = True  # open CORS; pairs with CorsMiddleware above
ROOT_URLCONF = 'adminDemo.urls'
TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'DIRS': [os.path.join(BASE_DIR, 'templates')]
        ,
        'APP_DIRS': True,
        'OPTIONS': {
            'context_processors': [
                'django.template.context_processors.debug',
                'django.template.context_processors.request',
                'django.contrib.auth.context_processors.auth',
                'django.contrib.messages.context_processors.messages',
            ],
        },
    },
]
# Served over ASGI (channels); the WSGI entry point is kept for reference.
# WSGI_APPLICATION = 'adminDemo.wsgi.application'
ASGI_APPLICATION = 'adminDemo.asgi.application'
# Channels layer backed by Redis.
CHANNEL_LAYERS = {
    'default': {
        'BACKEND': 'channels_redis.core.RedisChannelLayer',
        'CONFIG': {
            # NOTE(review): hardcoded host — move to environment config.
            "hosts": [('192.168.154.134', 6379)],
        },
    },
}
# Password validation
# https://docs.djangoproject.com/en/3.0/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
    {
        'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
    },
]
# APPEND_SLASH=False
# rest_framework settings
REST_FRAMEWORK = {
    'DEFAULT_AUTHENTICATION_CLASSES': (
        # 'oauth2_provider.contrib.rest_framework.OAuth2Authentication',  # OAuth2 auth (disabled)
        'rest_framework.authentication.BasicAuthentication',
        'rest_framework.authentication.SessionAuthentication',
        'rest_framework_jwt.authentication.JSONWebTokenAuthentication',  # JWT auth
    ),
    'DEFAULT_PERMISSION_CLASSES': (
        'rest_framework.permissions.IsAuthenticated',
    ),
    # 'DEFAULT_PAGINATION_CLASS': 'apps.common.pagination.StandardResultsSetPagination'
    # Unified DRF exception handler. Per the original note, it cannot catch
    # 404s raised outside DRF; returning a uniform Response type from a
    # rewritten CommonMiddleware remains TODO.
    'EXCEPTION_HANDLER':'utils.exception_handlers.custom_exception_handler'
}
# Custom authentication backends.
AUTHENTICATION_BACKENDS = [
    'django.contrib.auth.backends.AllowAllUsersModelBackend',
    'user_manage.views.CustomBackend',
]
import datetime
# JWT token lifetime / header configuration.
JWT_AUTH = {
    'JWT_EXPIRATION_DELTA': datetime.timedelta(minutes=300),  # can also be e.g. seconds=20
    'JWT_AUTH_HEADER_PREFIX': 'JWT',  # must match the front end ("token" vs "JWT")
    'JWT_RESPONSE_PAYLOAD_HANDLER':'utils.jwt_response_payload_handler.jwt_response_payload_handler'
}
# OAUTH2_PROVIDER = {
#     # this is the list of available scopes
#     'SCOPES': {'read': 'Read scope', 'write': 'Write scope', 'groups': 'Access to your groups'}
# }
# Sentry error monitoring.
import sentry_sdk
from sentry_sdk.integrations.django import DjangoIntegration
sentry_sdk.init(
    # NOTE(review): DSN hardcoded and committed — load from the environment.
    dsn="http://19f9320561e1418cadefe269b12650e7@192.168.154.130:9000/3",
    integrations=[DjangoIntegration()],
    # If you wish to associate users to errors (assuming you are using
    # django.contrib.auth) you may enable sending PII data.
    send_default_pii=True
)
# Internationalization
# https://docs.djangoproject.com/en/3.0/topics/i18n/
STATIC_URL = '/static/'
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'Asia/Shanghai'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Mobile-number validation pattern (mainland-China 13x/15x/18x, 147, 176).
REGEX_MOBILE = r'^1[358]\d{9}$|^147\d{8}$|^176\d{8}$'
# E-mail validation pattern.
# Fix: the dot before "com" was unescaped, so `.` matched ANY character and
# e.g. "user@hostXcom" validated; escape it so only a literal ".com" passes.
REGEX_EMAIL = r'^[A-Za-z0-9.]+@[A-Za-z0-9.]+\.com$'
# Django logging
BASE_LOG_DIR = os.path.join(BASE_DIR, "log")
if not os.path.exists(BASE_LOG_DIR):
    os.mkdir(BASE_LOG_DIR)
LOGGING = {
    'version': 1,  # reserved key
    'disable_existing_loggers': False,  # keep pre-existing logger instances enabled
    # record formats
    'formatters': {
        # detailed format
        'standard': {
            'format': '[%(asctime)s][%(threadName)s:%(thread)d][task_id:%(name)s][%(filename)s:%(lineno)d]'
                      '[%(levelname)s][%(message)s]'
        },
        # simple format
        'simple': {
            'format': '[%(levelname)s][%(asctime)s][%(filename)s:%(lineno)d]%(message)s'
        },
        # message-only format
        'collect': {
            'format': '%(message)s'
        }
    },
    # filters
    'filters': {
        'require_debug_true': {
            '()': 'django.utils.log.RequireDebugTrue',
        },
    },
    # handlers
    'handlers': {
        # console output
        'console': {
            'level': 'DEBUG',
            'filters': ['require_debug_true'],  # print to screen only while DEBUG=True
            'class': 'logging.StreamHandler',
            'formatter': 'simple'
        },
        # earlier size-based handler, kept for reference:
        # 'default': {
        #     'level': 'INFO',
        #     'class': 'logging.handlers.RotatingFileHandler',  # write to file, rotate by size
        #     'filename': os.path.join(BASE_LOG_DIR, "{}.log".format(datetime.now().date())),
        #     'maxBytes': 1024 * 1024 * 50,  # 50 MB per file
        #     'backupCount': -1,
        #     'formatter': 'simple',
        #     'encoding': 'utf-8',
        # },
        'default': {
            'level': 'INFO',
            'class': 'logging.handlers.TimedRotatingFileHandler',  # write to file, rotate on schedule
            'filename': os.path.join(BASE_LOG_DIR, "admin.log"),
            'when': 'midnight',
            'interval': 1,
            # NOTE(review): backupCount <= 0 disables pruning, so rotated
            # files are kept forever — confirm that is intended.
            'backupCount': -1,
            'atTime': d.now().time().replace(0, 0, 0),  # roll over at 00:00:00 each day
            'formatter': 'simple',
            'encoding': 'utf-8',
        },
    },
    'loggers': {
        # the root logger gets this configuration
        '': {
            'handlers': ['default', 'console'],  # 'console' can be dropped in production
            'level': 'INFO',
            'propagate': True,  # whether records propagate to higher-level loggers
        },
    },
}
# swagger
# Swagger URLs must bypass the uniform response format.
SWAGGER_URL = ["/swagger/", "/redoc/", "/swagger.json", "/swagger.yaml"]
|
{"/adminDemo/settings/dev_settings.py": ["/adminDemo/settings/com_settings.py"], "/apps/user_manage/serializers.py": ["/adminDemo/settings/__init__.py"], "/utils/response.py": ["/utils/error.py"], "/apps/goods/urls.py": ["/apps/goods/views.py"], "/utils/exception_handlers.py": ["/utils/error.py", "/utils/response.py"], "/apps/menu/urls.py": ["/apps/menu/views.py"], "/apps/user_manage/filters.py": ["/apps/user_manage/models.py"], "/utils/mixins.py": ["/utils/base.py"], "/apps/user_manage/views.py": ["/utils/error.py", "/apps/user_manage/models.py", "/apps/user_manage/utils.py", "/utils/mixins.py"], "/apps/menu/views.py": ["/utils/mixins.py"], "/adminDemo/settings/__init__.py": ["/adminDemo/settings/com_settings.py", "/adminDemo/settings/dev_settings.py"], "/utils/jwt_response_payload_handler.py": ["/utils/success.py"], "/apps/common/pagination.py": ["/utils/mixins.py"], "/apps/goods/views.py": ["/utils/mixins.py"]}
|
29,239,119
|
blink07/shop_admin
|
refs/heads/main
|
/apps/menu/views.py
|
from django.db.models import Q
from django.shortcuts import render
# Create your views here.
from rest_framework import viewsets
from menu.models import Menu, Permission
from menu.serializers import MenuSerializer, PermissionSerializer, \
PermissionSerializer3
from utils.mixins import ListModelMixin, CreateModelMixin, RetrieveModelMixin
class MenuViewSet(ListModelMixin,CreateModelMixin,viewsets.GenericViewSet):
    """
    Navigation-menu endpoints (list + create).

    ### Known bug (from the original docstring): the create endpoint
    127.0.0.1:8001/menu/menus cannot insert/return a record carrying both a
    child key and a child-key alias at the same time.
    """
    # Only top-level menus (menu_type=1); children come via the serializer.
    queryset = Menu.objects.filter(menu_type=1).all()
    serializer_class = MenuSerializer
    # def get_queryset(self):
    #     return Menu.objects.filter(menu_type=1).all()
class RightsViewSet(ListModelMixin, viewsets.GenericViewSet):
    """
    Permission-list endpoints.

    ``?type=list`` returns a flat list of every permission; otherwise only
    level-1 permissions are returned, nested via the tree serializer.
    """
    queryset = Permission.objects.all()
    # serializer_class = PermissionSerializer2

    def get_serializer_class(self):
        """Flat serializer for ?type=list, tree serializer otherwise."""
        # Fix: local was named `type`, shadowing the builtin.
        requested = self.request.GET.get('type', None)
        if requested == 'list':
            return PermissionSerializer3
        return PermissionSerializer

    def get_queryset(self):
        """All permissions for ?type=list, only level-1 roots otherwise."""
        requested = self.request.GET.get('type', None)
        if requested == 'list':
            return Permission.objects.all()
        return Permission.objects.filter(level=1)
|
{"/adminDemo/settings/dev_settings.py": ["/adminDemo/settings/com_settings.py"], "/apps/user_manage/serializers.py": ["/adminDemo/settings/__init__.py"], "/utils/response.py": ["/utils/error.py"], "/apps/goods/urls.py": ["/apps/goods/views.py"], "/utils/exception_handlers.py": ["/utils/error.py", "/utils/response.py"], "/apps/menu/urls.py": ["/apps/menu/views.py"], "/apps/user_manage/filters.py": ["/apps/user_manage/models.py"], "/utils/mixins.py": ["/utils/base.py"], "/apps/user_manage/views.py": ["/utils/error.py", "/apps/user_manage/models.py", "/apps/user_manage/utils.py", "/utils/mixins.py"], "/apps/menu/views.py": ["/utils/mixins.py"], "/adminDemo/settings/__init__.py": ["/adminDemo/settings/com_settings.py", "/adminDemo/settings/dev_settings.py"], "/utils/jwt_response_payload_handler.py": ["/utils/success.py"], "/apps/common/pagination.py": ["/utils/mixins.py"], "/apps/goods/views.py": ["/utils/mixins.py"]}
|
29,239,120
|
blink07/shop_admin
|
refs/heads/main
|
/apps/menu/models.py
|
from django.db import models
# Create your models here.
class Menu(models.Model):
    """Navigation-menu entry; a two-level tree built with the `parent` self-FK."""
    MENU_TYPE = (
        (1, "一级类目"),
        (2, "二级类目")
    )
    # menu display name
    authName = models.CharField(max_length=40, blank=False, null=False, verbose_name="菜单名")
    # route path shown in the front end
    path = models.CharField(max_length=255, blank=True, null=True, default='',verbose_name='路径')
    menu_type = models.IntegerField(choices=MENU_TYPE, default=1, help_text="类目级别")
    # related_name is required here, otherwise the reverse accessor clashes
    parent = models.ForeignKey('self', on_delete=models.CASCADE, related_name="sub_cat", null=True, blank=True)
    class Meta:
        verbose_name = '导航菜单表'
        verbose_name_plural = verbose_name
    def __str__(self):
        return self.authName
    # def validate_parent(self):
    #     pass
class Permission(models.Model):
    """
    Base permission record, nested up to three levels via a self-FK.
    """
    per_name = models.CharField('权限名称', max_length=255, blank=False, null=False)
    path = models.CharField('权限路径', max_length=255, blank=True, null=True)
    level = models.IntegerField('权限级别', blank=False, null=False, default=1)
    # role = models.ForeignKey(Role, on_delete=models.CASCADE, blank=False, null=False, name="不能没有隶属的权限")
    # NOTE(review): despite the name, this FK points at the PARENT permission;
    # the reverse accessor `sub_cat` yields the actual children.
    children = models.ForeignKey('self', on_delete=models.CASCADE,related_name='sub_cat',blank=True, null=True)
    class Meta:
        db_table='permission'
        verbose_name = "权限基础信息列表"
        verbose_name_plural = verbose_name
    def __str__(self):
        return self.per_name
|
{"/adminDemo/settings/dev_settings.py": ["/adminDemo/settings/com_settings.py"], "/apps/user_manage/serializers.py": ["/adminDemo/settings/__init__.py"], "/utils/response.py": ["/utils/error.py"], "/apps/goods/urls.py": ["/apps/goods/views.py"], "/utils/exception_handlers.py": ["/utils/error.py", "/utils/response.py"], "/apps/menu/urls.py": ["/apps/menu/views.py"], "/apps/user_manage/filters.py": ["/apps/user_manage/models.py"], "/utils/mixins.py": ["/utils/base.py"], "/apps/user_manage/views.py": ["/utils/error.py", "/apps/user_manage/models.py", "/apps/user_manage/utils.py", "/utils/mixins.py"], "/apps/menu/views.py": ["/utils/mixins.py"], "/adminDemo/settings/__init__.py": ["/adminDemo/settings/com_settings.py", "/adminDemo/settings/dev_settings.py"], "/utils/jwt_response_payload_handler.py": ["/utils/success.py"], "/apps/common/pagination.py": ["/utils/mixins.py"], "/apps/goods/views.py": ["/utils/mixins.py"]}
|
29,239,121
|
blink07/shop_admin
|
refs/heads/main
|
/adminDemo/settings/__init__.py
|
import platform
# Layered settings: shared/common settings first, then a host-specific overlay.
from .com_settings import *
# The production box is identified by its hostname "server_pro"; every other
# machine loads the dev overrides. (Star-imports are the conventional Django
# split-settings pattern here.)
if platform.node()=="server_pro":
    from .pro_settings import *
else:
    from .dev_settings import *
|
{"/adminDemo/settings/dev_settings.py": ["/adminDemo/settings/com_settings.py"], "/apps/user_manage/serializers.py": ["/adminDemo/settings/__init__.py"], "/utils/response.py": ["/utils/error.py"], "/apps/goods/urls.py": ["/apps/goods/views.py"], "/utils/exception_handlers.py": ["/utils/error.py", "/utils/response.py"], "/apps/menu/urls.py": ["/apps/menu/views.py"], "/apps/user_manage/filters.py": ["/apps/user_manage/models.py"], "/utils/mixins.py": ["/utils/base.py"], "/apps/user_manage/views.py": ["/utils/error.py", "/apps/user_manage/models.py", "/apps/user_manage/utils.py", "/utils/mixins.py"], "/apps/menu/views.py": ["/utils/mixins.py"], "/adminDemo/settings/__init__.py": ["/adminDemo/settings/com_settings.py", "/adminDemo/settings/dev_settings.py"], "/utils/jwt_response_payload_handler.py": ["/utils/success.py"], "/apps/common/pagination.py": ["/utils/mixins.py"], "/apps/goods/views.py": ["/utils/mixins.py"]}
|
29,239,122
|
blink07/shop_admin
|
refs/heads/main
|
/apps/menu/serializers.py
|
from rest_framework.serializers import ModelSerializer, Serializer
from menu.models import Menu, Permission
class MenuSerializer2(ModelSerializer):
    """Flat (leaf) menu serializer used for nested children."""
    class Meta:
        model = Menu
        fields = "__all__"
class MenuSerializer(ModelSerializer):
    """Menu serializer that nests children through the `sub_cat` related name."""
    sub_cat = MenuSerializer2(many=True)
    # sub_cat = Menu.objects.filter(parent=)
    class Meta:
        model = Menu
        fields = "__all__"
    # (commented-out experiments retained from the original author)
    # def _validated_data(self, data):
    #
    #     # return Menu.objects.get()
    #     print("data:>>>>>>>>>>>>>>>>",data)
    #
    #     return True
    # def create(self, validated_data):
    #     # validated_data.pop("sub_cat")
    #     # parent = Menu.objects.get(id=validated_data["parent"])
    #     return Menu(**validated_data)
class PermissionSerializer3(ModelSerializer):
    """Flat (leaf) permission serializer — bottom of the nesting chain."""
    class Meta:
        model = Permission
        fields = "__all__"
class PermissionSerializer2(ModelSerializer):
    """Second-level permission serializer; nests leaves via `sub_cat`."""
    sub_cat = PermissionSerializer3(many=True)
    class Meta:
        model = Permission
        fields = "__all__"
class PermissionSerializer(ModelSerializer):
    """Top-level permission serializer; nests two further levels via `sub_cat`."""
    sub_cat = PermissionSerializer2(many=True)
    class Meta:
        model = Permission
        fields = "__all__"
|
{"/adminDemo/settings/dev_settings.py": ["/adminDemo/settings/com_settings.py"], "/apps/user_manage/serializers.py": ["/adminDemo/settings/__init__.py"], "/utils/response.py": ["/utils/error.py"], "/apps/goods/urls.py": ["/apps/goods/views.py"], "/utils/exception_handlers.py": ["/utils/error.py", "/utils/response.py"], "/apps/menu/urls.py": ["/apps/menu/views.py"], "/apps/user_manage/filters.py": ["/apps/user_manage/models.py"], "/utils/mixins.py": ["/utils/base.py"], "/apps/user_manage/views.py": ["/utils/error.py", "/apps/user_manage/models.py", "/apps/user_manage/utils.py", "/utils/mixins.py"], "/apps/menu/views.py": ["/utils/mixins.py"], "/adminDemo/settings/__init__.py": ["/adminDemo/settings/com_settings.py", "/adminDemo/settings/dev_settings.py"], "/utils/jwt_response_payload_handler.py": ["/utils/success.py"], "/apps/common/pagination.py": ["/utils/mixins.py"], "/apps/goods/views.py": ["/utils/mixins.py"]}
|
29,239,123
|
blink07/shop_admin
|
refs/heads/main
|
/utils/jwt_response_payload_handler.py
|
from utils.success import LOGIN_SUCCESS
def jwt_response_payload_handler(token, user=None, request=None):
    """Build the JSON payload returned after a successful JWT login.

    :param token: the freshly issued JWT string
    :param user: the authenticated user instance
    :param request: the originating HTTP request (unused)
    :return: dict carrying the token, basic user info and success code/message
    """
    payload = {
        "token": token,
        "user_id": user.id,
        "username": user.username,
    }
    # "department":user.department.depart_name,
    # "position":user.position.name,
    payload["code"] = LOGIN_SUCCESS.status_code
    payload["message"] = LOGIN_SUCCESS.message
    return payload
|
{"/adminDemo/settings/dev_settings.py": ["/adminDemo/settings/com_settings.py"], "/apps/user_manage/serializers.py": ["/adminDemo/settings/__init__.py"], "/utils/response.py": ["/utils/error.py"], "/apps/goods/urls.py": ["/apps/goods/views.py"], "/utils/exception_handlers.py": ["/utils/error.py", "/utils/response.py"], "/apps/menu/urls.py": ["/apps/menu/views.py"], "/apps/user_manage/filters.py": ["/apps/user_manage/models.py"], "/utils/mixins.py": ["/utils/base.py"], "/apps/user_manage/views.py": ["/utils/error.py", "/apps/user_manage/models.py", "/apps/user_manage/utils.py", "/utils/mixins.py"], "/apps/menu/views.py": ["/utils/mixins.py"], "/adminDemo/settings/__init__.py": ["/adminDemo/settings/com_settings.py", "/adminDemo/settings/dev_settings.py"], "/utils/jwt_response_payload_handler.py": ["/utils/success.py"], "/apps/common/pagination.py": ["/utils/mixins.py"], "/apps/goods/views.py": ["/utils/mixins.py"]}
|
29,239,124
|
blink07/shop_admin
|
refs/heads/main
|
/utils/error.py
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from . import base
class SUCCESS(base.OK200):
    """Generic success response."""
    message = u"操作成功~~"
class ERROR_FAULT(base.ServiceUnavailable503):
    """Server-side fault (503)."""
    message = u"服务器内部错误~~"
class ERROR_COMMON(base.InternalServerError500):
    """Unknown exception (500)."""
    message = u'未知异常~~'
class CODE_ERROR_COMMON(base.CodeError555):
    """Unknown exception reported with the project's custom 555 code."""
    message = u'未知异常~~'
class ERROR_USER_RALATION(base.UserOrPasswordError):
    """Wrong username or password.

    NOTE(review): class name misspells RELATION; kept because callers import
    it under this name.
    """
    message = u'用户名或密码错误~~'
class ERROR_SOCKET(base.UserOrPasswordError):
    """Socket communication error."""
    message = u'socket通讯异常~~'
class ERROR_AuthenticationFailed(base.Unauthorized401):
    """Not logged in or the login state has expired (401)."""
    message = u'用户未登录或登录态失效,请使用登录链接重新登录'
class ERROR_PermissionDenied(base.PermissionDenied406):
    """The user lacks permission for this feature."""
    message = u'该用户没有该权限功能'
class ERROR_ValidationError(base.ValidationError512):
    """Parameter validation failed (custom 512)."""
    message = u'参数校验失败'
class ERROR_NotFound(base.NotFound404):
    """Requested endpoint not found (404).

    Fix: the message contained a typo — "为找到" ("is found") instead of
    "未找到" ("not found").
    """
    message = u'请求接口未找到'
class ERROR_MethodNotAllowed(base.MethodNotAllowed405):
    """HTTP method not allowed on this endpoint (405)."""
    message = u'该请求未被允许'
|
{"/adminDemo/settings/dev_settings.py": ["/adminDemo/settings/com_settings.py"], "/apps/user_manage/serializers.py": ["/adminDemo/settings/__init__.py"], "/utils/response.py": ["/utils/error.py"], "/apps/goods/urls.py": ["/apps/goods/views.py"], "/utils/exception_handlers.py": ["/utils/error.py", "/utils/response.py"], "/apps/menu/urls.py": ["/apps/menu/views.py"], "/apps/user_manage/filters.py": ["/apps/user_manage/models.py"], "/utils/mixins.py": ["/utils/base.py"], "/apps/user_manage/views.py": ["/utils/error.py", "/apps/user_manage/models.py", "/apps/user_manage/utils.py", "/utils/mixins.py"], "/apps/menu/views.py": ["/utils/mixins.py"], "/adminDemo/settings/__init__.py": ["/adminDemo/settings/com_settings.py", "/adminDemo/settings/dev_settings.py"], "/utils/jwt_response_payload_handler.py": ["/utils/success.py"], "/apps/common/pagination.py": ["/utils/mixins.py"], "/apps/goods/views.py": ["/utils/mixins.py"]}
|
29,239,125
|
blink07/shop_admin
|
refs/heads/main
|
/apps/goods/migrations/0001_initial.py
|
# Generated by Django 3.0.5 on 2020-07-23 13:44
import datetime
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Initial schema for the goods app (auto-generated; do not hand-edit)."""
    initial = True
    dependencies = [
    ]
    operations = [
        migrations.CreateModel(
            name='GoodsCategory',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('cate_name', models.CharField(max_length=100, verbose_name='类别名称')),
                ('category_type', models.SmallIntegerField(choices=[(1, '一级类目'), (2, '二级类目'), (3, '三级类目')], verbose_name='商品分类级别')),
                # NOTE(review): frozen timestamp captured at makemigrations
                # time (contrast with Goods.add_time below, which stores the
                # callable datetime.datetime.now).
                ('add_time', models.DateTimeField(default=datetime.datetime(2020, 7, 23, 21, 44, 11, 607712), help_text='添加时间')),
                ('parent_category', models.ForeignKey(blank=True, help_text='父级类目', null=True, on_delete=django.db.models.deletion.CASCADE, related_name='sub_cat', to='goods.GoodsCategory')),
            ],
            options={
                'verbose_name': '商品分类表',
                'verbose_name_plural': '商品分类表',
                'db_table': 'goods_category',
            },
        ),
        migrations.CreateModel(
            name='Goods',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=100, verbose_name='商品名称')),
                ('goods_sn', models.CharField(max_length=50, verbose_name='商品唯一编号')),
                ('click_num', models.IntegerField(default=0, verbose_name='点击数')),
                ('sold_num', models.IntegerField(default=0, verbose_name='商品销售量')),
                ('fav_num', models.IntegerField(default=0, verbose_name='收藏数')),
                ('goods_num', models.IntegerField(default=0, verbose_name='库存数')),
                ('market_price', models.DecimalField(decimal_places=3, default=0, max_digits=11, verbose_name='市场价格')),
                ('shop_price', models.DecimalField(decimal_places=3, default=0, max_digits=11, verbose_name='本店价格')),
                ('descripte', models.CharField(blank=True, max_length=255, null=True, verbose_name='商品描述')),
                ('goods_front_image', models.CharField(blank=True, max_length=40, null=True, verbose_name='封面图')),
                ('is_hot', models.BooleanField(default=False, help_text='是否热销', verbose_name='是否热销')),
                ('add_time', models.DateTimeField(default=datetime.datetime.now, verbose_name='添加时间')),
                ('category', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='goods.GoodsCategory', verbose_name='商品类目')),
            ],
            options={
                'verbose_name': '商品信息',
                'verbose_name_plural': '商品信息',
            },
        ),
    ]
|
{"/adminDemo/settings/dev_settings.py": ["/adminDemo/settings/com_settings.py"], "/apps/user_manage/serializers.py": ["/adminDemo/settings/__init__.py"], "/utils/response.py": ["/utils/error.py"], "/apps/goods/urls.py": ["/apps/goods/views.py"], "/utils/exception_handlers.py": ["/utils/error.py", "/utils/response.py"], "/apps/menu/urls.py": ["/apps/menu/views.py"], "/apps/user_manage/filters.py": ["/apps/user_manage/models.py"], "/utils/mixins.py": ["/utils/base.py"], "/apps/user_manage/views.py": ["/utils/error.py", "/apps/user_manage/models.py", "/apps/user_manage/utils.py", "/utils/mixins.py"], "/apps/menu/views.py": ["/utils/mixins.py"], "/adminDemo/settings/__init__.py": ["/adminDemo/settings/com_settings.py", "/adminDemo/settings/dev_settings.py"], "/utils/jwt_response_payload_handler.py": ["/utils/success.py"], "/apps/common/pagination.py": ["/utils/mixins.py"], "/apps/goods/views.py": ["/utils/mixins.py"]}
|
29,239,126
|
blink07/shop_admin
|
refs/heads/main
|
/apps/common/pagination.py
|
from collections import OrderedDict
from rest_framework.pagination import PageNumberPagination
from utils.mixins import response_success
class StandardResultsSetPagination(PageNumberPagination):
    """
    Project-wide page-number pagination with a custom response envelope.
    """
    # page_size = 1
    page_size_query_param = 'pagesize'  # query param: items per page
    page_query_param = 'pagenum'        # query param: page number
    max_page_size = 1000                # upper bound a client may request
    def get_paginated_response(self, data):
        """
        Wrap the serialized page in the project's standard success envelope.
        :param data: the serialized page of results
        :return: response_success payload with total/next/previous/results
        """
        # OrderedDict keeps the key order stable in the JSON output.
        return response_success(data=OrderedDict([
            ('total', self.page.paginator.count),
            ('next', self.get_next_link()),
            ('previous', self.get_previous_link()),
            ('results', data)
        ]))
|
{"/adminDemo/settings/dev_settings.py": ["/adminDemo/settings/com_settings.py"], "/apps/user_manage/serializers.py": ["/adminDemo/settings/__init__.py"], "/utils/response.py": ["/utils/error.py"], "/apps/goods/urls.py": ["/apps/goods/views.py"], "/utils/exception_handlers.py": ["/utils/error.py", "/utils/response.py"], "/apps/menu/urls.py": ["/apps/menu/views.py"], "/apps/user_manage/filters.py": ["/apps/user_manage/models.py"], "/utils/mixins.py": ["/utils/base.py"], "/apps/user_manage/views.py": ["/utils/error.py", "/apps/user_manage/models.py", "/apps/user_manage/utils.py", "/utils/mixins.py"], "/apps/menu/views.py": ["/utils/mixins.py"], "/adminDemo/settings/__init__.py": ["/adminDemo/settings/com_settings.py", "/adminDemo/settings/dev_settings.py"], "/utils/jwt_response_payload_handler.py": ["/utils/success.py"], "/apps/common/pagination.py": ["/utils/mixins.py"], "/apps/goods/views.py": ["/utils/mixins.py"]}
|
29,239,127
|
blink07/shop_admin
|
refs/heads/main
|
/apps/goods/views.py
|
from django.db.models import Q
from django.shortcuts import render
# Create your views here.
from rest_framework import viewsets, filters
from common.pagination import StandardResultsSetPagination
from goods.models import GoodsCategory
from goods.serializers import GoodsCategorySerializer, GoodsCategorySerializer2, GoodsCategorySerializer1
from utils.mixins import ListModelMixin, response_success
class CategoryList(ListModelMixin, viewsets.GenericViewSet):
    """
    Goods-category list endpoint.

    Paginated requests (?pagenum=...) use GoodsCategorySerializer; requests
    without a page number fall back to GoodsCategorySerializer1.
    """
    queryset = GoodsCategory.objects.all()
    serializer_class = GoodsCategorySerializer
    pagination_class = StandardResultsSetPagination
    filter_backends = (filters.SearchFilter,)
    search_fields = ('category_type', )
    def get_serializer_class(self):
        # Presence of the page-number param decides the serializer shape.
        page = self.request.GET.get('pagenum', None)
        if page:
            return GoodsCategorySerializer
        else:
            return GoodsCategorySerializer1
    # def get_queryset(self):
    #     page = self.request.GET.get('pagenum', None)
    #     if page:
    #         return GoodsCategory.objects.all()
    #     else:
    #         return GoodsCategory.objects.filter(~Q(category_type=3))
|
{"/adminDemo/settings/dev_settings.py": ["/adminDemo/settings/com_settings.py"], "/apps/user_manage/serializers.py": ["/adminDemo/settings/__init__.py"], "/utils/response.py": ["/utils/error.py"], "/apps/goods/urls.py": ["/apps/goods/views.py"], "/utils/exception_handlers.py": ["/utils/error.py", "/utils/response.py"], "/apps/menu/urls.py": ["/apps/menu/views.py"], "/apps/user_manage/filters.py": ["/apps/user_manage/models.py"], "/utils/mixins.py": ["/utils/base.py"], "/apps/user_manage/views.py": ["/utils/error.py", "/apps/user_manage/models.py", "/apps/user_manage/utils.py", "/utils/mixins.py"], "/apps/menu/views.py": ["/utils/mixins.py"], "/adminDemo/settings/__init__.py": ["/adminDemo/settings/com_settings.py", "/adminDemo/settings/dev_settings.py"], "/utils/jwt_response_payload_handler.py": ["/utils/success.py"], "/apps/common/pagination.py": ["/utils/mixins.py"], "/apps/goods/views.py": ["/utils/mixins.py"]}
|
29,239,128
|
blink07/shop_admin
|
refs/heads/main
|
/apps/user_manage/utils.py
|
import base64
import os
import random
from io import BytesIO
from io import StringIO
from PIL import Image, ImageDraw, ImageFont
class Captcha(object):
    """Generate a 4-character image captcha and stash the answer in the session.

    NOTE(review): the code characters are drawn with the `random` module,
    which is not cryptographically secure — consider `secrets.choice` for
    security-sensitive token generation.
    """
    def __init__(self, request):
        self.django_request = request
        # NOTE(review): session_key is None until the session has been saved
        # at least once — confirm callers guarantee an existing session.
        self.session_key = request.session.session_key
        self.charsource = "qwertyuiopasdfghjklzxcvbnmQWERTYUIOPASDFGHJKLZXCVBNM1234567890"
        # image size in pixels
        self.img_width = 100
        self.img_height = 48
    def _createColor(self):
        # Random RGB colour tuple.
        red = random.randint(0,255)
        green = random.randint(0, 255)
        blue = random.randint(0, 255)
        return (red, green, blue)
    def _saveCodeSession(self, code):
        # Store the answer server-side under the session key, valid for 60 s.
        self.django_request.session[self.session_key] = code
        self.django_request.session.set_expiry(60)
    def getVerificationCode(self):
        """Render the captcha image and return it as base64-encoded GIF bytes."""
        image = Image.new("RGB", (self.img_width, self.img_height), self._createColor())
        imageDraw = ImageDraw.Draw(image, "RGB")
        ttf_cur_path = os.path.join(os.path.join(os.path.dirname(os.path.abspath(__file__)), "files"),"FZSJ-NIDBYJSW.TTF")
        imageFont = ImageFont.truetype(ttf_cur_path, 24)
        code = ""
        for i in range(4):
            ch = random.choice(self.charsource)
            imageDraw.text((5+i*20,10), ch, fill=self._createColor(), font=imageFont)  # position, glyph, colour, font
            code +=ch
        self._saveCodeSession(code)
        # Sprinkle noise pixels over the image.
        for i in range(500):
            x = random.randint(0,100)
            y = random.randint(0,48)
            imageDraw.point((x,y), fill=self._createColor())
        # Serialize the image into a base64 GIF byte string.
        buf = BytesIO()
        image.save(buf, format='gif')
        byte_data = buf.getvalue()
        data = base64.b64encode(byte_data)
        # image.show()
        return data
|
{"/adminDemo/settings/dev_settings.py": ["/adminDemo/settings/com_settings.py"], "/apps/user_manage/serializers.py": ["/adminDemo/settings/__init__.py"], "/utils/response.py": ["/utils/error.py"], "/apps/goods/urls.py": ["/apps/goods/views.py"], "/utils/exception_handlers.py": ["/utils/error.py", "/utils/response.py"], "/apps/menu/urls.py": ["/apps/menu/views.py"], "/apps/user_manage/filters.py": ["/apps/user_manage/models.py"], "/utils/mixins.py": ["/utils/base.py"], "/apps/user_manage/views.py": ["/utils/error.py", "/apps/user_manage/models.py", "/apps/user_manage/utils.py", "/utils/mixins.py"], "/apps/menu/views.py": ["/utils/mixins.py"], "/adminDemo/settings/__init__.py": ["/adminDemo/settings/com_settings.py", "/adminDemo/settings/dev_settings.py"], "/utils/jwt_response_payload_handler.py": ["/utils/success.py"], "/apps/common/pagination.py": ["/utils/mixins.py"], "/apps/goods/views.py": ["/utils/mixins.py"]}
|
29,239,907
|
DuploMinh/Assignment3
|
refs/heads/master
|
/main.py
|
import status
import astro_logging
import astro_trivia
def op_a():
    # Menu option A: open the astronaut logging sub-menu.
    astro_logging.logging()
def op_b():
    # Menu option B: show the live ship status display.
    status.status()
def op_c():
    # Menu option C: start a trivia round.
    astro_trivia.get_questions()
def op_d():
    # Menu option D: terminate the program.
    quit()
def get_options():
    """Prompt until the user picks a valid menu option, then run it.

    Dispatches via a table of option handlers (op_a..op_d).
    """
    dispatch = {'a': op_a, 'b': op_b, 'c': op_c, 'd': op_d}
    while True:
        option = input(
            "What would you like to do? \n\tA. Access logging\n\tB. View current status.\n\tC. Play a game of "
            "Trivia\n\tD. Quit\nYour choice: ").strip().lower()
        if option in dispatch:
            break
        print("Invalid choice!!! Try Again~~")
    # BUG FIX: the original fell back to the string 'null' in .get(), which
    # would have raised TypeError if ever called; dispatch directly instead.
    dispatch[option]()
if __name__ == "__main__":
    # Script entry: loop the menu until the user declines to continue.
    while True:
        get_options()
        if input("Do you want to do any thing else? (Y/N)\nYour choice: ").strip().lower() == "y":
            continue
        else:
            break
|
{"/main.py": ["/status.py", "/astro_logging.py", "/astro_trivia.py"], "/astro_logging.py": ["/db_connection.py"]}
|
29,239,908
|
DuploMinh/Assignment3
|
refs/heads/master
|
/status.py
|
import time
from datetime import datetime
import os
def time_traveled():
    """Return seconds elapsed since the launch timestamp stored in time.txt.

    Reads the first line of time.txt (a POSIX timestamp written by time_init)
    and subtracts it from the current wall-clock time.

    :return: elapsed time in seconds (float)
    """
    # FIX: use a context manager so the file is closed even on error.
    with open("time.txt", "r") as outfile:
        init_time = outfile.readlines()[0]
    # Convert the stored timestamp back to a datetime.
    init_time = datetime.fromtimestamp(float(init_time))
    cur_time = datetime.strptime(time.ctime(), "%a %b %d %H:%M:%S %Y")
    # Interval between launch and now, in seconds.
    time_passed = (cur_time - init_time).total_seconds()
    return time_passed
def time_init():
    """Append the current launch timestamp (POSIX seconds) to time.txt."""
    # Current time in datetime form...
    init_time = datetime.strptime(time.ctime(), "%a %b %d %H:%M:%S %Y")
    # ...converted to a timestamp for storage in the text file.
    time_stamp = time.mktime(init_time.timetuple())
    # FIX: context manager guarantees the file is flushed and closed.
    with open("time.txt", "a") as infile:
        infile.write(str(time_stamp) + "\n")
    return
def clear_screen():
    """Clear the terminal ('cls' on Windows, 'clear' elsewhere).

    FIX: the original hard-coded the Windows-only 'cls' command, which is a
    no-op (and prints an error) on POSIX terminals.
    """
    os.system('cls' if os.name == 'nt' else 'clear')
def status():
    """Ship Status: record a launch timestamp, then redraw a live telemetry
    table once per second until Ctrl-C.

    All figures are derived from the elapsed time since first launch using the
    fixed velocity / distance / burn-rate constants below.
    """
    time_init() # Store time log each time the program runs
    time_passed = time_traveled() # Important for data initialized
    """Initialize Data"""
    v = 39500 # km/h
    total_distance = 305000000 # km
    fuel_level = 1000000#liters
    fuel_burn_rate = 0.01 #liter/km
    ship_health = 0 #percentage
    # time_passed is in seconds; /3600 converts to hours to match v (km/h).
    distance_from_e = v * (time_passed / 3600)
    distance_to_m = total_distance - distance_from_e
    arrival_time = distance_to_m / v
    time_travel = distance_from_e / v
    fuel_level = fuel_level - (distance_from_e * fuel_burn_rate)
    crew_members_health = 0 # percentage
    """Display Data"""
    try:
        # NOTE(review): distance_to_m is a float, so the != 0 terminating
        # condition is effectively "run until interrupted".
        while distance_to_m != 0:
            clear_screen()
            time_passed += 1 # Constantly display data every second
            # Distance From Earth, updating every second
            distance_from_e = v * (time_passed / 3600)
            # Distance To Mars, updating every second
            distance_to_m = total_distance - distance_from_e
            # Arrival Time Calculation
            arrival_time = distance_to_m / v
            # Time Traveled Calculation
            time_travel = distance_from_e / v
            # Fuel Level Calculation (burn for the distance covered this tick)
            fuel_level = fuel_level - ((distance_from_e - (v * (time_passed - 1) / 3600)) * fuel_burn_rate)
            # Ship Health is deteriorated by 1% every hour
            ship_health = 100 - int(distance_from_e / 10000)
            # Crew Member Health
            crew_members_health = 100 - int(distance_from_e / 50000)
            # Print Data
            print("Local Time:          ", time.ctime())
            print("Velocity             |" + str(v) + " Kilometer/Hour")
            print("_____________________|____________________________")
            print("Total_distance       |" + str(total_distance) + " Kilometers")
            print("_____________________|____________________________")
            print("Fuel Burn Rate       |" + str(fuel_burn_rate) + " Liter/Kilometer")
            print("_____________________|____________________________")
            print("Distance From Earth  |" + str('%.2f' % distance_from_e) + " Kilometers")
            print("_____________________|____________________________")
            print("Distance To Mars     |" + str('%.2f' % distance_to_m) + " Kilometers")
            print("_____________________|____________________________")
            print("Time Of Arrival      |" + str('%.2f' % arrival_time) + " Hours")
            print("_____________________|____________________________")
            print("Time Traveled        |" + str('%.2f' % time_travel) + " Hours")
            print("_____________________|____________________________")
            print("Fuel Level           |" + str('%.2f' % fuel_level) + " Liters")
            print("_____________________|____________________________")
            print("Ship Health Status   |" + str(ship_health) + " %")
            print("_____________________|____________________________")
            print("Crew Health Status   |" + str(crew_members_health) + " %")
            print("_____________________|____________________________")
            print("Stop with Ctrl-C")
            time.sleep(1)
    except KeyboardInterrupt:
        # Ctrl-C simply ends the display loop and returns to the caller.
        pass
if __name__ == '__main__':
    status()
|
{"/main.py": ["/status.py", "/astro_logging.py", "/astro_trivia.py"], "/astro_logging.py": ["/db_connection.py"]}
|
29,239,909
|
DuploMinh/Assignment3
|
refs/heads/master
|
/astro_trivia.py
|
import requests
import base64
import random
def decode_b64(entry):
    """
    This function take in base64 code in ascii format and convert it into utf-8 string
    :param entry:
    :return: utf-8 string
    """
    return base64.b64decode(entry.encode('ascii')).decode('utf-8')
def get_questions():
    """
    This function get trivia questions from opentdb.com api and generate an interactive quiz based on the questions
    :return:
    """
    num = 0
    # Keep prompting until the user enters a parseable integer.
    # NOTE(review): the "(Max 10)" limit in the prompt is not enforced.
    while True:
        try:
            num = int(input(
                "Welcome to Trivia, how many questions would you like to play?(Max 10)\nQuestion Numbers: ").strip())
        except ValueError:
            print("Sorry, I didn't understand that. Please enter a valid number~~")
            continue
        break
    # The API returns all fields base64-encoded (encode=base64).
    request = requests.get('https://opentdb.com/api.php?amount={}&type=multiple&encode=base64'.format(num))
    result = request.json()['results']
    correct_count = 0
    for i in range(num):
        questions = result[i]
        category = decode_b64(questions['category'])
        difficulty = decode_b64(questions['difficulty'])
        question = questions['question']
        correct_answer = questions['correct_answer']
        answers = questions['incorrect_answers'] + [correct_answer]
        # Draw the four answers out in random order (manual shuffle).
        a = random.choice(answers)
        answers.remove(a)
        b = random.choice(answers)
        answers.remove(b)
        c = random.choice(answers)
        answers.remove(c)
        d = random.choice(answers)
        answer_dict = {'a': a, 'b': b, 'c': c, 'd': d}
        print("Question {}. This is a/an {} question and the category is: {}".format(i + 1, difficulty, category))
        print(decode_b64(question))
        print("A. " + decode_b64(a))
        print("B. " + decode_b64(b))
        print("C. " + decode_b64(c))
        print("D. " + decode_b64(d))
        while True:
            user_answer = input("Your Answer: ").lower().strip()
            if user_answer not in ['a', 'b', 'c', 'd']:
                print("Invalid input.")
                continue
            else:
                break
        # Both sides are still base64-encoded, so comparing them directly is valid.
        if answer_dict[user_answer] == correct_answer:
            print("Congratulation, you are correct!")
            correct_count += 1
        else:
            print("Incorrect. The correct answer is {}.".format(decode_b64(correct_answer)))
    print("Congrats! You got {} out of {} questions correct.".format(correct_count, num))
    # Replay by recursion; each round adds a stack frame until the user stops.
    if input("Do you want to play again?(Y/N): ").strip().lower() == 'y':
        get_questions()
if __name__ == "__main__":
    get_questions()
    quit()
|
{"/main.py": ["/status.py", "/astro_logging.py", "/astro_trivia.py"], "/astro_logging.py": ["/db_connection.py"]}
|
29,239,910
|
DuploMinh/Assignment3
|
refs/heads/master
|
/astro_logging.py
|
import db_connection
# Module-level SQLite connection shared by logging(); opened at import time.
db_file = r"db.sqlite3"
conn = db_connection.create_connection(db_file)
def logging():
    """
    Initiate logging process: prompt for create-or-view, then run it
    against the module-level database connection.
    :return:
    """
    while True:
        option = input(
            "What do you want to do?\n\tA. Create new log\n\tB. View previous log?\nYour choice: ").lower().strip()
        if option in ['a', 'b']:
            break
        else:
            print("Invalid option!! Try Again~~")
            continue
    # BUG FIX: the original dispatched via .get(option, 'null'); the string
    # fallback was not callable and the nested table obscured the flow.
    if option == 'a':
        print("The log number is: {}".format(db_connection.new_log(conn)))
    else:
        db_connection.get_log(conn)
if __name__ == '__main__':
    logging()
|
{"/main.py": ["/status.py", "/astro_logging.py", "/astro_trivia.py"], "/astro_logging.py": ["/db_connection.py"]}
|
29,239,911
|
DuploMinh/Assignment3
|
refs/heads/master
|
/db_connection.py
|
import sqlite3
from sqlite3 import Error
def create_connection(db_file):
    """ create a database connection to the SQLite database
        specified by the db_file
    :param db_file: database file
    :return: Connection object or None
    """
    try:
        return sqlite3.connect(db_file)
    except Error as e:
        print(e)
        return None
def insert_log(conn, message):
    """
    Create a new log
    :param conn: open sqlite3 connection
    :param message: (astronaut_name, content) tuple
    :return: rowid of the inserted log entry
    """
    sql = ''' INSERT INTO log(astronaut_name, content) VALUES (?,?)'''
    cursor = conn.cursor()
    cursor.execute(sql, message)
    conn.commit()
    return cursor.lastrowid
def select_log(conn, amount):
    """
    Select the newest logs, most recent first.
    :param conn: open sqlite3 connection
    :param amount: row limit; int or numeric string (callers pass raw input())
    :return: list of matching rows
    """
    sql = '''SELECT * FROM log
             ORDER BY timestamp desc
             LIMIT ? '''
    cur = conn.cursor()
    # BUG FIX: execute() expects a parameter *sequence*; the original passed
    # the raw string, so sqlite3 treated each character as a separate
    # parameter and any multi-digit amount raised ProgrammingError.
    cur.execute(sql, (int(amount),))
    result = cur.fetchall()
    return result
def new_log(conn):
    """
    Insert a new log into a SQLite database
    :param conn: Connection object
    :return:
    """
    author = input("Who is making this log?\nInput: ")
    body = input("What is the content?\nInput: ")
    row_id = insert_log(conn, (author, body))
    print(row_id)
def get_log(conn):
    """
    Retrieve logs from SQLite database and print them oldest-last.
    :param conn: connection object
    :return:
    """
    requested = input("How many logs do you want to retrieve?\nInput: ")
    for entry in select_log(conn, requested):
        # Row layout: (id, astronaut_name, content, timestamp)
        print("Log number {} by {} on {}: {}".format(entry[0], entry[1], entry[3], entry[2]))
|
{"/main.py": ["/status.py", "/astro_logging.py", "/astro_trivia.py"], "/astro_logging.py": ["/db_connection.py"]}
|
29,334,274
|
EnigmaGun/anki-videodownloader
|
refs/heads/main
|
/__init__.py
|
from aqt import mw
# import all of the Qt GUI library
from aqt.qt import *
from .downloader import VideoDownloader
class SettingsDialog(QDialog):
    """Minimal confirm dialog: OK starts the video download, Cancel aborts."""
    def __init__(self, *args, **kwargs):
        super(SettingsDialog, self).__init__(*args, **kwargs)
        self.setWindowTitle("Video Downloader")
        # Standard OK/Cancel button row; OK triggers the download.
        buttons = QDialogButtonBox.Ok | QDialogButtonBox.Cancel
        self.button_box = QDialogButtonBox(buttons)
        self.button_box.accepted.connect(self._start)
        self.button_box.rejected.connect(self.reject)
        self.layout = QVBoxLayout()
        #description = QLabel('Video Downloader:')
        #self.layout.addWidget(description)
        #self.layout.addWidget(QLabel('All urls are gathered and downloaded'))
        #output_path_label = QLabel('Output path')
        #self.layout.addWidget(output_path_label)
        #self._output_path = QLineEdit(self)
        #self._output_path.setText("c:/temp/")
        #self.layout.addWidget(self._output_path)
        self.layout.addWidget(self.button_box)
        self.setLayout(self.layout)
    def _start(self):
        # Runs synchronously on the Qt event thread before closing the dialog.
        downloader = VideoDownloader()
        downloader.start()
        self.accept()
class AddOnActivator():
    """Registers the 'Video Downloader' entry in Anki's Tools menu."""
    def __init__(self):
        action = QAction(mw)
        action.setText("Video Downloader")
        mw.form.menuTools.addAction(action)
        def start_addon():
            # Menu handler: show the dialog; exec_() is truthy on accept.
            dlg = SettingsDialog()
            if dlg.exec_():
                print("Success!")
            else:
                print("Cancel!")
        action.triggered.connect(start_addon)
# Instantiate at import time so Anki picks up the menu entry on add-on load.
AddOnActivator()
|
{"/__init__.py": ["/downloader.py"], "/downloader.py": ["/logger.py"]}
|
29,334,275
|
EnigmaGun/anki-videodownloader
|
refs/heads/main
|
/logger.py
|
import os
class Logger():
    """Minimal file logger shared via class-level state.

    Call init() once to open the log file, then info/warn/error to append
    lines, and close() when done.
    """
    @staticmethod
    def init(filename='log.txt'):
        """Open (create/truncate) <add-on dir>/logs/<filename> for writing."""
        LOG_PATH = os.path.dirname(os.path.abspath(__file__)) + "/logs"
        if not os.path.exists(LOG_PATH):
            os.makedirs(LOG_PATH)
        # BUG FIX: the filename argument was ignored — a mangled literal path
        # was hard-coded, so every caller wrote to the same broken file name.
        Logger.logger = open(f"{LOG_PATH}/{filename}", "w+", encoding="utf-8")
    @staticmethod
    def info(message):
        Logger.logger.write(f'[INFO] {message}\n')
    @staticmethod
    def warn(message):
        Logger.logger.write(f'[WARN] {message}\n')
    @staticmethod
    def error(message):
        Logger.logger.write(f'[ERROR] {message}\n')
    @staticmethod
    def close():
        Logger.logger.close()
|
{"/__init__.py": ["/downloader.py"], "/downloader.py": ["/logger.py"]}
|
29,334,276
|
EnigmaGun/anki-videodownloader
|
refs/heads/main
|
/youtube-dl.py
|
from __future__ import unicode_literals
import os
import youtube_dl
import getopt
import sys
'''
This is basically just a wrapper for youtube-dl as it is quite hard to
use custom packages with Anki.
'''
# NOTE(review): unused, machine-specific constant with mixed path
# separators — confirm before relying on it anywhere.
PATH = "C:/Users/berkal.XATRONIC/AppData\Roaming/Anki2/addons21/anki-videodownloader"
class MyLogger(object):
    """Logger handed to youtube-dl; echoes every level to stdout."""
    def _emit(self, message):
        print(message)
    # All three youtube-dl log levels share the same behavior.
    debug = _emit
    warning = _emit
    error = _emit
def my_hook(data):
    """Progress hook for youtube-dl: announce when a download finishes."""
    finished = data['status'] == 'finished'
    if finished:
        print('Done downloading, now converting ...')
    # print(f'filename: {data['filename']})
class UrlListReader():
    """Reads a newline-separated url list file."""
    def read(self, filename):
        """Return the urls in `filename`, one per line.

        FIX: the original leaked the file handle, kept an unused counter,
        and returned each url with its trailing newline attached; urls are
        now stripped and blank lines skipped.
        """
        with open(filename, 'r') as file:
            return [line.strip() for line in file if line.strip()]
"""
--format (bestvideo[ext=mp4][height<=720][fps<30]/bestvideo[ext=mp4][height<=720]/bestvideo[ext=mp4][height>=1080]/bestvideo)+(bestaudio[ext=m4a]/bestaudio)/best
--download-archive archive.txt
--output %(playlist_uploader)s-%(playlist_title)s/%(autonumber)s.%(title)s.%(id)s.%(ext)s
--merge-output-format mp4
--restrict-filenames
--ignore-errors
--write-description
https://github.com/ytdl-org/youtube-dl/blob/master/youtube_dl/YoutubeDL.py#L128-L278
"""
class VideoDownloader():
    """Wraps youtube_dl: downloads every url in a url-list file into
    ./videos/<deck_id>, tracking completed ids in ./data/archive_<deck_id>.txt."""
    def __init__(self):
        pass
    def start(self, urlfile, deck_id):
        """Download all urls listed in `urlfile` for the given deck.

        :param urlfile: path to a newline-separated url list
        :param deck_id: deck identifier used for output/archive paths
        """
        urls = UrlListReader().read(urlfile)
        print(f'Found {len(urls)} urls')
        out_directory = f'./videos/{deck_id}'
        if not os.path.exists(out_directory):
            os.makedirs(out_directory)
        # Options per youtube-dl's YoutubeDL documentation (see module header).
        ydl_opts = {
            'format': '(bestvideo[ext=mp4][height<=720][fps<30]/bestvideo[ext=mp4][height<=720]/bestvideo[ext=mp4][height>=1080]/bestvideo)+(bestaudio[ext=m4a]/bestaudio)/best',
            'merge_output_format': 'mp4',
            # Archive file lets re-runs skip already-downloaded videos.
            'download_archive': f'./data/archive_{deck_id}.txt',
            'logger': MyLogger(),
            'progress_hooks': [my_hook],
            'ignoreerrors': True,
            'source_address':'0.0.0.0',
            #'simulate': True,
            'outtmpl': f'{out_directory}/%(uploader)s-%(title)s-%(upload_date)s-%(id)s.%(ext)s',
            'writedescription': True,
            'restrictfilenames': True,
            'quiet': True,
            'verbose': False,
            'writesubtitles': True,
            'subtitleslangs': ['de','en'],
            'writeautomaticsub': True,
            'writethumbnail': True
        }
        with youtube_dl.YoutubeDL(ydl_opts) as ydl:
            ydl.download(urls)
def main():
    """CLI entry: argv[1] = url list file, argv[2] = deck id."""
    full_cmd_arguments = sys.argv
    args = full_cmd_arguments[1:]
    downloader = VideoDownloader()
    downloader.start(urlfile=args[0],
                     deck_id=args[1]
                     )
if __name__ == '__main__':
    main()
|
{"/__init__.py": ["/downloader.py"], "/downloader.py": ["/logger.py"]}
|
29,334,277
|
EnigmaGun/anki-videodownloader
|
refs/heads/main
|
/downloader.py
|
from aqt import mw
# import the "show info" tool from utils.py
import os
import re
import subprocess
from bs4 import BeautifulSoup
from .logger import Logger
class VideoDownloader():
    """Orchestrates the per-deck pipeline: read the download archive, collect
    new youtube urls from the deck's notes, write them to a url file, then
    launch youtube-dl.py in a subprocess to download them."""
    def __init__(self):
        pass
    def start(self):
        """Run the full url-collection + download pipeline for every deck."""
        Logger.init()
        working_directory = os.path.dirname(os.path.abspath(__file__))
        if not os.path.exists(f'{working_directory}/data'):
            os.makedirs(f'{working_directory}/data')
        #decks = ['Magie', 'Tricksqueue', 'Import']
        #decks = ['Magie']
        #decks = ['Import']
        # NOTE(review): the literal list below is dead — it is immediately
        # overwritten with all deck names from the collection.
        deck_names = ['Magic']
        deck_names = sorted(mw.col.decks.allNames())
        for deck_name in deck_names:
            deck_id = deck_name.lower()
            urls_file = f'data/urls_{deck_id}.txt'
            # read archive of already-downloaded video ids
            urls_in_archive = ArchiveReader().read(
                working_directory=working_directory,
                deck_name=deck_name)
            archived_urls = set()
            for url in urls_in_archive:
                cleaned = url.replace("\n", "")
                Logger.info(f'-->archive -->{cleaned}<--')
                archived_urls.add(cleaned)
            urls = self._fetch_urls_from_deck(deck_name, archived_urls)
            Logger.info(f'Found {len(urls)} urls in deck {deck_name}')
            self._save_urls(
                filename=f'{working_directory}/{urls_file}', urls=urls)
            Logger.info(f'Wrote urls to {working_directory}/{urls_file}')
            self._download_videos(
                working_directory=working_directory, filename=urls_file, deck_id=deck_id)
        Logger.close()
    def _fetch_urls_from_deck(self, deck_name, archived_urls):
        # Collect youtube urls present in the deck but not yet archived.
        return UrlFinder().find(deck_name,archived_urls)
    def _save_urls(self, filename, urls):
        UrlListWriter().write(filename, urls)
    def _download_videos(self, working_directory, filename, deck_id):
        # BUG FIX: the log message contained a mangled literal instead of
        # interpolating the filename being downloaded.
        Logger.info(f'filename {filename} deck_id {deck_id}')
        subprocess.run(['python',
                        './youtube-dl.py',
                        filename, deck_id],
                       cwd=working_directory)
class UrlListWriter():
    """Writes urls to a file, one per line."""
    def write(self, filename, urls):
        with open(filename, "w+") as writer:
            writer.writelines(f'{url}\n' for url in urls)
# https: // www.reddit.com/r/Anki/comments/a6u2he/adding_background_image/
class ArchiveReader():
    """Reads youtube-dl's download archive for a deck.

    Archive lines look like "youtube <video_id>"; the returned ids keep
    their trailing newline (callers strip it themselves).
    """
    def read(self, working_directory, deck_name):
        """Return the archived video ids for `deck_name`, or [] if no archive."""
        urls = []
        deck_id = deck_name.lower()
        filename = f'{working_directory}/data/archive_{deck_id}.txt'
        if os.path.exists(filename):
            # FIX: context manager — the original never closed the handle.
            with open(filename, 'r') as file:
                for line in file:
                    youtube_id = line.split(' ')[1]
                    print(youtube_id)
                    urls.append(youtube_id)
        else:
            Logger.info(f'No archive file for {deck_id} found.')
        return urls
class UrlsList():
    """Accumulates unique youtu.be urls, skipping already-archived video ids.

    NOTE(review): currently unused — UrlFinder has this logic inlined.
    """
    def __init__(self, archived_urls):
        # archived_urls holds bare video ids (from ArchiveReader).
        self._all_urls = set()  # []
        self._archived_urls = archived_urls
    def add_youtube_id(self, id, items):
        """Add https://youtu.be/<id> unless archived or already collected."""
        youtube_url = f'https://youtu.be/{id}'
        # BUG FIX: the original tested the undefined name `url` (NameError);
        # the archive stores bare video ids, so test the id itself —
        # mirroring should_download() in UrlFinder.find.
        if id in self._archived_urls:
            Logger.info(f'Skipping {youtube_url}, already downloaded')
        elif youtube_url in self._all_urls:
            Logger.info(f'Skipping {youtube_url}, duplicate entry')
        else:
            self._all_urls.add(youtube_url)
            Logger.info(f'Added {youtube_url} NoteId {items[0][1]}')
    def get_urls(self):
        return self._all_urls
class UrlFinder():
    """Scans an Anki deck's notes for youtube video ids and returns the set
    of youtu.be urls that are neither archived nor already collected."""
    def __init__(self):
        pass
    def find(self, deck_name, archived_urls):
        """Return new youtu.be urls found in `deck_name`'s notes.

        :param deck_name: Anki deck name used in the note search filter
        :param archived_urls: set of bare video ids already downloaded
        """
        new_video_urls = set()  # []
        #all_urls_list = UrlsList(archived_urls)
        deckfilter = ""
        deckfilter = f"deck:{deck_name}"
        note_ids = mw.col.findNotes(deckfilter)
        def should_download(video_id):
            # Closure over archived_urls / new_video_urls: skip known ids.
            if video_id in archived_urls:
                Logger.info(f'Skipping {video_id}, already downloaded')
                return False
            elif video_id in new_video_urls:
                # NOTE(review): new_video_urls stores full urls, so this
                # membership test on a bare id can never match — verify.
                Logger.info(f'Skipping {video_id}, duplicate entry')
                return False
            return True
        for (index, note_id) in enumerate(note_ids):
            note = mw.col.getNote(note_id)
            items = note.items()
            for item in items:
                if item[0] == 'YoutubeUrls':
                    # Dedicated field: "<id>;...|<id>;..." entries.
                    urls = self._extract_urls_from_youtubeurls(item[1])
                    if urls:
                        for url in urls:
                            video_id = url.split('?')[0]  # strip url parameters
                            #all_urls_list.add_youtube_id(id, items)
                            if should_download(video_id):
                                youtube_url = f'https://youtu.be/{video_id}'
                                new_video_urls.add(youtube_url)
                                Logger.info(f'Added {youtube_url} NoteId {items[0][1]}')
                            '''
                            youtube_url = f'https://youtu.be/{video_id}'
                            if video_id in archived_urls:
                                Logger.info(f'Skipping {youtube_url}, already downloaded')
                            elif youtube_url in all_urls:
                                Logger.info(f'Skipping {youtube_url}, duplicate entry')
                            else:
                                all_urls.add(youtube_url)
                                Logger.info(f'Added {youtube_url} NoteId {items[0][1]}')
                            '''
                        # all_urls.append()
                else:
                    # Any other field: regex-scan its HTML for youtu.be links.
                    tag_content = item[1]
                    youtube_ids = self._get_youtube_ids_from_string(tag_content)
                    for video_id in youtube_ids:
                        #all_urls_list.add_youtube_id(id, items)
                        youtube_url = f'https://youtu.be/{video_id}'
                        if video_id in archived_urls:
                            Logger.info(f'Skipping {youtube_url}, already downloaded')
                        elif youtube_url in new_video_urls:
                            Logger.info(f'Skipping {youtube_url}, duplicate entry')
                        else:
                            new_video_urls.add(youtube_url)
                            Logger.info(f'Added {youtube_url} NoteId {items[0][1]}')
                        '''
                        soup = BeautifulSoup(tag_content, 'html.parser')
                        for link in soup.find_all('a'):
                            url = link.get('href')
                            if url not in all_urls:
                                all_urls.add(url)
                                # all_urls.append(link.get('href'))
                        '''
        return new_video_urls #all_urls_list.get_urls() #
    def _extract_urls_from_youtubeurls(self, content):
        """Parse the 'YoutubeUrls' field: strip tags, split '|' entries,
        take the first ';'-separated value of each. Returns None if empty."""
        if content:
            urls = []
            cleanr = re.compile('<.*?>')
            cleaned = re.sub(cleanr, '', content)
            cleaned = cleaned.replace("\n", "")
            entries = cleaned.split('|')
            for entry in entries:
                values = entry.split(';')
                urls.append(values[0])
            return urls
        return None
    def _get_youtube_ids_from_string(self, string):
        """Return the video ids of any https://youtu.be/ links in `string`."""
        urls = self._get_urls_from_string(string)
        youtube_ids = []
        for url in urls:
            if url.startswith('https://youtu.be/'):
                youtube_ids.append(url.replace('https://youtu.be/',''))
        return youtube_ids
    def _get_urls_from_string(self, string):
        # findall() has been used
        # with valid conditions for urls in string
        regex = r"(?i)\b((?:https?://|www\d{0,3}[.]|[a-z0-9.\-]+[.][a-z]{2,4}/)(?:[^\s()<>]+|\(([^\s()<>]+|(\([^\s()<>]+\)))*\))+(?:\(([^\s()<>]+|(\([^\s()<>]+\)))*\)|[^\s`!()\[\]{};:'\".,<>?«»“”‘’]))"
        urls = re.findall(regex,string)
        return [x[0] for x in urls]
# NOTE(review): unfinished draft kept as an inert module-level string; dead code.
'''
class UrlListChecker():
    def check(deck_id):
        # read archive file
        # read url list
        urls_file = f'data/urls_{deck_id}.txt'
        urls = UrlListReader().read(urls_file)
        urls_set = set
        # put all youtube ids in a set
'''
|
{"/__init__.py": ["/downloader.py"], "/downloader.py": ["/logger.py"]}
|
29,354,427
|
zachang/jirgin
|
refs/heads/master
|
/bookings/authentication/views.py
|
from django.contrib.admin.utils import lookup_field
from django.contrib.auth.models import User
from django.shortcuts import render
from rest_framework import viewsets
from rest_framework import mixins
from rest_framework.decorators import api_view
from rest_framework.permissions import IsAuthenticatedOrReadOnly
from rest_framework.response import Response
from rest_framework.status import (HTTP_200_OK, HTTP_201_CREATED, HTTP_400_BAD_REQUEST,
HTTP_404_NOT_FOUND)
from .permissions import IsOwnerOrReadOnly
from .serializers import UserSerializer
@api_view(['GET'])
def home(request):
    """API root: greet consumers of the booking service."""
    payload = {
        "message": "Welcome to jirgin, your one stop flight booking app"
    }
    return Response(payload)
class UserListViewSet(mixins.ListModelMixin,
                      mixins.CreateModelMixin,
                      viewsets.GenericViewSet):
    """
    API viewset that allows users to create and view profile
    (list + create only; no retrieve/update/destroy mixins).
    """
    # Newest registrations first.
    queryset = User.objects.all().order_by('-date_joined')
    serializer_class = UserSerializer
|
{"/book/urls.py": ["/book/views.py"], "/authentication/views.py": ["/authentication/permissions.py", "/authentication/serializers.py", "/authentication/helpers.py", "/authentication/models.py"], "/flight/tests/test_flight_model.py": ["/flight/models.py"], "/book/models.py": ["/flight/models.py"], "/flight/urls.py": ["/flight/views.py"], "/book/tests/test_book_model.py": ["/book/models.py", "/flight/models.py"], "/authentication/urls.py": ["/authentication/views.py"], "/flight/views.py": ["/flight/serializers.py", "/flight/models.py", "/flight/helpers.py"], "/bookings/urls.py": ["/authentication/views.py"], "/book/tests/test_book.py": ["/book/models.py", "/flight/models.py"], "/book/views.py": ["/flight/models.py", "/book/models.py", "/book/serializers.py", "/book/helpers/flight_reservation_email.py"], "/book/helpers/email_reminder_cron.py": ["/book/models.py"], "/flight/tests/test_flight.py": ["/flight/models.py", "/authentication/helpers.py"], "/flight/serializers.py": ["/flight/models.py"], "/authentication/tests/test_user_model.py": ["/authentication/models.py"], "/authentication/tests/test_user_list_create.py": ["/authentication/helpers.py"], "/flight/admin.py": ["/flight/models.py"], "/authentication/serializers.py": ["/authentication/models.py", "/authentication/helpers.py"], "/book/serializers.py": ["/book/models.py"], "/bookings/authentication/views.py": ["/bookings/authentication/serializers.py"], "/bookings/authentication/urls.py": ["/bookings/authentication/views.py"], "/bookings/bookings/urls.py": ["/authentication/views.py"]}
|
29,354,428
|
zachang/jirgin
|
refs/heads/master
|
/bookings/authentication/urls.py
|
from django.urls import path, include
from rest_framework import routers
from rest_framework_jwt.views import obtain_jwt_token
from .views import UserListViewSet
# DRF router exposes the user list/create endpoints under /users.
router = routers.DefaultRouter()
router.register(r'^users', UserListViewSet, basename='users')
app_name = 'authentication'
urlpatterns = [
    path('', include(router.urls)),
    # JWT token endpoint for username/password login.
    path('login/', obtain_jwt_token),
]
|
{"/book/urls.py": ["/book/views.py"], "/authentication/views.py": ["/authentication/permissions.py", "/authentication/serializers.py", "/authentication/helpers.py", "/authentication/models.py"], "/flight/tests/test_flight_model.py": ["/flight/models.py"], "/book/models.py": ["/flight/models.py"], "/flight/urls.py": ["/flight/views.py"], "/book/tests/test_book_model.py": ["/book/models.py", "/flight/models.py"], "/authentication/urls.py": ["/authentication/views.py"], "/flight/views.py": ["/flight/serializers.py", "/flight/models.py", "/flight/helpers.py"], "/bookings/urls.py": ["/authentication/views.py"], "/book/tests/test_book.py": ["/book/models.py", "/flight/models.py"], "/book/views.py": ["/flight/models.py", "/book/models.py", "/book/serializers.py", "/book/helpers/flight_reservation_email.py"], "/book/helpers/email_reminder_cron.py": ["/book/models.py"], "/flight/tests/test_flight.py": ["/flight/models.py", "/authentication/helpers.py"], "/flight/serializers.py": ["/flight/models.py"], "/authentication/tests/test_user_model.py": ["/authentication/models.py"], "/authentication/tests/test_user_list_create.py": ["/authentication/helpers.py"], "/flight/admin.py": ["/flight/models.py"], "/authentication/serializers.py": ["/authentication/models.py", "/authentication/helpers.py"], "/book/serializers.py": ["/book/models.py"], "/bookings/authentication/views.py": ["/bookings/authentication/serializers.py"], "/bookings/authentication/urls.py": ["/bookings/authentication/views.py"], "/bookings/bookings/urls.py": ["/authentication/views.py"]}
|
29,354,429
|
zachang/jirgin
|
refs/heads/master
|
/bookings/authentication/serializers.py
|
from rest_framework import serializers
from rest_framework_jwt.settings import api_settings
from django.contrib.auth.models import User
from .models import UserProfile
class UserProfileSerializer(serializers.ModelSerializer):
    """Serializer for UserProfile; currently exposes only the id."""
    class Meta:
        model = UserProfile
        fields = ('id',)
class UserSerializer(serializers.ModelSerializer):
    """A serializer for Admin profile object with jwt rendered"""
    # Declared explicitly so email is validated/required on input.
    email = serializers.EmailField()
    class Meta:
        model = User
        fields = ('id', 'first_name', 'last_name','username', 'password', 'email')
        extra_kwargs = {
            # Password is accepted on create but never serialized back out.
            'password': {'write_only': True, 'min_length': 6},
            'username': {'min_length': 2},
        }
    def create(self, validated_data):
        """Create a User with a properly hashed password (set_password)."""
        user = User(
            first_name=validated_data['first_name'],
            last_name=validated_data['last_name'],
            username=validated_data['username'],
            email=validated_data['email'],
        )
        user.set_password(validated_data['password'])
        user.save()
        return user
|
{"/book/urls.py": ["/book/views.py"], "/authentication/views.py": ["/authentication/permissions.py", "/authentication/serializers.py", "/authentication/helpers.py", "/authentication/models.py"], "/flight/tests/test_flight_model.py": ["/flight/models.py"], "/book/models.py": ["/flight/models.py"], "/flight/urls.py": ["/flight/views.py"], "/book/tests/test_book_model.py": ["/book/models.py", "/flight/models.py"], "/authentication/urls.py": ["/authentication/views.py"], "/flight/views.py": ["/flight/serializers.py", "/flight/models.py", "/flight/helpers.py"], "/bookings/urls.py": ["/authentication/views.py"], "/book/tests/test_book.py": ["/book/models.py", "/flight/models.py"], "/book/views.py": ["/flight/models.py", "/book/models.py", "/book/serializers.py", "/book/helpers/flight_reservation_email.py"], "/book/helpers/email_reminder_cron.py": ["/book/models.py"], "/flight/tests/test_flight.py": ["/flight/models.py", "/authentication/helpers.py"], "/flight/serializers.py": ["/flight/models.py"], "/authentication/tests/test_user_model.py": ["/authentication/models.py"], "/authentication/tests/test_user_list_create.py": ["/authentication/helpers.py"], "/flight/admin.py": ["/flight/models.py"], "/authentication/serializers.py": ["/authentication/models.py", "/authentication/helpers.py"], "/book/serializers.py": ["/book/models.py"], "/bookings/authentication/views.py": ["/bookings/authentication/serializers.py"], "/bookings/authentication/urls.py": ["/bookings/authentication/views.py"], "/bookings/bookings/urls.py": ["/authentication/views.py"]}
|
29,354,430
|
zachang/jirgin
|
refs/heads/master
|
/bookings/bookings/urls.py
|
from django.contrib import admin
from django.urls import path, include
from authentication.views import home
urlpatterns = [
    # Root greeting endpoint.
    path('', home),
    # Authentication app (users + JWT login) mounted under /auth/api/.
    path('auth/api/', include('authentication.urls', namespace='authentication')),
    path('admin/', admin.site.urls),
]
|
{"/book/urls.py": ["/book/views.py"], "/authentication/views.py": ["/authentication/permissions.py", "/authentication/serializers.py", "/authentication/helpers.py", "/authentication/models.py"], "/flight/tests/test_flight_model.py": ["/flight/models.py"], "/book/models.py": ["/flight/models.py"], "/flight/urls.py": ["/flight/views.py"], "/book/tests/test_book_model.py": ["/book/models.py", "/flight/models.py"], "/authentication/urls.py": ["/authentication/views.py"], "/flight/views.py": ["/flight/serializers.py", "/flight/models.py", "/flight/helpers.py"], "/bookings/urls.py": ["/authentication/views.py"], "/book/tests/test_book.py": ["/book/models.py", "/flight/models.py"], "/book/views.py": ["/flight/models.py", "/book/models.py", "/book/serializers.py", "/book/helpers/flight_reservation_email.py"], "/book/helpers/email_reminder_cron.py": ["/book/models.py"], "/flight/tests/test_flight.py": ["/flight/models.py", "/authentication/helpers.py"], "/flight/serializers.py": ["/flight/models.py"], "/authentication/tests/test_user_model.py": ["/authentication/models.py"], "/authentication/tests/test_user_list_create.py": ["/authentication/helpers.py"], "/flight/admin.py": ["/flight/models.py"], "/authentication/serializers.py": ["/authentication/models.py", "/authentication/helpers.py"], "/book/serializers.py": ["/book/models.py"], "/bookings/authentication/views.py": ["/bookings/authentication/serializers.py"], "/bookings/authentication/urls.py": ["/bookings/authentication/views.py"], "/bookings/bookings/urls.py": ["/authentication/views.py"]}
|
29,356,255
|
EHoggard/TryExcept1
|
refs/heads/main
|
/LoopFunHoggard1.py
|
import datetime
# Print the launch timestamp as soon as the module is loaded.
now = datetime.datetime.now()
print("Current date and time : ")
print (now.strftime("%Y-%m-%d %H:%M:%S"))
import random
import HoggardDatabase1
import validation1
from getpass import getpass
# In-memory stores; NOTE(review): never written to in this file — verify use.
database = {}
userdatabase = {}
def init():
    """Entry menu: ask whether the user has an account and route to
    login() or register() accordingly."""
    print("Welcome to Bank of Hoggard")
    while True:
        # BUG FIX: the original crashed with ValueError on non-numeric input;
        # parse defensively and re-prompt instead.
        try:
            haveAccount = int(input("Do you have an account with us?: 1 (yes) 2 (no) \n"))
        except ValueError:
            print("You have selected an invalid option")
            continue
        if haveAccount == 1:
            login()
            return
        if haveAccount == 2:
            register()
            return
        print("You have selected an invalid option")
def login():
    """Prompt for username/password and open the banking menu on success.

    Bug fixed: the original called bankOperation() unconditionally at the end,
    so an unknown username or a wrong password still reached the account menu.
    Now only a successful username+password check proceeds.
    """
    print("Login to your account")
    name = input("What is your username? \n")
    # NOTE(review): hard-coded credential lists; presumably placeholders until
    # HoggardDatabase1 lookups are wired in - confirm.
    allowedUsernames = ['Applejack489', 'Walker911', 'Zebra065']
    allowedPassword = ['passwordSucess','passwordOld','passwordToo']
    if(name in allowedUsernames):
        password = input("Your password? \n")
        userID = allowedUsernames.index(name)
        if(password == allowedPassword[userID]):
            print("Password Accepted")
            # Only authenticated users reach the banking menu.
            bankOperation(allowedUsernames)
            return
    print('Account or username is not valid')
def register():
    # Collect new-account details and persist them via the project database module.
    # On failure this recurses into register(); repeated failures would
    # eventually hit the recursion limit - NOTE(review): consider a loop.
    print("Register for new account")
    email = input("What is your email address? \n")
    first_name = input("What is your first name? \n")
    last_name = input("What is your last name? \n")
    password = input("Create password \n")
    accountNumber = generateAccountNumber()
    print(accountNumber)
    # HoggardDatabase1.create presumably returns truthy on success - TODO confirm
    usercreated = HoggardDatabase1.create(accountNumber, first_name, last_name, email, password)
    if usercreated:
        print("Your account has been created")
        login()
    else:
        print("Invalid error, please try again")
        register()
def bankOperation(allowedUsernames):
    """Show the account menu and dispatch the selected operation.

    Re-prompts (recursively) on an invalid choice. Bug fixed: non-numeric
    menu input used to raise ValueError from int() and crash; it is now
    treated as an invalid option.
    """
    # NOTE(review): this greets with the first TWO usernames in the list, not
    # the logged-in user's first/last name - looks like a bug, but the
    # logged-in user is not passed in, so the message is left unchanged
    # pending an interface change.
    print("Welcome %s %s " % ( allowedUsernames[0], allowedUsernames[1] ) )
    try:
        selectedOption = int(input("What would you like to do? (1) Deposit (2) Withdrawl (3) Complaint (4) Exit \n"))
    except ValueError:
        print("Invalid Option Selected")
        bankOperation(allowedUsernames)
        return
    if (selectedOption == 1):
        depositOperation()
    elif (selectedOption == 2):
        withdrawlOperation()
    elif (selectedOption == 3):
        Complaint()
    elif (selectedOption == 4):
        exit()
    else:
        print("Invalid Option Selected")
        bankOperation(allowedUsernames)
Balance = 3000
def withdrawlOperation():
    """Stub withdrawal flow: no amount handling yet, just a confirmation line.

    The amount-prompt/lambda sketch that used to live here (commented out)
    has been removed; behavior is unchanged.
    """
    print("Please take your cash")
def depositOperation():
    """Ask for a deposit amount and report the resulting balance.

    Fixes: no longer shadows the builtin ``sum``; a non-numeric amount is
    rejected instead of raising ValueError.
    NOTE(review): the new balance is only printed - the module-level Balance
    is never updated, so deposits do not persist. Left as-is to keep the
    change limited to the crash fix.
    """
    Deposit = input("How much would you like to deposit? \n")
    try:
        new_balance = float(Balance) + float(Deposit)
    except ValueError:
        print("Invalid Option Selected")
        return
    print('Your current Balance is', new_balance)
def Complaint():
    # Read a complaint from the user; the text is intentionally discarded
    # (nothing stores it), only an acknowledgement is printed.
    input("What issue would you like to report? \n")
    print("Thank you for contacting us")
def generateAccountNumber():
    """Return a pseudo-random 10-digit account number.

    Uses random.randrange, so the upper bound 9999999999 itself is excluded.
    """
    low, high = 1111111111, 9999999999
    return random.randrange(low, high)
init()
|
{"/TryExceptLamATM.py": ["/Validation.py", "/HoggardDatabase.py"], "/LoopFunHoggard1.py": ["/HoggardDatabase1.py"]}
|
29,534,879
|
hayesspencer/python
|
refs/heads/master
|
/Dojo_Assignments/python_stack/my_environments/marCRUD/marCRUDApp/urls.py
|
# Route table for the chicken CRUD app; each pattern maps to a view in views.py.
from django.urls import path
from . import views
urlpatterns = [
    path('', views.index),
    path('chickens/create', views.create_chicken),
    path('chickens/<int:chicken_id>', views.show_chicken),
    path('chickens/<int:chicken_id>/destroy', views.delete_chicken),
    path('chickens/<int:chicken_id>/edit', views.edit_chicken),
    path('chickens/<int:chicken_id>/update',views.update_chicken),
]
|
{"/Dojo_Assignments/python_stack/my_environments/pythonReview/pythonReviewApp/views.py": ["/Dojo_Assignments/python_stack/my_environments/pythonReview/pythonReviewApp/models.py"], "/Dojo_Assignments/python_stack/my_environments/courses/coursesApp/views.py": ["/Dojo_Assignments/python_stack/my_environments/courses/coursesApp/models.py"], "/Dojo_Assignments/python_stack/my_environments/marCRUD/marCRUDApp/views.py": ["/Dojo_Assignments/python_stack/my_environments/marCRUD/marCRUDApp/models.py"]}
|
29,534,880
|
hayesspencer/python
|
refs/heads/master
|
/Dojo_Assignments/python_stack/my_environments/dojos_ninjas_proj/dojo_ninjas_app/urls.py
|
# Route table for the dojos & ninjas app.
from django.urls import path
from . import views
urlpatterns =[
    path('', views.index),
    path('ninjas/create', views.create_ninja),
    path('dojos/create', views.create_dojo),
]
|
{"/Dojo_Assignments/python_stack/my_environments/pythonReview/pythonReviewApp/views.py": ["/Dojo_Assignments/python_stack/my_environments/pythonReview/pythonReviewApp/models.py"], "/Dojo_Assignments/python_stack/my_environments/courses/coursesApp/views.py": ["/Dojo_Assignments/python_stack/my_environments/courses/coursesApp/models.py"], "/Dojo_Assignments/python_stack/my_environments/marCRUD/marCRUDApp/views.py": ["/Dojo_Assignments/python_stack/my_environments/marCRUD/marCRUDApp/models.py"]}
|
29,534,881
|
hayesspencer/python
|
refs/heads/master
|
/Dojo_Assignments/python_stack/my_environments/pythonReview/pythonReviewApp/views.py
|
from django.shortcuts import render, redirect
import bcrypt
from django.contrib import messages
from .models import *
def index(request):
    """Render the landing page (login/registration forms)."""
    return render(request, "index.html",)
def create_user(request):
    """POST handler: validate, bcrypt-hash the password, create the User.

    Validation errors travel back to '/' via the messages framework.
    (The manager method name 'create_valdiator' is a misspelling defined in
    models.py, so it must be called as spelled.)
    """
    if request.method == "POST":
        errors = User.objects.create_valdiator(request.POST)
        if len(errors) >0:
            for key, value in errors.items():
                messages.error(request, value)
            return redirect('/')
        else:
            password = request.POST['password']
            # Store only the bcrypt hash, never the plaintext password.
            pw_hash = bcrypt.hashpw(password.encode(), bcrypt.gensalt()).decode()
            user = User.objects.create(name=request.POST['user_name'], email=request.POST['email'], password=pw_hash)
            # Mark the user as logged in for subsequent requests.
            request.session['user_id'] = user.id
            return redirect('/main_page')
    return redirect('/')
def main_page(request):
    """Render the dashboard; requires a logged-in session."""
    if 'user_id' not in request.session:
        return redirect('/')
    context ={
        'current_user': User.objects.get(id=request.session['user_id']),
        'all_giraffes': Giraffe.objects.all()
    }
    return render(request, "main_page.html", context)
def login(request):
    """POST handler: check email + bcrypt password; start a session on success."""
    if request.method =="POST":
        users_with_email = User.objects.filter(email=request.POST['email'])
        if users_with_email:
            user = users_with_email[0]
            if bcrypt.checkpw(request.POST['password'].encode(), user.password.encode()):
                request.session['user_id'] = user.id
                return redirect('/main_page')
        # Deliberately vague: do not reveal whether the email exists.
        messages.error(request, "Email or password are not right")
    return redirect('/')
def logout(request):
    """Flush the whole session and return to the landing page."""
    request.session.flush()
    return redirect('/')
def create_giraffe(request):
    """POST handler: validate and create a Giraffe owned by the session user.

    Bug fixed: the manager method is defined as ``create_validator`` (see
    GiraffeManager in models.py); the original called the misspelled
    ``create_valdiator`` and raised AttributeError on every submission.
    The truthiness check on ``errors`` (instead of ``len(errors) > 0``) also
    tolerates a None return from the validator.
    """
    if 'user_id' not in request.session:
        return redirect('/')
    if request.method == "POST":
        errors = Giraffe.objects.create_validator(request.POST)
        if errors:
            for key, value in errors.items():
                messages.error(request, value)
            return redirect('/')
        else:
            giraffe = Giraffe.objects.create(
                name=request.POST['giraffe_name'],
                catchphrase=request.POST['catchphrase'],
                owner=User.objects.get(id=request.session['user_id']))
            return redirect('/main_page')
    return redirect('/main_page')
# Create your views here.
|
{"/Dojo_Assignments/python_stack/my_environments/pythonReview/pythonReviewApp/views.py": ["/Dojo_Assignments/python_stack/my_environments/pythonReview/pythonReviewApp/models.py"], "/Dojo_Assignments/python_stack/my_environments/courses/coursesApp/views.py": ["/Dojo_Assignments/python_stack/my_environments/courses/coursesApp/models.py"], "/Dojo_Assignments/python_stack/my_environments/marCRUD/marCRUDApp/views.py": ["/Dojo_Assignments/python_stack/my_environments/marCRUD/marCRUDApp/models.py"]}
|
29,534,882
|
hayesspencer/python
|
refs/heads/master
|
/Dojo_Assignments/python_stack/my_environments/pythonReview/pythonReviewApp/models.py
|
from django.db import models
import re
# Create your models here.
class UserManager(models.Manager):
    """Custom manager providing registration-form validation for User."""
    def create_valdiator(self, reqPOST):
        """Validate a registration POST; return a dict of field -> error message.

        (The method name keeps the original misspelling because views.py
        calls it as ``create_valdiator``.)
        """
        errors = {}
        if len(reqPOST['user_name']) < 3:
            errors['user_name'] = "Name is too short"
        if len(reqPOST['email']) < 6:
            errors['email'] = "Email is too short"
        if len(reqPOST['password']) < 8:
            # Bug fixed: this wrote to the 'email' key, silently overwriting
            # any email error and mislabeling the password error.
            errors['password'] = "Password is too short"
        if reqPOST['password'] != reqPOST['password_conf']:
            errors['match'] = "Password and password confirmation dont match"
        EMAIL_REGEX = re.compile(r'^[a-zA-Z0-9.+_-]+@[a-zA-Z0-9._-]+\.[a-zA-Z]+$')
        # Bug fixed: the original referenced an undefined name ``reqData``
        # here, raising NameError whenever validation ran.
        if not EMAIL_REGEX.match(reqPOST['email']):
            errors['regex'] = ("Email in wrong format")
        users_with_email = User.objects.filter(email=reqPOST['email'])
        if len(users_with_email) >= 1:
            errors['dup'] = "Email taken, use another"
        return errors
class User(models.Model):
    # Registered site user; 'password' stores a bcrypt hash (see views.create_user).
    name = models.TextField()
    email = models.TextField()
    password = models.TextField()
    created_at = models.DateTimeField(auto_now_add=True)
    updated_at = models.DateTimeField(auto_now=True)
    objects = UserManager()
class GiraffeManager(models.Manager):
    """Custom manager providing creation-form validation for Giraffe."""
    def create_validator(self, reqPOST):
        """Validate a giraffe-creation POST; return a dict of field -> error.

        Bugs fixed: ``errors`` was never initialized (NameError on the first
        failed check) and the method had no return, so callers always got
        None. The password-length check (copy-pasted from UserManager) raised
        KeyError when the form had no 'password' field and wrote its message
        under the 'email' key; it is now guarded and correctly keyed.
        """
        errors = {}
        if len(reqPOST['giraffe_name']) < 3:
            errors['giraffe_name'] = "Name is too short"
        if len(reqPOST['catchphrase']) < 6:
            errors['catchphrase'] = "Catchphrase is too short"
        # NOTE(review): likely a copy-paste leftover from UserManager; kept
        # but guarded so a form without 'password' no longer crashes.
        if 'password' in reqPOST and len(reqPOST['password']) < 8:
            errors['password'] = "Password is too short"
        return errors
class Giraffe(models.Model):
    # A giraffe record owned by a User (reverse accessor: user.giraffes_owned).
    name = models.TextField()
    catchphrase = models.TextField()
    owner = models.ForeignKey(User, related_name="giraffes_owned", on_delete=models.CASCADE)
    created_at = models.DateTimeField(auto_now_add=True)
    updated_at = models.DateTimeField(auto_now=True)
    objects = GiraffeManager()
|
{"/Dojo_Assignments/python_stack/my_environments/pythonReview/pythonReviewApp/views.py": ["/Dojo_Assignments/python_stack/my_environments/pythonReview/pythonReviewApp/models.py"], "/Dojo_Assignments/python_stack/my_environments/courses/coursesApp/views.py": ["/Dojo_Assignments/python_stack/my_environments/courses/coursesApp/models.py"], "/Dojo_Assignments/python_stack/my_environments/marCRUD/marCRUDApp/views.py": ["/Dojo_Assignments/python_stack/my_environments/marCRUD/marCRUDApp/models.py"]}
|
29,534,883
|
hayesspencer/python
|
refs/heads/master
|
/Dojo_Assignments/python_stack/my_environments/marCRUD/marCRUDApp/apps.py
|
# Django application configuration for marCRUDApp.
from django.apps import AppConfig
class MarcrudappConfig(AppConfig):
    name = 'marCRUDApp'
|
{"/Dojo_Assignments/python_stack/my_environments/pythonReview/pythonReviewApp/views.py": ["/Dojo_Assignments/python_stack/my_environments/pythonReview/pythonReviewApp/models.py"], "/Dojo_Assignments/python_stack/my_environments/courses/coursesApp/views.py": ["/Dojo_Assignments/python_stack/my_environments/courses/coursesApp/models.py"], "/Dojo_Assignments/python_stack/my_environments/marCRUD/marCRUDApp/views.py": ["/Dojo_Assignments/python_stack/my_environments/marCRUD/marCRUDApp/models.py"]}
|
29,534,884
|
hayesspencer/python
|
refs/heads/master
|
/Dojo_Assignments/python_stack/_python/OOP/user.py
|
class User:
    """A bank account holder with a fluent (chainable) mutation API."""

    def __init__(self, name, email):
        self.name = name
        self.email = email
        self.account_balance = 0  # balances start at zero; no overdraft checks

    def make_withdrawal(self, amount):
        """Subtract amount from the balance. Returns self for chaining."""
        self.account_balance -= amount
        return self

    def make_deposit(self, amount):
        """Add amount to the balance. Returns self for chaining."""
        self.account_balance += amount
        return self

    def display_user_balance(self):
        """Print the user's name and balance. Returns self for chaining."""
        print(f"User:{self.name}, Balance:${self.account_balance}")
        return self

    def transfer_money(self, other_user, amount):
        """Move amount from this user to other_user.

        Consistency fix: now returns self like every other mutator so it can
        participate in method chains (previously returned None; callers that
        ignored the return value are unaffected).
        """
        other_user.account_balance += amount
        self.account_balance -= amount
        return self
# Demo script: exercise the fluent account API and a transfer between users.
troy = User("Troy", "troy@python.com")
mike = User("Mike", "mike@python.com")
kevin = User("Kevin", "kevin@python.com")
troy.make_deposit(100).make_deposit(200).make_deposit(
    300).make_withdrawal(200).display_user_balance()
mike.make_deposit(200).make_deposit(400).make_withdrawal(
    300).make_withdrawal(100).display_user_balance()
kevin.make_deposit(5000).make_withdrawal(1000).make_withdrawal(
    500).make_withdrawal(500).display_user_balance()
kevin.transfer_money(mike, 1000)
kevin.display_user_balance()
mike.display_user_balance()
|
{"/Dojo_Assignments/python_stack/my_environments/pythonReview/pythonReviewApp/views.py": ["/Dojo_Assignments/python_stack/my_environments/pythonReview/pythonReviewApp/models.py"], "/Dojo_Assignments/python_stack/my_environments/courses/coursesApp/views.py": ["/Dojo_Assignments/python_stack/my_environments/courses/coursesApp/models.py"], "/Dojo_Assignments/python_stack/my_environments/marCRUD/marCRUDApp/views.py": ["/Dojo_Assignments/python_stack/my_environments/marCRUD/marCRUDApp/models.py"]}
|
29,534,885
|
hayesspencer/python
|
refs/heads/master
|
/Dojo_Assignments/python_stack/my_environments/courses/coursesApp/urls.py
|
# Route table for the courses app. 'destroy' shows the confirmation page;
# 'delete' performs the actual deletion (see views.py).
from django.urls import path
from . import views
urlpatterns = [
    path('', views.index),
    path('courses/create', views.create_course),
    path('courses/destroy/<int:course_id>', views.destroy_course),
    path('courses/delete/<int:course_id>', views.delete_course),
]
|
{"/Dojo_Assignments/python_stack/my_environments/pythonReview/pythonReviewApp/views.py": ["/Dojo_Assignments/python_stack/my_environments/pythonReview/pythonReviewApp/models.py"], "/Dojo_Assignments/python_stack/my_environments/courses/coursesApp/views.py": ["/Dojo_Assignments/python_stack/my_environments/courses/coursesApp/models.py"], "/Dojo_Assignments/python_stack/my_environments/marCRUD/marCRUDApp/views.py": ["/Dojo_Assignments/python_stack/my_environments/marCRUD/marCRUDApp/models.py"]}
|
29,534,886
|
hayesspencer/python
|
refs/heads/master
|
/Dojo_Assignments/python_stack/my_environments/firstDjango/appOne/views.py
|
# Placeholder views for the first Django exercise (blog CRUD skeleton).
from django.shortcuts import render, HttpResponse, redirect
# Create your views here.
def index(request):
    """List all blogs (placeholder text for now)."""
    return HttpResponse("placeholder to later display list of blogs")
def new(request):
    """Show the new-blog form (placeholder)."""
    return HttpResponse("Placedholder to display a new form to creat a new blog")
def create(request):
    """Create a blog then bounce home (no persistence yet)."""
    return redirect('/')
def show(request, number):
    """Show one blog by its URL-captured number (placeholder)."""
    return HttpResponse(f"Placeholder to display blog number {number}.")
def edit(request, number):
    """Edit one blog (placeholder)."""
    return HttpResponse(f"Placeholder to edit blog {number}.")
def destroy(request, number):
    """Delete a blog then bounce home (no persistence yet)."""
    return redirect('/')
def djangoOne(request):
    """Render the app's index template."""
    return render(request, "index.html")
# Create your views here
|
{"/Dojo_Assignments/python_stack/my_environments/pythonReview/pythonReviewApp/views.py": ["/Dojo_Assignments/python_stack/my_environments/pythonReview/pythonReviewApp/models.py"], "/Dojo_Assignments/python_stack/my_environments/courses/coursesApp/views.py": ["/Dojo_Assignments/python_stack/my_environments/courses/coursesApp/models.py"], "/Dojo_Assignments/python_stack/my_environments/marCRUD/marCRUDApp/views.py": ["/Dojo_Assignments/python_stack/my_environments/marCRUD/marCRUDApp/models.py"]}
|
29,534,887
|
hayesspencer/python
|
refs/heads/master
|
/Dojo_Assignments/python_stack/my_environments/courses/coursesApp/models.py
|
from django.db import models
# Create your models here.
class CourseManager(models.Manager):
    """Manager adding creation-form validation for Course."""

    def create_validator(self, reqPOST):
        """Check a course-creation POST; return {field: message} for failures."""
        errors = {}
        name_value = reqPOST['course_name']
        desc_value = reqPOST['description']
        if len(name_value) < 6:
            errors['name'] = "Course name is too short"
        if len(desc_value) < 16:
            errors['desc'] = "Description is too short"
        return errors
class Course(models.Model):
    # A course listing; validated on creation by CourseManager.create_validator.
    name = models.TextField()
    description = models.TextField()
    created_at = models.DateTimeField(auto_now_add=True)
    updated_at = models.DateTimeField(auto_now=True)
    objects = CourseManager()
|
{"/Dojo_Assignments/python_stack/my_environments/pythonReview/pythonReviewApp/views.py": ["/Dojo_Assignments/python_stack/my_environments/pythonReview/pythonReviewApp/models.py"], "/Dojo_Assignments/python_stack/my_environments/courses/coursesApp/views.py": ["/Dojo_Assignments/python_stack/my_environments/courses/coursesApp/models.py"], "/Dojo_Assignments/python_stack/my_environments/marCRUD/marCRUDApp/views.py": ["/Dojo_Assignments/python_stack/my_environments/marCRUD/marCRUDApp/models.py"]}
|
29,534,888
|
hayesspencer/python
|
refs/heads/master
|
/Dojo_Assignments/python_stack/my_environments/courses/coursesApp/views.py
|
from django.shortcuts import render
# Create your views here.
# NOTE(review): duplicate import below - 'render' is imported twice.
from django.shortcuts import render, redirect
from .models import *
from django.contrib import messages
# Create your views here.
def index(request):
    """List all courses."""
    context = {
        "all_courses": Course.objects.all()
    }
    return render(request, "index.html", context)
def create_course(request):
    """POST handler: validate and create a course; errors go via messages."""
    if request.method == "POST":
        errors = Course.objects.create_validator(request.POST)
        if len(errors) > 0:
            for key, value in errors.items():
                messages.error(request, value)
        else:
            course = Course.objects.create(name=request.POST['course_name'], description=request.POST['description'])
    return redirect('/')
def destroy_course(request, course_id):
    """Show the delete-confirmation page for one course."""
    context = {
        'one_course': Course.objects.get(id=course_id)
    }
    return render(request, "delete_page.html", context)
def delete_course(request, course_id):
    """POST handler: actually delete the confirmed course."""
    if request.method == "POST":
        course_to_delete = Course.objects.get(id=course_id)
        course_to_delete.delete()
    return redirect('/')
|
{"/Dojo_Assignments/python_stack/my_environments/pythonReview/pythonReviewApp/views.py": ["/Dojo_Assignments/python_stack/my_environments/pythonReview/pythonReviewApp/models.py"], "/Dojo_Assignments/python_stack/my_environments/courses/coursesApp/views.py": ["/Dojo_Assignments/python_stack/my_environments/courses/coursesApp/models.py"], "/Dojo_Assignments/python_stack/my_environments/marCRUD/marCRUDApp/views.py": ["/Dojo_Assignments/python_stack/my_environments/marCRUD/marCRUDApp/models.py"]}
|
29,534,889
|
hayesspencer/python
|
refs/heads/master
|
/Dojo_Assignments/python_stack/my_environments/word_Generator/randApp/apps.py
|
# Django application configuration for randApp.
from django.apps import AppConfig
class RandappConfig(AppConfig):
    name = 'randApp'
|
{"/Dojo_Assignments/python_stack/my_environments/pythonReview/pythonReviewApp/views.py": ["/Dojo_Assignments/python_stack/my_environments/pythonReview/pythonReviewApp/models.py"], "/Dojo_Assignments/python_stack/my_environments/courses/coursesApp/views.py": ["/Dojo_Assignments/python_stack/my_environments/courses/coursesApp/models.py"], "/Dojo_Assignments/python_stack/my_environments/marCRUD/marCRUDApp/views.py": ["/Dojo_Assignments/python_stack/my_environments/marCRUD/marCRUDApp/models.py"]}
|
29,534,890
|
hayesspencer/python
|
refs/heads/master
|
/Dojo_Assignments/python_stack/my_environments/ninja_Gold/goldApp/views.py
|
from django.shortcuts import render, redirect
import random
from datetime import datetime
# helper dictionary, for easy access to min/max gold values
GOLD_MAP = {
    "farm": (10,20),
    "cave": (5,10),
    "house": (2,5),
    "casino": (0,50)
}
# Create your views here.
def index(request):
    """Render the game page, seeding session state on first visit."""
    # check if either 'gold' or 'activities' keys are not in session (yet)
    if not "gold" in request.session or "activities" not in request.session:
        # set these to initial values if that is the case!
        request.session['gold'] = 0
        request.session['activities'] = []
    return render(request, 'index.html')
def reset(request):
    """Wipe all session state (gold + activity log) and restart the game."""
    request.session.clear()
    return redirect('/')
def process_gold(request):
    """Handle a 'visit building' form POST: adjust gold and log the activity.

    Direct GET requests are bounced back to the index.
    """
    if request.method == 'GET':
        return redirect('/')
    building_name = request.POST['building']
    # Look up the (min, max) gold range for the submitted building.
    building = GOLD_MAP[building_name]
    # Capitalize only the first letter for the activity message.
    building_name_upper = building_name[0].upper() + building_name[1:]
    # Roll this turn's gold amount within the building's range.
    curr_gold = random.randint(building[0], building[1])
    now_formatted = datetime.now().strftime("%m/%d/%Y %I:%M%p")
    # 'result' doubles as a CSS class on the rendered activity entry.
    result = 'earn'
    message = f"Earned {curr_gold} from the {building_name_upper}! ({now_formatted})"
    # Casino visits lose money 50% of the time.
    if building_name == 'casino':
        if random.randint(0,1) > 0: # 50% chance of being True/False
            message = f"Entered a {building_name_upper} and lost {curr_gold} golds... Ouch... ({now_formatted})"
            # Convert this turn's gold to a loss.
            curr_gold = curr_gold * -1
            result = 'lose'
    # update session gold value
    request.session['gold'] += curr_gold
    # update session activities with new message
    # NOTE: each 'activity' is a dictionary, with the message as well as the 'result' for css purposes
    request.session['activities'].append({"message": message, "result": result})
    # Bug fixed: mutating a list stored inside the session does not mark the
    # session dirty, so Django could silently drop the appended activity.
    # Flag the session as modified so the change is persisted.
    request.session.modified = True
    return redirect('/')
|
{"/Dojo_Assignments/python_stack/my_environments/pythonReview/pythonReviewApp/views.py": ["/Dojo_Assignments/python_stack/my_environments/pythonReview/pythonReviewApp/models.py"], "/Dojo_Assignments/python_stack/my_environments/courses/coursesApp/views.py": ["/Dojo_Assignments/python_stack/my_environments/courses/coursesApp/models.py"], "/Dojo_Assignments/python_stack/my_environments/marCRUD/marCRUDApp/views.py": ["/Dojo_Assignments/python_stack/my_environments/marCRUD/marCRUDApp/models.py"]}
|
29,534,891
|
hayesspencer/python
|
refs/heads/master
|
/Dojo_Assignments/python_stack/my_environments/amadon/amadonApp/views.py
|
from django.shortcuts import render, redirect
from .models import Order, Product
from django.db.models import Sum
def index(request):
    """Render the storefront with every product."""
    context = {
        "all_products": Product.objects.all()
    }
    return render(request, "index.html", context)
def checkout(request):
    """Render the checkout page with the latest order and running totals.

    Bug fixed: with no orders yet, Order.objects.last() returns None and the
    original crashed with AttributeError on .total_price (the aggregate sums
    would also be None). An empty order book now renders zeros instead.
    """
    last = Order.objects.last()
    if last is None:
        context = {'orders': 0, 'total': 0, 'bill': 0}
        return render(request, "checkout.html", context)
    price = last.total_price
    full_order = Order.objects.aggregate(Sum('quantity_ordered'))['quantity_ordered__sum']
    full_price = Order.objects.aggregate(Sum('total_price'))['total_price__sum']
    context = {
        'orders': full_order,
        'total': full_price,
        'bill': price,
    }
    return render(request, "checkout.html", context)
def purchase(request):
    """POST handler: create an Order for the chosen product and quantity."""
    if request.method == 'POST':
        this_product = Product.objects.filter(id=request.POST["id"])
        if not this_product:
            # Unknown product id: silently return to the storefront.
            return redirect('/')
        else:
            quantity = int(request.POST["quantity"])
            # NOTE(review): assumes 'quantity' is numeric; a non-numeric value
            # raises ValueError here - confirm the form enforces this.
            total_charge = quantity*(float(this_product[0].price))
            Order.objects.create(quantity_ordered=quantity, total_price=total_charge)
            return redirect('/checkout')
    else:
        return redirect('/')
|
{"/Dojo_Assignments/python_stack/my_environments/pythonReview/pythonReviewApp/views.py": ["/Dojo_Assignments/python_stack/my_environments/pythonReview/pythonReviewApp/models.py"], "/Dojo_Assignments/python_stack/my_environments/courses/coursesApp/views.py": ["/Dojo_Assignments/python_stack/my_environments/courses/coursesApp/models.py"], "/Dojo_Assignments/python_stack/my_environments/marCRUD/marCRUDApp/views.py": ["/Dojo_Assignments/python_stack/my_environments/marCRUD/marCRUDApp/models.py"]}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.