index
int64 | repo_name
string | branch_name
string | path
string | content
string | import_graph
string |
|---|---|---|---|---|---|
24,087
|
sumitsk/HER
|
refs/heads/master
|
/replay_buffer.py
|
# import threading
import numpy as np
import random
# from baselines.common.segment_tree import SumSegmentTree, MinSegmentTree
class ReplayBuffer:
    """Episode-granular replay buffer backed by preallocated numpy arrays."""

    def __init__(self, buffer_shapes, size_in_transitions, T, sampler):
        """Creates a replay buffer.

        Args:
            buffer_shapes (dict of ints): the shape for all buffers that are
                used in the replay buffer
            size_in_transitions (int): the size of the buffer, measured in
                transitions
            T (int): the time horizon for episodes
            sampler: object exposing ``sample(buffers, batch_size)``
        """
        self.buffer_shapes = buffer_shapes
        # capacity measured in whole episodes
        self.size = size_in_transitions // T
        self.T = T
        self.sampler = sampler
        # self.buffers is {key: array(size_in_episodes x T or T+1 x dim_key)}
        self.buffers = {k: np.empty([self.size, *shp])
                        for k, shp in buffer_shapes.items()}
        # memory management
        self.current_size = 0          # episodes currently stored
        self.n_transitions_stored = 0  # lifetime transition counter

    @property
    def full(self):
        """True once every episode slot has been written at least once."""
        return self.size == self.current_size

    def sample(self, batch_size):
        """Returns a dict {key: array(batch_size x shapes[key])}."""
        assert self.current_size > 0
        # restrict every buffer to the filled region
        view = {k: arr[:self.current_size] for k, arr in self.buffers.items()}
        # next-step observation / achieved goal are one-step-shifted views
        view['o_2'] = view['o'][:, 1:, :]
        view['ag_2'] = view['ag'][:, 1:, :]
        transitions = self.sampler.sample(view, batch_size)
        expected = ['r', 'o_2', 'ag_2'] + list(self.buffers.keys())
        for key in expected:
            assert key in transitions, "key %s missing from transitions" % key
        return transitions

    def store_episode(self, episode_batch):
        """episode_batch: array(batch_size x (T or T+1) x dim_key)"""
        sizes = [len(v) for v in episode_batch.values()]
        assert np.all(np.array(sizes) == sizes[0])
        n_new = sizes[0]
        slots = self._get_storage_idx(n_new)
        # load inputs into buffers
        for key, arr in self.buffers.items():
            arr[slots] = episode_batch[key]
        self.n_transitions_stored += n_new * self.T

    def get_current_episode_size(self):
        """Number of episodes currently held."""
        return self.current_size

    def get_current_size(self):
        """Number of transitions currently held."""
        return self.current_size * self.T

    def get_transitions_stored(self):
        """Total transitions ever written (not capped by capacity)."""
        return self.n_transitions_stored

    def clear_buffer(self):
        """Logically empties the buffer (data is overwritten lazily)."""
        self.current_size = 0

    def _get_storage_idx(self, inc=None):
        """Returns episode slot indices for `inc` incoming episodes.

        Fills free slots consecutively; once full, overwrites random slots.
        """
        inc = inc or 1  # size increment
        assert inc <= self.size, "Batch committed to replay is too large!"
        free = self.size - self.current_size
        if inc <= free:
            idx = np.arange(self.current_size, self.current_size + inc)
        elif free > 0:
            # use the remaining free slots, then recycle random filled ones
            idx = np.concatenate([
                np.arange(self.current_size, self.size),
                np.random.randint(0, self.current_size, inc - free),
            ])
        else:
            idx = np.random.randint(0, self.size, inc)
        # update replay size
        self.current_size = min(self.size, self.current_size + inc)
        if inc == 1:
            idx = idx[0]
        return idx
# class PrioritizedReplayBuffer:
# def __init__(self, buffer_shapes, size_in_transitions, T, sample_transitions, alpha=0.5):
# raise NotImplementedError('implement PrioritizedReplayBuffer')
# """Creates a replay buffer for prioritized sampling.
# Args:
# buffer_shapes (dict of ints): the shape for all buffers that are used in the replay
# buffer
# size_in_transitions (int): the size of the buffer, measured in transitions
# T (int): the time horizon for episodes
# sample_transitions (function): a function that samples from the replay buffer
# alpha (float): parameter for prioritized sampling
# """
# self.buffer_shapes = buffer_shapes
# # size in episodes
# self.size = size_in_transitions // T
# self.T = T
# self.sample_transitions = sample_transitions
# # self.buffers is {key: array(size_in_episodes x T or T+1 x dim_key)}
# self.buffers = {key: np.empty([self.size, *shape]) for key, shape in buffer_shapes.items()}
# # memory management
# # number of episodes stored in replay buffer
# self.current_size = 0
# # number of transitions stored in replay buffer (= size of segment tree)
# self.current_count = 0
# # total number of transitions so far
# self.n_transitions_stored = 0
# self.lock = threading.Lock()
# # Prioritized Experience Replay specific
# assert alpha >=0
# self._alpha = alpha
# it_capacity = 1
# max_transitions = self.size * T
# while it_capacity < max_transitions:
# it_capacity *= 2
# self._it_sum = SumSegmentTree(it_capacity)
# self._it_min = MinSegmentTree(it_capacity)
# self._max_priority = 1.0
# self.flatten_indices = np.arange(max_transitions).reshape(self.size, T)
# @property
# def full(self):
# with self.lock:
# return self.current_size == self.size
# def get_current_episode_size(self):
# with self.lock:
# return self.current_size
# def get_current_size(self):
# with self.lock:
# return self.current_size * self.T
# def get_transitions_stored(self):
# with self.lock:
# return self.n_transitions_stored
# def clear_buffer(self):
# with self.lock:
# self.current_size = 0
# self.current_count = 0
# def sample(self, batch_size, beta, obj=None):
# """Returns a dict {key: array(batch_size x shapes[key])}
# """
# buffers = {}
# with self.lock:
# assert self.current_size > 0
# for key in self.buffers.keys():
# buffers[key] = self.buffers[key][:self.current_size]
# buffers['o_2'] = buffers['o'][:, 1:, :]
# buffers['ag_2'] = buffers['ag'][:, 1:, :]
# # indices of all the samples to be replayed, some of these will be substituted by HER samples
# with self.lock:
# transition_indices = self._sample_proportional(batch_size)
# # convert transition indices to episode indices and timestep of that episode
# episode_idxs, t_samples = self._get_episode_and_time_indices(transition_indices)
# # sample transitions and return original and her samples
# transitions, original_samples, her_samples = self.sample_transitions(buffers, batch_size, episode_idxs=episode_idxs,
# t_samples=t_samples, return_indices=True)
# for key in (['r', 'o_2', 'ag_2'] + list(self.buffers.keys())):
# assert key in transitions, "key %s missing from transitions" % key
# # weights of all the samples
# weights = np.ones(batch_size)
# # compute weights of original samples
# with self.lock:
# org_weights = self._compute_sample_weights(transition_indices[original_samples], beta)
# weights[original_samples] = org_weights
# if obj is not None:
# her_transitions = {key: value[her_samples] for key, value in transitions.items()}
# her_priorities = obj.get_priorities(her_transitions)
# her_weights = self._compute_her_sample_weights(her_priorities, beta)
# # NOTE: normalizing max her weight to be equal to max org weight
# her_weights = her_weights / max(her_weights) * max(org_weights)
# weights[her_samples] = her_weights
# return transitions, weights, (transition_indices, original_samples)
# def _compute_sample_weights(self, idxes, beta):
# # returns weights of all the original samples (not HER ones)
# weights = np.full(len(idxes), 1.0)
# p_min = self._it_min.min() / self._it_sum.sum()
# max_weight = (p_min * self.current_count) ** (-beta)
# for i, idx in enumerate(idxes):
# p_sample = self._it_sum[idx] / self._it_sum.sum()
# weight = (p_sample * self.current_count) ** (-beta)
# weights[i] = weight / max_weight
# return weights
# def _compute_her_sample_weights(self, priorities, beta):
# priorities = priorities ** self._alpha
# probs = priorities / sum(priorities)
# # TODO: is self.current_count right here ?
# weights = (probs * self.current_count) ** (-beta)
# weights = weights / max(weights)
# return weights
# def store_episode(self, episode_batch):
# # episode_batch: array(batch_size x (T or T+1) x dim_key)
# batch_sizes = [len(episode_batch[key]) for key in episode_batch.keys()]
# assert np.all(np.array(batch_sizes) == batch_sizes[0])
# batch_size = batch_sizes[0]
# with self.lock:
# idxs = self._get_storage_idx(batch_size)
# # load inputs into buffers
# for key in self.buffers.keys():
# self.buffers[key][idxs] = episode_batch[key]
# self.n_transitions_stored += batch_size * self.T
# # update tree for all the new additions in the buffer
# new_indices = self._get_new_flatten_indices(idxs)
# priority = self._max_priority ** self._alpha
# for new_idx in new_indices:
# self._it_sum[new_idx] = priority
# self._it_min[new_idx] = priority
# def _get_storage_idx(self, inc=None):
# # return episode incides where the incoming transitions will be stored
# inc = inc or 1 # size increment
# assert inc <= self.size, "Batch committed to replay is too large!"
# # go consecutively until you hit the end, and then go randomly.
# if self.current_size+inc <= self.size:
# idx = np.arange(self.current_size, self.current_size+inc)
# elif self.current_size < self.size:
# overflow = inc - (self.size - self.current_size)
# idx_a = np.arange(self.current_size, self.size)
# idx_b = np.random.randint(0, self.current_size, overflow)
# idx = np.concatenate([idx_a, idx_b])
# else:
# idx = np.random.randint(0, self.size, inc)
# # update replay size
# self.current_size = min(self.size, self.current_size+inc)
# self.current_count = min(self.size*self.T, self.current_size*self.T)
# if inc == 1:
# idx = idx[0]
# return idx
# def update_priorities(self, idxes_tuple, priorities):
# """Update priorities of sampled transitions.
# sets priority of transition at index idxes[i] in buffer to priorities[i].
# Parameters
# ----------
# idxes: tuple
# List of idxes of sampled transitions, list of indices which are original samples (not HER)
# priorities: [float]
# List of updated priorities corresponding to transitions at the sampled idxes denoted by variable `idxes`.
# """
# priorities = priorities.squeeze()
# idxes, org_indexes = idxes_tuple
# assert len(idxes) == len(priorities)
# # update the priority for the original indexes only (as they were the ones used in the replay)
# # the remaining ones are HER transitions (goal-substituted ones) which are not present in the experience replay
# org_idxes = idxes[org_indexes]
# org_priorities = priorities[org_indexes]
# with self.lock:
# count = self.current_count
# for idx, priority in zip(org_idxes, org_priorities):
# assert priority > 0
# assert 0 <= idx < count
# self._it_sum[idx] = priority ** self._alpha
# self._it_min[idx] = priority ** self._alpha
# self._max_priority = max(self._max_priority, priority)
# def _sample_proportional(self, batch_size):
# # prioritized sampling, returns transitions indices
# res = np.full(batch_size, 0)
# p_total = self._it_sum.sum(0, self.current_count - 1)
# every_range_len = p_total / batch_size
# for i in range(batch_size):
# mass = random.random() * every_range_len + i * every_range_len
# idx = self._it_sum.find_prefixsum_idx(mass)
# res[i] = idx
# return res
# def _get_new_flatten_indices(self, row_indices):
# return self.flatten_indices[row_indices].flatten()
# def _get_episode_and_time_indices(self, idxs):
# episode_idxs, t_samples = np.unravel_index(idxs, (self.size, self.T))
# return episode_idxs, t_samples
|
{"/main.py": ["/utils.py", "/arguments.py", "/learner.py", "/policy.py"], "/policy.py": ["/model.py", "/replay_buffer.py", "/her.py", "/normalizer.py", "/utils.py"]}
|
24,088
|
sumitsk/HER
|
refs/heads/master
|
/arguments.py
|
import argparse
def get_args():
    """Parses and returns command-line options for an HER training run."""
    parser = argparse.ArgumentParser(description='RL')
    # environment options
    parser.add_argument('--env-name', default='FetchReach-v1', help='gym environment')
    parser.add_argument('--render', action='store_true')
    parser.add_argument('--relative-goal', action='store_true')
    # training options
    parser.add_argument('--seed', type=int, default=0)
    parser.add_argument('--num-processes', type=int, default=2)
    parser.add_argument('--n-epochs', type=int, default=50)
    parser.add_argument('--n-cycles', type=int, default=10)
    parser.add_argument('--n-batches', type=int, default=40)
    parser.add_argument('--actor-lr', default=1e-3, type=float)
    parser.add_argument('--critic-lr', default=1e-3, type=float)
    # logging options
    parser.add_argument('--logid', default=None, type=int, help='unique id for each run (default: date_time)')
    parser.add_argument('--save-every', default=5, type=int, help='save policy after every ... epochs')
    # miscellaneous options
    parser.add_argument('--test', action='store_true')
    return parser.parse_args()
|
{"/main.py": ["/utils.py", "/arguments.py", "/learner.py", "/policy.py"], "/policy.py": ["/model.py", "/replay_buffer.py", "/her.py", "/normalizer.py", "/utils.py"]}
|
24,089
|
sumitsk/HER
|
refs/heads/master
|
/her.py
|
# modified from OPENAI baselines version
import numpy as np
class HER_sampler(object):
    """Minibatch sampler implementing Hindsight Experience Replay.

    Uses the 'future' strategy: a fraction of sampled transitions get their
    goal replaced by an achieved goal from a later timestep of the same
    episode, and the reward is recomputed for the substituted goal.
    NOTE: behavior depends on the exact sequence of np.random calls below;
    do not reorder them.
    """

    def __init__(self, replay_k, reward_fun):
        # replay strategy is future
        # with replay_k hindsight goals per real goal, a transition keeps its
        # original goal with probability 1/(1+replay_k)
        self.future_p = 1 - (1. / (1 + replay_k))
        # reward_fun(ag_2=..., g=..., info=...) -> rewards; called after goal
        # substitution so HER transitions get consistent rewards
        self.reward_fun = reward_fun

    def sample(self, episode_batch, batch_size_in_transitions):
        """Samples a batch of (possibly goal-relabelled) transitions.

        episode_batch is {key: array(buffer_size x T x dim_key)} ('o'/'ag'
        carry T+1 steps); returns {key: array(batch_size x dim_key)}
        including a recomputed 'r'.
        """
        # episode_batch is {key: array(buffer_size x T x dim_key)}
        T = episode_batch['u'].shape[1]
        rollout_batch_size = episode_batch['u'].shape[0]
        batch_size = batch_size_in_transitions
        # Select which episodes and time steps to use.
        # if episode_idxs is None or t_samples is None:
        episode_idxs = np.random.randint(0, rollout_batch_size, batch_size)
        t_samples = np.random.randint(T, size=batch_size)
        transitions = {key: episode_batch[key][episode_idxs, t_samples].copy()
                       for key in episode_batch.keys()}
        # Select future time indexes proportional with probability future_p.
        probs = np.random.uniform(size=batch_size)
        her_indexes = np.where(probs < self.future_p)[0]
        # random future timestep in (t_samples, T] of the same episode
        future_offset = np.random.uniform(size=batch_size) * (T - t_samples)
        future_offset = future_offset.astype(int)
        future_t = (t_samples + 1 + future_offset)[her_indexes]
        # Replace goal with achieved goal but only for the previously-selected
        # HER transitions (as defined by her_indexes).
        # For the other transitions, keep the original goal.
        future_ag = episode_batch['ag'][episode_idxs[her_indexes], future_t]
        transitions['g'][her_indexes] = future_ag
        # Reconstruct info dictionary for reward computation.
        info = {}
        for key, value in transitions.items():
            if key.startswith('info_'):
                info[key.replace('info_', '')] = value
        # Re-compute reward since we may have substituted the goal.
        reward_params = {k: transitions[k] for k in ['ag_2', 'g']}
        reward_params['info'] = info
        transitions['r'] = self.reward_fun(**reward_params)
        # her transition rewards are not always 0
        transitions = {k: transitions[k].reshape(batch_size, *transitions[k].shape[1:])
                       for k in transitions.keys()}
        assert(transitions['u'].shape[0] == batch_size_in_transitions)
        return transitions
|
{"/main.py": ["/utils.py", "/arguments.py", "/learner.py", "/policy.py"], "/policy.py": ["/model.py", "/replay_buffer.py", "/her.py", "/normalizer.py", "/utils.py"]}
|
24,090
|
sumitsk/HER
|
refs/heads/master
|
/learner.py
|
import torch
import numpy as np
from collections import deque
class Learner:
    """Collects T-step rollouts from parallel envs and tracks success rates."""

    def __init__(self, policy, params):
        """
        Args:
            policy: agent exposing split_obs / get_actions
            params (dict): needs 'envs', 'num_processes', 'exploit',
                'cached_env' (an env with _max_episode_steps)
        """
        self.policy = policy
        self.envs = params['envs']
        self.num_processes = params['num_processes']
        self.exploit = params['exploit']
        # exploration noise is fully disabled in exploit (evaluation) mode
        self.noise_eps = 0 if self.exploit else 0.2
        self.random_eps = 0 if self.exploit else 0.3
        # sliding window of recent per-rollout success rates
        self.success_history = deque(maxlen=100)
        self.n_episodes = 0
        self.T = params['cached_env']._max_episode_steps

    def generate_rollouts(self):
        """Runs one full episode in every parallel env.

        Returns a dict with keys o/u/g/ag/info_is_success; 'o' and 'ag' are
        num_processes x (T+1) x dim, the rest num_processes x T x dim. Also
        records the final-step success rate of this batch.
        """
        cur_o, cur_ag, cur_g = self.policy.split_obs(self.envs.reset())
        obs_hist, goal_hist, ag_hist, act_hist, succ_hist = [], [], [], [], []
        for _ in range(self.T):
            with torch.no_grad():
                act = self.policy.get_actions(cur_o, cur_ag, cur_g,
                                              noise_eps=self.noise_eps,
                                              random_eps=self.random_eps)
            # the env reward is discarded: it is recomputed during HER sampling
            raw_next, _, _, info = self.envs.step(act)
            next_o, next_ag, _ = self.policy.split_obs(raw_next)
            obs_hist.append(cur_o.copy())
            ag_hist.append(cur_ag.copy())
            goal_hist.append(cur_g.copy())
            act_hist.append(act.copy())
            succ_hist.append(np.reshape([d['is_success'] for d in info], (-1, 1)).copy())
            cur_o = next_o.copy()
            cur_ag = next_ag.copy()
        # trailing state so observations/achieved goals carry T+1 entries
        obs_hist.append(cur_o.copy())
        ag_hist.append(cur_ag.copy())
        successes = np.stack(succ_hist, 1)          # num_processes x T x 1
        episode = dict(o=np.stack(obs_hist, 1),     # num_processes x (T+1) x dim
                       u=np.stack(act_hist, 1),     # num_processes x T x dim
                       g=np.stack(goal_hist, 1),
                       ag=np.stack(ag_hist, 1),
                       info_is_success=successes)
        self.n_episodes += self.num_processes
        # fraction of envs whose final step reported success
        self.success_history.append(successes[:, -1].squeeze().mean())
        return episode

    def clear_history(self):
        """Drops all recorded success rates."""
        self.success_history.clear()

    def logs(self):
        """Returns the mean recent success rate and episode counter."""
        return {'success_rate': np.mean(self.success_history),
                'episode': self.n_episodes}
|
{"/main.py": ["/utils.py", "/arguments.py", "/learner.py", "/policy.py"], "/policy.py": ["/model.py", "/replay_buffer.py", "/her.py", "/normalizer.py", "/utils.py"]}
|
24,091
|
sumitsk/HER
|
refs/heads/master
|
/normalizer.py
|
import gym_vecenv
import numpy as np
class RMSNormalizer:
    """Observation normalizer backed by gym_vecenv's running mean/std."""

    def __init__(self, size, min_std=1e-2, clipob=10.0):
        self.rms = gym_vecenv.RunningMeanStd(shape=size)
        self.clipob = clipob
        self.min_std = min_std

    @property
    def mean(self):
        return self.rms.mean

    @property
    def std(self):
        # floor both the variance and the resulting std at min_std
        floored_var = np.maximum(self.min_std ** 2, self.rms.var)
        return np.maximum(self.min_std, floored_var ** 0.5)

    def update(self, obs):
        self.rms.update(obs)

    def recompute_stats(self):
        # statistics are maintained incrementally by rms; nothing to do here
        pass

    def normalize(self, obs):
        """Returns (obs - mean) / std clipped to [-clipob, clipob]."""
        return np.clip((obs - self.mean) / self.std, -self.clipob, self.clipob)
class Normalizer:
    """Batch-accumulating mean/std normalizer.

    Batches are folded into running sums by `update`; `recompute_stats`
    converts the sums into mean/std and resets the accumulators.
    """

    def __init__(self, size, min_std=1e-2, clipob=10.0):
        """
        Args:
            size (int): dimensionality of the vectors being normalized
            min_std (float): lower bound on std to avoid division blow-up
            clipob (float): normalized values are clipped to [-clipob, clipob]
        """
        self.clipob = clipob
        self.min_std = min_std
        self.size = size
        # running first/second moments used to derive mean and variance
        self.obs_sum = np.zeros(self.size, np.float32)
        self.obs_sumsq = np.zeros(self.size, np.float32)
        self.count = 0
        self.mean = np.zeros(self.size, np.float32)
        self.std = np.ones(self.size, np.float32)

    def update(self, v):
        """Accumulates a batch of vectors (any leading shape) into the sums."""
        v = v.reshape(-1, self.size)
        self.obs_sum += v.sum(axis=0)
        self.obs_sumsq += np.square(v).sum(axis=0)
        self.count += v.shape[0]

    def reset(self):
        """Clears the accumulators; mean/std keep their last computed values."""
        self.count = 0
        self.obs_sum[...] = 0
        self.obs_sumsq[...] = 0

    def recompute_stats(self):
        """Folds the accumulated sums into mean/std, then resets the sums.

        Fix: previously a zero count produced divide-by-zero NaN/inf stats;
        now the call is a no-op when nothing has been accumulated.
        """
        if self.count == 0:
            return
        self.mean = self.obs_sum / self.count
        # E[x^2] - E[x]^2, floored so std never collapses below min_std
        var = self.obs_sumsq / self.count - np.square(self.obs_sum / self.count)
        var = np.maximum(self.min_std ** 2, var)
        self.std = np.maximum(var ** .5, self.min_std)
        self.reset()

    def normalize(self, obs):
        """Returns (obs - mean) / std clipped to [-clipob, clipob]."""
        return np.clip((obs - self.mean) / self.std, -self.clipob, self.clipob)
class IdentityNormalizer:
    """No-op normalizer with fixed zero mean and a constant std."""

    def __init__(self, size, std=1.0):
        self.mean = np.zeros(size, np.float32)
        self.std = np.ones(size, np.float32) * std

    def update(self, v):
        # stateless by design: statistics never change
        pass

    def recompute_stats(self):
        pass

    def normalize(self, x):
        """Scales x by the fixed std (mean is zero)."""
        return (x - self.mean) / self.std
|
{"/main.py": ["/utils.py", "/arguments.py", "/learner.py", "/policy.py"], "/policy.py": ["/model.py", "/replay_buffer.py", "/her.py", "/normalizer.py", "/utils.py"]}
|
24,092
|
sumitsk/HER
|
refs/heads/master
|
/utils.py
|
import os
import shutil
import sys
import gym_vecenv
import gym
def check_logdir(log_dir):
    """Ensures a fresh log directory exists, resolving collisions interactively.

    If `log_dir` already exists the user chooses: 'c' renames the old
    directory with an '_old' suffix, 'del' deletes it, 's' aborts the run;
    any other input raises. A new directory is then created.
    """
    if not os.path.exists(log_dir):
        os.makedirs(log_dir)
        return
    # raise warning if log directory already exists
    print('\nLog directory exists already! Enter')
    choice = input('c (rename the existing directory with _old and continue)\ns (stop)!\ndel (delete existing dir): ')
    if choice == 's':
        sys.exit(0)
    if choice == 'c':
        os.rename(log_dir, log_dir + '_old')
    elif choice == 'del':
        shutil.rmtree(log_dir)
    else:
        raise NotImplementedError('Unknown input')
    os.makedirs(log_dir)
def make_robotics_env(env_name, seed):
    """Returns a zero-argument thunk that builds, flattens and seeds the env.

    The deferred construction is what SubprocVecEnv expects: each worker
    process calls the thunk itself.
    """
    def _thunk():
        made = gym.make(env_name)
        # flatten the dict observation for subprocvec support
        made = gym.wrappers.FlattenDictWrapper(
            made, ['observation', 'achieved_goal', 'desired_goal'])
        made.seed(seed)
        return made
    return _thunk
def make_parallel_envs(env_name, seed, num_processes):
    """Builds `num_processes` vectorized envs (subprocess-backed when > 1).

    Each worker gets a widely spaced seed so their random streams differ.
    Envs are deliberately left unnormalized.
    """
    thunks = [make_robotics_env(env_name=env_name, seed=seed + i * 1000000)
              for i in range(num_processes)]
    if num_processes > 1:
        return gym_vecenv.SubprocVecEnv(thunks, no_reset=True)
    # do not normalize envs
    return gym_vecenv.DummyVecEnv(thunks, no_reset=True)
def get_cached_env(env_name):
    """Returns a plain (unwrapped, unvectorized) instance of the named gym env."""
    return gym.make(env_name)
def get_buffer_shapes(env, T):
    """Derives replay-buffer per-episode array shapes from one probe step.

    Observations and achieved goals get T+1 slots per episode (to include
    the trailing state); actions, goals and success flags get T.
    """
    env.reset()
    obs, _, _, info = env.step(env.action_space.sample())
    obs_dim = obs['observation'].shape[0]
    # NOTE(review): 'ag' reuses the desired_goal dimension; this assumes
    # achieved and desired goals share a size — confirm for new envs.
    goal_dim = obs['desired_goal'].shape[0]
    return {
        'o': (T + 1, obs_dim),
        'ag': (T + 1, goal_dim),
        'u': (T, env.action_space.shape[0]),
        'g': (T, goal_dim),
        'info_is_success': (T, 1),
    }
def soft_update(target, source, tau):
    """Polyak-averages source params into target: t <- (1 - tau) * t + tau * s."""
    for t_param, s_param in zip(target.parameters(), source.parameters()):
        blended = t_param.data * (1.0 - tau) + s_param.data * tau
        t_param.data.copy_(blended)
def hard_update(target, source):
    """Copies every source parameter tensor into the matching target tensor."""
    for t_param, s_param in zip(target.parameters(), source.parameters()):
        t_param.data.copy_(s_param.data)
|
{"/main.py": ["/utils.py", "/arguments.py", "/learner.py", "/policy.py"], "/policy.py": ["/model.py", "/replay_buffer.py", "/her.py", "/normalizer.py", "/utils.py"]}
|
24,093
|
sumitsk/HER
|
refs/heads/master
|
/main.py
|
import torch
import os
import numpy as np
import datetime
from copy import deepcopy
import utils
from arguments import get_args
from learner import Learner
from policy import Policy
import logger
from tensorboard_logger import configure, log_value
if __name__ == '__main__':
    # Entry point: builds a DDPG+HER policy, a training Learner (noisy) and an
    # evaluation Learner (deterministic), then alternates rollout collection,
    # optimization and evaluation for n_epochs, checkpointing the policy.
    args = get_args()
    # unique run id: explicit --logid or a timestamp
    logid = datetime.datetime.now().strftime("%Y-%m-%d-%H-%M-%S-%f") if args.logid is None else str(args.logid)
    logdir = os.path.join('save', logid)
    utils.check_logdir(logdir)
    logger.configure(logdir)
    configure(logdir)  # tensorboard_logger
    params = vars(args)
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    params['device'] = device
    # single un-vectorized env instance used for metadata / reward function
    params['cached_env'] = utils.get_cached_env(params['env_name'])
    policy = Policy(params)
    # training workers: exploration noise enabled (exploit=False)
    train_envs = utils.make_parallel_envs(params['env_name'], params['seed'], params['num_processes'])
    trainer_params = deepcopy(params)
    trainer_params['envs'] = train_envs
    trainer_params['exploit'] = False
    trainer = Learner(policy, trainer_params)
    # evaluation workers: deterministic policy (exploit=True), separate seed
    eval_seed = np.random.randint(0, 100)
    eval_num_processes = params['num_processes']
    eval_envs = utils.make_parallel_envs(params['env_name'], eval_seed, eval_num_processes)
    evaluator_params = deepcopy(params)
    evaluator_params['num_processes'] = eval_num_processes
    evaluator_params['envs'] = eval_envs
    evaluator_params['exploit'] = True
    evaluator = Learner(policy, evaluator_params)
    n_test_rollouts = 10
    best_success_rate = -1
    latest_policy_path = os.path.join(logger.get_dir(), 'policy_latest.pt')
    best_policy_path = os.path.join(logger.get_dir(), 'policy_best.pt')
    periodic_policy_path = os.path.join(logger.get_dir(), 'policy_{}.pt')
    # optimization steps per cycle scale with the number of rollout workers
    n_batches = trainer_params['n_batches']*trainer_params['num_processes']
    for epoch in range(params['n_epochs']):
        trainer.clear_history()
        policy.set_train_mode()
        for i in range(params['n_cycles']):
            # collect one batch of episodes, store them, then optimize
            episode = trainer.generate_rollouts()
            policy.store_episode(episode)
            for _ in range(n_batches):
                critic_loss, policy_loss = policy.train()
            # only the losses from the last batch of the cycle are logged
            step = epoch*params['n_cycles']+i
            log_value('critic_loss', critic_loss, step)
            log_value('policy_loss', policy_loss, step)
            # policy.update_target_net()
        # evaluation pass with the deterministic policy
        evaluator.clear_history()
        policy.set_eval_mode()
        for _ in range(n_test_rollouts):
            evaluator.generate_rollouts()
        # log statistics
        logger.record_tabular('epoch', epoch)
        test_stats = evaluator.logs()
        for key, val in test_stats.items():
            logger.record_tabular('test/'+key, val)
        train_stats = trainer.logs()
        for key, val in train_stats.items():
            logger.record_tabular('train/'+key, val)
        for key, val in policy.logs():
            logger.record_tabular(key, val)
        logger.dump_tabular()
        log_value('train_success_rate', train_stats['success_rate'], epoch)
        log_value('test_success_rate', test_stats['success_rate'], epoch)
        # checkpoint best-so-far (and latest), plus periodic snapshots
        success_rate = test_stats['success_rate']
        if success_rate >= best_success_rate:
            best_success_rate = success_rate
            logger.info('New best success rate: {}. Saving policy to {} ...'.format(best_success_rate, best_policy_path))
            policy.save(epoch, best_policy_path)
            policy.save(epoch, latest_policy_path)
        if epoch % params['save_every'] == 0:
            policy_path = periodic_policy_path.format(epoch)
            logger.info('Saving periodic policy to {} ...'.format(policy_path))
            policy.save(epoch, policy_path)
|
{"/main.py": ["/utils.py", "/arguments.py", "/learner.py", "/policy.py"], "/policy.py": ["/model.py", "/replay_buffer.py", "/her.py", "/normalizer.py", "/utils.py"]}
|
24,094
|
sumitsk/HER
|
refs/heads/master
|
/policy.py
|
import torch
import numpy as np
from model import Actor, Critic
from replay_buffer import ReplayBuffer
from her import HER_sampler
from normalizer import Normalizer
import utils
class Policy:
    """DDPG agent with HER: actor/critic networks, target networks, input
    normalizers and an episode replay buffer sampled with hindsight
    goal relabelling."""

    def __init__(self, params):
        """Builds networks, optimizers, normalizers and the replay buffer.

        params (dict) must provide: 'relative_goal', 'cached_env', 'device',
        'actor_lr', 'critic_lr'.
        """
        self.relative_goal = params['relative_goal']
        env = params['cached_env']
        self.T = env._max_episode_steps
        self.max_u = 1.0                 # action magnitude bound
        self.action_l2 = 1.0             # weight of the action L2 penalty
        self.clip_obs = 200.0            # raw observation clipping range
        self.gamma = 1 - 1.0/self.T      # horizon-dependent discount
        self.tau = 0.001                 # polyak coefficient for target nets
        self.clip_pos_returns = True     # clamp TD targets to be non-positive
        self.clip_return = self.T        # magnitude of the TD-target clamp
        self.train_batch_size = 256
        self.max_grad_norm = 0.5
        self.device = params['device']
        buffer_shapes = utils.get_buffer_shapes(env, self.T)
        self.obs_dim = buffer_shapes['o'][-1]
        self.goal_dim = buffer_shapes['g'][-1]
        # networks consume the observation concatenated with the goal
        input_size = self.obs_dim + self.goal_dim
        self.action_dim = env.action_space.shape[0]
        self.actor = Actor(input_size, self.action_dim, self.max_u).to(self.device)
        self.target_actor = Actor(input_size, self.action_dim, self.max_u).to(self.device)
        self.critic = Critic(input_size+self.action_dim).to(self.device)
        self.target_critic = Critic(input_size+self.action_dim).to(self.device)
        # target nets start as exact copies of the online nets
        utils.hard_update(self.target_actor, self.actor)
        utils.hard_update(self.target_critic, self.critic)
        self.norm_clip = 5.0
        self.o_stats = Normalizer(self.obs_dim, clipob=self.norm_clip)
        self.g_stats = Normalizer(self.goal_dim, clipob=self.norm_clip)
        self.optim_actor = torch.optim.Adam(self.actor.parameters(), params['actor_lr'])
        self.optim_critic = torch.optim.Adam(self.critic.parameters(), params['critic_lr'])
        self.critic_loss = torch.nn.MSELoss()
        def reward_fun(ag_2, g, info):
            # env reward recomputed for (possibly goal-substituted) transitions
            return env.compute_reward(achieved_goal=ag_2, desired_goal=g, info=info)
        self.her_sampler = HER_sampler(replay_k=4, reward_fun=reward_fun)
        buffer_size = int(1e6)
        self.buffer = ReplayBuffer(buffer_shapes, buffer_size, self.T, self.her_sampler)

    def get_actions(self, obs, ag, goal, noise_eps=0, random_eps=0):
        """Returns clipped, noisy actions for a batch of observations.

        noise_eps scales additive Gaussian noise; random_eps is the
        per-sample probability of replacing the action with a uniformly
        random one. NOTE: gradients are not blocked here; the caller
        (Learner.generate_rollouts) wraps this call in torch.no_grad().
        """
        o, g = self.preprocess_og(obs, ag, goal)
        o = self.o_stats.normalize(o)
        g = self.g_stats.normalize(g)
        inp = self.append_goal(o, g)
        inp = torch.from_numpy(inp).float().to(self.device)
        u = self.actor(inp)
        u = u.cpu().numpy()
        # Gaussian noise
        noise = noise_eps * self.max_u * np.random.randn(*u.shape)
        u += noise
        u = np.clip(u, -self.max_u, self.max_u)
        # eps-greedy: a per-sample Bernoulli mask swaps in a random action
        u += np.random.binomial(1, random_eps, u.shape[0]).reshape(-1, 1) * (self._random_action(u.shape[0]) - u)
        return u

    def _random_action(self, n):
        # n uniform random actions in [-max_u, max_u]
        return np.random.uniform(low=-self.max_u, high=self.max_u, size=(n, self.action_dim))

    def append_goal(self, obs, goal):
        # network input is [obs ; goal] along the last axis
        return np.concatenate([obs, goal], -1)

    def split_obs(self, obs):
        # inverse of the flattened env layout: observation | achieved | desired
        # (see the FlattenDictWrapper key order in utils.make_robotics_env)
        return np.split(obs, [self.obs_dim, self.obs_dim+self.goal_dim], -1)

    def preprocess_og(self, obs, ag, goal):
        """Optionally re-expresses the goal relative to the achieved goal,
        then clips both obs and goal to [-clip_obs, clip_obs]."""
        if self.relative_goal:
            goal = goal - ag
        obs = np.clip(obs, -self.clip_obs, self.clip_obs)
        goal = np.clip(goal, -self.clip_obs, self.clip_obs)
        return obs, goal

    def update_target_net(self):
        # polyak-average the online networks into their targets
        utils.soft_update(self.target_critic, self.critic, self.tau)
        utils.soft_update(self.target_actor, self.actor, self.tau)

    def store_episode(self, episode_batch, update_stats=True):
        """Adds a batch of episodes to replay; optionally refreshes the
        input normalizers from a HER-relabelled sample.

        NOTE: mutates the caller's dict by adding 'o_2'/'ag_2' keys.
        """
        self.buffer.store_episode(episode_batch)
        if update_stats:
            # add transitions to normalizer
            episode_batch['o_2'] = episode_batch['o'][:, 1:, :]
            episode_batch['ag_2'] = episode_batch['ag'][:, 1:, :]
            num_normalizing_transitions = episode_batch['u'].shape[0]*episode_batch['u'].shape[1]
            # sample through HER so the stats also reflect relabelled goals
            transitions = self.her_sampler.sample(episode_batch, num_normalizing_transitions)
            o, g = self.preprocess_og(transitions['o'], transitions['ag'], transitions['g'])
            self.o_stats.update(o)
            self.g_stats.update(g)
            self.o_stats.recompute_stats()
            self.g_stats.recompute_stats()

    def train(self):
        """One DDPG optimization step on a replayed minibatch.

        Returns (critic_loss, policy_loss) as Python floats and finishes
        with a soft target-network update.
        """
        batch = self.buffer.sample(self.train_batch_size)
        # current-step inputs
        o, g = self.preprocess_og(batch['o'], batch['ag'], batch['g'])
        o = self.o_stats.normalize(o)
        g = self.g_stats.normalize(g)
        obs = self.append_goal(o, g)
        obs = torch.from_numpy(obs).float().to(self.device)
        # next-step inputs (same goal, next observation/achieved goal)
        o_2, g_2 = self.preprocess_og(batch['o_2'], batch['ag_2'], batch['g'])
        o_2 = self.o_stats.normalize(o_2)
        g_2 = self.g_stats.normalize(g_2)
        obs_2 = self.append_goal(o_2, g_2)
        obs_2 = torch.from_numpy(obs_2).float().to(self.device)
        rew = torch.from_numpy(batch['r']).float().to(self.device)
        act = torch.from_numpy(batch['u']).float().to(self.device)
        # TD target from the target networks, clamped to the valid return range
        with torch.no_grad():
            act_2 = self.target_actor(obs_2)
            next_qsa = self.target_critic(obs_2, act_2).squeeze()
            target_q = rew + self.gamma * next_qsa
            clip_range = (-self.clip_return, 0. if self.clip_pos_returns else np.inf)
            target_q = torch.clamp(target_q, *clip_range)
        # critic update
        self.optim_critic.zero_grad()
        main_q = self.critic(obs, act).squeeze()
        # with torch.no_grad():
        # td_error = target_q - q
        cr_loss = self.critic_loss(main_q, target_q)
        cr_loss.backward()
        torch.nn.utils.clip_grad_norm_(self.critic.parameters(), self.max_grad_norm)
        self.optim_critic.step()
        # actor update: maximize Q with an L2 penalty on the action magnitude
        self.optim_actor.zero_grad()
        pred_act = self.actor(obs)
        policy_loss = -self.critic(obs, pred_act).mean()
        policy_loss += self.action_l2 * ((pred_act/self.max_u)**2).mean()
        policy_loss.backward()
        torch.nn.utils.clip_grad_norm_(self.actor.parameters(), self.max_grad_norm)
        self.optim_actor.step()
        self.update_target_net()
        return cr_loss.item(), policy_loss.item()

    def logs(self):
        """Returns [(name, value)] pairs summarizing normalizer statistics."""
        logs = []
        logs += [('stats_o/mean', np.mean(self.o_stats.mean))]
        logs += [('stats_o/std', np.mean(self.o_stats.std))]
        logs += [('stats_g/mean', np.mean(self.g_stats.mean))]
        logs += [('stats_g/std', np.mean(self.g_stats.std))]
        return logs

    def set_train_mode(self):
        # switch all networks to training mode
        self.actor.train()
        self.critic.train()
        self.target_actor.train()
        self.target_critic.train()

    def set_eval_mode(self):
        # switch all networks to evaluation mode
        self.actor.eval()
        self.critic.eval()
        self.target_actor.eval()
        self.target_critic.eval()

    def save(self, epoch, filename):
        """Checkpoints the online networks, their optimizers and the epoch."""
        checkpoint = {'actor': self.actor.state_dict(),
                      'critic': self.critic.state_dict(),
                      'optim_actor': self.optim_actor.state_dict(),
                      'optim_critic': self.optim_critic.state_dict(),
                      'epoch': epoch
                      }
        torch.save(checkpoint, filename)
|
{"/main.py": ["/utils.py", "/arguments.py", "/learner.py", "/policy.py"], "/policy.py": ["/model.py", "/replay_buffer.py", "/her.py", "/normalizer.py", "/utils.py"]}
|
24,098
|
NatashaKSS/CS4248Assg2-Viterbi
|
refs/heads/master
|
/cross_validator.py
|
# Import standard modules
import sys
import string
import re
import math
import pickle
# Import custom modules
from PennTreebankPOSTags import POS_TAGS
from PennTreebankPOSTags import START_MARKER
from PennTreebankPOSTags import END_MARKER
from Tokenizer import Tokenizer
from HMMProbGenerator import HMMProbGenerator
from POSTagModelTrainer import POSTagModelTrainer
from POSTagger import POSTagger
#===========================================================================#
# CrossValidator
#
# PERFORMS 10-FOLD CROSS VALIDATION OF OUR MODEL ON A SPECIFIED TRAINING SET.
#
# Prints the accuracies of each fold and the average accuracy of all 10 folds
# to the console.
#===========================================================================#
class CrossValidator():
def __init__(self, PATH_TO_DATA_TRAIN):
    """Loads the training corpus and sets up the tokenizer and model trainer.

    PATH_TO_DATA_TRAIN   path to the POS-tagged training data file
    """
    print('== [CrossValidator instantiated] ==')
    # Set up tokenizer before everything else
    self.tokenizer = Tokenizer()
    # Set up the Model Trainer
    self.POS_trainer = POSTagModelTrainer(PATH_TO_DATA_TRAIN, True)
    # Read the corpus once up front; use a context manager so the file
    # handle is closed instead of leaking (original left it open).
    with open(PATH_TO_DATA_TRAIN) as data_file:
        self.DATA_TRAIN = data_file.read()
#=====================================================#
# 10-FOLD CROSS VALIDATION
#=====================================================#
def validate(self):
print('Validating model...please wait...')
sentences = self.tokenizer.get_sentences(self.DATA_TRAIN)
LEN_SENTENCES = len(sentences)
ONE_FOLD_SIZE = int(math.floor(0.1 * LEN_SENTENCES))
acc_scores_so_far = []
for i in range(10):
print('Performing validation on fold no.:', i + 1, 'please wait...')
sentences = self.shift(sentences, ONE_FOLD_SIZE)
test_sentences = sentences[-ONE_FOLD_SIZE:]
training_sentences = sentences[0:-ONE_FOLD_SIZE]
# Tokenizing Training data
training_dataset = self.tokenizer.flatten_list_of_sentences(training_sentences)
list_of_str_postag = self.tokenizer.tokenize_document(training_dataset)
list_of_word_postag_pairs = self.tokenizer.get_pairs_of_word_tags(list_of_str_postag)
# Training the model
model = HMMProbGenerator(list_of_word_postag_pairs).generate_probs()
# Running the POS Tagger
self.POS_tagger = POSTagger('', '', model, True)
# Run the model on the test data
best_postags_and_gold_standard_tags = self.POS_tagger.run_with_provided_sentences(test_sentences)
best_postags = best_postags_and_gold_standard_tags[0]
gold_standard_tags = best_postags_and_gold_standard_tags[1]
# Compute accuracy
acc_scores_so_far.append(self.compute_accuracy(gold_standard_tags, best_postags))
print('COMPLETED validation on fold no.:', i + 1, '!', 10 - (i + 1), 'more to go!')
print(acc_scores_so_far)
print("Average Cross Validation Score:", self.get_average(acc_scores_so_far))
"""
Shifts a list by n spaces to the right and returns a copy of that array
arr List to shift
n Number of elements to shift by to the right
return
"""
def shift(self, arr, n):
return arr[n:] + arr[:n]
"""
Computes the accuracy of our predicted postags as compared to a list of
true positive postags.
true_postags List of true positive POS tags in the format
[['<S>', 'FW', 'FW', 'FW', 'NNP', 'NNP', '<E>'], ...]
test_postags List of predicted POS tags in the format
[['<S>', 'FW', 'FW', 'FW', 'NNP', 'NNP', '<E>'], ...], that
is, a list of sentences where each sentence is a list of
every word's tag
return Percentage accuracy of our model on the testset in a single
10-fold cross validation runthrough
"""
def compute_accuracy(self, true_postags, test_postags):
N = self.compute_accuracy_N(test_postags)
correct = 0
for i in range(len(test_postags)):
for j in range(len(test_postags[i])):
if test_postags[i][j] == true_postags[i][j]:
correct += 1
return float(correct) / float(N)
"""
Helper function for compute_accuracy to compute the total number of tags in
our entire test set
test_postags List of predicted POS tags in the format
[['<S>', 'FW', 'FW', 'FW', 'NNP', 'NNP', '<E>'], ...], that
is, a list of sentences where each sentence is a list of every
word's tag
return Total number of tags in the entire test set
"""
def compute_accuracy_N(self, test_postags):
N = 0
for sentence in test_postags:
for postag in sentence:
N = N + 1
return N
"""
Computes the average of a list of numbers
scores List of numbers. In this context, it's the list of accuracy scores
after cross-validation
return Average of a list of numbers
"""
def get_average(self, scores):
total = 0
for score in scores:
total += score
return float(total) / len(scores)
#=====================================================#
# EXECUTION OF CrossValidator
#=====================================================#
# Entry point: run 10-fold cross validation on the training file given as the
# first CLI argument. Fail with a usage message instead of a raw IndexError
# when the argument is missing.
if len(sys.argv) < 2:
    sys.exit('usage: python cross_validator.py <path-to-training-data>')
PATH_TO_DATA_TRAIN = sys.argv[1]
print("Path to training data:", PATH_TO_DATA_TRAIN)
CrossValidator(PATH_TO_DATA_TRAIN).validate()
|
{"/cross_validator.py": ["/PennTreebankPOSTags.py", "/Tokenizer.py", "/HMMProbGenerator.py", "/POSTagModelTrainer.py", "/POSTagger.py"], "/POSTagger.py": ["/Tokenizer.py", "/PennTreebankPOSTags.py"], "/cross_valid_investigate_errors.py": ["/PennTreebankPOSTags.py"], "/Tokenizer.py": ["/PennTreebankPOSTags.py"], "/build_tagger.py": ["/POSTagModelTrainer.py"], "/HMMProbGenerator.py": ["/PennTreebankPOSTags.py"], "/POSTagModelTrainer.py": ["/Tokenizer.py", "/HMMProbGenerator.py"], "/run_tagger.py": ["/Tokenizer.py", "/PennTreebankPOSTags.py", "/POSTagger.py"]}
|
24,099
|
NatashaKSS/CS4248Assg2-Viterbi
|
refs/heads/master
|
/POSTagger.py
|
# Import standard modules
import sys
import math
import pickle
# Import custom modules
from Tokenizer import Tokenizer
from PennTreebankPOSTags import POS_TAGS, START_MARKER, END_MARKER
# Define constants
UNK = '<UNK>'
#===========================================================================#
# POSTagger
# Executes the viterbi & backpointer algorithms to generate the best POS tags
#===========================================================================#
class POSTagger():
    """Executes the Viterbi & backpointer algorithms to generate the best POS
    tag sequence for each tokenized input sentence.

    Works in two modes: normal (loads a pickled model and a test file from
    disk) and cross-validation (a pre-built model is passed in directly).
    """

    def __init__(self, PATH_TO_DATA_TEST, PATH_TO_DATA_MODEL, model=None, VALIDATE_MODE=False):
        # In cross-validation mode the model is supplied by the caller;
        # otherwise it is unpickled from PATH_TO_DATA_MODEL.
        MODEL = None
        if VALIDATE_MODE:
            print("== [POSTagger instantiated] CROSS VALIDATION MODE ==")
            MODEL = model
        else:
            print("== [POSTagger instantiated] ==")
            self.PATH_TO_DATA_TEST = PATH_TO_DATA_TEST
            self.PATH_TO_DATA_MODEL = PATH_TO_DATA_MODEL
            MODEL = self.load_model()
        # Matrix representing P(t_i | t_i-1), where rows: t_i-1, cols: t_i
        self.PROB_TAG_GIVEN_TAG = MODEL[0]
        # Matrix representing P(w_i | t_i), where rows: t_i, cols: w_i
        self.PROB_WORD_GIVEN_TAG = MODEL[1]
        # List of seen words.
        # NOTE(review): the vocabulary is read off the 'NN' row on the
        # assumption that every tag row shares the same word columns -- confirm
        # against the model builder's initialization.
        self.VOCAB_WORDS = self.PROB_WORD_GIVEN_TAG['NN'].keys()
        self.tokenizer = Tokenizer()

    # Runs the tagger and formats the result for sents.out
    def run(self):
        sentences = self.load_document_as_sentences()
        best_postags_with_sentences = [self.get_best_postags(sentences), [sentence.split(' ') for sentence in sentences]]
        return self.format_best_postags_and_sentences(best_postags_with_sentences)

    # Runs the tagger for cross validation purposes
    def run_with_provided_sentences(self, sentences):
        return self.get_best_postags_for_cross_validation(sentences)

    # Gets the best POS tag sequence for a list of input sentences, where sentences
    # are of the form ['<S>/<S> The/DT ...', '<S>/<S> The/DT']
    def get_best_postags(self, sentences):
        print("-- RUNNING THE PART OF SPEECH TAGGER --")
        sen_as_tokens_list = self.generate_tokens_for_test_doc_sentences(sentences, self.VOCAB_WORDS)
        best_postags_list = []
        # One Viterbi pass per sentence
        for i in range(len(sen_as_tokens_list)):
            best_postags = self.tag(sen_as_tokens_list[i])
            best_postags_list.append(best_postags)
        return best_postags_list

    # Gets the best POS tag sequence for a list of input sentences for cross
    # validation, where sentences are of the form ['<S>/<S> The/DT ...', '<S>/<S> The/DT']
    # Returns a 2-tuple of (predicted tag lists, gold-standard tag lists).
    def get_best_postags_for_cross_validation(self, sentences):
        print("-- RUNNING THE PART OF SPEECH TAGGER FOR CROSS VALIDATION --")
        # Prep the sentences to tag using our tokenizer: add boundary markers,
        # then strip the gold tags off (keeping them for scoring)
        sentences = self.tokenizer.insert_start_end_sentence_tags(sentences)
        test_sentences_and_tags = self.tokenizer.extract_tags_from_test_dataset(sentences, self.VOCAB_WORDS)
        test_sentences = test_sentences_and_tags[0]
        test_tags = test_sentences_and_tags[1]
        sen_as_tokens_list = self.generate_tokens_for_test_doc_sentences(test_sentences, self.VOCAB_WORDS)
        # Tag the provided list of sentences
        best_postags_list = []
        for i in range(len(sen_as_tokens_list)):
            best_postags = self.tag(sen_as_tokens_list[i])
            best_postags_list.append(best_postags)
        return (best_postags_list, test_tags)

    #=====================================================#
    # VITERBI ALGORITHM
    #=====================================================#
    def tag(self, tokens):
        """Run Viterbi over one tokenized sentence and return the best POS tag
        sequence (one tag string per token). All probabilities are in log
        space, so path scores are summed rather than multiplied."""
        LEN_TOKENS = len(tokens)
        LEN_POSTAG = len(POS_TAGS)
        # Initialize memo & backpointers for the best POS tags.
        # memo[i][j]: best log-probability of any tag path that ends with tag j
        # at token i.
        # best_postags[i][j]: backpointer -- the tag index chosen at token i-1
        # on that best path.
        memo = []
        best_postags = []
        for i in range(LEN_TOKENS):
            memo.append([])
            best_postags.append([])
            for j in range(LEN_POSTAG):
                memo[i].append(sys.float_info.min)
                best_postags[i].append(-1)
                if i == 0:
                    # Initialize probability at '<S>' to 1 since it occurs for all documents
                    # (log scale equivalent of probability = 1 is 0).
                    # NOTE(review): this sets row 0 to 0 for EVERY tag, not only
                    # '<S>' -- presumably harmless because token 0 is always the
                    # '<S>' marker; confirm.
                    memo[i][j] = 0
        # Compute most probable path & store in memo and best_postags arrays
        for i in range(1, LEN_TOKENS):
            for j in range(LEN_POSTAG):
                curr_max = -sys.float_info.max
                back_ptr = -1
                for k in range(LEN_POSTAG):
                    # Log-space sum = product of path, transition and emission probs
                    transition_prob = memo[i - 1][k] + \
                        self.PROB_TAG_GIVEN_TAG[POS_TAGS[k]][POS_TAGS[j]] + \
                        self.PROB_WORD_GIVEN_TAG[POS_TAGS[j]][tokens[i]]
                    if transition_prob > curr_max:
                        curr_max = transition_prob
                        # For the first real token, force the backpointer to
                        # index 0, which is START_MARKER ('<S>') in POS_TAGS
                        back_ptr = k if i > 1 else 0
                memo[i][j] = curr_max
                best_postags[i][j] = back_ptr
        # Best final state = argmax over the last column of the trellis
        best_postag_at_end_of_sentence = self.find_arg_of_max(memo[LEN_TOKENS - 1])
        found_best_postag_value = best_postag_at_end_of_sentence[0]  # (unused)
        found_best_postag_index = best_postag_at_end_of_sentence[1]
        return self.get_best_viterbi_path(best_postags, found_best_postag_index)

    # To traverse a sentence from last POS tag to 1st POS tag
    def get_best_viterbi_path(self, back_ptrs, best_end_of_sentence_back_ptr):
        # Note: ignore the 1st tag since its back pointer is undefined
        # (i.e. there's no reasonable back ptr for the 1st tag of a sentence)
        back_ptrs = list(reversed(back_ptrs[1:]))
        LEN_BACK_PTRS = len(back_ptrs)
        # last POS TAG is always an END_MARKER
        # we can say this because we always add a START_MARKER & END_MARKER between
        # sentences during the tokenization phase of the test set
        best_pos_tag_sequence = [END_MARKER]
        for i in range(LEN_BACK_PTRS):  # traversing sentence backwards
            best_pos_tag_sequence.append(POS_TAGS[back_ptrs[i][best_end_of_sentence_back_ptr]])
            best_end_of_sentence_back_ptr = back_ptrs[i][best_end_of_sentence_back_ptr]
        # remember to reverse sequence of best POS tags since we traversed sentence backwards
        return list(reversed(best_pos_tag_sequence))

    #=====================================================#
    # FORMAT TOKENS
    #=====================================================#
    # Helper function to generate tokens needed for the Viterbi tagger from
    # sentences in the format of ['<S> The cow...ate grass . <E>', '<S> The man...', ...]
    def generate_tokens_for_test_doc_sentences(self, sentences, word_vocab):
        sen_as_tokens_list = [self.tokenizer.tokenize_test_document(sentence, word_vocab) for sentence in sentences]
        sen_as_tokens_list = [sen_tokens for sen_tokens in sen_as_tokens_list if sen_tokens != []]  # remove any empty lists due to empty sentences
        return sen_as_tokens_list

    """
    Formats the output of the Viterbi POS tagger in this Assignment's output
    format.
    postags_with_sents In the format of
    [[<list_of_best_postags>, <list_of_test_sentences>]],
    where <list_of_best_postags> is the list of POS tags
    which corresponds to each entry in <list_of_test_sentences>
    return String in the format
    '<word1>/<tag1> <word2>/<tag2>\n<word3>/<tag3>\n'
    """
    def format_best_postags_and_sentences(self, postags_with_sents):
        result = ''
        postags = postags_with_sents[0]
        sentence_tokens = postags_with_sents[1]
        for i in range(len(sentence_tokens)):
            for j in range(len(sentence_tokens[i])):
                # Boundary pseudo-tags are dropped from the printed output
                if (postags[i][j] != START_MARKER and postags[i][j] != END_MARKER):
                    token_postag = sentence_tokens[i][j] + '/' + postags[i][j]
                    result += token_postag + ' '
            result = result.strip() + '\n'
        return result

    #=====================================================#
    # LOAD FILES & INITIALIZE MODEL
    #=====================================================#
    def load_document_as_sentences(self):
        DATA_TEST = open(self.PATH_TO_DATA_TEST).read()
        return self.tokenizer.generate_sentences_from_test_document(DATA_TEST)

    def load_model(self):
        # Unpickle the [P(t_i | t_i-1), P(w_i | t_i)] model produced by training
        return pickle.load(open(self.PATH_TO_DATA_MODEL, 'rb'))

    #=====================================================#
    # HELPER METHODS
    #=====================================================#
    def find_arg_of_max(self, lst):
        """Return (max value, index of max) over lst; (-max_float, -1) if empty."""
        curr_max = -sys.float_info.max
        curr_max_index = -1
        for i in range(len(lst)):
            if lst[i] > curr_max:
                curr_max = lst[i]
                curr_max_index = i
        return (curr_max, curr_max_index)
|
{"/cross_validator.py": ["/PennTreebankPOSTags.py", "/Tokenizer.py", "/HMMProbGenerator.py", "/POSTagModelTrainer.py", "/POSTagger.py"], "/POSTagger.py": ["/Tokenizer.py", "/PennTreebankPOSTags.py"], "/cross_valid_investigate_errors.py": ["/PennTreebankPOSTags.py"], "/Tokenizer.py": ["/PennTreebankPOSTags.py"], "/build_tagger.py": ["/POSTagModelTrainer.py"], "/HMMProbGenerator.py": ["/PennTreebankPOSTags.py"], "/POSTagModelTrainer.py": ["/Tokenizer.py", "/HMMProbGenerator.py"], "/run_tagger.py": ["/Tokenizer.py", "/PennTreebankPOSTags.py", "/POSTagger.py"]}
|
24,100
|
NatashaKSS/CS4248Assg2-Viterbi
|
refs/heads/master
|
/cross_valid_investigate_errors.py
|
# Import standard modules
import sys
import math
import pickle
from PennTreebankPOSTags import POS_TAGS, START_MARKER, END_MARKER
#=====================================================#
# EXECUTION OF PROGRAM
#=====================================================#
# Load the pickled error-analysis dict produced during cross validation and
# print the recorded errors grouped by POS tag, in canonical tag order.
# Use `with` so the file handle is closed deterministically (the original
# leaked it via pickle.load(open(...))).
with open('cross-validation-results-investigate', 'rb') as results_file:
    errors = pickle.load(results_file)
for postag in POS_TAGS:
    # Membership test on the dict directly -- no need for .keys()
    if postag in errors:
        print('==', postag, '==')
        print(errors[postag])
|
{"/cross_validator.py": ["/PennTreebankPOSTags.py", "/Tokenizer.py", "/HMMProbGenerator.py", "/POSTagModelTrainer.py", "/POSTagger.py"], "/POSTagger.py": ["/Tokenizer.py", "/PennTreebankPOSTags.py"], "/cross_valid_investigate_errors.py": ["/PennTreebankPOSTags.py"], "/Tokenizer.py": ["/PennTreebankPOSTags.py"], "/build_tagger.py": ["/POSTagModelTrainer.py"], "/HMMProbGenerator.py": ["/PennTreebankPOSTags.py"], "/POSTagModelTrainer.py": ["/Tokenizer.py", "/HMMProbGenerator.py"], "/run_tagger.py": ["/Tokenizer.py", "/PennTreebankPOSTags.py", "/POSTagger.py"]}
|
24,101
|
NatashaKSS/CS4248Assg2-Viterbi
|
refs/heads/master
|
/Tokenizer.py
|
# Import standard modules
import string
import re
import math
# Import custom modules
from PennTreebankPOSTags import START_MARKER
from PennTreebankPOSTags import END_MARKER
# Define constants
NEWLINE = 'NEWLINE' # represents the char \n
UNK = '<UNK>' # symbol representing out-of-vocabulary words
NUM_SYMBOL = '<NUM>' # symbol representing numerics
NUM_SYMBOL_REGEX = r'^(\d+[.,\-]*\d*)+$' # regex pattern to detect NUM_SYMBOL
#===========================================================================#
# Tokenizer
# Tokenizes the training set and test set sentences into token formats that
# can be read my POSTagModelTrainer and POSTagger and CrossValidator.
#===========================================================================#
class Tokenizer():
    """Tokenizes training-set and test-set sentences into the token formats
    consumed by POSTagModelTrainer, POSTagger and CrossValidator."""

    def __init__(self):
        print("== [Tokenizer instantiated] ==")

    #=====================================================#
    # TOKENIZER FOR UNSEEN SENTENCES
    #=====================================================#
    def generate_sentences_from_test_document(self, doc_string):
        """Split a raw test document into sentences sandwiched by the
        unlabelled <S> / <E> boundary markers."""
        sentences = self.get_sentences(doc_string)
        return self.insert_start_end_sentence_tokens(sentences)

    def tokenize_test_document(self, doc_string, word_vocab):
        """Tokenize a test-set string.

        doc_string  String of the document to tokenize
        word_vocab  Collection of in-vocabulary word types
        return      List of tokens with numerics mapped to <NUM>, unseen words
                    mapped to <UNK>, and any trailing empty sentence removed
        """
        doc_tokens = self.get_test_data_tokens(doc_string)
        doc_tokens = self.replace_numeric_tokens_with_NUM_SYMBOL(doc_tokens)
        doc_tokens = self.replace_unseen_tokens_with_UNK(doc_tokens, word_vocab)
        return self.remove_empty_sentence_at_end(doc_tokens)

    def remove_empty_sentence_at_end(self, tokens):
        """Drop a trailing ['<S>', '', '<E>'] triple, which appears when a file
        ends with an extra blank line. Token lists shorter than 3 can never
        contain such a triple and are returned unchanged."""
        LENGTH_TOKENS = len(tokens)
        if (LENGTH_TOKENS >= 3
                and tokens[LENGTH_TOKENS - 3] == START_MARKER
                and tokens[LENGTH_TOKENS - 2] == ''
                and tokens[LENGTH_TOKENS - 1] == END_MARKER):
            return tokens[0:-3]
        return tokens

    def insert_start_end_sentence_tokens(self, sentences):
        """Sandwich plain START/END marker tokens around each sentence,
        trimming leading/trailing spaces. Unlike the labelled-corpus variant,
        the markers here are bare tokens, not word/tag pairs."""
        START_SENTENCE = START_MARKER + ' '
        END_SENTENCE = ' ' + END_MARKER
        return [START_SENTENCE + sentence.strip(' ') + END_SENTENCE for sentence in sentences]

    def get_test_data_tokens(self, doc_string_with_S_E):
        # Terms in test data are separated by spaces, so this splits them
        return doc_string_with_S_E.split(' ')

    def replace_unseen_tokens_with_UNK(self, tokens, word_vocab):
        """Map every out-of-vocabulary token to <UNK>."""
        return [self.replace_unseen_token_with_UNK(token, word_vocab) for token in tokens]

    def replace_unseen_token_with_UNK(self, token, word_vocab):
        """Return the token itself when seen in training, else <UNK>."""
        return token if token in word_vocab else UNK

    def replace_numeric_tokens_with_NUM_SYMBOL(self, tokens):
        """Map every token matching the numeric pattern to <NUM>."""
        return [NUM_SYMBOL if re.match(NUM_SYMBOL_REGEX, token) else token
                for token in tokens]

    # == UNUSED ==
    # Kept for reference: RegEx tokenizer for raw corpora. The assignment's
    # test set is already space-delimited, so splitting on ' ' suffices.
    def get_test_data_tokens_UNUSED(self, doc_string):
        return re.findall(r"[\w]+|[.,!?;\(\)\[\](...)('s)('d)(n't)('ll)('S)('D)(N'T)('LL)]+", doc_string)

    #=====================================================#
    # TOKENIZER FOR LABELLED CORPUS
    #=====================================================#
    def tokenize_document(self, doc_string):
        """Tokenize a labelled corpus close to the Penn Treebank format:
        split into sentences, add labelled boundary markers, then split the
        rejoined document into word/tag tokens."""
        sentences = self.get_sentences(doc_string)
        sentences_S_E_tags = self.insert_start_end_sentence_tags(sentences)
        doc_str_with_S_E_tags = self.flatten_list_of_sentences(sentences_S_E_tags)
        return self.get_train_data_tokens(doc_str_with_S_E_tags)

    def get_train_data_tokens(self, doc_string_with_S_E_tags):
        # Terms in training data are separated by spaces, so this splits them
        return doc_string_with_S_E_tags.split(' ')

    def get_sentences(self, doc_string):
        """Split a document on newlines, dropping empty lines."""
        return [x for x in doc_string.split('\n') if len(x) > 0]

    def insert_start_end_sentence_tags(self, sentences):
        """Sandwich labelled <S>/<S> and <E>/<E> marker pairs around each
        sentence, trimming leading/trailing spaces."""
        START_SENTENCE = START_MARKER + '/' + START_MARKER + ' '
        END_SENTENCE = ' ' + END_MARKER + '/' + END_MARKER
        return [START_SENTENCE + sentence.strip(' ') + END_SENTENCE for sentence in sentences]

    def flatten_list_of_sentences(self, sentences):
        """Stitch each sentence into 1 big space-separated string."""
        return ' '.join(sentences)

    def flatten_list_of_tokens(self, tokens):
        """Stitch each token into 1 big space-separated string."""
        return ' '.join(tokens)

    def get_pairs_of_word_tags(self, list_of_str_postag):
        """Convert ['perhaps/RB', 'forced/VBN', ...] into
        [['perhaps', 'RB'], ['forced', 'VBN'], ...], skipping empty strings."""
        return [self.__convert_to_word_postag_pair(str_postag)
                for str_postag in list_of_str_postag if len(str_postag) > 0]

    def __convert_to_word_postag_pair(self, str_postag):
        """Split 'perhaps/RB' on the LAST slash (words themselves may contain
        '/'), mapping numeric words to the <NUM> placeholder."""
        str_postag_pair = str_postag.rsplit('/', 1)
        if re.match(NUM_SYMBOL_REGEX, str_postag_pair[0]):
            return [NUM_SYMBOL, str_postag_pair[1]]
        return [str_postag_pair[0], str_postag_pair[1]]

    #=====================================================#
    # TOKENIZER FOR CROSS VALIDATOR
    #=====================================================#
    def extract_tags_from_test_dataset(self, sentences, word_vocab):
        """Split labelled sentences such as ["As/IN part/NN ...", ...] into a
        2-tuple of (plain sentence strings, per-sentence tag lists).

        sentences   List of labelled sentence strings
        word_vocab  List of all word types from the training set
                    NOTE(review): accepted but unused here -- kept for
                    interface compatibility with callers.
        return      (["As part ...", ...], [['IN', 'NN', ...], ...])
        """
        list_of_sentence_in_str_form = []
        list_of_postags = []
        for sentence in sentences:
            words = []
            postags = []
            for token_tag_str in sentence.split(' '):
                str_postag_pair = token_tag_str.rsplit('/', 1)
                words.append(str_postag_pair[0])
                postags.append(str_postag_pair[1])
            list_of_sentence_in_str_form.append(' '.join(words))
            list_of_postags.append(postags)
        return (list_of_sentence_in_str_form, list_of_postags)
|
{"/cross_validator.py": ["/PennTreebankPOSTags.py", "/Tokenizer.py", "/HMMProbGenerator.py", "/POSTagModelTrainer.py", "/POSTagger.py"], "/POSTagger.py": ["/Tokenizer.py", "/PennTreebankPOSTags.py"], "/cross_valid_investigate_errors.py": ["/PennTreebankPOSTags.py"], "/Tokenizer.py": ["/PennTreebankPOSTags.py"], "/build_tagger.py": ["/POSTagModelTrainer.py"], "/HMMProbGenerator.py": ["/PennTreebankPOSTags.py"], "/POSTagModelTrainer.py": ["/Tokenizer.py", "/HMMProbGenerator.py"], "/run_tagger.py": ["/Tokenizer.py", "/PennTreebankPOSTags.py", "/POSTagger.py"]}
|
24,102
|
NatashaKSS/CS4248Assg2-Viterbi
|
refs/heads/master
|
/PennTreebankPOSTags.py
|
"""
PENN TREEBANK POS TAG SET
A store of all POS tags used for this assignment.
Summary of additional tag symbols:
<S> e.g. start-of-sentence marker
<E> e.g. end-of-sentence marker
`` e.g. left quote "
'' e.g. right quote "
-LRB- e.g. (, [, {
-RRB- e.g. ), ], }
"""
START_MARKER = '<S>'
END_MARKER = '<E>'
POS_TAGS = [
START_MARKER, END_MARKER,
'CC', 'CD', 'DT', 'EX', 'FW', 'IN',
'JJ', 'JJR', 'JJS', 'LS', 'MD',
'NN', 'NNS', 'NNP', 'NNPS',
'PDT', 'POS', 'PRP', 'PRP$',
'RB', 'RBR', 'RBS', 'RP',
'SYM', 'TO', 'UH',
'VB', 'VBD', 'VBG', 'VBN', 'VBP', 'VBZ',
'WDT', 'WP', 'WP$', 'WRB',
'$', '#', '``', '\'\'', '-LRB-', '-RRB-', ',', '.', ':'
]
|
{"/cross_validator.py": ["/PennTreebankPOSTags.py", "/Tokenizer.py", "/HMMProbGenerator.py", "/POSTagModelTrainer.py", "/POSTagger.py"], "/POSTagger.py": ["/Tokenizer.py", "/PennTreebankPOSTags.py"], "/cross_valid_investigate_errors.py": ["/PennTreebankPOSTags.py"], "/Tokenizer.py": ["/PennTreebankPOSTags.py"], "/build_tagger.py": ["/POSTagModelTrainer.py"], "/HMMProbGenerator.py": ["/PennTreebankPOSTags.py"], "/POSTagModelTrainer.py": ["/Tokenizer.py", "/HMMProbGenerator.py"], "/run_tagger.py": ["/Tokenizer.py", "/PennTreebankPOSTags.py", "/POSTagger.py"]}
|
24,103
|
NatashaKSS/CS4248Assg2-Viterbi
|
refs/heads/master
|
/build_tagger.py
|
# Import standard modules
import sys
import pickle
# Import custom modules
from POSTagModelTrainer import POSTagModelTrainer
#===========================================================================#
# BUILD_TAGGER
# EXECUTES THE TRAINING PHASE OF THE VITERBI TAGGER ON sents.train & PREPS
# THE MODEL_FILE.
#
# Writes the resulting P(w_i | t_i) and P(t_i | t_i-1) probabilities to a
# Python pickle file to be used for run_tagger.py during testing.
#===========================================================================#
# Entry point: train the POS tagger on sents.train and pickle the resulting
# model ([P(t_i | t_i-1), P(w_i | t_i)]) for run_tagger.py.
PATH_TO_DATA_TRAIN = sys.argv[1]
PATH_TO_DATA_DEVT = sys.argv[2]
PATH_TO_DATA_MODEL = sys.argv[3]
# BUG FIX: the original concatenated the training path straight onto the
# "Devt Data:" label with no separator; print label/value pairs as separate
# arguments instead.
print("Training data:", PATH_TO_DATA_TRAIN, "Devt Data:", PATH_TO_DATA_DEVT, "Model file:", PATH_TO_DATA_MODEL)
model = POSTagModelTrainer(PATH_TO_DATA_TRAIN).train()
# Close the model file deterministically instead of leaking the handle
with open(PATH_TO_DATA_MODEL, 'wb') as model_file:
    pickle.dump(model, model_file)
print("=== FINISHED TRAINING...MODEL SAVED IN " + PATH_TO_DATA_MODEL + " ===")
|
{"/cross_validator.py": ["/PennTreebankPOSTags.py", "/Tokenizer.py", "/HMMProbGenerator.py", "/POSTagModelTrainer.py", "/POSTagger.py"], "/POSTagger.py": ["/Tokenizer.py", "/PennTreebankPOSTags.py"], "/cross_valid_investigate_errors.py": ["/PennTreebankPOSTags.py"], "/Tokenizer.py": ["/PennTreebankPOSTags.py"], "/build_tagger.py": ["/POSTagModelTrainer.py"], "/HMMProbGenerator.py": ["/PennTreebankPOSTags.py"], "/POSTagModelTrainer.py": ["/Tokenizer.py", "/HMMProbGenerator.py"], "/run_tagger.py": ["/Tokenizer.py", "/PennTreebankPOSTags.py", "/POSTagger.py"]}
|
24,104
|
NatashaKSS/CS4248Assg2-Viterbi
|
refs/heads/master
|
/HMMProbGenerator.py
|
# Import standard modules
import sys
from math import log
# Import custom modules
from PennTreebankPOSTags import POS_TAGS, START_MARKER, END_MARKER
# Define constants
UNK = '<UNK>' # symbol representing out-of-vocabulary words
#===========================================================================#
# HMMProbGenerator
# GENERATES THE MODEL.
#
# Computes the resulting P(w_i | t_i) and P(t_i | t_i-1) probabilities.
#===========================================================================#
class HMMProbGenerator():
    """Generates the HMM model: transition probabilities P(t_i | t_i-1) and
    emission probabilities P(w_i | t_i), both stored as LOG probabilities to
    avoid floating-point underflow."""

    def __init__(self, word_postag_pairs):
        print("== [HMMProbGenerator instantiated] ==")
        # Training data as [word, postag] pairs, e.g. [['its', 'PRP$'], ...]
        self.WORD_POSTAG_PAIRS = word_postag_pairs
        #==================================================#
        # Constructing the Vocabulary for words & tags
        #==================================================#
        # Vocabulary in this Dictionary format: { 'the': 41107, 'gracious': 1, ... }
        self.WORD_VOCAB = self.get_word_vocabulary_with_counts(word_postag_pairs)
        # Vocabulary in this Dictionary format: { 'NN': 1123, 'VBN': 2323, ... }
        self.POSTAG_VOCAB = self.get_tag_vocabulary_with_counts(word_postag_pairs)
        #==================================================#
        # Initialize probabilities required for the model
        #==================================================#
        # Matrix representing P(t_i | t_i-1), where rows: t_i-1, cols: t_i
        self.PROB_TAG_GIVEN_TAG = self.initialize_prob_tag_given_tag()
        # Matrix representing P(w_i | t_i), where rows: t_i, cols: w_i
        self.PROB_WORD_GIVEN_TAG = self.initialize_word_given_tag()

    #=======================================================#
    # GENERATE P(t_i | t_i-1) AND P(w_i | t_i) PROBABILITIES
    #=======================================================#
    def generate_probs(self):
        """Populate both probability matrices from the labelled corpus.

        Mutates self.PROB_TAG_GIVEN_TAG and self.PROB_WORD_GIVEN_TAG.
        return  Model as a list of 2 elements [P(t_i | t_i-1), P(w_i | t_i)]
        """
        self.generate_prob_word_given_tag()
        self.generate_prob_tag_given_tag()
        return [self.PROB_TAG_GIVEN_TAG, self.PROB_WORD_GIVEN_TAG]

    def generate_prob_tag_given_tag(self):
        """Fill self.PROB_TAG_GIVEN_TAG with log P(t_i | t_i-1).

        Counts adjacent tag bigrams, normalizes each row by the preceding
        tag's total count, then converts every entry to log space. Zero
        probabilities are floored at log(sys.float_info.min) since log(0) is
        undefined.
        """
        # Count number of tags at position (i) followed by the tag at (i + 1)
        WORD_POSTAG_PAIRS_LENGTH = len(self.WORD_POSTAG_PAIRS)
        for i in range(WORD_POSTAG_PAIRS_LENGTH):
            if i + 1 < WORD_POSTAG_PAIRS_LENGTH:  # prevents index out of bounds
                bi_tag_i_minus_1 = self.WORD_POSTAG_PAIRS[i][1]
                bi_tag_i = self.WORD_POSTAG_PAIRS[i + 1][1]
                self.PROB_TAG_GIVEN_TAG[bi_tag_i_minus_1][bi_tag_i] += 1
        # Convert to probability from raw counts
        for tag_i_minus_1 in self.PROB_TAG_GIVEN_TAG:
            for tag_i in self.PROB_TAG_GIVEN_TAG[tag_i_minus_1]:
                if self.POSTAG_VOCAB[tag_i_minus_1] != 0:
                    # Raw counts to probability
                    self.PROB_TAG_GIVEN_TAG[tag_i_minus_1][tag_i] = \
                        float(self.PROB_TAG_GIVEN_TAG[tag_i_minus_1][tag_i]) / float(self.POSTAG_VOCAB[tag_i_minus_1])
                # NOTE(review): when the preceding tag never occurs, the raw
                # count (necessarily 0) is left unnormalized and still hits the
                # log floor below -- appears intentional.
                # Convert entry to log probability
                value = self.PROB_TAG_GIVEN_TAG[tag_i_minus_1][tag_i]
                self.PROB_TAG_GIVEN_TAG[tag_i_minus_1][tag_i] = log(value) if value != 0.0 else log(sys.float_info.min)
        return None

    def generate_prob_word_given_tag(self):
        """Fill self.PROB_WORD_GIVEN_TAG with log P(w_i | t_i).

        Counts word/tag co-occurrences, gives <UNK> a small smoothing mass,
        normalizes by each tag's total count, then converts to log space with
        the same log(sys.float_info.min) floor for zeros.
        """
        # Count number of words co-occurring with a given tag & mutate PROB_WORD_GIVEN_TAG matrix
        for word_postag_pair in self.WORD_POSTAG_PAIRS:
            word = word_postag_pair[0]
            postag = word_postag_pair[1]
            self.PROB_WORD_GIVEN_TAG[postag][word] += 1
        # Give out-of-vocabulary words a smoothing mass of 0.5 per tag so that
        # P(<UNK> | tag) is never zero after normalization
        for postag in self.PROB_WORD_GIVEN_TAG:
            self.PROB_WORD_GIVEN_TAG[postag][UNK] += 0.5
        # Convert to probability from raw counts
        for postag in self.PROB_WORD_GIVEN_TAG:
            for word in self.PROB_WORD_GIVEN_TAG[postag]:
                if self.POSTAG_VOCAB[postag] != 0:  # prevents division by 0 errors
                    # Raw counts to probability
                    self.PROB_WORD_GIVEN_TAG[postag][word] = \
                        float(self.PROB_WORD_GIVEN_TAG[postag][word]) / float(self.POSTAG_VOCAB[postag])
                else:
                    self.PROB_WORD_GIVEN_TAG[postag][word] = 0
                # Convert entry to log probability
                value = self.PROB_WORD_GIVEN_TAG[postag][word]
                self.PROB_WORD_GIVEN_TAG[postag][word] = log(value) if value != 0.0 else log(sys.float_info.min)
        return None

    #=====================================================#
    # INITIALIZE ALL PROBABILITY MATRICES NEEDED FOR VITERBI
    #=====================================================#
    def initialize_prob_tag_given_tag(self):
        """Build the zeroed P(t_i | t_i-1) matrix.

        Rows: t_i-1, Cols: t_i.
        return  Dict of POS tags at (i-1)th position, each mapping to a nested
                dict of POS tags at (i)th position, all counts 0
        """
        prob_tag_given_tag = {}
        for tag_i_minus_1 in POS_TAGS:
            prob_tag_given_tag[tag_i_minus_1] = {}
            for tag_i in POS_TAGS:
                prob_tag_given_tag[tag_i_minus_1][tag_i] = 0
        return prob_tag_given_tag

    def initialize_word_given_tag(self):
        """Build the zeroed P(w_i | t_i) matrix.

        Rows: t_i, Cols: w_i. Every row also gets an <UNK> column so unseen
        words can be scored later.
        return  Dict of POS tags, each mapping to a nested dict of seen words
                (plus <UNK>), all counts 0
        """
        prob_word_given_tag = {}
        for postag in POS_TAGS:
            prob_word_given_tag[postag] = {}
            prob_word_given_tag[postag][UNK] = 0
            for word in self.WORD_VOCAB:
                prob_word_given_tag[postag][word] = 0
        return prob_word_given_tag

    #=====================================================#
    # INITIALIZE ALL WORD & POS TAG VOCABULARIES NEEDED FOR VITERBI
    #=====================================================#
    def get_word_vocabulary_with_counts(self, word_postag_pairs):
        """Return the corpus' seen-word vocabulary.

        return  Vocabulary as { 'the': 41107, 'gracious': 1, ... }
        """
        result = {}
        # Count seen words & add to our Words vocab
        for word_postag in word_postag_pairs:
            word = word_postag[0]
            if word in result:
                result[word] = result[word] + 1
            else:
                result[word] = 1
        return result

    def get_tag_vocabulary_with_counts(self, word_postag_pairs):
        """Return the corpus' seen-tag vocabulary.

        Every tag type is pre-seeded with 0 so unobserved tags still appear.
        return  Vocabulary as { 'NN': 1123, 'VBN': 2323, ... }
        """
        result = {}
        # Initialize Tags vocab with all possible POS tag types
        for POS_TAG in POS_TAGS:
            result[POS_TAG] = 0
        # Count Tags & add to our Tags vocab
        for word_postag in word_postag_pairs:
            postag = word_postag[1]
            if postag in result:  # check if key exists, just in case, although unneeded
                result[postag] = result[postag] + 1
        return result
|
{"/cross_validator.py": ["/PennTreebankPOSTags.py", "/Tokenizer.py", "/HMMProbGenerator.py", "/POSTagModelTrainer.py", "/POSTagger.py"], "/POSTagger.py": ["/Tokenizer.py", "/PennTreebankPOSTags.py"], "/cross_valid_investigate_errors.py": ["/PennTreebankPOSTags.py"], "/Tokenizer.py": ["/PennTreebankPOSTags.py"], "/build_tagger.py": ["/POSTagModelTrainer.py"], "/HMMProbGenerator.py": ["/PennTreebankPOSTags.py"], "/POSTagModelTrainer.py": ["/Tokenizer.py", "/HMMProbGenerator.py"], "/run_tagger.py": ["/Tokenizer.py", "/PennTreebankPOSTags.py", "/POSTagger.py"]}
|
24,105
|
NatashaKSS/CS4248Assg2-Viterbi
|
refs/heads/master
|
/POSTagModelTrainer.py
|
# Import standard modules
import sys
import pickle
# Import custom modules
from Tokenizer import Tokenizer
from HMMProbGenerator import HMMProbGenerator
#===========================================================================#
# POSTagModelTrainer
# LOADS TRAINING DATA AND EXECUTES HMMProbGenerator TO GENERATE THE MODEL &
# TRAIN THE POS TAGGER
#===========================================================================#
class POSTagModelTrainer():
  """Loads training data and runs HMMProbGenerator to build the POS-tagger model."""
  def __init__(self, PATH_TO_DATA_TRAIN, VALIDATE_MODE=False):
    """
    PATH_TO_DATA_TRAIN -- path to the labelled training corpus
    VALIDATE_MODE -- True when driven by the cross-validator (affects logging only)
    """
    if VALIDATE_MODE:
      print("== [POSTagModelTrainer instantiated] CROSS VALIDATION MODE ==")
    else:
      print("== [POSTagModelTrainer instantiated] ==")
    # Set up tokenizer before everything else
    self.tokenizer = Tokenizer()
    # Read the whole corpus into memory; 'with' closes the file handle
    # promptly (the original open(...).read() leaked it).
    with open(PATH_TO_DATA_TRAIN) as train_file:
      self.DATA_TRAIN = train_file.read()
    self.LIST_OF_WORD_POSTAG_PAIRS = self.load_training_data()
  def train(self):
    """
    Trains the model against our specified training set using the HMMProbGenerator
    which helps us generate our model's probabilities.
    return The trained model, as a list of 2 elements [P(t_i | t_i-1), P(w_i | t_i)].
    """
    list_of_labelled_words = self.LIST_OF_WORD_POSTAG_PAIRS  # [['its', 'PRP$'], ['to', 'TO'] ...]
    model = HMMProbGenerator(list_of_labelled_words).generate_probs()
    return model
  def load_training_data(self):
    """
    Loads the training data in-memory and tokenizes it.
    return List of strings in the format
           ['<word1>/<pos_tag1>', '<word2>/<pos_tag2>', ...]
    """
    list_of_str_postag = self.tokenizer.tokenize_document(self.DATA_TRAIN)
    list_of_word_postag_pairs = self.tokenizer.get_pairs_of_word_tags(list_of_str_postag)
    return list_of_word_postag_pairs
|
{"/cross_validator.py": ["/PennTreebankPOSTags.py", "/Tokenizer.py", "/HMMProbGenerator.py", "/POSTagModelTrainer.py", "/POSTagger.py"], "/POSTagger.py": ["/Tokenizer.py", "/PennTreebankPOSTags.py"], "/cross_valid_investigate_errors.py": ["/PennTreebankPOSTags.py"], "/Tokenizer.py": ["/PennTreebankPOSTags.py"], "/build_tagger.py": ["/POSTagModelTrainer.py"], "/HMMProbGenerator.py": ["/PennTreebankPOSTags.py"], "/POSTagModelTrainer.py": ["/Tokenizer.py", "/HMMProbGenerator.py"], "/run_tagger.py": ["/Tokenizer.py", "/PennTreebankPOSTags.py", "/POSTagger.py"]}
|
24,106
|
NatashaKSS/CS4248Assg2-Viterbi
|
refs/heads/master
|
/run_tagger.py
|
# Import standard modules
import sys
import math
import pickle
# Import custom modules
from Tokenizer import Tokenizer
from PennTreebankPOSTags import POS_TAGS, START_MARKER, END_MARKER
from POSTagger import POSTagger
#===========================================================================#
# RUN_TAGGER
# EXECUTES THE VITERBI TAGGER ON SENTS.TEST.
#
# Writes the resulting best part-of-speech tags of the sentences in the test
# set to a file sents.out as specified in the assignment requirements.
#===========================================================================#
# Positional CLI arguments: test corpus, pickled model file, output path.
PATH_TO_DATA_TEST = sys.argv[1]
PATH_TO_DATA_MODEL = sys.argv[2]
PATH_TO_DATA_TEST_LABELLED = sys.argv[3]
print("sents.test:", PATH_TO_DATA_TEST + ", model_file:", PATH_TO_DATA_MODEL + ", labelled test data sents.out:", PATH_TO_DATA_TEST_LABELLED)
# Get the best POS tags for the test set
output = POSTagger(PATH_TO_DATA_TEST, PATH_TO_DATA_MODEL).run()
# Print to an output file. In this assignment, it is called 'sents.out'
with open(PATH_TO_DATA_TEST_LABELLED, 'w') as sents_out_file:
  sents_out_file.write(output)
|
{"/cross_validator.py": ["/PennTreebankPOSTags.py", "/Tokenizer.py", "/HMMProbGenerator.py", "/POSTagModelTrainer.py", "/POSTagger.py"], "/POSTagger.py": ["/Tokenizer.py", "/PennTreebankPOSTags.py"], "/cross_valid_investigate_errors.py": ["/PennTreebankPOSTags.py"], "/Tokenizer.py": ["/PennTreebankPOSTags.py"], "/build_tagger.py": ["/POSTagModelTrainer.py"], "/HMMProbGenerator.py": ["/PennTreebankPOSTags.py"], "/POSTagModelTrainer.py": ["/Tokenizer.py", "/HMMProbGenerator.py"], "/run_tagger.py": ["/Tokenizer.py", "/PennTreebankPOSTags.py", "/POSTagger.py"]}
|
24,107
|
ammarhassan/intpret
|
refs/heads/master
|
/int_graph/urls.py
|
from django.conf.urls import url
from . import views
# URL routes for the int_graph app.
# NOTE(review): django.conf.urls.url is removed in Django 4; migrate to
# django.urls.re_path when upgrading.
urlpatterns =[
    url(r'^index/$', views.shell, name='shell'),
    # Same view, scoped to an existing query group.
    url(r'^index/(?P<group_id>\d+)/$', views.shell, name='shell'),
    # AJAX endpoint returning chart data for the interactive shell.
    url(r'^index/interactive_shell/$', views.ajax_shell, name='interactive_shell'),
]
|
{"/int_graph/views.py": ["/int_graph/models.py"]}
|
24,108
|
ammarhassan/intpret
|
refs/heads/master
|
/int_graph/apps.py
|
from django.apps import AppConfig
class IntGraphConfig(AppConfig):
    """Django application configuration for the int_graph app."""
    name = 'int_graph'
|
{"/int_graph/views.py": ["/int_graph/models.py"]}
|
24,109
|
ammarhassan/intpret
|
refs/heads/master
|
/int_graph/models.py
|
from django.db import models
# Create your models here.
class QueriesStore(models.Model):
    """A single shell query, optionally grouped into a session via ``group``."""
    query = models.TextField(help_text='Query string')
    group = models.IntegerField(help_text='group identifier', null=True, blank=True)
    # NOTE(review): __unicode__ is only honoured on Python 2; under
    # Python 3 / Django 2+ this should be named __str__.
    def __unicode__(self):
        return self.query
|
{"/int_graph/views.py": ["/int_graph/models.py"]}
|
24,110
|
ammarhassan/intpret
|
refs/heads/master
|
/int_graph/views.py
|
from django.shortcuts import render
from django.http import JsonResponse
from .models import QueriesStore
from django.db.models import Max
# Create your views here.
def shell(request, group_id=None):
    """Render the shell page; on POST, persist the submitted query.

    group_id comes from the URL regex as a string, or is derived from the
    DB (max existing group + 1, or 0 when the table is empty).
    """
    queries = []
    query = None
    if request.method=='POST':
        # print('i am in post')
        if not group_id:
            # Start a new group one past the current maximum group id.
            group_id = QueriesStore.objects.all(
            ).aggregate(group_max=Max('group'))['group_max']
            print('there was not id, but we found it', group_id)
            if group_id is not None:
                group_id+=1
            else:
                group_id=0
        query_string = request.POST.get('light-shell')
        # print('query string is'+query_string)
        # print('group_id is '+ str(group_id))
        if query_string:
            query = QueriesStore(query=query_string, group=group_id)
            query.save()
        else:
            pass
    # NOTE(review): when group_id comes from the URL it is a string here;
    # the ORM coerces it for the IntegerField filter -- confirm intended.
    if group_id is not None:
        queries = QueriesStore.objects.filter(group=group_id)
    return render(request, 'shell.html', {'queries':queries, 'group_id':group_id})
def ajax_shell(request):
    """Return the static demo dataset consumed by the interactive shell chart."""
    chart_rows = [
        ['Task', 'Hours per Day'],
        ['Work', 11],
        ['Eat', 2],
        ['Commute', 2],
        ['Watch TV', 2],
        ['Sleep', 7]
    ]
    return JsonResponse({'data': chart_rows})
|
{"/int_graph/views.py": ["/int_graph/models.py"]}
|
24,115
|
newpanjing/myblog
|
refs/heads/master
|
/setup.py
|
from setuptools import setup
# Packaging metadata for the myblog Django project; install_requires lists
# the runtime dependencies (ORM driver, OSS upload, search, tagging, cache).
setup(
    name='myblog',
    version='1.0',
    packages=['models', 'models.migrations', 'myblog', 'myblog.utils', 'myblog.templatetags', 'article',
              'article.migrations'],
    url='88cto.com',
    license='Apache License 2.0',
    author='panjing',
    author_email='newpanjing@163.com',
    description='个人博客系统',
    install_requires=[
        'django',
        'shortid8',
        'oss2',
        'requests',
        'pymysql',
        'whoosh',
        'django-haystack',
        'jieba',
        'redis'
    ],
)
|
{"/models/admin.py": ["/models/models.py"], "/article/admin.py": ["/article/models.py"], "/article/tests.py": ["/article/models.py"], "/draw/draw.py": ["/models/models.py"], "/aliyun/oss_backends.py": ["/models/models.py"], "/mdeditor/views.py": ["/models/models.py"], "/myblog/test.py": ["/article/models.py"], "/myblog/views.py": ["/article/models.py", "/models/models.py"], "/myblog/templatetags/common.py": ["/article/models.py", "/models/models.py"]}
|
24,116
|
newpanjing/myblog
|
refs/heads/master
|
/myblog/utils/pager.py
|
import math
def get_page_range(page_num, show_num, current_page):
    """Compute the first/last page numbers of a pagination window.

    page_num -- total number of pages
    show_num -- how many page links the window should contain
    current_page -- the page currently being viewed
    Returns an inclusive (first, last) pair clamped to [1, page_num].
    """
    current_page = int(current_page)
    page_num = int(page_num)
    show_num = int(show_num)
    # Split the window around the current page; for even window sizes the
    # extra slot goes after the current page. (For odd n, (n-1)//2 == n//2.)
    before = (show_num - 1) // 2
    after = show_num // 2
    first = current_page - before
    last = current_page + after
    # Window underflows page 1: shift it right, plus one extra slot.
    if first <= 0:
        last += 1 - first
        first = 1
    # Window overflows the last page: pull it left.
    if last > page_num:
        first -= last - page_num
        last = page_num
    if first < 1:
        first = 1
    return first, last
# 获取页码
def get_numbers(total, size, current, show_number):
    """Return the list of page numbers to render in the pager.

    total -- total item count; size -- items per page;
    current -- current page; show_number -- window width.
    """
    current = int(current)
    page_count = int((total - 1) / size + 1)
    first, last = get_page_range(page_count, show_number, current)
    return list(range(first, last + 1))
# current = 100
# arr = get_numbers(1011, 5, current, 7)
# print(arr)
|
{"/models/admin.py": ["/models/models.py"], "/article/admin.py": ["/article/models.py"], "/article/tests.py": ["/article/models.py"], "/draw/draw.py": ["/models/models.py"], "/aliyun/oss_backends.py": ["/models/models.py"], "/mdeditor/views.py": ["/models/models.py"], "/myblog/test.py": ["/article/models.py"], "/myblog/views.py": ["/article/models.py", "/models/models.py"], "/myblog/templatetags/common.py": ["/article/models.py", "/models/models.py"]}
|
24,117
|
newpanjing/myblog
|
refs/heads/master
|
/models/admin.py
|
from django.contrib import admin
from .models import *
from myblog.utils import cache
# Register your models here.
@admin.register(Config)
class ConfigAdmin(admin.ModelAdmin):
    """Admin for system configuration (Config) entries."""
    list_display = ('id', 'group', 'key', 'value')
    list_display_links = ('id', 'group', 'key', 'value')
    search_fields = ('group', 'key')
    list_filter = ('group',)
    def save_model(self, request, obj, form, change):
        # Any config change invalidates the shared "common" cache entry.
        super().save_model(request, obj, form, change)
        cache.delete(cache.CACHE_COMMON_KEY)
@admin.register(Site)
class SiteAdmin(admin.ModelAdmin):
    """Admin for friendly-link (Site) entries."""
    list_display = ('id', 'name', 'site', 'contact', 'contactType', 'sort', 'createDate')
    list_display_links = ('id', 'name', 'site')
    list_editable = ('sort',)
    search_fields = ('contactType',)
    list_filter = ('contactType',)
    def save_model(self, request, obj, form, change):
        # The link list is cached with the common site data.
        super().save_model(request, obj, form, change)
        cache.delete(cache.CACHE_COMMON_KEY)
@admin.register(Page)
class PageAdmin(admin.ModelAdmin):
    # Admin for standalone CMS pages; no cache invalidation hook here.
    list_display = ('id', 'alias', 'title', 'keywords', 'description', 'createDate', 'display')
    search_fields = ('title', 'alias')
    list_filter = ('display',)
    list_display_links = ('id', 'alias', 'title')
@admin.register(Menu)
class MenuAdmin(admin.ModelAdmin):
    """Admin for navigation menu items."""
    list_display = ('id', 'name', 'icon', 'href', 'sort', 'display')
    list_display_links = ('id', 'name')
    search_fields = ('name',)
    list_editable = ('display', 'sort')
    ordering = ('sort',)
    def save_model(self, request, obj, form, change):
        # The rendered menu is cached with the common site data.
        super().save_model(request, obj, form, change)
        cache.delete(cache.CACHE_COMMON_KEY)
@admin.register(Notice)
class NoticeAdmin(admin.ModelAdmin):
    """Admin for site announcements."""
    list_display = ('id', 'title', 'createDate')
    list_display_links = ('title',)
    search_fields = ('title',)
    def save_model(self, request, obj, form, change):
        # Announcements are cached with the common site data.
        super().save_model(request, obj, form, change)
        cache.delete(cache.CACHE_COMMON_KEY)
|
{"/models/admin.py": ["/models/models.py"], "/article/admin.py": ["/article/models.py"], "/article/tests.py": ["/article/models.py"], "/draw/draw.py": ["/models/models.py"], "/aliyun/oss_backends.py": ["/models/models.py"], "/mdeditor/views.py": ["/models/models.py"], "/myblog/test.py": ["/article/models.py"], "/myblog/views.py": ["/article/models.py", "/models/models.py"], "/myblog/templatetags/common.py": ["/article/models.py", "/models/models.py"]}
|
24,118
|
newpanjing/myblog
|
refs/heads/master
|
/myblog/mdtest.py
|
import markdown
print(markdown.markdown('''
[TOC]
# 这是什么操作
这是一个无人机
## 后来
```python
aa=123
print(aa)
for i in range(1,100):
print(i)
```
''', extensions=[
'markdown.extensions.extra',
'markdown.extensions.codehilite',
'markdown.extensions.toc',
]))
|
{"/models/admin.py": ["/models/models.py"], "/article/admin.py": ["/article/models.py"], "/article/tests.py": ["/article/models.py"], "/draw/draw.py": ["/models/models.py"], "/aliyun/oss_backends.py": ["/models/models.py"], "/mdeditor/views.py": ["/models/models.py"], "/myblog/test.py": ["/article/models.py"], "/myblog/views.py": ["/article/models.py", "/models/models.py"], "/myblog/templatetags/common.py": ["/article/models.py", "/models/models.py"]}
|
24,119
|
newpanjing/myblog
|
refs/heads/master
|
/models/models.py
|
from django.db import models
from mdeditor.fields import MDTextField
# Create your models here.
class Config(models.Model):
    """Key/value system configuration entry, namespaced by ``group``."""
    key = models.CharField(max_length=128, db_index=True)
    value = models.CharField(max_length=512)
    group = models.CharField(max_length=128, verbose_name='组', db_index=True)
    class Meta:
        verbose_name = "字典列表"
        verbose_name_plural = "字典列表"
        # app_label = "config"
    def __str__(self):
        return self.group + '.' + self.key
class Site(models.Model):
    """Friendly link (友链) entry shown on the blog."""
    site = models.CharField(max_length=256, verbose_name='网址')
    name = models.CharField(max_length=128, verbose_name='名称')
    # Contact-method codes used by contactType.
    contact_choices = ((0, 'QQ'),
                       (1, '微信'),
                       (2, '邮箱'),
                       (3, '手机')
                       )
    contactType = models.IntegerField(choices=contact_choices, verbose_name='类型', default=0)
    contact = models.CharField(max_length=128, verbose_name='联系方式')
    createDate = models.DateTimeField(verbose_name='创建时间', auto_now_add=True)
    sort = models.IntegerField(verbose_name='排序', default=0, null=True, blank=True, db_index=True)
    class Meta:
        verbose_name = "友链"
        verbose_name_plural = "友链管理"
    def __str__(self):
        return self.name
class Page(models.Model):
    """Standalone CMS page addressed by ``alias``; content is markdown."""
    alias = models.CharField(max_length=256, verbose_name='别名', db_index=True)
    title = models.CharField(max_length=256, verbose_name='标题')
    keywords = models.CharField(max_length=512, verbose_name='关键字', null=True, blank=True)
    description = models.CharField(max_length=512, verbose_name='描述', null=True, blank=True)
    content = MDTextField(verbose_name='内容')
    createDate = models.DateTimeField(verbose_name='创建时间', auto_now_add=True)
    display = models.BooleanField(verbose_name='是否显示', default=True, db_index=True)
    # Raw script snippets injected into the page head/footer when rendered.
    head = models.TextField(verbose_name='头部脚本', null=True, blank=True)
    footer = models.TextField(verbose_name='尾部脚本', null=True, blank=True)
    class Meta:
        verbose_name = '页面'
        verbose_name_plural = '页面管理'
    def __str__(self):
        return self.title
class Menu(models.Model):
    """Navigation menu item, ordered by ``sort``."""
    name = models.CharField(max_length=16, verbose_name='菜单名')
    icon = models.CharField(max_length=32, verbose_name='图标字体', null=True, blank=True)
    href = models.CharField(max_length=128, verbose_name='链接地址')
    createDate = models.DateTimeField(verbose_name='创建时间', auto_now_add=True)
    display = models.BooleanField(verbose_name='显示', default=True, db_index=True)
    sort = models.IntegerField(verbose_name='排序', default=0, db_index=True)
    class Meta:
        verbose_name = '菜单'
        verbose_name_plural = '菜单管理'
    def __str__(self):
        return self.name
class Notice(models.Model):
    """Site-wide announcement."""
    title = models.CharField(max_length=128, verbose_name='标题')
    content = models.TextField(verbose_name='内容')
    createDate = models.DateTimeField(verbose_name='创建时间', auto_now_add=True, db_index=True)
    class Meta:
        verbose_name = '公告'
        verbose_name_plural = '公告管理'
    def __str__(self):
        return self.title
|
{"/models/admin.py": ["/models/models.py"], "/article/admin.py": ["/article/models.py"], "/article/tests.py": ["/article/models.py"], "/draw/draw.py": ["/models/models.py"], "/aliyun/oss_backends.py": ["/models/models.py"], "/mdeditor/views.py": ["/models/models.py"], "/myblog/test.py": ["/article/models.py"], "/myblog/views.py": ["/article/models.py", "/models/models.py"], "/myblog/templatetags/common.py": ["/article/models.py", "/models/models.py"]}
|
24,120
|
newpanjing/myblog
|
refs/heads/master
|
/models/__init__.py
|
default_app_config = 'models.apps.ModelsConfig'
|
{"/models/admin.py": ["/models/models.py"], "/article/admin.py": ["/article/models.py"], "/article/tests.py": ["/article/models.py"], "/draw/draw.py": ["/models/models.py"], "/aliyun/oss_backends.py": ["/models/models.py"], "/mdeditor/views.py": ["/models/models.py"], "/myblog/test.py": ["/article/models.py"], "/myblog/views.py": ["/article/models.py", "/models/models.py"], "/myblog/templatetags/common.py": ["/article/models.py", "/models/models.py"]}
|
24,121
|
newpanjing/myblog
|
refs/heads/master
|
/article/admin.py
|
import random
from django.contrib import admin
from .models import *
import re
from myblog.utils import oss
from jieba import analyse
from shortid import short_id
from myblog.utils import cache
from draw import draw
# Register your models here.
# Extract a plain-text summary from article HTML.
def get_subject(html):
    """Strip HTML entities, whitespace, scripts, styles and tags from
    ``html``, returning compact plain text used as the article summary.
    """
    # BUGFIX: the original list was missing a comma after the whitespace
    # pattern, so it string-concatenated with the <script> pattern —
    # spaces were kept and <script> blocks only matched after whitespace.
    regexs = [r'([&]{0,1}(\w+;))',       # HTML entities such as &amp;
              r'\r|\n|\t|\s',            # all whitespace (summary is stored compact)
              r'<script>.*?</script>',   # inline scripts (newlines already removed above)
              r'<style.*?</style>',
              r'<[^>]+>'                 # any remaining tags
              ]
    for r in regexs:
        p = re.compile(r)
        html = re.sub(p, '', html)
    return html
@admin.register(Category)
class CategoryAdmin(admin.ModelAdmin):
    """Admin for article categories."""
    list_display = ('id', 'name', 'alias', 'date', 'sort', 'display')
    list_display_links = ('id', 'name', 'alias')
    search_fields = ('name',)
    list_editable = ('display', 'sort')
    def save_model(self, request, obj, form, change):
        # Categories appear in cached common site data.
        super().save_model(request, obj, form, change)
        cache.delete(cache.CACHE_COMMON_KEY)
@admin.register(Article)
class ArticleAdmin(admin.ModelAdmin):
    """Admin for articles: auto-fills subject, short id, tags and cover on save."""
    list_display = ('id', 'sid', 'title_url', 'comment_count', 'category', 'user', 'hits', 'tags', 'top', 'createDate')
    list_filter = ('category', 'user')
    search_fields = ('title',)
    list_display_links = ('id', 'sid', 'title_url')
    list_editable = ('top',)
    list_per_page = 10
    def delete_model(self, request, obj):
        # The home page lists articles, so a delete invalidates its cache.
        cache.delete(cache.CACHE_HOME_KEY)
        super(ArticleAdmin, self).delete_model(request, obj)
    def save_model(self, request, obj, form, change):
        obj.user = request.user
        # Plain-text summary extracted from the article HTML.
        subject = get_subject(obj.content)
        # oss.put_object(obj.image.file.file)
        # Cap the summary at 200 characters.
        if len(subject) > 200:
            subject = subject[0:200]
        # Assign a short id on first save.
        if not obj.sid:
            obj.sid = short_id.get_short_id()
        obj.subject = subject
        # Tags: keep the author's, else derive the top-5 keywords from the summary.
        tags = obj.tags
        if not tags:
            r = analyse.extract_tags(subject, topK=5)
            tags = ",".join(r)
        obj.tags = tags
        # Generate a cover image when none was uploaded.
        # NOTE(review): randint(0, total - 1) raises when the Cover table is
        # empty -- confirm at least one Cover row always exists.
        if obj.image.name == '' or not obj.image.name:
            total = Cover.objects.count()
            c = Cover.objects.all()[random.randint(0, total - 1)]
            url = draw.draw(text=obj.title, url=c.image.url, font_size=c.font_size, color=c.color, x=c.x, y=c.y)
            obj.image.name = url
        super(ArticleAdmin, self).save_model(request, obj, form, change)
        cache.delete(cache.CACHE_HOME_KEY)
@admin.register(Member)
class MemberAdmin(admin.ModelAdmin):
    """Admin for OAuth members (commenters)."""
    list_per_page = 10
    list_display = (
        'id', 'name', 'email', 'nodeId', 'type', 'avatar_img', 'github_url', 'blog_url', 'createDate', 'updateDate')
    list_display_links = ('id', 'name', 'email', 'nodeId', 'createDate', 'updateDate')
    search_fields = ('name', 'email',)
@admin.register(Comment)
class CommentAdmin(admin.ModelAdmin):
    """Admin for comments across articles, pages and messages."""
    list_display = ('id', 'type', 'show_content', 'member', 'atMember', 'parentId', 'targetId', 'createDate')
    list_per_page = 10
    list_filter = ('member', 'type')
    search_fields = ('content',)
@admin.register(Cover)
class CoverAdmin(admin.ModelAdmin):
    """Admin for cover-image templates used by auto-generated article covers."""
    list_display = ('id', 'x', 'y', 'font_size', 'color_display', 'image_display')
    list_per_page = 20
    list_editable = ('x', 'y', 'font_size')
|
{"/models/admin.py": ["/models/models.py"], "/article/admin.py": ["/article/models.py"], "/article/tests.py": ["/article/models.py"], "/draw/draw.py": ["/models/models.py"], "/aliyun/oss_backends.py": ["/models/models.py"], "/mdeditor/views.py": ["/models/models.py"], "/myblog/test.py": ["/article/models.py"], "/myblog/views.py": ["/article/models.py", "/models/models.py"], "/myblog/templatetags/common.py": ["/article/models.py", "/models/models.py"]}
|
24,122
|
newpanjing/myblog
|
refs/heads/master
|
/myblog/utils/cache.py
|
import redis
import os
import time
import datetime
import json
from django.conf import settings
from django.core.exceptions import ImproperlyConfigured
# redis key命名参照alibaba规范
# 项目列表缓存
CACHE_PROJECT_KEY = 'cache:project'
# 通用数据
CACHE_COMMON_KEY = 'cache:common'
# 推荐
CACHE_RECOMMEND_KEY = 'cache:recommend'
# 首页缓存
CACHE_HOME_KEY = 'cache:home'
def _get_config(name):
    """Look up *name* in the environment first, then Django settings.

    String values are stripped; raises ImproperlyConfigured when the key
    is found in neither place.
    """
    value = os.environ.get(name, getattr(settings, name, None))
    if value is None:
        raise ImproperlyConfigured(
            "Can't find config for '%s' either in environment"
            "variable or in setting.py" % name)
    if isinstance(value, str):
        return value.strip()
    return value
con = redis.Redis(host=_get_config('REDIS_HOST'), port=_get_config('REDIS_PORT'))
def delete(key):
    """Delete a cached entry.

    Failures are logged and swallowed: the site must keep working even
    when redis is down. (The original placed this docstring after the
    first statement, where it was a no-op expression, and used a bare
    ``except:`` that also caught KeyboardInterrupt/SystemExit.)

    :param key: redis key to remove
    """
    print('删除缓存,key={}'.format(key))
    try:
        con.delete(key)
    except Exception:
        # Redis unavailable -- best effort only.
        print('删除缓存失败,key={}'.format(key))
def get(key, fun, timeout=None):
    """Fetch a JSON-cached value, computing and storing it on a miss.

    :param key: redis key
    :param fun: zero-argument callback producing the value on a cache miss
    :param timeout: optional TTL in seconds (no expiry when None)
    :return: the cached or freshly computed value

    If redis is unavailable the value is computed directly and returned
    without caching.
    """
    value = None
    try:
        value = con.get(key)
        if value:
            value = json.loads(value.decode(encoding="utf-8"))
        else:
            value = fun()
            # Renamed from 'str', which shadowed the builtin.
            payload = json.dumps(value, cls=DateEncoder)
            if timeout:
                con.setex(key, timeout, payload)
            else:
                con.set(key, payload)
            print('缓存数据,key={}'.format(key))
    except Exception as e:
        # Redis down or payload unserialisable: log and fall back to the
        # callback so callers still get a value.
        print(e)
        return fun()
    return value
class DateEncoder(json.JSONEncoder):
    """JSON encoder that serialises datetimes as 'YYYY-MM-DD HH:MM:SS'."""
    def default(self, obj):
        if not isinstance(obj, datetime.datetime):
            return super().default(obj)
        return obj.strftime('%Y-%m-%d %H:%M:%S')
if __name__ == '__main__':
print('ok')
|
{"/models/admin.py": ["/models/models.py"], "/article/admin.py": ["/article/models.py"], "/article/tests.py": ["/article/models.py"], "/draw/draw.py": ["/models/models.py"], "/aliyun/oss_backends.py": ["/models/models.py"], "/mdeditor/views.py": ["/models/models.py"], "/myblog/test.py": ["/article/models.py"], "/myblog/views.py": ["/article/models.py", "/models/models.py"], "/myblog/templatetags/common.py": ["/article/models.py", "/models/models.py"]}
|
24,123
|
newpanjing/myblog
|
refs/heads/master
|
/article/tests.py
|
from django.test import TestCase
import django
import os
# Configure Django before importing models so this file can run standalone.
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "blog.settings")
django.setup()
# Create your tests here.
from article.models import Cover
# Cover
#
# class TestCase(TestCase):
#     # 测试函数执行前执行
#     def setUp(self):
#         print("======in setUp")
#
#     # 需要测试的内容
#     def test_add(self):
#         pass
#
#     # 需要测试的内容
#
#     # 测试函数执行后执行
#     def tearDown(self):
#         print("======in tearDown")
|
{"/models/admin.py": ["/models/models.py"], "/article/admin.py": ["/article/models.py"], "/article/tests.py": ["/article/models.py"], "/draw/draw.py": ["/models/models.py"], "/aliyun/oss_backends.py": ["/models/models.py"], "/mdeditor/views.py": ["/models/models.py"], "/myblog/test.py": ["/article/models.py"], "/myblog/views.py": ["/article/models.py", "/models/models.py"], "/myblog/templatetags/common.py": ["/article/models.py", "/models/models.py"]}
|
24,124
|
newpanjing/myblog
|
refs/heads/master
|
/myblog/mymiddleware.py
|
# -*- coding: utf-8 -*-
from django.shortcuts import HttpResponseRedirect
try:
from django.utils.deprecation import MiddlewareMixin # Django 1.10.x
except ImportError:
MiddlewareMixin = object # Django 1.4.x - Django 1.9.x
class SimpleMiddleware(MiddlewareMixin):
    '''Block the demo account from the admin password-change page.'''
    def process_request(self, request):
        path = request.path
        if path.find('/admin/password_change/') != -1:
            # Anonymous sessions have no '_auth_user_id'; .get avoids the
            # KeyError the original raised for logged-out visitors.
            # NOTE(review): user id '8' is presumably the demo account --
            # confirm; consider moving it to settings.
            id = request.session.get('_auth_user_id')
            if id == '8':
                return HttpResponseRedirect('/no')
|
{"/models/admin.py": ["/models/models.py"], "/article/admin.py": ["/article/models.py"], "/article/tests.py": ["/article/models.py"], "/draw/draw.py": ["/models/models.py"], "/aliyun/oss_backends.py": ["/models/models.py"], "/mdeditor/views.py": ["/models/models.py"], "/myblog/test.py": ["/article/models.py"], "/myblog/views.py": ["/article/models.py", "/models/models.py"], "/myblog/templatetags/common.py": ["/article/models.py", "/models/models.py"]}
|
24,125
|
newpanjing/myblog
|
refs/heads/master
|
/article/models.py
|
from django.db import models
from django.contrib.auth.models import User
from mdeditor.fields import MDTextField
from django.utils.html import format_html
# Create your models here.
class Category(models.Model):
    """Article category, addressed by ``alias`` and ordered by ``sort``."""
    name = models.CharField(max_length=128, verbose_name='分类名', blank=False, null=False)
    alias = models.CharField(max_length=128, verbose_name='别名', db_index=True)
    date = models.DateTimeField(verbose_name='创建日期', auto_now_add=True)
    display = models.BooleanField(verbose_name='显示', default=True, db_index=True)
    sort = models.IntegerField(verbose_name='排序', default=0, db_index=True)
    class Meta:
        verbose_name = "分类"
        verbose_name_plural = "分类管理"
    def __str__(self):
        return self.name
class Article(models.Model):
    """Blog article; subject/sid/tags/cover are auto-filled by the admin."""
    sid = models.CharField(max_length=8, verbose_name='短ID', blank=True, null=True, editable=False, db_index=True)
    title = models.CharField(max_length=256, verbose_name='标题', blank=False, null=False)
    category = models.ForeignKey(Category, on_delete=models.SET_NULL, blank=False, null=True, verbose_name='分类',
                                 db_index=True)
    markdown = models.BooleanField(verbose_name='markdown格式', default=True, editable=False)
    user = models.ForeignKey(User, on_delete=models.SET_NULL, verbose_name='发布者', null=True, editable=False)
    hits = models.IntegerField(verbose_name='点击量', default=0, editable=False)
    content = MDTextField(verbose_name='内容')
    subject = models.TextField(verbose_name='简介', editable=False)
    image = models.ImageField(upload_to='static/images/', verbose_name='封面', blank=True, null=True, db_index=True)
    createDate = models.DateTimeField(verbose_name='创建日期', auto_now_add=True)
    tags = models.CharField(max_length=256, verbose_name='标签', blank=True, null=True)
    top_choices = ((0, '否'),
                   (1, '是'),)
    top = models.IntegerField(choices=top_choices, verbose_name='置顶', default=0, db_index=True)
    def comment_count(self):
        # Comments of type 0 target articles; targetId holds this pk.
        return Comment.objects.filter(targetId=self.id, type=0).count()
    def title_url(self):
        # Admin list column linking to the public article page by short id.
        return format_html('<a href="/article/{}" target="_blank">{}</a>', self.sid, self.title)
    title_url.short_description = "标题"
    comment_count.short_description = "评论数"
    class Meta:
        verbose_name = "文章"
        verbose_name_plural = "文章管理"
    def __str__(self):
        return self.title
class Member(models.Model):
    """A commenter authenticated via third-party OAuth (Github/QQ/...)."""
    name = models.CharField(max_length=128, verbose_name='昵称', blank=True, null=False, default='no_name')
    email = models.CharField(max_length=256, verbose_name='邮箱', blank=True, null=True)
    nodeId = models.CharField(max_length=256, verbose_name='OAuth ID', blank=False, null=False)
    avatar = models.CharField(max_length=256, verbose_name='头像', null=False, blank=False)
    url = models.CharField(max_length=256, verbose_name='主页', blank=True, null=True)
    blog = models.CharField(max_length=256, verbose_name='博客', blank=True, null=True)
    createDate = models.DateTimeField(verbose_name='创建日期', auto_now_add=True)
    updateDate = models.DateTimeField(verbose_name='更新日期', auto_now=True)
    type_choices = (
        (0, 'Github'),
        (1, 'QQ'),
        (2, '代码集市')
    )
    type = models.IntegerField(choices=type_choices, verbose_name='用户类型', db_index=True)
    def github_url(self):
        # Admin column: link to the member's profile page, if any.
        if self.url is None:
            return ""
        else:
            return format_html('<a href="{}" target="_blank">{}</a>', self.url, self.url)
    def avatar_img(self):
        # Admin column: small inline avatar preview.
        return format_html('<img src="{}" style="width:25px;height:25px"/>', self.avatar)
    def blog_url(self):
        # Admin column: link to the member's blog, prefixing a scheme if missing.
        if self.blog is None or self.blog == "":
            return ""
        else:
            url = self.blog
            if url.find("http") != 0:
                url = "http://" + url
            return format_html('<a href="{}" target="_blank">{}</a>', url, url)
    avatar_img.short_description = "头像"
    blog_url.short_description = "博客"
    def __str__(self):
        if self.name:
            return self.name
        else:
            return '-'
    class Meta:
        verbose_name = "会员"
        verbose_name_plural = "会员管理"
class Comment(models.Model):
    """Comment attached to an article, message board, page or project via
    (``type``, ``targetId``); threaded through ``parentId``/``atMember``."""
    content = models.TextField(verbose_name='内容', null=False, blank=True)
    member = models.ForeignKey(Member, on_delete=models.SET_NULL, verbose_name='用户', null=True, editable=False,
                               db_index=True)
    parentId = models.IntegerField(verbose_name='父ID', null=True, blank=True, db_index=True)
    targetId = models.CharField(max_length=128, db_index=True, verbose_name='目标ID', null=True, blank=True)
    type_choices = ((0, '文章'),
                    (1, '留言'),
                    (2, '页面'),
                    (3, '项目'),)
    type = models.IntegerField(choices=type_choices, verbose_name='类型', db_index=True)
    createDate = models.DateTimeField(verbose_name='创建日期', auto_now_add=True)
    atMember = models.ForeignKey(Member, related_name='at_member_id', on_delete=models.SET_NULL, verbose_name='回复用户',
                                 null=True, blank=True,
                                 editable=False, db_index=True)
    def show_content(self):
        # Admin column: link the comment text to the page it was posted on.
        url = ''
        if self.type == 0:
            url = '/article/' + str(self.targetId)
        elif self.type == 1:
            url = '/page/message/#' + str(self.targetId)
        else:
            url = 'javascript:;'
        return format_html('<a href="{}" target="_blank">{}</a>', url, self.content)
    class Meta:
        verbose_name = "评论"
        verbose_name_plural = "评论管理"
    def __str__(self):
        return self.content
class Cover(models.Model):
    '''Cover-image template: base picture plus title position/style used
    when auto-generating an article cover.'''
    x = models.IntegerField(verbose_name='X坐标')
    y = models.IntegerField(verbose_name='Y坐标')
    font_size = models.IntegerField(verbose_name='字体大小', default=24, null=True, blank=True)
    color = models.CharField(max_length=12, verbose_name='颜色', default='#FFF', null=True, blank=True)
    image = models.ImageField(verbose_name='图片')
    class Meta:
        verbose_name = "封面"
        verbose_name_plural = "封面管理"
    def image_display(self):
        # '!100' is presumably an OSS image-resize style suffix -- confirm.
        return format_html('<img src="{}!100" width=50 height=50>', self.image.url)
    def color_display(self):
        # Admin column: swatch preview of the configured colour.
        return format_html('<div style="border:#000 1px solid;height:30px;width:30px;background:{}"></div>', self.color)
    image_display.short_description = '图片'
    color_display.short_description = '颜色'
    def __str__(self):
        return self.image.url
|
{"/models/admin.py": ["/models/models.py"], "/article/admin.py": ["/article/models.py"], "/article/tests.py": ["/article/models.py"], "/draw/draw.py": ["/models/models.py"], "/aliyun/oss_backends.py": ["/models/models.py"], "/mdeditor/views.py": ["/models/models.py"], "/myblog/test.py": ["/article/models.py"], "/myblog/views.py": ["/article/models.py", "/models/models.py"], "/myblog/templatetags/common.py": ["/article/models.py", "/models/models.py"]}
|
24,126
|
newpanjing/myblog
|
refs/heads/master
|
/oauth/qq_oauth.py
|
from django.conf import settings
import requests
import os
from shortid import short_id
import re
import json
BASE_URL = 'https://graph.qq.com/oauth2.0/authorize'
def _get(url, params):
    """GET ``BASE_URL + url`` and return the parsed JSON body.

    NOTE(review): raises a bare RuntimeError (no message) on non-200, and
    appears unused in this module -- candidate for removal.
    """
    r = requests.get(BASE_URL + url, params=params, headers={"Accept": 'application/json'})
    if (r.status_code == 200):
        return r.json()
    else:
        raise RuntimeError
# 获取认证url
# Build the QQ OAuth authorize URL for the current host.
def get_auth_url(request):
    """Return the URL the browser is redirected to for QQ login."""
    domain = "https://" + request.META.get("HTTP_HOST")
    # Random state token guards against CSRF; checked on the callback.
    state = short_id.get_short_id()
    client_id = __get_config('QQ_CLIENT_ID')
    redirect_uri = domain + __get_config('QQ_CLIENT_CALLBACK')
    request.session['state'] = state
    return BASE_URL + "?response_type=code&client_id={}&redirect_uri={}&state={}".format(client_id, redirect_uri, state)
def get_access_token(request, code):
    """Exchange the OAuth ``code`` for an access token.

    Returns the token endpoint's form-encoded response parsed into a dict
    (e.g. keys 'access_token', 'expires_in'); raises RuntimeError on non-200.
    """
    domain = "https://" + request.META.get("HTTP_HOST")
    url = 'https://graph.qq.com/oauth2.0/token?grant_type=authorization_code&client_id={}&client_secret={}&code={}&redirect_uri={}'.format(
        __get_config('QQ_CLIENT_ID'),
        __get_config('QQ_CLIENT_SECRET'),
        code,
        domain + __get_config('QQ_CLIENT_CALLBACK'))
    r = requests.get(url, headers={"Accept": 'application/json'})
    if (r.status_code == 200):
        str = r.text
        # NOTE(review): manual parse breaks if a value contains '=';
        # urllib.parse.parse_qsl would be safer.
        array = str.split("&")
        params = {}
        for item in array:
            items = item.split("=")
            params[items[0]] = items[1]
        return params
    else:
        raise RuntimeError(r.text)
# 获取用户信息
def get_user(access_token):
rs = requests.get('https://graph.qq.com/oauth2.0/me?access_token={}'.format(access_token),
headers={"Accept": 'application/json'})
if rs.status_code == 200:
jsonp = rs.text
p = re.match(r'callback\((.*?)\)', jsonp)
if p:
me = json.loads(p.group(1))
openid = me.get('openid')
params = {
'access_token': access_token,
'oauth_consumer_key': __get_config('QQ_CLIENT_ID'),
'openid': openid
}
rs = requests.get('https://graph.qq.com/user/get_user_info', params=params,
headers={"Accept": 'application/json'})
if rs.status_code == 200:
user = rs.json()
member = {}
member['name'] = user.get('nickname')
member['node_id'] = openid
member['avatar_url'] = user.get('figureurl_qq_2')
member['type'] = 1
return member;
pass
def __get_config(name):
    """Environment variable first, then Django settings; None when absent."""
    return os.environ.get(name, getattr(settings, name, None))
|
{"/models/admin.py": ["/models/models.py"], "/article/admin.py": ["/article/models.py"], "/article/tests.py": ["/article/models.py"], "/draw/draw.py": ["/models/models.py"], "/aliyun/oss_backends.py": ["/models/models.py"], "/mdeditor/views.py": ["/models/models.py"], "/myblog/test.py": ["/article/models.py"], "/myblog/views.py": ["/article/models.py", "/models/models.py"], "/myblog/templatetags/common.py": ["/article/models.py", "/models/models.py"]}
|
24,127
|
newpanjing/myblog
|
refs/heads/master
|
/draw/draw.py
|
from PIL import Image, ImageDraw, ImageFont
from io import BytesIO
import requests
import os
from shortid import short_id
from oss2 import *
from models.models import Config
def __get_db_config(group):
    """Load all key/value Config rows of *group* into a plain dict."""
    # Dict comprehension; the original bound its accumulator to ``dict``,
    # shadowing the builtin.
    return {row.get('key'): row.get('value')
            for row in Config.objects.filter(group=group).values('key', 'value')}
# Initialise an OSS client from the DB-backed "oss" config group.
def get_oss_bucket():
    """Return {'bucket': Bucket, 'cname': str} built from DB config."""
    cfg = __get_db_config('oss')
    auth = Auth(cfg.get('key'), cfg.get('secret'))
    return {
        "bucket": Bucket(auth, cfg.get('endpoint'), cfg.get('bucket')),
        "cname": cfg.get('cname')
    }
def draw(text='', url=None, x=0, y=0, font_size=24, color='#FFF'):
    """Download the cover image at *url*, stamp *text* on it, upload the
    result to OSS and return its public (cname-based) URL.

    x/y position the text; color/font_size style it.
    """
    font_path = os.path.abspath(os.path.dirname(__file__)) + "/pingfang.ttf"
    font = ImageFont.truetype(font_path, font_size)
    # NOTE(review): verify=False disables TLS certificate checks; kept for
    # behavioural compatibility but worth revisiting.
    r = requests.get(url, verify=False)
    img = Image.open(BytesIO(r.content))
    ImageDraw.Draw(img).text((x, y), text, color, font=font)
    out = BytesIO()
    img.save(out, 'png')
    filename = short_id.get_short_id() + ".png"
    # Build the OSS client once; the original called get_oss_bucket() twice
    # (once for the upload, once more for the cname).
    oss = get_oss_bucket()
    result = oss.get('bucket').put_object(filename, out.getvalue())
    print(result)
    return oss.get('cname') + "/" + filename
# Manual smoke test only; draw() with default arguments needs a real URL.
if __name__ == '__main__':
    draw()
|
{"/models/admin.py": ["/models/models.py"], "/article/admin.py": ["/article/models.py"], "/article/tests.py": ["/article/models.py"], "/draw/draw.py": ["/models/models.py"], "/aliyun/oss_backends.py": ["/models/models.py"], "/mdeditor/views.py": ["/models/models.py"], "/myblog/test.py": ["/article/models.py"], "/myblog/views.py": ["/article/models.py", "/models/models.py"], "/myblog/templatetags/common.py": ["/article/models.py", "/models/models.py"]}
|
24,128
|
newpanjing/myblog
|
refs/heads/master
|
/myblog/utils/randoms.py
|
import random
def getRandomArray(count, size):
    """Return *size* random indices, each drawn uniformly from [0, count - 1].

    Indices may repeat. *count* must be >= 1: random.randint raises
    ValueError on an empty range, matching the original behaviour.
    """
    # List comprehension replaces the manual append loop; range(size)
    # replaces the redundant range(0, size).
    return [random.randint(0, count - 1) for _ in range(size)]
|
{"/models/admin.py": ["/models/models.py"], "/article/admin.py": ["/article/models.py"], "/article/tests.py": ["/article/models.py"], "/draw/draw.py": ["/models/models.py"], "/aliyun/oss_backends.py": ["/models/models.py"], "/mdeditor/views.py": ["/models/models.py"], "/myblog/test.py": ["/article/models.py"], "/myblog/views.py": ["/article/models.py", "/models/models.py"], "/myblog/templatetags/common.py": ["/article/models.py", "/models/models.py"]}
|
24,129
|
newpanjing/myblog
|
refs/heads/master
|
/aliyun/oss_backends.py
|
from django.core.exceptions import ImproperlyConfigured
from django.core.files.storage import Storage
from oss2 import *
from django.conf import settings
import os
from shortid import short_id
from models.models import Config
def _get_db_config(group):
    """Collect the key/value Config rows of *group* into a plain dict."""
    # Accumulator renamed: the original bound it to ``dict``, shadowing
    # the builtin.
    config = {}
    for row in Config.objects.filter(group=group):
        config[row.key] = row.value
    return config
class AliyunStorage(Storage):
    """Django storage backend that writes uploads to Aliyun OSS.

    Credentials and bucket settings come from the DB-backed "oss" config
    group (see _get_db_config), not from settings.py.
    """

    def __init__(self):
        config = _get_db_config('oss')
        self.access_key_id = config.get('key')
        self.access_key_secret = config.get('secret')
        self.endpoint = config.get('endpoint')
        self.bucket_name = config.get('bucket')
        self.cname = config.get('cname')
        self.auth = Auth(self.access_key_id, self.access_key_secret)
        self.bucket = self._get_bucket(self.auth)

    def _get_bucket(self, auth):
        # NOTE(review): a cname-based Bucket (is_cname=True) was tried and
        # disabled in the original; uploads always use the raw endpoint.
        return Bucket(auth, self.endpoint, self.bucket_name)

    def _get_config(self, name):
        """Resolve *name* from the environment, then settings; raise when
        absent from both."""
        config = os.environ.get(name, getattr(settings, name, None))
        if config is not None:
            if isinstance(config, str):
                return config.strip()
            else:
                return config
        else:
            raise ImproperlyConfigured(
                "Can't find config for '%s' either in environment"
                "variable or in setting.py" % name)

    def exists(self, name):
        return self.bucket.object_exists(name)

    def url(self, name):
        # _save() already stores a fully-qualified cname URL as the name,
        # so it is passed through unchanged.
        return name

    def _save(self, name, content):
        """Upload *content* under a fresh short-id name; return its public URL.

        Django expects a path relative to MEDIA_ROOT here; this backend
        deliberately returns the absolute cname URL instead (url() passes
        it through).
        """
        suffix = os.path.splitext(name)[1]
        target_name = short_id.get_short_id() + suffix
        content.open()
        try:
            # join() accepts the chunk iterator directly — the original
            # wrapped it in a needless generator expression.
            content_str = b''.join(content.chunks())
            self.bucket.put_object(target_name, content_str)
        finally:
            # Close the file even when the upload raises (the original
            # leaked it on error).
            content.close()
        return self.cname + "/" + target_name
|
{"/models/admin.py": ["/models/models.py"], "/article/admin.py": ["/article/models.py"], "/article/tests.py": ["/article/models.py"], "/draw/draw.py": ["/models/models.py"], "/aliyun/oss_backends.py": ["/models/models.py"], "/mdeditor/views.py": ["/models/models.py"], "/myblog/test.py": ["/article/models.py"], "/myblog/views.py": ["/article/models.py", "/models/models.py"], "/myblog/templatetags/common.py": ["/article/models.py", "/models/models.py"]}
|
24,130
|
newpanjing/myblog
|
refs/heads/master
|
/myblog/urls.py
|
"""myblog URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/2.0/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path
from django.views.generic.base import RedirectView
from . import views
from django.urls import include
# Admin site branding (shown on the admin login page and page headers).
admin.site.site_title = '管理后台'
admin.site.site_header = '博客管理后台'
urlpatterns = [
    path('favicon.ico', RedirectView.as_view(url='static/favicon.ico', permanent=True)),
    path('admin/', admin.site.urls),
    # Public pages.
    path('', views.home, name='home'),
    path(r'article/<id>', views.detail, name='article'),
    path(r'category', views.category_all, name='category'),
    path(r'category/<alias>/', views.category, name='category_alias'),
    path('category/<alias>/<page>/', views.category_page),
    path('page/<alias>/', views.page, name='page'),
    path('sitemap.xml', views.sitemap, name='sitemap'),
    path('error/404', views.page_error),
    path('error/500', views.page_error),
    # Third-party login flows: entry + callback per provider.
    path('oauth/github/', views.oauth_github, name='github'),
    path('oauth/github/callback/', views.oauth_github_callback, name='github_callback'),
    path('oauth/qq/', views.oauth_qq, name='qq'),
    path('oauth/qq/callback', views.oauth_qq_callback, name='qq_callback'),
    path('oauth/seejoke/', views.oauth_seejoke, name='seejoke'),
    path('oauth/seejoke/callback', views.oauth_seejoke_callback, name='seejoke_callback'),
    path('comment/post', views.comments_save, name='comment_post'),
    path(r'search/', include('haystack.urls')),
    path('project/', views.project, name='project'),
    path('project/<name>/', views.project_detail, name='project_detail'),
    path('logout', views.logout, name='logout'),
    path('no', views.no),
    path('mdeditor/', include('mdeditor.urls')),
    path('simplepro/info/',views.simplepro_info)
]
|
{"/models/admin.py": ["/models/models.py"], "/article/admin.py": ["/article/models.py"], "/article/tests.py": ["/article/models.py"], "/draw/draw.py": ["/models/models.py"], "/aliyun/oss_backends.py": ["/models/models.py"], "/mdeditor/views.py": ["/models/models.py"], "/myblog/test.py": ["/article/models.py"], "/myblog/views.py": ["/article/models.py", "/models/models.py"], "/myblog/templatetags/common.py": ["/article/models.py", "/models/models.py"]}
|
24,131
|
newpanjing/myblog
|
refs/heads/master
|
/mdeditor/views.py
|
# -*- coding:utf-8 -*-
import os
import datetime
import json
from django.views import generic
from django.conf import settings
from django.http import HttpResponse
from django.views.decorators.csrf import csrf_exempt
from django.utils.decorators import method_decorator
from .configs import MDConfig
import base64
from shortid import short_id
from oss2 import *
from models.models import Config
def __get_db_config(group):
    """Load all key/value Config rows of *group* into a plain dict."""
    # Dict comprehension; the original bound its accumulator to ``dict``,
    # shadowing the builtin.
    return {row.get('key'): row.get('value')
            for row in Config.objects.filter(group=group).values('key', 'value')}
# Initialise an OSS client from the DB-backed "oss" config group.
def get_oss_bucket():
    """Return {'bucket': Bucket, 'cname': str} built from DB config."""
    cfg = __get_db_config('oss')
    auth = Auth(cfg.get('key'), cfg.get('secret'))
    return {
        "bucket": Bucket(auth, cfg.get('endpoint'), cfg.get('bucket')),
        "cname": cfg.get('cname')
    }
# TODO: only the 'default' config is read here; user-defined, differently
# named MDEditor configs are ignored and still need support.
MDEDITOR_CONFIGS = MDConfig('default')
class UploadView(generic.View):
    """Receive an editor.md image upload and store it in Aliyun OSS.

    Responds with the JSON envelope editor.md expects:
    {'success': 0|1, 'message': ..., 'url': ...}.
    """

    @method_decorator(csrf_exempt)
    def dispatch(self, *args, **kwargs):
        return super(UploadView, self).dispatch(*args, **kwargs)

    def post(self, request, *args, **kwargs):
        # Unused locals media_root / file_name from the original were dropped.
        upload_image = request.FILES.get("editormd-image-file", None)
        # Reject requests with no file attached.
        if not upload_image:
            return HttpResponse(json.dumps({
                'success': 0,
                'message': "未获取到要上传的图片",
                'url': ""
            }))
        # Extension whitelist check.
        file_name_list = upload_image.name.split('.')
        file_extension = file_name_list.pop(-1)
        if file_extension not in MDEDITOR_CONFIGS['upload_image_formats']:
            return HttpResponse(json.dumps({
                'success': 0,
                'message': "上传图片格式错误,允许上传图片格式为:%s" % ','.join(
                    MDEDITOR_CONFIGS['upload_image_formats']),
                'url': ""
            }))
        # Public .name replaces the private ._name the original touched;
        # both hold the client-supplied file name.
        suffix = os.path.splitext(upload_image.name)[1]
        target_name = short_id.get_short_id() + suffix
        oss = get_oss_bucket()
        oss.get('bucket').put_object(target_name, upload_image)
        url = oss.get('cname') + '/' + target_name
        return HttpResponse(json.dumps({'success': 1,
                                        'message': "上传成功!",
                                        'url': url}))
|
{"/models/admin.py": ["/models/models.py"], "/article/admin.py": ["/article/models.py"], "/article/tests.py": ["/article/models.py"], "/draw/draw.py": ["/models/models.py"], "/aliyun/oss_backends.py": ["/models/models.py"], "/mdeditor/views.py": ["/models/models.py"], "/myblog/test.py": ["/article/models.py"], "/myblog/views.py": ["/article/models.py", "/models/models.py"], "/myblog/templatetags/common.py": ["/article/models.py", "/models/models.py"]}
|
24,132
|
newpanjing/myblog
|
refs/heads/master
|
/myblog/utils/oss.py
|
import oss2
def get_config(group):
    """Return the static OSS connection settings.

    The parameter (named ``str`` in the original, shadowing the builtin)
    is accepted for interface compatibility but unused: the same
    hard-coded dict is always returned.
    """
    return {
        'key': '',
        'secret': '',
        'bucket': '',
        'endpoint': 'http://oss-cn-shenzhen.aliyuncs.com/',
    }
def put_object(file, name='test.jpg'):
    """Upload *file* to the configured OSS bucket under *name*.

    *name* defaults to 'test.jpg', the value the original hard-coded, so
    existing callers are unaffected.
    """
    oss_config = get_config('oss')
    auth = oss2.Auth(oss_config['key'], oss_config['secret'])
    bucket = oss2.Bucket(auth, oss_config['endpoint'], oss_config['bucket'])
    results = bucket.put_object(name, file)
    print(results)
|
{"/models/admin.py": ["/models/models.py"], "/article/admin.py": ["/article/models.py"], "/article/tests.py": ["/article/models.py"], "/draw/draw.py": ["/models/models.py"], "/aliyun/oss_backends.py": ["/models/models.py"], "/mdeditor/views.py": ["/models/models.py"], "/myblog/test.py": ["/article/models.py"], "/myblog/views.py": ["/article/models.py", "/models/models.py"], "/myblog/templatetags/common.py": ["/article/models.py", "/models/models.py"]}
|
24,133
|
newpanjing/myblog
|
refs/heads/master
|
/models/tests.py
|
from pysolr import Solr
# Ad-hoc Solr connectivity check: query the local "myblog" core for "java"
# and dump the raw result. Runs at import time and needs a live Solr.
conn = Solr("http://127.0.0.1:8983/solr/myblog", timeout=10000)
rs = conn.search("java")
print(rs)
|
{"/models/admin.py": ["/models/models.py"], "/article/admin.py": ["/article/models.py"], "/article/tests.py": ["/article/models.py"], "/draw/draw.py": ["/models/models.py"], "/aliyun/oss_backends.py": ["/models/models.py"], "/mdeditor/views.py": ["/models/models.py"], "/myblog/test.py": ["/article/models.py"], "/myblog/views.py": ["/article/models.py", "/models/models.py"], "/myblog/templatetags/common.py": ["/article/models.py", "/models/models.py"]}
|
24,134
|
newpanjing/myblog
|
refs/heads/master
|
/oauth/seejoke_oauth.py
|
import os
import requests
from django.conf import settings
BASE_URL = 'https://www.seejoke.com/service'
# Build the seejoke authorisation URL the visitor is redirected to.
def get_auth_url(request):
    """Return the provider's auth2 URL with client id, fixed state and a
    callback pointing back at this host."""
    domain = request.scheme + "://" + request.META.get("HTTP_HOST")
    clientId = __get_config("SEEJOKE_CLIENT_ID")
    redirect_uri = domain + __get_config("SEEJOKE_CLIENT_CALLBACK")
    # NOTE(review): the state is a fixed string and is never checked in the
    # callback, so it offers no CSRF protection.
    state = '88cto'
    return BASE_URL + "/auth2/auth?client_id=" + clientId + "&state=" + state + "&response_type=code" + "&redirect_uri=" + redirect_uri
# Fetch the seejoke user bound to *token*.
def get_user(token):
    """Call the provider's /user endpoint and return its JSON payload.

    Raises RuntimeError carrying the response body on any non-200 answer —
    consistent with the QQ oauth module (the original raised a bare
    RuntimeError with no context).
    """
    clientId = __get_config("SEEJOKE_CLIENT_ID")
    secret = __get_config("SEEJOKE_CLIENT_SECRET")
    r = requests.get(BASE_URL + '/user',
                     params={
                         'client_id': clientId,
                         'secret': secret,
                         'state': '88cto',
                         'token': token
                     },
                     headers={"Accept": 'application/json'})
    if r.status_code == 200:
        return r.json()
    raise RuntimeError(r.text)
def __get_config(name):
    """Environment variable *name* wins; otherwise fall back to Django
    settings. Returns None when absent from both."""
    value = os.environ.get(name, getattr(settings, name, None))
    return value
|
{"/models/admin.py": ["/models/models.py"], "/article/admin.py": ["/article/models.py"], "/article/tests.py": ["/article/models.py"], "/draw/draw.py": ["/models/models.py"], "/aliyun/oss_backends.py": ["/models/models.py"], "/mdeditor/views.py": ["/models/models.py"], "/myblog/test.py": ["/article/models.py"], "/myblog/views.py": ["/article/models.py", "/models/models.py"], "/myblog/templatetags/common.py": ["/article/models.py", "/models/models.py"]}
|
24,135
|
newpanjing/myblog
|
refs/heads/master
|
/gunicorn.conf.py
|
import multiprocessing
bind = "127.0.0.1:8001"
workers = multiprocessing.cpu_count() * 2 + 1  # number of worker processes
threads = 2  # threads per worker process
# NOTE(review): the error log is an absolute deploy path while the access
# log is relative to the CWD — probably an unintended asymmetry.
errorlog = '/data/www/myblog/gunicorn.error.log'
accesslog = './gunicorn.access.log'
loglevel = 'info'
proc_name = 'gunicorn_blog_project'
|
{"/models/admin.py": ["/models/models.py"], "/article/admin.py": ["/article/models.py"], "/article/tests.py": ["/article/models.py"], "/draw/draw.py": ["/models/models.py"], "/aliyun/oss_backends.py": ["/models/models.py"], "/mdeditor/views.py": ["/models/models.py"], "/myblog/test.py": ["/article/models.py"], "/myblog/views.py": ["/article/models.py", "/models/models.py"], "/myblog/templatetags/common.py": ["/article/models.py", "/models/models.py"]}
|
24,136
|
newpanjing/myblog
|
refs/heads/master
|
/myblog/test.py
|
import os, sys, django
from django.test import TestCase
# Standalone maintenance script (not a real test): regenerates every
# article's cover image. Django must be configured before model imports.
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "myblog.settings")
django.setup()
from draw import draw
from article.models import Cover, Article
import random
from django.db import models
all = Article.objects.all()
total = Cover.objects.count()
for item in all:
    # if item.image != '':
    #     continue
    # Pick a random cover template and stamp the article title onto it.
    c = Cover.objects.all()[random.randint(0, total - 1)]
    url = draw.draw(text=item.title, url=c.image.url, font_size=c.font_size, color=c.color, x=c.x, y=c.y)
    item.image.name = url
    print(url)
    item.save()
# all = Cover.objects.all()
# for index, item in enumerate(all):
#     draw.draw(item, name=index, font_size=item.font_size, color=item.color, x=item.x, y=item.y)
#     print(item, index)
|
{"/models/admin.py": ["/models/models.py"], "/article/admin.py": ["/article/models.py"], "/article/tests.py": ["/article/models.py"], "/draw/draw.py": ["/models/models.py"], "/aliyun/oss_backends.py": ["/models/models.py"], "/mdeditor/views.py": ["/models/models.py"], "/myblog/test.py": ["/article/models.py"], "/myblog/views.py": ["/article/models.py", "/models/models.py"], "/myblog/templatetags/common.py": ["/article/models.py", "/models/models.py"]}
|
24,137
|
newpanjing/myblog
|
refs/heads/master
|
/oauth/github_oauth.py
|
from django.conf import settings
import requests
import os
from shortid import short_id
BASE_URL = 'https://github.com'


def _get(url, params):
    """GET BASE_URL + *url* and return the decoded JSON body.

    Raises RuntimeError carrying the response text on any non-200 status
    (the original raised a bare RuntimeError with no context).
    """
    r = requests.get(BASE_URL + url, params=params, headers={"Accept": 'application/json'})
    if r.status_code == 200:
        return r.json()
    raise RuntimeError(r.text)
# Build the GitHub authorisation URL and stash a one-shot state token.
def get_auth_url(request):
    """Return GitHub's /login/oauth/authorize URL for this host.

    A fresh short-id is stored in the session as ``state`` so the callback
    can reject forged requests.
    """
    host = request.META.get("HTTP_HOST")
    redirect_uri = request.scheme + "://" + host + __get_config("GITHUB_CLIENT_CALLBACK")
    state = short_id.get_short_id()
    request.session['state'] = state
    parts = [
        BASE_URL, "/login/oauth/authorize?client_id=", __get_config("GITHUB_CLIENT_ID"),
        "&redirect_uri=", redirect_uri,
        "&state=", state,
    ]
    return ''.join(parts)
# Exchange the OAuth *code* for an access token.
def get_access_token(request, code):
    """Exchange *code* at GitHub's access_token endpoint.

    Returns the decoded JSON payload (contains ``access_token`` on success).
    """
    domain = request.scheme + "://" + request.META.get("HTTP_HOST")
    clientId = __get_config("GITHUB_CLIENT_ID")
    client_secret = __get_config("GITHUB_CLIENT_SECRET")
    redirect_uri = domain + __get_config("GITHUB_CLIENT_CALLBACK")
    # NOTE(review): this state is freshly generated rather than the one
    # stored in the session by get_auth_url; GitHub ignores it here, but
    # reusing the session value would be more coherent.
    state = short_id.get_short_id()
    # Renamed from ``json`` — the original shadowed the stdlib module name.
    payload = _get('/login/oauth/access_token', {
        "client_id": clientId,
        "client_secret": client_secret,
        "code": code,
        "redirect_uri": redirect_uri,
        "state": state
    })
    return payload
# Fetch the GitHub account behind *access_token*.
def get_user(access_token):
    """Return the authenticated user's profile from the GitHub API.

    The token is sent in the Authorization header: GitHub removed support
    for ``?access_token=`` query-string authentication in 2021, so the
    original query-param call no longer authenticates.
    Raises RuntimeError with the response body on any non-200 status.
    """
    r = requests.get('https://api.github.com/user',
                     headers={"Accept": 'application/json',
                              "Authorization": 'token {}'.format(access_token)})
    if r.status_code == 200:
        return r.json()
    raise RuntimeError(r.text)
def __get_config(name):
    """Prefer the environment value of *name*; fall back to settings."""
    fallback = getattr(settings, name, None)
    return os.environ.get(name, fallback)
|
{"/models/admin.py": ["/models/models.py"], "/article/admin.py": ["/article/models.py"], "/article/tests.py": ["/article/models.py"], "/draw/draw.py": ["/models/models.py"], "/aliyun/oss_backends.py": ["/models/models.py"], "/mdeditor/views.py": ["/models/models.py"], "/myblog/test.py": ["/article/models.py"], "/myblog/views.py": ["/article/models.py", "/models/models.py"], "/myblog/templatetags/common.py": ["/article/models.py", "/models/models.py"]}
|
24,138
|
newpanjing/myblog
|
refs/heads/master
|
/myblog/views.py
|
from django.http import HttpResponse
from django.core.paginator import Paginator
from django.shortcuts import render
from article.models import Article
from article.models import Category
from models.models import Page
from article.models import Member
from article.models import Comment
import datetime
from django.http import Http404
from oauth import github_oauth
from oauth import qq_oauth
from oauth import seejoke_oauth
from django.http import HttpResponseRedirect
from shortid import short_id
import json
from django.forms.models import model_to_dict
from .utils import randoms
import requests
import markdown
from jieba import analyse
import random
from .utils import cache
def __get_home_data():
    """Query the home page payload: pinned posts plus the 10 newest others."""
    fields = ('sid', 'image', 'title', 'subject', 'createDate', 'hits', 'category__alias', 'category__name')
    pinned = Article.objects.filter(top=True).order_by('-id').values(*fields)
    latest = Article.objects.filter(top=False).order_by("-id").values(*fields)[:10]
    return {"tops": list(pinned), "articles": list(latest)}
# Home page: served from cache, rebuilt via __get_home_data on a miss.
def home(request):
    data = cache.get(cache.CACHE_HOME_KEY, __get_home_data)
    return render(request, 'index.html', data)
# Article detail page.
def detail(request, id):
    """Render one article, looked up by numeric id or by short-id (sid)."""
    query_set = {}
    # All-digit values are primary keys; anything else is a sid.
    if id.isdigit():
        query_set["id"] = id
    else:
        query_set["sid"] = id
    # Fetch exactly one article or 404.
    article = None
    try:
        article = Article.objects.get(**query_set)
    except Article.DoesNotExist:
        raise Http404
    # Bump the hit counter.
    # NOTE(review): read-modify-write is racy under concurrent requests;
    # an F('hits') + 1 update would be atomic.
    article.hits += 1
    article.save()
    # Render markdown: a [TOC] marker is prepended so the toc extension
    # always has an anchor; any leftover marker is stripped afterwards.
    article.content = '[TOC]\n' + article.content
    article.content = markdown.markdown(article.content, extensions=[
        'markdown.extensions.extra',
        'markdown.extensions.codehilite',
        'markdown.extensions.toc',
    ])
    if '[TOC]' in article.content:
        article.content = article.content.replace('[TOC]', '')
    # Per-page token checked by comments_save to reject forged posts.
    sid = short_id.get_short_id()
    request.session['sid'] = sid
    # Comment thread (type 0 == article comments).
    comment = get_comment(0, article.id)
    # 10 random recommended articles.
    recommends = get_recommend(10)
    return render(request, "detail.html", {
        'id': id,
        'article': article,
        'sid': sid,
        'comment': comment,
        'recommends': recommends
    })
def get_recommend(size):
    """Pick *size* random articles (title + sid); may contain duplicates."""
    array = []
    count = Article.objects.count()
    if count == 0:
        return array
    indexs = randoms.getRandomArray(count, size)
    # NOTE(review): one query per index (N+1); fine at size=10 but a single
    # sliced query would be cheaper.
    for i in indexs:
        obj = Article.objects.all().values('title', 'sid')[i]
        array.append(obj)
    return array
# All categories (no alias filter).
def category_all(request):
    return category(request, None)
# One category, first page.
def category(request, alias):
    return category_page(request, alias, 1)
# Load the comment thread for one target.
def get_comment(type, targetId):
    """Return participant count, total count and the top-level comments
    (each annotated with its replies) for (*type*, *targetId*).

    ``type`` shadows the builtin but is part of the signature used across
    this module, so the parameter name is kept.
    """
    # Number of top-level commenters ("participants").
    people = Comment.objects.filter(type=type, targetId=targetId, parentId__isnull=True).count()
    # Total comments including replies.
    count = Comment.objects.filter(type=type, targetId=targetId).count()
    # Top-level comments, newest first. Renamed from ``list`` — the
    # original shadowed the builtin.
    threads = Comment.objects.filter(type=type, targetId=targetId, parentId__isnull=True).order_by("-id")
    # NOTE(review): one replies-query per thread (N+1); pagination is still
    # unimplemented, as in the original.
    for item in threads:
        item.comments = Comment.objects.filter(parentId=item.id).order_by("-id")
    return {
        'people': people,
        'count': count,
        'list': threads
    }
# Category listing with pagination.
def category_page(request, alias, page):
    """Render page *page* of the article list, optionally restricted to the
    category whose alias is *alias*.

    An all-digit *alias* is really a page number for the "all categories"
    view (routed as /category/<n>).
    """
    category = None
    suffix = ''
    if alias:
        suffix = "/" + alias
        if alias.isdigit():
            page = alias
            suffix = ''
        else:
            category = Category.objects.values('id', 'name').get(alias=alias)
    # Renamed from ``filter`` — the original shadowed the builtin.
    criteria = {}
    if category:
        criteria["category"] = category.get('id')
    count = Article.objects.filter(**criteria).count()
    articles = Article.objects.filter(**criteria).values('image', 'category__alias', 'category__name', 'sid',
                                                         'title', 'subject', 'createDate', 'hits').order_by("-id")
    size = 10  # articles per page
    show = 5   # page links shown in the pager
    paginator = Paginator(articles, size)
    articles = paginator.page(page)
    return render(request, 'category.html', {
        "cdata": category,
        "articles": articles,
        "total": count,
        "current": page,
        "url": '/category' + suffix,
        "size": size,
        "show_number": show
    })
# Custom static page (Page model), rendered from markdown.
def page(request, alias):
    page = Page.objects.values('title', 'content', 'id').get(alias=alias)
    # NOTE(review): safe_mode/enable_attributes were removed in
    # Python-Markdown 3.x — this call only works on the 2.x series.
    page['content'] = markdown.markdown(page.get('content'), extensions=[
        'markdown.extensions.extra',
        'markdown.extensions.codehilite',
        'markdown.extensions.toc',
    ], safe_mode=True, enable_attributes=False)
    # Anti-forgery token for the comment form (checked by comments_save).
    sid = short_id.get_short_id()
    request.session['sid'] = sid
    # type 2 == page comments.
    comment = get_comment(2, page.get('id'))
    return render(request, 'page.html', {
        "page": page,
        "sid": sid,
        "comment": comment
    })
# sitemap.xml, generated on the fly.
def sitemap(request):
    """Emit a Baidu-flavoured sitemap covering categories, pages and
    articles. Output is byte-identical to the original's three inlined
    copies of the <url> template."""
    articles = Article.objects.values('sid').all().order_by("-id")
    domain = request.scheme + "://" + request.META.get("HTTP_HOST")
    buffer = []
    buffer.append(
        '<?xml version="1.0" encoding="utf-8" standalone="no"?>\n<urlset xmlns="http://www.sitemaps.org/schemas/sitemap/0.9"\nxmlns:mobile="http://www.baidu.com/schemas/sitemap-mobile/1/">\n')
    # Category pages.
    for category in Category.objects.all().values('alias').order_by("sort"):
        buffer.extend(_sitemap_entry('{domain}/category/{alias}'.format(domain=domain, alias=category.get('alias'))))
    # Custom pages.
    for page in Page.objects.values('alias').all():
        buffer.extend(_sitemap_entry('{domain}/page/{alias}'.format(domain=domain, alias=page.get('alias'))))
    # Articles.
    for article in articles:
        buffer.extend(_sitemap_entry('{domain}/article/{sid}'.format(domain=domain, sid=article.get('sid'))))
    buffer.append('</urlset>')
    return HttpResponse(content=buffer, content_type="application/xml")


def _sitemap_entry(loc):
    """One <url> element as a list of fragments — byte-identical to the
    three copies the original inlined (including the missing newline after
    the mobile tag)."""
    return [
        "<url>\n",
        '<loc>{}</loc>\n'.format(loc),
        '<mobile:mobile type="pc,mobile"/>',
        '<priority>0.8</priority>\n',
        '<lastmod>{date}</lastmod>\n'.format(date=datetime.datetime.now().strftime('%Y-%m-%d')),
        '<changefreq>daily</changefreq>\n',
        '</url>\n',
    ]
# Shared 404/500 error page (routed from /error/404 and /error/500).
def page_error(request):
    """Render error.html with a code/message chosen from the request path."""
    params = {}
    if "500" in request.path:  # idiomatic membership test (was .find() != -1)
        params["code"] = "500"
        params['msg'] = "服务器内部错误,请稍后重试!"
    else:
        params["code"] = "404"
        params['msg'] = "抱歉,该页面没有找到!"
    return render(request, 'error.html', params)
# Redirect the visitor to GitHub's authorisation page.
def oauth_github(request):
    """Remember the referring page (to return to after login), then redirect."""
    auth_url = github_oauth.get_auth_url(request)
    referer = request.META.get('HTTP_REFERER')
    if referer is not None:
        request.session['referer'] = referer
    return HttpResponseRedirect(auth_url)
# GitHub login callback.
def oauth_github_callback(request):
    """Validate the anti-forgery state, trade the code for a token, then
    upsert the member and redirect back."""
    code = request.GET.get("code")
    state = request.GET.get("state")
    # session.get avoids a KeyError when no state was ever stored (e.g. the
    # callback is hit directly); such requests are rejected the same way.
    if request.session.get('state') != state:
        return HttpResponse('参数校验不通过,疑是非法请求。')
    rs = github_oauth.get_access_token(request, code)
    user = github_oauth.get_user(rs['access_token'])
    user['type'] = 0  # 0 == GitHub account
    return update_user(request, user)
def update_user(request, user):
    """Create or refresh the Member matching *user* (keyed on node_id),
    store it in the session, and redirect back to the referring page."""
    member = None
    try:
        member = Member.objects.filter(nodeId=user.get("node_id")).get()
        # Fall back to the login name when the profile has no display name.
        if user.get('name') is None:
            member.name = user.get('login')
        else:
            member.name = user.get('name')
        member.avatar = user.get('avatar_url')
        member.blog = user.get('blog')
        member.url = user.get('html_url')
        member.email = user.get('email')
        member.nodeId = user.get('node_id')
        member.type = user.get('type')
        member.save()
    except Member.DoesNotExist:
        # Narrowed from a bare ``except:`` — only "no such member" should
        # trigger creation; other errors now propagate instead of being
        # silently swallowed.
        member = Member.objects.create(
            name=user.get('name'),
            avatar=user.get('avatar_url'),
            blog=user.get('blog'),
            url=user.get('html_url'),
            email=user.get('email'),
            nodeId=user.get('node_id'),
            type=user.get('type')
        )
    if member.url is None:
        member.url = 'javascript:;'
    request.session['member'] = model_to_dict(member)
    # Return to the page the login started from; session.get avoids a
    # KeyError when no referer was recorded.
    url = request.session.get('referer') or '/'
    # A logout URL would immediately log the user out again — go home.
    if url.find('logout') != -1:
        url = '/'
    return HttpResponseRedirect(url)
# Redirect the visitor to QQ's authorisation page.
def oauth_qq(request):
    url = qq_oauth.get_auth_url(request)
    # Remember the referring page so the callback can return there.
    if 'HTTP_REFERER' in request.META:
        referer = request.META['HTTP_REFERER']
        request.session['referer'] = referer
    return HttpResponseRedirect(url)
# QQ login callback.
def oauth_qq_callback(request):
    """Check the state token, fetch the QQ profile, then upsert the member."""
    code = request.GET.get("code")
    state = request.GET.get("state")
    # session.get avoids a KeyError when the callback is hit without a
    # prior oauth_qq redirect; such requests are rejected.
    if request.session.get('state') != state:
        return HttpResponse('参数校验不通过,疑是非法请求。')
    rs = qq_oauth.get_access_token(request, code)
    access_token = rs.get('access_token')
    user = qq_oauth.get_user(access_token)
    return update_user(request, user)
# Redirect the visitor to seejoke's authorisation page.
def oauth_seejoke(request):
    url = seejoke_oauth.get_auth_url(request)
    # Remember the referring page so the callback can return there.
    if 'HTTP_REFERER' in request.META:
        referer = request.META['HTTP_REFERER']
        request.session['referer'] = referer
    return HttpResponseRedirect(url)
# seejoke login callback (the token arrives directly; no code exchange).
def oauth_seejoke_callback(request):
    token = request.GET.get("token")
    rs = seejoke_oauth.get_user(token)
    user = {}
    # code 0 == success in the provider's response envelope.
    if (rs.get('code') == 0):
        data = rs.get('data')
        user['name'] = data.get('userName')
        user['email'] = data.get('email')
        user['node_id'] = data.get('id')
        user['avatar_url'] = data.get('head')
        user['url'] = data.get('address')
        user['type'] = 2  # 2 == seejoke account
    # NOTE(review): on failure an empty dict reaches update_user, which
    # then creates a member with all-None fields.
    return update_user(request, user)
# Persist a comment posted from an article/page/project.
def comments_save(request):
    """Validate the per-page sid token and the login state, then create the
    Comment row.

    Returns a JSON envelope: code 1 on success, 0 on rejection.
    """
    result = {}
    session = request.session
    post = request.POST
    # .get() instead of [] — the original raised KeyError for visitors with
    # no 'sid'/'member' in the session, and dereferenced member['id'] before
    # the None check, so the "not logged in" branch was unreachable. Now
    # such requests get the intended JSON error, and the Member lookup only
    # happens on the success path.
    ssid = session.get('sid')
    sid = post.get('SID')
    targetId = post.get('TARGET_ID')
    parentId = post.get('parentId')
    member = session.get('member')
    atMemberId = post.get('atMemberId')
    type = post.get('type')  # original local name kept (shadows builtin)
    if type is None:
        type = 0
    if ssid != sid:
        result = {
            'code': 0,
            'msg': '非法请求'
        }
    elif member is None:
        result = {
            'code': 0,
            'msg': '用户未登录'
        }
    else:
        dbMember = Member.objects.get(id=member['id'])
        obj = Comment.objects.create(
            member=dbMember,
            content=post.get('content'),
            type=type,
            targetId=targetId,
            parentId=parentId,
            atMember_id=atMemberId
        )
        result = {
            'code': 1,
            'msg': '评论成功',
            'id': obj.id
        }
    return HttpResponse(json.dumps(result), content_type="application/json")
def __get_project():
    """Fetch this account's GitHub repos, newest-updated first."""
    resp = requests.get("https://api.github.com/users/newpanjing/repos?sort=updated&direction=desc")
    if resp.status_code != 200:
        raise RuntimeError(resp.text)
    return resp.json()
def project(request):
    # Repo list from GitHub, cached for 12 hours (43200 s).
    rs = cache.get(cache.CACHE_PROJECT_KEY, __get_project, 43200)
    return render(request, 'project.html', {
        'rs': rs
    })
# Project detail: GitHub repo metadata + rendered README + comments.
def project_detail(request, name):
    r = requests.get("https://api.github.com/repos/newpanjing/{}".format(name))
    rs = None
    readme = None
    tags = None
    if r.status_code == 200:
        rs = r.json()
        # Derive up to 5 keyword tags from the repo description.
        if rs["description"]:
            arry = analyse.extract_tags(rs["description"], topK=5)
            tags = ','.join(arry)
        # Random query-string suffix busts any intermediate cache.
        r = requests.get("https://raw.githubusercontent.com/newpanjing/{}/master/README.md?_={}".format(name,
                                                                                                       random.uniform(
                                                                                                           100, 999)))
        if r.status_code == 200:
            readme = markdown.markdown(r.text)
    # Anti-forgery token for the comment form.
    sid = short_id.get_short_id()
    request.session['sid'] = sid
    # type 3 == project comments, keyed by repo name.
    comment = get_comment(3, name)
    return render(request, 'project_detail.html', {
        'item': rs,
        'readme': readme,
        'name': name,
        'tags': tags,
        "sid": sid,
        "comment": comment
    })
def logout(request):
    # Drop the session's member (if any) and show the logout page.
    if request.session.get('member'):
        del request.session['member']
    return render(request, 'logout.html')
# Placeholder page.
def no(request):
    return render(request, 'no.html')
# Info endpoint used by the simplepro admin skin.
def simplepro_info(request):
    return render(request, 'admin/tips.html')
|
{"/models/admin.py": ["/models/models.py"], "/article/admin.py": ["/article/models.py"], "/article/tests.py": ["/article/models.py"], "/draw/draw.py": ["/models/models.py"], "/aliyun/oss_backends.py": ["/models/models.py"], "/mdeditor/views.py": ["/models/models.py"], "/myblog/test.py": ["/article/models.py"], "/myblog/views.py": ["/article/models.py", "/models/models.py"], "/myblog/templatetags/common.py": ["/article/models.py", "/models/models.py"]}
|
24,139
|
newpanjing/myblog
|
refs/heads/master
|
/myblog/templatetags/common.py
|
import datetime
from ..utils import pager
from django import template
import re
import json
register = template.Library() # 这一句必须这样写
from article.models import Article
from article.models import Category
from models.models import Site
from models.models import Menu
from models.models import Notice
from models.models import Config
from ..utils import randoms
from ..utils import cache
def get_cache():
    # Everything the base template needs: friend links, nav categories,
    # menus, latest notice and the 'site' config group.
    return {
        'sites': list(Site.objects.order_by("sort").values('site', 'name')),
        'categorys': list(Category.objects.filter(display=True).order_by("sort").values('name', 'alias')),
        'menus': list(Menu.objects.filter(display=True).order_by('sort').values('name', 'icon', 'href')),
        'notice': Notice.objects.values('createDate', 'content').last(),
        'configs': get_config('site')
    }
def get_recommend():
    # Cache-loader wrapper: five random recommended articles.
    return getRecommend(5)
@register.simple_tag
def loadData():
    """Template tag: common layout data, served through the cache layer."""
    # Redis-backed cache.
    results = cache.get(cache.CACHE_COMMON_KEY, get_cache)
    # Recommendations refresh every 5 minutes; the rest stays cached until
    # explicitly invalidated on update.
    results['recommeneds'] = cache.get(cache.CACHE_RECOMMEND_KEY, get_recommend, 300)
    return results
def get_config(group):
    """Return the key/value Config rows of *group* as a dict."""
    # Dict comprehension replaces the manual accumulation loop.
    rows = Config.objects.filter(group=group).values('key', 'value')
    return {row.get('key'): row.get('value') for row in rows}
def getRecommend(size):
    """Pick *size* random articles that have a cover image (may repeat)."""
    array = []
    count = Article.objects.filter(image__isnull=False).exclude(image='').count()
    if count == 0:
        return array
    indexs = randoms.getRandomArray(count, size)
    # NOTE(review): one query per index (N+1) — acceptable for size=5.
    for i in indexs:
        obj = \
            Article.objects.filter(image__isnull=False).exclude(image='').values('title', 'sid', 'image',
                                                                                 'category__name',
                                                                                 'category__alias',
                                                                                 'createDate')[i]
        array.append(obj)
    return array
@register.filter
def url(url):
    """Make *url* usable in an href: absolute http(s) URLs pass through,
    anything else gets a leading slash."""
    u = str(url)
    if not u.startswith('http'):  # idiomatic (was .find('http') != 0)
        u = "/" + u
    return u
@register.filter
def filter(url):
    """Fallback filter: empty/None URLs become a no-op href.

    The function name shadows the builtin ``filter`` but is the
    template-facing name, so it is kept.
    """
    return url if url else 'javascript:;'
@register.filter
def converToHtml(text):
    """Turn notice plain text into minimal HTML: CRLF -> <br/>."""
    text = text.replace('\r\n', "<br/>")
    # NOTE(review): this strips EVERY space, not just entities — looks
    # intentional for CJK text but worth confirming.
    text = text.replace(' ', '')
    return text
@register.filter
def clear(text):
    """Strip HTML entities (&amp;-style) and all whitespace from *text*."""
    without_entities = re.sub(re.compile(r'([&]{0,1}(\w+;))'), '', text)
    return re.sub(re.compile(r'\r|\n|\t|\s'), '', without_entities)
@register.simple_tag
def get_now():
    # Current server time, for template use (e.g. the footer year).
    return datetime.datetime.now()
@register.simple_tag(takes_context=True)
def get_pager(context):
    """Render the Bootstrap pagination bar for category pages.

    Reads total/current/url/size/show_number from the template context;
    returns '' when there is only one page.
    """
    total = context["total"]
    current = int(context["current"])
    url = context["url"]
    size = int(context["size"])
    show_number = int(context["show_number"])
    # Ceiling division: total number of pages.
    total_page_num = int((total - 1) / size + 1)
    if total_page_num < 2:
        return ""
    array = pager.get_numbers(total, size, current, show_number)
    buffer = []
    buffer.append('<div class="pager-block"><nav><ul class="pagination">')
    # "previous" link, disabled on the first page.
    prev_cls = ''
    href = url + '/' + str(current - 1)
    if current <= 1:
        prev_cls = 'class="disabled"'
        href = 'javascript:;'
    buffer.append('<li ' + prev_cls + '><a href="' + href + '" aria-label="Previous">上一页</a></li>')
    for i in array:
        page = str(i)
        active = ''
        if current == i:
            active = 'class="active"'
        buffer.append(' <li ' + active + '><a href="' + url + '/' + page + '">' + page + '</a></li>')
    # "next" link, disabled on the last page. Renamed from ``next`` — the
    # original shadowed the builtin.
    next_cls = ''
    href = url + '/' + str(current + 1)
    if current == total_page_num:
        next_cls = 'class="disabled"'
        href = 'javascript:;'
    buffer.append(
        '<li ' + next_cls + '><a href="' + href + '" aria-label="Next">下一页</a></li></ul></nav></div>')
    return ''.join(buffer)
|
{"/models/admin.py": ["/models/models.py"], "/article/admin.py": ["/article/models.py"], "/article/tests.py": ["/article/models.py"], "/draw/draw.py": ["/models/models.py"], "/aliyun/oss_backends.py": ["/models/models.py"], "/mdeditor/views.py": ["/models/models.py"], "/myblog/test.py": ["/article/models.py"], "/myblog/views.py": ["/article/models.py", "/models/models.py"], "/myblog/templatetags/common.py": ["/article/models.py", "/models/models.py"]}
|
24,156
|
soon/berfmk
|
refs/heads/master
|
/berfmk/utils.py
|
# -*- coding: utf-8 -*-
#-------------------------------------------------------------------------------
from django.http import Http404
#-------------------------------------------------------------------------------
def get_number_from_param_or_404(param):
    """Convert *param* (typically a URL-captured string) to ``int``.

    Returns:
        The integer value of *param*.
    Raises:
        Http404: when *param* cannot be converted to an integer.
    """
    try:
        return int(param)
    except (TypeError, ValueError):
        # TypeError covers None / non-numeric objects; the original only
        # caught ValueError (and carried a stray trailing semicolon).
        raise Http404()
#-------------------------------------------------------------------------------
|
{"/art/views.py": ["/art/models.py"], "/news/views.py": ["/news/models.py", "/news/utils.py"], "/berfmk/templatetags/get_all_forums.py": ["/forum/models.py"], "/news/utils.py": ["/news/models.py", "/berfmk/utils.py"], "/forum/views.py": ["/forum/models.py", "/forum/utils.py", "/berfmk/utils.py"], "/forum/utils.py": ["/forum/models.py"]}
|
24,157
|
soon/berfmk
|
refs/heads/master
|
/art/views.py
|
# -*- coding: utf-8 -*-
#-------------------------------------------------------------------------------
from django.shortcuts import redirect
from django.views.generic.simple import direct_to_template
from django.conf import settings
from django.http import Http404
#-------------------------------------------------------------------------------
from art.models import Art
#-------------------------------------------------------------------------------
def art_page(request, name):
    # Stub view: not implemented yet — returns None, which Django will
    # reject if this URL is ever routed.
    pass
    # try:
#-------------------------------------------------------------------------------
#-------------------------------------------------------------------------------
|
{"/art/views.py": ["/art/models.py"], "/news/views.py": ["/news/models.py", "/news/utils.py"], "/berfmk/templatetags/get_all_forums.py": ["/forum/models.py"], "/news/utils.py": ["/news/models.py", "/berfmk/utils.py"], "/forum/views.py": ["/forum/models.py", "/forum/utils.py", "/berfmk/utils.py"], "/forum/utils.py": ["/forum/models.py"]}
|
24,158
|
soon/berfmk
|
refs/heads/master
|
/news/views.py
|
# -*- coding: utf-8 -*-
#-------------------------------------------------------------------------------
from django.shortcuts import redirect
from django.views.generic.simple import direct_to_template
from django.conf import settings
from django.http import Http404
from django.core.paginator import Paginator, EmptyPage
#-------------------------------------------------------------------------------
from news.models import News
from news.utils import get_news_or_404
from news.utils import get_number_from_param_or_404
#-------------------------------------------------------------------------------
def news(request, direction, page):
    """List news items, optionally filtered to site or school news.

    Args:
        request: the HTTP request.
        direction: '' (all), 'site/' or 'school/'; any other value -> 404.
        page: page number as a URL-captured string.
    """
    page = get_number_from_param_or_404(page)
    schoolNews, siteNews = False, False
    part_of_title = ''
    if direction == 'site/':
        siteNews = True
        part_of_title = ' сайта'
    elif direction == 'school/':
        schoolNews = True
        part_of_title = ' школы'
    elif direction != '':
        raise Http404()
    # NOTE
    # Maybe add a dedicated page for empty result pages.
    # soon(30.08.12, 11:04)
    # No longer sure what I meant by this.
    news = News.objects.filter(hidden = False)
    # Hidden items are only fetched for users with the view_hidden perm.
    hidden_news = News.objects.filter(hidden = True) \
        if request.user.has_perm('news.view_hidden') \
        else []
    if(schoolNews):
        news = news.filter(schoolNews = True)
        if(hidden_news):
            hidden_news = hidden_news.filter(schoolNews = True)
    elif(siteNews):
        news = news.filter(siteNews = True)
        if(hidden_news):
            hidden_news = hidden_news.filter(siteNews = True)
    # 10 items per page; out-of-range pages fall back to the last page.
    paginator = Paginator(news, 10)
    try:
        news = paginator.page(page)
    except EmptyPage:
        news = paginator.page(paginator.num_pages)
    # direction[:-1] maps 'site/' -> 'add_sitenews', 'school/' -> 'add_schoolnews'.
    can_add = request.user.has_perm('news.add_{0}news'.format(direction[:-1])) \
        or request.user.has_perm('news.add_hidden') \
        or request.user.has_perm('news.add_only_hidden')
    return direct_to_template(
        request,
        'news/news.hdt', {
            'direction' : part_of_title,
            'news' : news,
            'hidden_news' : hidden_news,
            'can_add' : can_add
        }
    )
#-------------------------------------------------------------------------------
def add_news(request, preview = False):
    """Create (or, with preview=True, preview) a news item.

    GET renders the empty form; POST validates and either previews or
    persists. Raises Http404 on any missing permission.
    """
    # FIXME
    # With a forged POST a user lacking the per-section permission might
    # still be able to add a news item.
    # soon(30.08.12, 11:06)
    # Or might not.
    user = request.user
    if not (
        user.has_perm('news.add_news') or \
        user.has_perm('news.add_schoolnews') or \
        user.has_perm('news.add_sitenews') or \
        user.has_perm('news.add_hidden') or \
        user.has_perm('news.add_only_hidden')
    ):
        # soon(02.09.12, 15:32)
        # FIXME
        # Show a proper page telling the user they lack permission.
        raise Http404()
    # FIXME:
    # Zero-length input is not rejected.
    if request.method == 'POST':
        errors = {'title': False, 'text_block': False}
        title = request.POST['input_title']
        if len(title) > 100:
            errors['title'] = True
        text_block = request.POST['input_text_block']
        if len(text_block) > 1000:
            errors['text_block'] = True
        news = None
        if not True in errors.values():
            schoolNews = 'school' in request.POST.keys()
            if schoolNews and not request.user.has_perm('news.add_schoolnews'):
                # soon(02.09.12, 14:02)
                # FIXME
                # Show a proper page telling the user they lack permission.
                raise Http404()
            siteNews = 'site' in request.POST.keys()
            if siteNews and not request.user.has_perm('news.add_sitenews'):
                # soon(02.09.12, 14:03)
                # FIXME
                # Show a proper page telling the user they lack permission.
                raise Http404()
            # Users with add_only_hidden always create hidden items.
            hidden = 'hidden' in request.POST.keys() or \
                user.has_perm('news.add_only_hidden')
            if hidden:
                if not (
                    user.has_perm('news.add_hidden') or \
                    user.has_perm('news.add_only_hidden')
                ):
                    # soon(02.09.12, 14:04)
                    # FIXME
                    # Show a proper page telling the user they lack permission.
                    raise Http404()
            news = News(
                title = title,
                text_block = text_block,
                author = user,
                schoolNews = schoolNews,
                siteNews = siteNews,
                hidden = hidden
            )
            if not preview:
                news.save()
                return redirect('/news/{0}'.format(news.id))
        # Validation failed or previewing: re-render the form with input.
        return direct_to_template(
            request,
            'news/add_news.hdt', {
                'news_title' : title,
                'news_text_block' : text_block,
                'news' : news,
                'errors' : errors
            }
        )
    else:
        return direct_to_template(request, 'news/add_news.hdt')
#-------------------------------------------------------------------------------
def news_page(request, id):
    """Show a single news item; hidden items require the view_hidden perm."""
    item = get_news_or_404(id)
    if item.hidden and not request.user.has_perm('news.view_hidden'):
        # soon(02.08.12, 14:17)
        # FIXME: show a proper "no permission" page.
        raise Http404()
    return direct_to_template(request, 'news/news_page.hdt', {'news': item})
#-------------------------------------------------------------------------------
def edit_news(request, id, preview = False):
    """Edit (or preview edits of) an existing news item.

    Mirrors add_news: permission-gated, POST validates and saves unless
    previewing; GET renders the edit form pre-filled from the item.
    """
    # FIXME
    # With a forged POST a user lacking the per-section permission might
    # still be able to edit a news item.
    # soon(30.08.12, 11:11)
    # See above.
    user = request.user
    if not (
        user.has_perm('news.add_news') or \
        user.has_perm('news.add_schoolnews') or \
        user.has_perm('news.add_sitenews') or \
        user.has_perm('news.add_hidden') or \
        user.has_perm('news.add_only_hidden')
    ):
        # soon(02.09.12, 15:32)
        # FIXME
        # Show a proper page telling the user they lack permission.
        raise Http404()
    news = get_news_or_404(id)
    if news.hidden:
        if not (
            user.has_perm('news.add_hidden') or \
            user.has_perm('news.add_only_hidden')
        ):
            # soon(02.09.12, 14:13)
            # FIXME
            # Show a proper page telling the user they lack permission.
            raise Http404()
    # FIXME:
    # Zero-length input is not rejected.
    if request.method == 'POST':
        errors = {'title': False, 'text_block': False}
        title = request.POST['input_title']
        if len(title) > 100:
            errors['title'] = True
        text_block = request.POST['input_text_block']
        if len(text_block) > 1000:
            errors['text_block'] = True
        if not True in errors.values():
            schoolNews = 'school' in request.POST.keys()
            if schoolNews and not user.has_perm('news.add_schoolnews'):
                # soon(02.09.12, 14:07)
                # FIXME
                # Show a proper page telling the user they lack permission.
                raise Http404()
            siteNews = 'site' in request.POST.keys()
            if siteNews and not user.has_perm('news.add_sitenews'):
                # soon(02.09.12, 14:07)
                # FIXME
                # Show a proper page telling the user they lack permission.
                raise Http404()
            # Users with add_only_hidden always produce hidden items.
            hidden = 'hidden' in request.POST.keys() or \
                user.has_perm('news.add_only_hidden')
            if hidden:
                if not (
                    user.has_perm('news.add_hidden') or \
                    user.has_perm('news.add_only_hidden')
                ):
                    # soon(02.09.12, 14:08)
                    # FIXME
                    # Show a proper page telling the user they lack permission.
                    raise Http404()
            news.title, news.text_block, news.schoolNews, news.siteNews = \
                title, text_block, schoolNews, siteNews
            news.hidden = hidden
            if not preview:
                news.save()
                return redirect('/news/{0}/'.format(id))
        # Validation failed or previewing: re-render with the user's input.
        return direct_to_template(
            request,
            'news/edit_news.hdt', {
                'news' : news,
                'news_title' : title,
                'news_text_block' : text_block,
                'errors' : errors
            }
        )
    else:
        return direct_to_template(
            request,
            'news/edit_news.hdt', {
                'news': news
            }
        )
#-------------------------------------------------------------------------------
|
{"/art/views.py": ["/art/models.py"], "/news/views.py": ["/news/models.py", "/news/utils.py"], "/berfmk/templatetags/get_all_forums.py": ["/forum/models.py"], "/news/utils.py": ["/news/models.py", "/berfmk/utils.py"], "/forum/views.py": ["/forum/models.py", "/forum/utils.py", "/berfmk/utils.py"], "/forum/utils.py": ["/forum/models.py"]}
|
24,159
|
soon/berfmk
|
refs/heads/master
|
/news/migrations/0005_auto__chg_field_news_create_date.py
|
# -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
    # South schema migration: News.create_date DateField -> DateTimeField.
    # Historical migration — do not edit the frozen `models` dict.
    def forwards(self, orm):
        # Changing field 'News.create_date'
        db.alter_column('news_news', 'create_date', self.gf('django.db.models.fields.DateTimeField')(auto_now_add=True))
    def backwards(self, orm):
        # Changing field 'News.create_date'
        db.alter_column('news_news', 'create_date', self.gf('django.db.models.fields.DateField')(auto_now_add=True))
    # Frozen ORM snapshot used by South to reconstruct model state.
    models = {
        'news.news': {
            'Meta': {'ordering': "['-create_date']", 'object_name': 'News'},
            'author': ('django.db.models.fields.CharField', [], {'max_length': '16'}),
            'create_date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'text_block': ('django.db.models.fields.TextField', [], {}),
            'title': ('django.db.models.fields.CharField', [], {'max_length': '100'})
        },
        'news.schoolnews': {
            'Meta': {'ordering': "['-create_date']", 'object_name': 'SchoolNews', '_ormbases': ['news.News']},
            'news_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['news.News']", 'unique': 'True', 'primary_key': 'True'})
        },
        'news.sitenews': {
            'Meta': {'ordering': "['-create_date']", 'object_name': 'SiteNews', '_ormbases': ['news.News']},
            'news_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['news.News']", 'unique': 'True', 'primary_key': 'True'})
        }
    }
    complete_apps = ['news']
|
{"/art/views.py": ["/art/models.py"], "/news/views.py": ["/news/models.py", "/news/utils.py"], "/berfmk/templatetags/get_all_forums.py": ["/forum/models.py"], "/news/utils.py": ["/news/models.py", "/berfmk/utils.py"], "/forum/views.py": ["/forum/models.py", "/forum/utils.py", "/berfmk/utils.py"], "/forum/utils.py": ["/forum/models.py"]}
|
24,160
|
soon/berfmk
|
refs/heads/master
|
/berfmk/templatetags/get_all_forums.py
|
# -*- coding: utf-8 -*-
#-------------------------------------------------------------------------------
from django import template
#-------------------------------------------------------------------------------
from forum.models import Forum
#-------------------------------------------------------------------------------
register = template.Library()
#-------------------------------------------------------------------------------
@register.simple_tag
def get_all_forums():
    """Return title/address pairs for every forum.

    Registered as a simple tag: the original registered this
    zero-argument function as a filter, but a template filter always
    receives at least the piped value, so it could never be invoked
    without a TypeError.
    """
    return Forum.objects.all().values('title', 'address')
#-------------------------------------------------------------------------------
|
{"/art/views.py": ["/art/models.py"], "/news/views.py": ["/news/models.py", "/news/utils.py"], "/berfmk/templatetags/get_all_forums.py": ["/forum/models.py"], "/news/utils.py": ["/news/models.py", "/berfmk/utils.py"], "/forum/views.py": ["/forum/models.py", "/forum/utils.py", "/berfmk/utils.py"], "/forum/utils.py": ["/forum/models.py"]}
|
24,161
|
soon/berfmk
|
refs/heads/master
|
/news/utils.py
|
# -*- coding: utf-8 -*-
#-------------------------------------------------------------------------------
from django.shortcuts import get_object_or_404
from django.http import Http404
#-------------------------------------------------------------------------------
from news.models import News
from berfmk.utils import get_number_from_param_or_404
#-------------------------------------------------------------------------------
def get_news_or_404(id):
    """Fetch the News with the given id, 404-ing on bad id or no match."""
    news_id = get_number_from_param_or_404(id)
    return get_object_or_404(News, id = news_id)
#-------------------------------------------------------------------------------
|
{"/art/views.py": ["/art/models.py"], "/news/views.py": ["/news/models.py", "/news/utils.py"], "/berfmk/templatetags/get_all_forums.py": ["/forum/models.py"], "/news/utils.py": ["/news/models.py", "/berfmk/utils.py"], "/forum/views.py": ["/forum/models.py", "/forum/utils.py", "/berfmk/utils.py"], "/forum/utils.py": ["/forum/models.py"]}
|
24,162
|
soon/berfmk
|
refs/heads/master
|
/accounts/models.py
|
# -*- coding: utf-8 -*-
#-------------------------------------------------------------------------------
from django.db import models
from django.db.models.signals import post_save
from django.contrib.auth.models import User, Group
#-------------------------------------------------------------------------------
class UserProfile(models.Model):
    # One profile per auth User; tracks the single Group the user belongs
    # to through this profile.
    user = models.OneToOneField(User)
    group = models.ForeignKey(Group)
    #---------------------------------------------------------------------------
    def set_group(self, group):
        """Move the user from the current group into *group* and persist."""
        self.group.user_set.remove(self.user)
        self.group = group
        self.group.user_set.add(self.user)
        self.save()
    #---------------------------------------------------------------------------
    def __unicode__(self):
        return self.user.username + ': ' + self.group.name
#-------------------------------------------------------------------------------
def create_user_profile(sender, instance, created, **kwargs):
    """post_save handler for User: give every new user a profile.

    New users are placed into the default group; that group (looked up by
    its Russian name) must already exist in the database.
    """
    if created:
        group = Group.objects.get(name = 'Пользователь')
        # The created profile is not needed afterwards; the original bound
        # it to an unused local variable.
        UserProfile.objects.create(user = instance, group = group)
        group.user_set.add(instance)
#-------------------------------------------------------------------------------
# Ensure every newly created User gets a matching UserProfile.
post_save.connect(create_user_profile, sender = User)
#-------------------------------------------------------------------------------
|
{"/art/views.py": ["/art/models.py"], "/news/views.py": ["/news/models.py", "/news/utils.py"], "/berfmk/templatetags/get_all_forums.py": ["/forum/models.py"], "/news/utils.py": ["/news/models.py", "/berfmk/utils.py"], "/forum/views.py": ["/forum/models.py", "/forum/utils.py", "/berfmk/utils.py"], "/forum/utils.py": ["/forum/models.py"]}
|
24,163
|
soon/berfmk
|
refs/heads/master
|
/forum/models.py
|
# -*- coding: utf-8 -*-
#-------------------------------------------------------------------------------
from django.db import models
from django.contrib.auth.models import User
#-------------------------------------------------------------------------------
# forum
# |
# section ---+
# | |
# sub_section |
# | |
# topic ----+
# |
# post
#-------------------------------------------------------------------------------
# Add fields title, address and order and simple __unicode__
# Add fields title, address and order and simple __unicode__
class Base_title_address(models.Model):
    # Abstract base: `address` is the URL slug component, `order` drives
    # the default sort of subclasses.
    title = models.CharField(max_length = 64)
    address = models.CharField(max_length = 32)
    order = models.PositiveIntegerField(default = 0)
    #---------------------------------------------------------------------------
    class Meta:
        abstract = True
        ordering = ['order']
    #---------------------------------------------------------------------------
    def __unicode__(self):
        return self.title
#-------------------------------------------------------------------------------
class Forum(Base_title_address):
    """A top-level forum; inherits title/address/order from the base."""
    def get_absolute_url(self):
        return '/forum/%s/' % self.address
#-------------------------------------------------------------------------------
class Section(Base_title_address):
    # A section inside a forum; topics may attach here directly.
    forum = models.ForeignKey(Forum)
    #---------------------------------------------------------------------------
    def get_absolute_url(self):
        return self.forum.get_absolute_url() + self.address + '/'
    #---------------------------------------------------------------------------
    def get_last_post(self):
        """Return the newest post across this section's topics, or None.

        NOTE(review): loads every topic and calls get_last_post() per
        topic (N+1 queries), and assumes each topic has at least one
        post — an empty topic would raise here. TODO confirm.
        """
        topics = self.topic_set.all()
        if not topics:
            return None
        else:
            topic = max(
                topics,
                key = lambda t: t.get_last_post().created,
            )
            return topic.get_last_post()
#-------------------------------------------------------------------------------
class Sub_section(Base_title_address):
    # A sub-section nested under a Section.
    section = models.ForeignKey(Section)
    #---------------------------------------------------------------------------
    def get_absolute_url(self):
        # Note: the URL skips the section segment — forum address then
        # sub-section address directly.
        return self.section.forum.get_absolute_url() + self.address + '/'
    #---------------------------------------------------------------------------
    def get_last_post(self):
        """Return the newest post across this sub-section's topics, or None.

        NOTE(review): same N+1 / empty-topic caveats as
        Section.get_last_post — TODO confirm.
        """
        topics = self.topic_set.all()
        if not topics:
            return None
        else:
            topic = max(
                topics,
                key = lambda t: t.get_last_post().created,
            )
            return topic.get_last_post()
#-------------------------------------------------------------------------------
class Topic(models.Model):
    # A discussion thread; lives in a Section and optionally in one of
    # its Sub_sections.
    title = models.CharField(max_length = 64)
    created = models.DateTimeField(auto_now_add = True)
    creator = models.ForeignKey(User)
    section = models.ForeignKey(Section)
    sub_section = models.ForeignKey(Sub_section, null = True)
    #---------------------------------------------------------------------------
    closed = models.BooleanField(default = False)
    visible = models.BooleanField(default = True)
    locked = models.BooleanField(default = False)
    #---------------------------------------------------------------------------
    # class Meta:
    #     ordering = ['-created']
    #---------------------------------------------------------------------------
    def __unicode__(self):
        return u'%s - %s at %s(%s)' % \
            (self.creator, self.title, self.created, self.sub_section)
    #---------------------------------------------------------------------------
    def get_absolute_url(self):
        # URL hangs off the sub-section when there is one, else the section.
        if(self.sub_section == None):
            return self.section.get_absolute_url() + str(self.id) + '/'
        else:
            return self.sub_section.get_absolute_url() + str(self.id) + '/'
    #---------------------------------------------------------------------------
    def get_last_post(self):
        """Return the newest post (Post.Meta orders by id, reversed here).

        NOTE(review): raises IndexError on a topic with no posts — TODO
        confirm topics are always created with an opening post.
        """
        return self.post_set.reverse()[0]
#-------------------------------------------------------------------------------
class Post(models.Model):
    """A single message inside a Topic, ordered oldest-first by id."""
    created = models.DateTimeField(auto_now_add = True)
    creator = models.ForeignKey(User)
    topic = models.ForeignKey(Topic)
    body = models.TextField(max_length = 1000)
    #---------------------------------------------------------------------------
    visible = models.BooleanField(default = True)
    #---------------------------------------------------------------------------
    class Meta:
        ordering = ['id']
    #---------------------------------------------------------------------------
    def __unicode__(self):
        return u'%s - %s...(%s)' % (self.creator, self.body[:10], self.topic)
    #---------------------------------------------------------------------------
    def get_absolute_url(self):
        # TODO soon(19.09.12): link to the exact page/anchor of this post;
        # for now just point at the topic.
        return self.topic.get_absolute_url()
    #---------------------------------------------------------------------------
    def is_first_post(self):
        """True when this post opened the topic.

        Fixed: the original called ``self.topic.post_set.objects.all()``
        (a RelatedManager has no ``.objects``, so this always raised
        AttributeError) and compared with ``is``, which tests object
        identity rather than equality.
        """
        return self.topic.post_set.all()[0] == self
#-------------------------------------------------------------------------------
|
{"/art/views.py": ["/art/models.py"], "/news/views.py": ["/news/models.py", "/news/utils.py"], "/berfmk/templatetags/get_all_forums.py": ["/forum/models.py"], "/news/utils.py": ["/news/models.py", "/berfmk/utils.py"], "/forum/views.py": ["/forum/models.py", "/forum/utils.py", "/berfmk/utils.py"], "/forum/utils.py": ["/forum/models.py"]}
|
24,164
|
soon/berfmk
|
refs/heads/master
|
/news/models.py
|
# -*- coding: utf-8 -*-
#-------------------------------------------------------------------------------
from django.db import models
from django.contrib.auth.models import User
#-------------------------------------------------------------------------------
class News(models.Model):
    """A news item; may target school and/or site, and may be hidden."""
    title = models.CharField(max_length = 100)
    text_block = models.TextField()
    created = models.DateTimeField(auto_now_add = True)
    last_change = models.DateTimeField(auto_now = True, null = True)
    author = models.ForeignKey(User)
    schoolNews = models.BooleanField(default = False)
    siteNews = models.BooleanField(default = False)
    hidden = models.BooleanField(default = False)
    #---------------------------------------------------------------------------
    class Meta:
        # Newest edits first.
        ordering = ['-last_change']
        #-----------------------------------------------------------------------
        # Fine-grained permissions checked in news.views.
        permissions = (
            ( 'add_sitenews', 'Can add sitenews' ),
            ( 'add_schoolnews', 'Can add schoolnews' ),
            ( 'change_sitenews', 'Can change sitenews' ),
            ( 'change_schoolnews', 'Can change schoolnews' ),
            ( 'delete_sitenews', 'Can delete sitenews' ),
            ( 'delete_schoolnews', 'Can delete schoolnews' ),
            ( 'add_only_hidden', 'Can add only hidden news' ),
            ('change_only_hidden', 'Can change only hidden news' ),
            ('delete_only_hidden', 'Can delete only hidden news' ),
            ( 'add_hidden', 'Can add hidden news' ),
            ( 'change_hidden', 'Can change hidden news' ),
            ( 'delete_hidden', 'Can delete hidden news' ),
            ( 'view_hidden', 'Can view hidden news' ),
        )
    #---------------------------------------------------------------------------
    def __unicode__(self):
        return self.title
    #---------------------------------------------------------------------------
    def get_absolute_url(self):
        # Fixed: the original interpolated the builtin `id` function
        # (yielding u'news/<built-in function id>/') instead of this
        # instance's primary key.
        return u'news/' + unicode(self.id) + u'/'
#-------------------------------------------------------------------------------
|
{"/art/views.py": ["/art/models.py"], "/news/views.py": ["/news/models.py", "/news/utils.py"], "/berfmk/templatetags/get_all_forums.py": ["/forum/models.py"], "/news/utils.py": ["/news/models.py", "/berfmk/utils.py"], "/forum/views.py": ["/forum/models.py", "/forum/utils.py", "/berfmk/utils.py"], "/forum/utils.py": ["/forum/models.py"]}
|
24,165
|
soon/berfmk
|
refs/heads/master
|
/news/urls.py
|
from django.conf.urls import patterns, include, url
# URL routes for the news app (old-style `patterns` syntax).
urlpatterns = patterns(
    '',
    # Listing with optional site/school prefix; defaults to page 1.
    url(r'^(?P<direction>|site/|school/)$', 'news.views.news', {'page': '1'}),
    url(
        r'^(?P<direction>|site/|school/)page/(?P<page>[1-9]\d{0,3})/$',
        'news.views.news'
    ),
    # Single item, add, preview, edit, edit-preview.
    url(r'^(?P<id>[1-9]\d{0,4})/$', 'news.views.news_page'),
    url(r'^add/$', 'news.views.add_news'),
    url(r'^preview/$', 'news.views.add_news', {'preview': True}),
    url(r'^(?P<id>[1-9]\d{0,4})/edit/$', 'news.views.edit_news'),
    url(
        r'^(?P<id>[1-9]\d{0,4})/edit/preview/$',
        'news.views.edit_news',
        {'preview': True}
    ),
)
|
{"/art/views.py": ["/art/models.py"], "/news/views.py": ["/news/models.py", "/news/utils.py"], "/berfmk/templatetags/get_all_forums.py": ["/forum/models.py"], "/news/utils.py": ["/news/models.py", "/berfmk/utils.py"], "/forum/views.py": ["/forum/models.py", "/forum/utils.py", "/berfmk/utils.py"], "/forum/utils.py": ["/forum/models.py"]}
|
24,166
|
soon/berfmk
|
refs/heads/master
|
/berfmk/templatetags/markdown_to_html.py
|
# -*- coding: utf-8 -*-
#-------------------------------------------------------------------------------
from django import template
from django.template.defaultfilters import stringfilter
#-------------------------------------------------------------------------------
from markdown import markdown
#-------------------------------------------------------------------------------
register = template.Library()
#-------------------------------------------------------------------------------
@register.filter(needs_autoescape = False)
@stringfilter
def markdown_to_html(text):
    """Render *text* (Markdown source) to an HTML string."""
    html = markdown(text)
    return html
#-------------------------------------------------------------------------------
|
{"/art/views.py": ["/art/models.py"], "/news/views.py": ["/news/models.py", "/news/utils.py"], "/berfmk/templatetags/get_all_forums.py": ["/forum/models.py"], "/news/utils.py": ["/news/models.py", "/berfmk/utils.py"], "/forum/views.py": ["/forum/models.py", "/forum/utils.py", "/berfmk/utils.py"], "/forum/utils.py": ["/forum/models.py"]}
|
24,167
|
soon/berfmk
|
refs/heads/master
|
/forum/views.py
|
# -*- coding: utf-8 -*-
#-------------------------------------------------------------------------------
from django.shortcuts import redirect, get_object_or_404
from django.views.generic.simple import direct_to_template
from django.conf import settings
from django.http import Http404
from django.db.models import Q
from django.core.exceptions import ObjectDoesNotExist
from django.core.paginator import Paginator, EmptyPage
#-------------------------------------------------------------------------------
from forum.models import Forum, Section, Sub_section, Topic
from forum.models import Post
from forum.utils import create_and_get_topic
from forum.utils import create_and_get_post
from berfmk.utils import get_number_from_param_or_404
#-------------------------------------------------------------------------------
# defs
# forums........................return all forums
# sections......................return all sections in forum
# topics.+---.(got Section).---.return all sub_sections and topics in section
# |
# +-.(got Sub_section).-.return all topics in sub_section
# posts.........................return all posts in topic
#-------------------------------------------------------------------------------
# templates
# forum_main....................show all forums
# forum.........................show all sections in forum
# section.......................show all sub-sections and topics in section
# sub_section...................show all topics in sub_section
# topic.........................show all posts in topic
#-------------------------------------------------------------------------------
def forums(request):
    """Main forum page: list every forum."""
    context = {'forums': Forum.objects.all()}
    return direct_to_template(request, 'forum/forum_main.hdt', context)
#-------------------------------------------------------------------------------
def sections(request, forum):
    """List all sections of the forum addressed by *forum* (404 if none)."""
    forum_obj = get_object_or_404(Forum, address = forum)
    context = {
        'sections' : forum_obj.section_set.all(),
        'forum' : forum_obj
    }
    return direct_to_template(request, 'forum/forum.hdt', context)
#-------------------------------------------------------------------------------
def topics(request, forum, section):
    """List topics of a section (plus its sub-sections) or a sub-section.

    *section* is first resolved as a Section of the forum; on failure it
    is treated as a Sub_section address.
    """
    f = get_object_or_404(Forum, address = forum)
    try:
        s = f.section_set.get(address = section)
        # Newest activity first. NOTE(review): assumes every topic has at
        # least one post (get_last_post() would raise otherwise) — TODO
        # confirm.
        topics = sorted(
            s.topic_set.filter(sub_section = None),
            key = lambda t: t.get_last_post().created,
            reverse = True
        )
        return direct_to_template(
            request,
            'forum/section.hdt', {
                'topics' : topics,
                'sub_sections' : s.sub_section_set.all(),
                'section' : s,
                'forum' : f
            }
        )
    except ObjectDoesNotExist:
        ss = get_object_or_404(Sub_section, address = section)
        topics = sorted(
            ss.topic_set.exclude(sub_section = None),
            key = lambda t: t.get_last_post().created,
            reverse = True
        )
        return direct_to_template(
            request,
            'forum/sub_section.hdt', {
                'topics' : topics,
                'sub_section' : ss,
                'forum' : f
            }
        )
#-------------------------------------------------------------------------------
def posts(request, forum, section, topic, page):
    """Show one topic's posts: the opening post plus a 10-per-page rest.

    Pages beyond the end fall back to the last page. *section* is
    resolved as a Section first, then as a Sub_section.
    """
    topic = get_number_from_param_or_404(topic)
    page = get_number_from_param_or_404(page)
    f = get_object_or_404(Forum, address = forum)
    try:
        s = f.section_set.get(address = section)
        t = get_object_or_404(
            s.topic_set.filter(sub_section = None),
            id = topic
        )
        p = t.post_set.all()
        # The opening post is pinned above the paginated remainder.
        first_post = p[0];
        paginator = Paginator(p[1:], 10)
        try:
            p = paginator.page(page)
        except EmptyPage:
            p = paginator.page(paginator.num_pages)
        # if p.count() <= (page - 1) * 10 and p.count():
        #     raise Http404()
        return direct_to_template(
            request,
            'forum/topic.hdt', {
                # 'posts' : p[:10] if page == 1 else \
                #     list(p[:1]) + \
                #     list(p[(page - 1) * 10:page * 10]),
                'first_post' : first_post,
                'posts' : p,
                'topic' : t,
                'section' : s,
                'forum' : f
                # 'page' : page
            }
        )
    except ObjectDoesNotExist:
        # Same flow as above, but for a topic living in a sub-section.
        ss = get_object_or_404(Sub_section, address = section)
        t = get_object_or_404(ss.topic_set.all(), id = topic)
        p = t.post_set.all()
        first_post = p[0];
        paginator = Paginator(p[1:], 10)
        try:
            p = paginator.page(page)
        except EmptyPage:
            p = paginator.page(paginator.num_pages)
        # if p.count() <= (page - 1) * 10 and p.count():
        #     raise Http404()
        return direct_to_template(
            request,
            'forum/topic.hdt', {
                # 'posts' : p[:10] if page == 1 else \
                #     list(p[:1]) + \
                #     list(p[(page - 1) * 10:page * 10]),
                'first_post' : first_post,
                'posts' : p,
                'topic' : t,
                'sub_section' : ss,
                'section' : ss.section,
                'forum' : f
                # 'page' : page
            }
        )
#-------------------------------------------------------------------------------
def add_topic(request, forum, section, preview):
    """Create (or preview) a new topic with its opening post.

    GET renders the empty form; POST validates, then either previews or
    persists via create_and_get_topic. Raises Http404 when the user
    lacks forum.add_topic.
    """
    if not request.user.has_perm('forum.add_topic'):
        raise Http404()
    f = get_object_or_404(Forum, address = forum)
    s, ss = None, None
    try:
        s = f.section_set.get(address = section)
    except ObjectDoesNotExist:
        # Fall back to a sub-section with that address.
        ss = get_object_or_404(Sub_section, address = section)
        s = ss.section
    if request.method == 'POST':
        errors = {'title': False, 'body': False}
        title = request.POST['input_title']
        if len(title) > 64:
            errors['title'] = True
        body = request.POST['input_body']
        if len(body) > 1000:
            errors['body'] = True
        if not True in errors.values():
            if not preview:
                try:
                    t = create_and_get_topic(
                        title = title,
                        creator = request.user,
                        section = s if ss is None else ss,
                        body = body
                    )
                except ValueError:
                    # soon(30.08.12, 12:42)
                    # FIXME: handle this situation properly.
                    raise Http404()
                return redirect(t.get_absolute_url())
            else:
                # Preview: build unsaved model instances for the template.
                t = Topic(
                    title = title,
                    creator = request.user,
                    section = s,
                    sub_section = ss,
                )
                p = Post(
                    creator = t.creator,
                    topic = t,
                    body = body
                )
                return direct_to_template(
                    request,
                    'forum/add_topic.hdt', {
                        'topic' : t,
                        'post' : p,
                        'forum' : f,
                        'section' : s
                    }
                )
        # Validation failed: re-render the form with the user's input.
        return direct_to_template(
            request,
            'forum/add_topic.hdt', {
                'topic_title' : title,
                # Fixed: the original referenced the undefined name
                # `text_block` here (NameError on any validation failure).
                'post_body' : body,
                'errors' : errors
            }
        )
    else:
        return direct_to_template(
            request,
            'forum/add_topic.hdt', {
                'forum' : f,
                'section' : s if ss is None else ss
            }
        )
#-------------------------------------------------------------------------------
def add_post(request, forum, section, topic, preview):
    """Create (or preview) a reply post in an existing topic.

    Only accepts POST (GET raises Http404). Raises Http404 when the user
    lacks forum.add_post or the topic cannot be found.
    """
    if not request.user.has_perm('forum.add_post'):
        raise Http404()
    topic = get_number_from_param_or_404(topic)
    f = get_object_or_404(Forum, address = forum)
    s, ss = None, None
    try:
        s = f.section_set.get(address = section)
    except ObjectDoesNotExist:
        # Fall back to a sub-section with that address.
        ss = get_object_or_404(Sub_section, address = section)
        s = ss.section
    t = get_object_or_404(s.topic_set.all(), id = topic)
    if request.method == 'POST':
        errors = {'body': False}
        body = request.POST['input_body']
        if len(body) > 1000:
            errors['body'] = True
        if not True in errors.values():
            if not preview:
                try:
                    p = create_and_get_post(
                        creator = request.user,
                        topic = t,
                        body = body
                    )
                except ValueError:
                    # soon(30.08.12, 12:42)
                    # FIXME: handle this situation properly.
                    raise Http404()
                return redirect(p.get_absolute_url())
            else:
                # Preview: build an unsaved Post for the template.
                p = Post(
                    creator = request.user,
                    topic = t,
                    body = body
                )
                return direct_to_template(
                    request,
                    'forum/add_post.hdt', {
                        'post' : p,
                        'topic' : t,
                        'forum' : f,
                        'section' : s
                    }
                )
        # Validation failed: re-render the form with the user's input.
        return direct_to_template(
            request,
            'forum/add_post.hdt', {
                # Fixed: the original referenced the undefined name
                # `text_block` here (NameError on any validation failure).
                'post_body' : body,
                'errors' : errors
            }
        )
    else:
        raise Http404()
#-------------------------------------------------------------------------------
|
{"/art/views.py": ["/art/models.py"], "/news/views.py": ["/news/models.py", "/news/utils.py"], "/berfmk/templatetags/get_all_forums.py": ["/forum/models.py"], "/news/utils.py": ["/news/models.py", "/berfmk/utils.py"], "/forum/views.py": ["/forum/models.py", "/forum/utils.py", "/berfmk/utils.py"], "/forum/utils.py": ["/forum/models.py"]}
|
24,168
|
soon/berfmk
|
refs/heads/master
|
/accounts/views.py
|
# -*- coding: utf-8 -*-
#-------------------------------------------------------------------------------
from django.views.generic.simple import direct_to_template
from django.shortcuts import redirect
from django.contrib import auth
from django.contrib.auth import logout as user_logout
from django.contrib.auth.models import User
#-------------------------------------------------------------------------------
import re
#-------------------------------------------------------------------------------
def login(request):
    """Process the login form; already-authenticated users go straight home.

    On bad credentials the login page is re-rendered with an error flag and
    the attempted username; on success the session is established and the
    user is redirected to the front page.
    """
    if request.user.is_authenticated():
        return redirect('/')
    if request.method != 'POST':
        return direct_to_template(request, 'user/login.hdt')
    username = request.POST['input_username']
    password = request.POST['input_password']
    user = auth.authenticate(username = username, password = password)
    if user is None:
        return direct_to_template(
            request,
            'user/login.hdt', {
                'error': True,
                'username': username
            }
        )
    auth.login(request, user)
    return redirect('/')
#-------------------------------------------------------------------------------
def logout(request):
    """End the current session (no-op for anonymous users), then go home."""
    current_user = request.user
    if current_user.is_authenticated():
        user_logout(request)
    return redirect('/')
#-------------------------------------------------------------------------------
def register(request):
    """Render and process the self-registration form.

    Validates username, email and password, checks uniqueness against the
    auth User table, then creates the account and logs the new user in.
    Authenticated visitors are redirected to the front page.

    Fixes: ``unicode.lower(email)`` raised TypeError whenever the POST value
    was a byte string (instance ``.lower()`` works for both str and
    unicode); uniqueness checks now run only for values that passed format
    validation, so an invalid email (reset to '') no longer matches users
    with an empty email field.
    """
    if request.user.is_authenticated():
        return redirect('/')
    errors = {
        'username': False,
        'username_is_already_used': False,
        'email': False,
        'email_is_already_used': False,
        'password': False,
        'password_repeat': False
    }
    if request.method == 'POST':
        username = request.POST['input_username']
        if not re.match(r'^[0-9A-Za-z_]{4,16}$', username):
            errors['username'] = True
            username = ''
        email = request.POST['input_email']
        # 75 chars is the auth.User.email column limit in this Django version.
        if not re.match(
            r'^[A-Za-z0-9\.\-_]{1,64}@[a-z0-9\.\-_]{1,255}\.[a-z]{2,4}$',
            email
        ) or len(email) > 75:
            errors['email'] = True
            email = ''
        password = request.POST['input_password']
        if not 4 <= len(password) <= 128:
            errors['password'] = True
        password_repeat = request.POST['input_password_repeat']
        if password != password_repeat:
            errors['password_repeat'] = True
        # Uniqueness checks only for values that passed format validation.
        if not errors['username'] and User.objects.filter(username = username):
            errors['username_is_already_used'] = True
        if not errors['email'] and User.objects.filter(email = email.lower()):
            errors['email_is_already_used'] = True
        if not True in errors.values():
            # NOTE(review): the account stores the email in its original case
            # even though uniqueness is checked lowercased -- confirm intended.
            User.objects.create_user(username, email, password)
            user = auth.authenticate(username = username, password = password)
            auth.login(request, user)
            return redirect('/')
        else:
            return direct_to_template(
                request,
                'user/register.hdt', {
                    'errors': errors,
                    'username': username,
                    'email': email
                }
            )
    else:
        return direct_to_template(request, 'user/register.hdt')
#-------------------------------------------------------------------------------
|
{"/art/views.py": ["/art/models.py"], "/news/views.py": ["/news/models.py", "/news/utils.py"], "/berfmk/templatetags/get_all_forums.py": ["/forum/models.py"], "/news/utils.py": ["/news/models.py", "/berfmk/utils.py"], "/forum/views.py": ["/forum/models.py", "/forum/utils.py", "/berfmk/utils.py"], "/forum/utils.py": ["/forum/models.py"]}
|
24,169
|
soon/berfmk
|
refs/heads/master
|
/forum/utils.py
|
# -*- coding: utf-8 -*-
#-------------------------------------------------------------------------------
from forum.models import Forum, Section, Sub_section, Topic, Post
#-------------------------------------------------------------------------------
def create_and_get_forum(title, address, order = 0):
    """Create and return a Forum with the given title, address and order.

    Raises ValueError when a forum with that title/address already exists.
    """
    forum, created = Forum.objects.get_or_create(title = title, address = address)
    if not created:
        raise ValueError('Forum with that address and title already exists')
    forum.order = order
    forum.save()
    return forum
#-------------------------------------------------------------------------------
def create_and_get_section(title, address, forum, order = 0):
    """Create and return a Section under *forum*.

    Raises ValueError for an invalid forum or a duplicate title/address pair.

    Bug fixed: the original assigned ``s.order = order`` with no ``order``
    name in scope, so every successful creation died with NameError; it is
    now a parameter defaulting to 0, mirroring create_and_get_forum.
    """
    if not Forum.objects.get(id = forum.id) == forum:
        raise ValueError('Invalid forum')
    s, created = Section.objects.get_or_create(
        title = title,
        address = address,
        forum = forum
    )
    if not created:
        raise ValueError('Section with that address and title already exists')
    s.order = order
    s.save()
    return s
#-------------------------------------------------------------------------------
def create_and_get_sub_section(title, address, section, order = 0):
    """Create and return a Sub_section under *section*.

    Raises ValueError for an invalid section or a duplicate title/address.

    Bug fixed: the original assigned ``ss.order = order`` with no ``order``
    name in scope (NameError on every successful creation); it is now a
    parameter defaulting to 0, mirroring create_and_get_forum.
    """
    if not Section.objects.get(id = section.id) == section:
        raise ValueError('Invalid section')
    ss, created = Sub_section.objects.get_or_create(
        title = title,
        address = address,
        section = section
    )
    if not created:
        raise ValueError(
            'Sub_section with that address and title already exists'
        )
    ss.order = order
    ss.save()
    return ss
#-------------------------------------------------------------------------------
def create_and_get_topic(title, creator, section, body):
    """Create a Topic plus its opening Post in a Section or Sub_section.

    *section* may be either model; for a Sub_section the parent Section is
    resolved automatically. Raises ValueError for an invalid section
    argument or a duplicate topic.

    Bug fixed: the final ``else`` branch built a ValueError but never raised
    it, so arbitrary ``section`` values fell through to topic creation.
    """
    sub_section = None
    if type(section) == Sub_section:
        if Sub_section.objects.get(id = section.id) == section:
            sub_section = section
            section = sub_section.section
    elif type(section) == Section:
        if not Section.objects.filter(id = section.id)[0] == section:
            raise ValueError('Invalid section or sub_section')
    else:
        raise ValueError('type(section) != Sub_section or Section. Get The Fuck Out')
    t, created = Topic.objects.get_or_create(
        title = title,
        creator = creator,
        section = section,
        sub_section = sub_section
    )
    if not created:
        raise ValueError('Topic with that address and title already exists')
    # Every topic starts with its body as the first post.
    Post.objects.create(creator = creator, topic = t, body = body)
    return t
#-------------------------------------------------------------------------------
def create_and_get_post(topic, creator, body):
    """Create and return a Post in *topic*.

    Raises ValueError for an invalid topic or an exact duplicate post
    (same topic, creator and body).
    """
    if not Topic.objects.get(id = topic.id) == topic:
        raise ValueError('Invalid topic')
    post, created = Post.objects.get_or_create(
        topic = topic,
        creator = creator,
        body = body
    )
    if not created:
        raise ValueError('This post already exists')
    # get_or_create already persisted the row; this extra save is a no-op
    # kept from the original implementation.
    post.save()
    return post
#-------------------------------------------------------------------------------
|
{"/art/views.py": ["/art/models.py"], "/news/views.py": ["/news/models.py", "/news/utils.py"], "/berfmk/templatetags/get_all_forums.py": ["/forum/models.py"], "/news/utils.py": ["/news/models.py", "/berfmk/utils.py"], "/forum/views.py": ["/forum/models.py", "/forum/utils.py", "/berfmk/utils.py"], "/forum/utils.py": ["/forum/models.py"]}
|
24,170
|
soon/berfmk
|
refs/heads/master
|
/news/migrations/0001_initial.py
|
# -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
    """South schema migration 0001: creates the News table and its two
    multi-table-inheritance children SchoolNews and SiteNews (each a
    OneToOne pointer onto News)."""

    def forwards(self, orm):
        # Adding model 'News'
        db.create_table('news_news', (
            ('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
            ('title', self.gf('django.db.models.fields.CharField')(max_length=45)),
            ('text_block', self.gf('django.db.models.fields.TextField')()),
            ('create_date', self.gf('django.db.models.fields.DateField')(auto_now_add=True, blank=True)),
        ))
        db.send_create_signal('news', ['News'])
        # Adding model 'SchoolNews' (child table holds only the parent link)
        db.create_table('news_schoolnews', (
            ('news_ptr', self.gf('django.db.models.fields.related.OneToOneField')(to=orm['news.News'], unique=True, primary_key=True)),
        ))
        db.send_create_signal('news', ['SchoolNews'])
        # Adding model 'SiteNews' (child table holds only the parent link)
        db.create_table('news_sitenews', (
            ('news_ptr', self.gf('django.db.models.fields.related.OneToOneField')(to=orm['news.News'], unique=True, primary_key=True)),
        ))
        db.send_create_signal('news', ['SiteNews'])

    def backwards(self, orm):
        # Deleting model 'News'
        db.delete_table('news_news')
        # Deleting model 'SchoolNews'
        db.delete_table('news_schoolnews')
        # Deleting model 'SiteNews'
        db.delete_table('news_sitenews')

    # Frozen ORM snapshot South uses to reconstruct model state at this point.
    models = {
        'news.news': {
            'Meta': {'object_name': 'News'},
            'create_date': ('django.db.models.fields.DateField', [], {'auto_now_add': 'True', 'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'text_block': ('django.db.models.fields.TextField', [], {}),
            'title': ('django.db.models.fields.CharField', [], {'max_length': '45'})
        },
        'news.schoolnews': {
            'Meta': {'object_name': 'SchoolNews', '_ormbases': ['news.News']},
            'news_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['news.News']", 'unique': 'True', 'primary_key': 'True'})
        },
        'news.sitenews': {
            'Meta': {'object_name': 'SiteNews', '_ormbases': ['news.News']},
            'news_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['news.News']", 'unique': 'True', 'primary_key': 'True'})
        }
    }

    complete_apps = ['news']
|
{"/art/views.py": ["/art/models.py"], "/news/views.py": ["/news/models.py", "/news/utils.py"], "/berfmk/templatetags/get_all_forums.py": ["/forum/models.py"], "/news/utils.py": ["/news/models.py", "/berfmk/utils.py"], "/forum/views.py": ["/forum/models.py", "/forum/utils.py", "/berfmk/utils.py"], "/forum/utils.py": ["/forum/models.py"]}
|
24,171
|
soon/berfmk
|
refs/heads/master
|
/forum/urls.py
|
from django.conf.urls import patterns, include, url

# Forum URL scheme: /<forum>/<section>/<topic>/..., where <forum> and
# <section> are address slugs (up to 32 non-slash chars; <section> may name
# a Section or a Sub_section) and <topic> is a numeric id. Pattern order
# matters: literal segments like 'add-topic' cannot collide with the
# numeric <topic> patterns.
urlpatterns = patterns(
    '',
    url(r'^$', 'forum.views.forums'),
    url(r'^(?P<forum>[^/]{1,32})/$', 'forum.views.sections'),
    url(
        r'^(?P<forum>[^/]{1,32})/(?P<section>[^/]{1,32})/$',
        'forum.views.topics'
    ),
    # Unpaginated topic view defaults to page 1.
    url(
        r'^(?P<forum>[^/]{1,32})/(?P<section>[^/]{1,32})/(?P<topic>\d{1,9})/$',
        'forum.views.posts',
        {'page': 1}
    ),
    # NOTE(review): this pattern has no trailing '$', so longer paths also
    # match -- confirm whether that is intended.
    url(
        r'^(?P<forum>[^/]{1,32})/(?P<section>[^/]{1,32})/(?P<topic>\d{1,9})' + \
        r'/page/(?P<page>[1-9]\d{0,3})/',
        'forum.views.posts'
    ),
    # 'preview' is passed as an extra kwarg to distinguish save vs preview.
    url(
        r'^(?P<forum>[^/]{1,32})/(?P<section>[^/]{1,32})/add-topic/$',
        'forum.views.add_topic',
        {'preview': False}
    ),
    url(
        r'^(?P<forum>[^/]{1,32})/(?P<section>[^/]{1,32})/add-topic/preview/$',
        'forum.views.add_topic',
        {'preview': True}
    ),
    url(
        r'^(?P<forum>[^/]{1,32})/(?P<section>[^/]{1,32})/(?P<topic>\d{1,9})' + \
        r'/add-post/$',
        'forum.views.add_post',
        {'preview': False}
    ),
    url(
        r'^(?P<forum>[^/]{1,32})/(?P<section>[^/]{1,32})/(?P<topic>\d{1,9})' + \
        r'/add-post/preview/$',
        'forum.views.add_post',
        {'preview': True}
    ),
)
|
{"/art/views.py": ["/art/models.py"], "/news/views.py": ["/news/models.py", "/news/utils.py"], "/berfmk/templatetags/get_all_forums.py": ["/forum/models.py"], "/news/utils.py": ["/news/models.py", "/berfmk/utils.py"], "/forum/views.py": ["/forum/models.py", "/forum/utils.py", "/berfmk/utils.py"], "/forum/utils.py": ["/forum/models.py"]}
|
24,172
|
soon/berfmk
|
refs/heads/master
|
/berfmk/views.py
|
# -*- coding: utf-8 -*-
#-------------------------------------------------------------------------------
from django.views.generic.simple import direct_to_template
from django.shortcuts import redirect
#-------------------------------------------------------------------------------
def login(request):
    """Show the login page; authenticated visitors are sent home instead."""
    if request.user.is_authenticated():
        return redirect('/')
    return direct_to_template(request, 'user/login.hdt')
#-------------------------------------------------------------------------------
def register(request):
    """Show the registration page; authenticated visitors are sent home."""
    if request.user.is_authenticated():
        return redirect('/')
    return direct_to_template(request, 'user/register.hdt')
#-------------------------------------------------------------------------------
|
{"/art/views.py": ["/art/models.py"], "/news/views.py": ["/news/models.py", "/news/utils.py"], "/berfmk/templatetags/get_all_forums.py": ["/forum/models.py"], "/news/utils.py": ["/news/models.py", "/berfmk/utils.py"], "/forum/views.py": ["/forum/models.py", "/forum/utils.py", "/berfmk/utils.py"], "/forum/utils.py": ["/forum/models.py"]}
|
24,173
|
soon/berfmk
|
refs/heads/master
|
/art/models.py
|
# -*- coding: utf-8 -*-
#-------------------------------------------------------------------------------
from django.db import models
from django.contrib.auth.models import User
#-------------------------------------------------------------------------------
class Art(models.Model):
    """A user-authored article entry, sorted descending by its short name."""
    name = models.CharField(max_length = 15)    # short identifier / sort key
    title = models.CharField(max_length = 100)  # human-readable headline
    text_block = models.TextField()             # full article body
    author = models.ForeignKey(User)
    #---------------------------------------------------------------------------
    def __unicode__(self):
        return self.name
    #---------------------------------------------------------------------------
    class Meta:
        # Default queryset order: descending by name.
        ordering = ['-name']
#-------------------------------------------------------------------------------
|
{"/art/views.py": ["/art/models.py"], "/news/views.py": ["/news/models.py", "/news/utils.py"], "/berfmk/templatetags/get_all_forums.py": ["/forum/models.py"], "/news/utils.py": ["/news/models.py", "/berfmk/utils.py"], "/forum/views.py": ["/forum/models.py", "/forum/utils.py", "/berfmk/utils.py"], "/forum/utils.py": ["/forum/models.py"]}
|
24,174
|
soon/berfmk
|
refs/heads/master
|
/berfmk/middleware/exception.py
|
# -*- coding: utf-8 -*-
#-------------------------------------------------------------------------------
class ExceptionLoggingMiddleware(object):
    """Django middleware that prints the traceback of any unhandled view
    exception to stdout (a debugging aid; Python 2 print statement).

    Returning None lets Django's normal exception handling proceed after
    the traceback has been printed.
    """
    def process_exception(self, request, exception):
        import traceback
        print traceback.format_exc()
|
{"/art/views.py": ["/art/models.py"], "/news/views.py": ["/news/models.py", "/news/utils.py"], "/berfmk/templatetags/get_all_forums.py": ["/forum/models.py"], "/news/utils.py": ["/news/models.py", "/berfmk/utils.py"], "/forum/views.py": ["/forum/models.py", "/forum/utils.py", "/berfmk/utils.py"], "/forum/utils.py": ["/forum/models.py"]}
|
24,175
|
ronitray95/rpc_ftp_example
|
refs/heads/master
|
/client.py
|
#!/usr/bin/env python3
import rpc

# Exercise the FTP-backed RPC server with a legacy mapped name ('sumNumber'
# -> sum), a direct handler name ('sum'), and a multi-operand mapped call
# ('multiply' -> product); each remote result is echoed to stdout in order.
for command in (('sumNumber', 5, 6, 7), ('sum', 5, 6), ('multiply', 5, 6, 7)):
    print(rpc.initFTP(*command))
|
{"/client.py": ["/rpc.py"]}
|
24,176
|
ronitray95/rpc_ftp_example
|
refs/heads/master
|
/ftp_server.py
|
#!/usr/bin/env python3
from pyftpdlib.servers import FTPServer
from pyftpdlib.handlers import FTPHandler
from pyftpdlib.authorizers import DummyAuthorizer
import os
import logging
# Maps legacy RPC command names (as sent by clients) to the module-level
# handler function names that MyHandler.on_file_received looks up via
# globals(). The 'old_method_name' entry appears to be a template/example
# pair left in by the author -- invoking it would fail the globals() lookup.
mappings = {'old_method_name': 'new_method_name',
            'sumNumber': 'sum',
            'multiply': 'product',
            'prod': 'product',
            'divide': 'divide'
            }
class MyHandler(FTPHandler):
    """FTP handler implementing the RPC protocol: when a client uploads a
    file, the first line of 'input.txt' is parsed as '<cmd> <args...>',
    dispatched to a module-level function (via the ``mappings`` alias table
    and globals()), and the result written to 'output.txt' for the client
    to download."""

    def on_connect(self):
        print("%s:%s connected" % (self.remote_ip, self.remote_port))

    def on_disconnect(self):
        # do something when client disconnects
        pass

    def on_login(self, username):
        # do something when user login
        pass

    def on_logout(self, username):
        # do something when user logs out
        pass

    def on_file_sent(self, file):
        # do something when a file has been sent
        pass

    def on_file_received(self, file):
        # RPC dispatch: parse the uploaded command file and run the handler.
        # NOTE(review): always reads 'input.txt' regardless of which file
        # was uploaded -- relies on the client using that exact name.
        op = ''
        with open('input.txt', 'r') as f:
            line = (f.readline()).strip().split()
            cmd = line[0]
            print('ACTUAL CMD', cmd)
            # Accept either an alias from ``mappings`` or a direct handler name.
            if cmd not in mappings.keys() and cmd not in mappings.values():
                op = 'Unknown function'
            elif cmd in mappings.keys():
                op = globals()[mappings[cmd]](line[1:])
            else:
                op = globals()[cmd](line[1:])
        # Handlers return strings, so the result can be written verbatim.
        with open('output.txt', 'w') as f:
            f.write(op)

    def on_incomplete_file_sent(self, file):
        # do something when a file is partially sent
        pass

    def on_incomplete_file_received(self, file):
        # remove partially uploaded files
        os.remove(file)
def sum(*args):
    """RPC handler: add the numeric tokens in args[0], returning the total
    as a string, or 'Bad number format' when any token fails float().

    Deliberately shadows the builtin ``sum`` -- the dispatcher looks this
    function up by name via globals().
    """
    print('Called sum', args[0])
    total = 0.0
    try:
        for token in args[0]:
            total += float(token)
    except Exception:
        return 'Bad number format'
    return str(total)
def product(*args):
    """RPC handler: multiply the numeric tokens in args[0], returning the
    result as a string, or 'Bad number format' on any malformed token."""
    print('Called product', args[0])
    result = 1.0
    try:
        for token in args[0]:
            result *= float(token)
    except Exception:
        return 'Bad number format'
    return str(result)
def divide(*args):
    """RPC handler: divide the first operand by the second, as a string.

    Returns 'Excess arguments' when more than two operands are supplied,
    and 'Bad number format' for malformed numbers, a missing operand, or
    division by zero (all of which surface here as exceptions).

    Bug fixed: the original tested ``len(args) > 2``, but the dispatcher
    always calls ``divide(operand_list)`` with exactly one positional
    argument, so the guard never fired and extra operands were silently
    ignored; the operand list itself must be length-checked.
    """
    print('Called divide', args[0])
    operands = args[0]
    if len(operands) > 2:
        return 'Excess arguments'
    try:
        return str(float(operands[0]) / float(operands[1]))
    except Exception:
        return 'Bad number format'
def main():
    """Configure and run the FTP server backing the RPC example.

    Serves the current directory on 127.0.0.1:5000 with a 'user'/'12345'
    account plus anonymous access, both with full permissions
    ('elradfmwMT'), using MyHandler for the RPC upload/download hooks.
    Blocks in serve_forever() until interrupted.
    """
    authorizer = DummyAuthorizer()
    authorizer.add_user('user', '12345', '.', perm='elradfmwMT')
    authorizer.add_anonymous('.', perm='elradfmwMT')
    handler = MyHandler
    handler.authorizer = authorizer
    # logging.basicConfig(level=logging.DEBUG)
    handler.banner = "pyftpdlib based ftpd ready."
    address = ('127.0.0.1', 5000)
    server = FTPServer(address, handler)
    # Connection limits: 256 total, 5 per client IP.
    server.max_cons = 256
    server.max_cons_per_ip = 5
    server.serve_forever()


if __name__ == '__main__':
    main()
|
{"/client.py": ["/rpc.py"]}
|
24,177
|
ronitray95/rpc_ftp_example
|
refs/heads/master
|
/rpc.py
|
#!/usr/bin/env python3
import os
from ftplib import *
# Address/port of the FTP-RPC server (ftp_server.py binds 127.0.0.1:5000).
FTP_URL = 'localhost'
FTP_PORT = 5000
def initFTP(cmd, *args):
    """Invoke remote command *cmd* with *args* over the FTP-based RPC scheme.

    Uploads '<cmd> <args...>' as the server's input.txt, downloads the
    server's output.txt, and returns its first line. The temporary files on
    both sides of the exchange (which share the working directory in this
    example) are removed before returning.

    Fixes: connects via the FTP_URL constant instead of a second hardcoded
    'localhost', and closes the download target file -- the original passed
    ``open('userop.txt', 'wb').write`` to retrbinary and leaked the handle.
    """
    ftp = FTP()
    ftp.connect(FTP_URL, FTP_PORT)
    resp = ftp.getwelcome()
    # print(resp)
    resp = ftp.login('user', '12345')
    # print(resp)
    fname = 'cmd.txt'
    with open(fname, 'w') as f:
        # Same wire format as the original: 'cmd a1 a2 ... ' (server splits
        # on whitespace, so the trailing space is harmless).
        s = ''
        for arg in args:
            s = s + str(arg) + ' '
        f.write(cmd + ' ' + s)
    with open(fname, 'rb') as f:
        ftp.storbinary('STOR input.txt', f)
    with open('userop.txt', 'wb') as out:
        ftp.retrbinary('RETR output.txt', out.write)
    resp = ftp.sendcmd("QUIT")
    # print(resp)
    with open('userop.txt', 'r') as f:
        result = f.readline()
    # Clean up both the client-side scratch files and the server-side pair
    # (the example server runs in the same directory).
    for leftover in ('cmd.txt', 'userop.txt', 'input.txt', 'output.txt'):
        os.remove(leftover)
    return result
|
{"/client.py": ["/rpc.py"]}
|
24,185
|
davidcGIThub/quadcopter_simulation
|
refs/heads/master
|
/Quad3DGraphics.py
|
import utils
import numpy as np
class Quad3DGraphics:
    """Matplotlib 3-D rendering of a quadcopter: a center marker, four arms
    and four propeller circles.

    The current pose is kept as a 3x3 rotation matrix plus a 3x1 translation
    vector (and a cached 4x4 homogeneous transform); the draw*() methods
    push the transformed geometry into Line3D artists pre-created on *ax*.
    """
    def __init__(self,ax,isTarget = False, x0=0,y0=0,z0=0,psi0=0):
        # Color scheme: green marks a target pose; black body with blue
        # front / red back props marks the vehicle itself.
        if isTarget:
            centerColor = 'g'
            armsColor = 'g'
            frontPropsColor = 'g'
            backPropsColor = 'y'
        else:
            centerColor = 'k'
            armsColor = 'k'
            frontPropsColor = 'b'
            backPropsColor = 'r'
        # Initial pose: position (x0, y0, z0) with yaw psi0 about z.
        self.translation = np.array([[x0],[y0],[z0]])
        self.rotation = utils.rotationZ(psi0)
        # One empty Line3D artist per drawable part; data is set in draw().
        self.center, = ax.plot([],[],[],lw=.5,color=centerColor , marker = 'o')
        self.arm1, = ax.plot([], [], [], lw=2, color=armsColor)
        self.arm2, = ax.plot([], [], [], lw=2, color=armsColor)
        self.arm3, = ax.plot([], [], [], lw=2, color=armsColor)
        self.arm4, = ax.plot([], [], [], lw=2, color=armsColor)
        self.prop1, = ax.plot([], [], [], lw=2, color=frontPropsColor)
        self.prop2, = ax.plot([], [], [], lw=2, color=frontPropsColor)
        self.prop3, = ax.plot([], [], [], lw=2, color=backPropsColor)
        self.prop4, = ax.plot([], [], [], lw=2, color=backPropsColor)
        # Body-frame geometry: prop centers sit at armLength along the
        # diagonals (hence the sqrt(2) normalization).
        self.propRadius = 0.1
        self.armLength = 0.3
        self.propOutline = self.generateCircle(self.propRadius)
        self.prop1Center = np.array([[self.armLength],[self.armLength],[0]])/np.sqrt(2)
        self.prop2Center = np.array([[self.armLength],[-self.armLength],[0]])/np.sqrt(2)
        self.prop3Center = np.array([[-self.armLength],[self.armLength],[0]])/np.sqrt(2)
        self.prop4Center = np.array([[-self.armLength],[-self.armLength],[0]])/np.sqrt(2)
        # Cache the 4x4 homogeneous transform [R t; 0 1].
        temp = np.concatenate((self.rotation,self.translation),1)
        self.homogeneousTransformation = np.concatenate( (temp , np.array([[0,0,0,1]])) , 0 )

    def getRotation(self):
        return self.rotation

    def getTranslation(self):
        return self.translation

    def getTransformation(self):
        return self.homogeneousTransformation

    def update(self, rotation,translation):
        """Apply an incremental pose: left-multiply the rotation, add the
        translation, and refresh the cached homogeneous transform."""
        self.rotation = np.dot(rotation,self.rotation)
        #self.rotation = utils.normalizeRotation(self.rotation)
        self.translation = translation + self.translation
        temp = np.concatenate((self.rotation,self.translation),1)
        self.homogeneousTransformation = np.concatenate( (temp , np.array([[0,0,0,1]])) , 0 )

    def draw(self):
        # Redraw all parts at the current pose.
        self.drawArms()
        self.drawProps()
        self.drawCenter()

    def drawCenter(self):
        # The center marker sits at the translation vector.
        self.center.set_xdata([self.translation[0,0]])
        self.center.set_ydata([self.translation[1,0]])
        self.center.set_3d_properties([self.translation[2,0]])

    def drawArms(self):
        # Each arm is a segment from the hub to a rotated prop center.
        point1arm1 = self.translation
        point2arm1 = np.dot(self.rotation,self.prop1Center) + self.translation
        self.arm1.set_data([point1arm1[0,0],point2arm1[0,0]] , [point1arm1[1,0],point2arm1[1,0]])
        self.arm1.set_3d_properties([point1arm1[2,0],point2arm1[2,0]])
        point1arm2 = self.translation
        point2arm2 = np.dot(self.rotation,self.prop2Center) + self.translation
        self.arm2.set_data([point1arm2[0,0],point2arm2[0,0]] , [point1arm2[1,0],point2arm2[1,0]])
        self.arm2.set_3d_properties([point1arm2[2,0],point2arm2[2,0]])
        point1arm3 = self.translation
        point2arm3 = np.dot(self.rotation,self.prop3Center) + self.translation
        self.arm3.set_data([point1arm3[0,0],point2arm3[0,0]] , [point1arm3[1,0],point2arm3[1,0]])
        self.arm3.set_3d_properties([point1arm3[2,0],point2arm3[2,0]])
        point1arm4 = self.translation
        point2arm4 = np.dot(self.rotation,self.prop4Center) + self.translation
        self.arm4.set_data([point1arm4[0,0],point2arm4[0,0]] , [point1arm4[1,0],point2arm4[1,0]])
        self.arm4.set_3d_properties([point1arm4[2,0],point2arm4[2,0]])

    def drawProps(self):
        # Rotate the circle template once, then offset it to each prop center.
        rotatedPropOutline = np.dot(self.rotation,self.propOutline)
        prop1Outline = np.dot(self.rotation,self.prop1Center) + self.translation + rotatedPropOutline
        prop2Outline = np.dot(self.rotation,self.prop2Center) + self.translation + rotatedPropOutline
        prop3Outline = np.dot(self.rotation,self.prop3Center) + self.translation + rotatedPropOutline
        prop4Outline = np.dot(self.rotation,self.prop4Center) + self.translation + rotatedPropOutline
        self.prop1.set_data(prop1Outline[0,:],prop1Outline[1,:])
        self.prop1.set_3d_properties(prop1Outline[2,:])
        self.prop2.set_data(prop2Outline[0,:],prop2Outline[1,:])
        self.prop2.set_3d_properties(prop2Outline[2,:])
        self.prop3.set_data(prop3Outline[0,:],prop3Outline[1,:])
        self.prop3.set_3d_properties(prop3Outline[2,:])
        self.prop4.set_data(prop4Outline[0,:],prop4Outline[1,:])
        self.prop4.set_3d_properties(prop4Outline[2,:])

    def generateCircle(self,radius):
        """Return a 3xN array of points on a circle of *radius* in the z=0
        body plane (N = 50 samples)."""
        circleResolution = 50
        twoPiRadianArray = np.linspace(-np.pi,np.pi,circleResolution)
        circleXdata = np.cos(twoPiRadianArray)*radius
        circleYdata = np.sin(twoPiRadianArray)*radius
        circleZdata = twoPiRadianArray * 0
        circleData = np.concatenate(([circleXdata],[circleYdata]),0)
        circleData = np.concatenate((circleData,[circleZdata]),0)
        return circleData
|
{"/Quad3DGraphics.py": ["/utils.py"], "/testing.py": ["/utils.py"], "/animation.py": ["/Quad3DGraphics.py", "/utils.py", "/QuadController.py"], "/QuadController.py": ["/utils.py"]}
|
24,186
|
davidcGIThub/quadcopter_simulation
|
refs/heads/master
|
/testing.py
|
import numpy as np
from scipy.linalg import logm, expm
import utils
# Sanity check: compare the closed-form SE(3) log in utils.logSE3 against
# scipy's general matrix logarithm on a composite rotation + translation.
phi = np.pi/4
theta = np.pi/3
psi = -np.pi/6
R = np.dot(np.dot(utils.rotationX(phi),utils.rotationY(theta)) , utils.rotationZ(psi))
t = np.array([[1],[-3],[.5]])
T = utils.toSE3Matrix(R,t)
A = logm(T)            # reference: scipy general matrix log
B = utils.logSE3(T)    # implementation under test
print("R: " , R)
print(" ")
print("t: " , t)
print(" ")
print("General Method: " , A)
print(" ")
print("Closed Form Solution: " , B)
# Earlier experiments with the Jacobian helpers, kept for reference:
# t = np.array([[1],[-.5],[1]])
# T = np.concatenate( (np.concatenate((R,t),1) , np.array([[0,0,0,1]])) , 0)
# C = utils.logSE3_(R,t)
# D = logm(T)
# JSO3l = utils.leftJacobianSO3(R)
# JSO3r = utils.rightJacobianS03(R)
# JSE3 = utils.leftJacobianSE3(R,t)
# print("C: " , C)
# print("D: " , D)
# print("JSO3l: ", JSO3l)
# print("JSO3r", JSO3r )
# print("JSE3: ", JSE3)
# N = utils.normalizeRotation(R)
# print("N: " , N)
|
{"/Quad3DGraphics.py": ["/utils.py"], "/testing.py": ["/utils.py"], "/animation.py": ["/Quad3DGraphics.py", "/utils.py", "/QuadController.py"], "/QuadController.py": ["/utils.py"]}
|
24,187
|
davidcGIThub/quadcopter_simulation
|
refs/heads/master
|
/utils.py
|
import numpy as np
def rotationX(phi):
    """Rotation matrix for a rotation of *phi* radians about the x-axis."""
    c, s = np.cos(phi), np.sin(phi)
    return np.array([
        [1.0, 0.0, 0.0],
        [0.0, c, -s],
        [0.0, s, c],
    ])
def rotationY(theta):
    """Rotation matrix for a rotation of *theta* radians about the y-axis."""
    c, s = np.cos(theta), np.sin(theta)
    return np.array([
        [c, 0.0, s],
        [0.0, 1.0, 0.0],
        [-s, 0.0, c],
    ])
def rotationZ(psi):
    """Rotation matrix for a rotation of *psi* radians about the z-axis."""
    c, s = np.cos(psi), np.sin(psi)
    return np.array([
        [c, -s, 0.0],
        [s, c, 0.0],
        [0.0, 0.0, 1.0],
    ])
def vectorToSkew(vector):
    """Map a 3-vector to its skew-symmetric matrix S, with S @ u == cross(vector, u)."""
    x, y, z = vector.flatten()
    zero = x * 0  # keeps the input's dtype, as the original's int literal did
    return np.array([[zero, -z, y],
                     [z, zero, -x],
                     [-y, x, zero]])
def skewToVector(skewMatrix):
    """Extract the 3x1 column vector from a skew-symmetric matrix."""
    components = (skewMatrix[2, 1], skewMatrix[0, 2], skewMatrix[1, 0])
    return np.array(components).reshape(3, 1)
def logSO3(R):
    """Matrix logarithm of a rotation R in SO(3).

    Returns the skew-symmetric matrix theta*K whose exponential is R.

    Bug fixed: the general formula divides by sin(theta), which is 0/0 for
    the identity rotation (theta == 0); that case now returns the zero
    matrix, which is the correct log of the identity. (theta near pi, where
    sin(theta) also vanishes, is still handled by the general formula, as in
    the original.)
    """
    theta = np.arccos((np.trace(R) - 1) / 2)
    if np.isclose(theta, 0.0):
        return np.zeros((3, 3))
    return theta * (R - np.transpose(R)) / (2 * np.sin(theta))
def logSE3_(R, t):
    """se(3) logarithm from a rotation R (3x3) and translation t (3x1).

    Returns the 4x4 matrix [[w_skew, t_], [0, 0]] where w_skew is the SO(3)
    log of R and t_ = V^-1 t with V the standard left-Jacobian-style factor.

    Bug fixed: the theta == 0 branch set ``w_skew = np.eye(3)``, but the log
    of the identity rotation is the ZERO matrix; the eye(3) made the result
    fail expm round-trips for pure translations. The zero test also uses
    np.isclose to avoid the numerically unstable general formula for
    near-identity rotations.
    """
    theta = np.arccos((np.trace(R) - 1) / 2)
    if np.isclose(theta, 0.0):
        w_skew = np.zeros((3, 3))  # log(I) = 0; V reduces to the identity
        t_ = t
    else:
        w_skew = theta * (R - np.transpose(R)) / (2 * np.sin(theta))
        w_skew_squared = np.dot(w_skew, w_skew)
        V = np.eye(3) + (1 - np.cos(theta)) / (theta * theta) * w_skew \
            + (theta - np.sin(theta)) / (theta * theta * theta) * w_skew_squared
        t_ = np.linalg.solve(V, t)
    X = np.concatenate((w_skew, t_), 1)
    X = np.concatenate((X, np.array([[0, 0, 0, 0]])), 0)
    return X
def logSE3(T):
    """se(3) logarithm of a 4x4 homogeneous transform T."""
    rotation = T[0:3, 0:3]
    translation = T[0:3, 3].reshape(3, 1)
    return logSE3_(rotation, translation)
def se3toCartesian(T):
    """Flatten an se(3) element to a 6x1 vector: translation on top of the
    rotation vector."""
    angular = skewToVector(T[0:3, 0:3])
    linear = T[0:3, 3].reshape(3, 1)
    return np.concatenate((linear, angular), 0)
def toSE3Matrix(R, t):
    """Assemble the 4x4 homogeneous transform [R t; 0 1] from R (3x3) and t (3x1)."""
    bottom = np.array([[0, 0, 0, 1]])
    return np.concatenate((np.concatenate((R, t), 1), bottom), 0)
def SE3Transformation(R, t, points):
    """Apply the rigid transform (R, t) to a 3xN array of *points*.

    Bugs fixed (the original raised ValueError on every call):
    - the bottom row [0,0,0,1] was concatenated as a 1-D list onto a 2-D
      array (shape mismatch),
    - the homogeneous row of ones was built 1-D (from ``points[1]``'s size)
      and likewise concatenated onto the 2-D points array; it is now a
      (1, N) row sized from np.size(points, 1).
    """
    A = np.concatenate((R, t), 1)
    A = np.concatenate((A, np.array([[0, 0, 0, 1]])), 0)
    bottomRow = np.ones((1, np.size(points, 1)))
    pointsR4 = np.concatenate((points, bottomRow), 0)
    newPointsR4 = np.dot(A, pointsR4)
    newPoints = newPointsR4[0:3, :]
    return newPoints
def leftJacobianSO3(R):
    """Left Jacobian of SO(3) evaluated at rotation R (closed-form series).

    Undefined at theta == 0 (the log divides by sin(theta)), matching the
    original implementation.
    """
    theta = np.arccos((np.trace(R) - 1) / 2)
    W = logSO3(R)
    WW = np.dot(W, W)
    coeffLinear = (1 - np.cos(theta)) / (theta * theta)
    coeffQuadratic = (theta - np.sin(theta)) / (theta * theta * theta)
    return np.eye(3) + coeffLinear * W + coeffQuadratic * WW
def leftJacobianInverseS03(R):
    """Inverse of the left Jacobian of SO(3) at rotation R.

    Closed form: I - W/2 + (1/theta^2 - (1+cos)/(2*theta*sin)) * W^2,
    with W the SO(3) log of R.

    Bug fixed: the original used '+' instead of '-' for the W^2 coefficient,
    so invJl . Jl != I. The log is computed inline (identical to logSO3 for
    theta != 0). Still undefined at theta == 0, as before.
    """
    theta = np.arccos((np.trace(R) - 1) / 2)
    THETA_skew = theta * (R - np.transpose(R)) / (2 * np.sin(theta))
    THETA_skew_squared = np.dot(THETA_skew, THETA_skew)
    coeff = 1 / (theta * theta) - (1 + np.cos(theta)) / (2 * theta * np.sin(theta))
    invJl = np.eye(3) - THETA_skew / 2 + coeff * THETA_skew_squared
    return invJl
def rightJacobianS03(R):
    """Right Jacobian of SO(3) at rotation R: Jr(theta) = Jl(-theta), i.e.
    the left-Jacobian series with the sign of the linear term flipped.

    Undefined at theta == 0 (the log divides by sin(theta)), as before.
    """
    theta = np.arccos((np.trace(R) - 1) / 2)
    W = logSO3(R)
    WW = np.dot(W, W)
    coeffLinear = (1 - np.cos(theta)) / (theta * theta)
    coeffQuadratic = (theta - np.sin(theta)) / (theta * theta * theta)
    return np.eye(3) - coeffLinear * W + coeffQuadratic * WW
def rightJacobianInverseS03(R):
    """Inverse of the right Jacobian of SO(3) at rotation R.

    Closed form: I + W/2 + (1/theta^2 - (1+cos)/(2*theta*sin)) * W^2.

    Bug fixed: a misplaced parenthesis in the original divided only
    cos(theta) by (2*theta*sin(theta)) -- i.e. it computed
    1/theta^2 - 1 - cos/(2*theta*sin) -- disagreeing with
    leftJacobianInverseS03's coefficient and breaking invJr . Jr == I.
    The log is computed inline (identical to logSO3 for theta != 0).
    """
    theta = np.arccos((np.trace(R) - 1) / 2)
    THETA_skew = theta * (R - np.transpose(R)) / (2 * np.sin(theta))
    THETA_skew_squared = np.dot(THETA_skew, THETA_skew)
    coeff = 1 / (theta * theta) - (1 + np.cos(theta)) / (2 * theta * np.sin(theta))
    invJr = np.eye(3) + THETA_skew / 2 + coeff * THETA_skew_squared
    return invJr
def leftJacobianSE3(R, P):
    """Left Jacobian of SE(3) for rotation R and translation(-like) vector P.

    Built as the 6x6 block matrix [[Jl, Q], [0, Jl]] where Jl is the SO(3)
    left Jacobian and Q couples rotation and translation; the Q terms follow
    the standard closed-form series in the skew matrices of the log of R
    and of P. Returns the 6x6 identity at theta == 0.
    """
    P_skew = vectorToSkew(P)
    theta = np.arccos((np.trace(R) - 1) / 2)
    if theta == 0:
        # At the identity rotation the Jacobian degenerates to I6.
        Jl = np.eye(6)
    else:
        THETA_skew = logSO3(R)
        THETAxP = np.dot(THETA_skew, P_skew)
        PxTHETA = np.dot(P_skew, THETA_skew)
        THETA_skew_squared = np.dot(THETA_skew, THETA_skew)
        # Closed-form series for the coupling block Q(theta, P).
        Qterm1 = P_skew / 2
        Qterm2 = (theta - np.sin(theta)) / (theta ** 3) * (THETAxP + PxTHETA + np.dot(THETAxP, THETA_skew))
        Qterm3 = (1 - theta * theta / 2 - np.cos(theta)) / (theta ** 4) \
            * (np.dot(THETA_skew, THETAxP) + np.dot(PxTHETA, THETA_skew) - 3 * np.dot(THETAxP, THETA_skew))
        Qterm4 = 1 / 2 * ((1 - theta * theta / 2 - np.cos(theta)) / theta ** 4 - 3 * (theta - np.sin(theta) - (theta ** 3) / 6) / theta ** 5) \
            * (np.dot(THETAxP, THETA_skew_squared) + np.dot(THETA_skew_squared, PxTHETA))
        Q = Qterm1 + Qterm2 - Qterm3 - Qterm4
        JlSO3 = leftJacobianSO3(R)
        # Assemble [[Jl, Q], [0, Jl]].
        Jl_top = np.concatenate((JlSO3, Q), 1)
        Jl_bottom = np.concatenate((np.zeros((3, 3)), JlSO3), 1)
        Jl = np.concatenate((Jl_top, Jl_bottom), 0)
    return Jl
def rightJacobianSE3(R, P):
    """Right Jacobian of SE(3): the left-Jacobian construction with the
    signs of theta, the log, and P's skew flipped (Jr(x) = Jl(-x)).

    NOTE(review): unlike leftJacobianSE3 there is no theta == 0 guard here,
    so this divides by zero at the identity rotation -- confirm whether
    callers can hit that case.
    """
    P_skew = -vectorToSkew(P)
    theta = -np.arccos((np.trace(R) - 1) / 2)
    THETA_skew = -logSO3(R)
    THETAxP = np.dot(THETA_skew, P_skew)
    PxTHETA = np.dot(P_skew, THETA_skew)
    THETA_skew_squared = np.dot(THETA_skew, THETA_skew)
    # Same closed-form series for the coupling block Q as in leftJacobianSE3.
    Qterm1 = P_skew / 2
    Qterm2 = (theta - np.sin(theta)) / (theta ** 3) * (THETAxP + PxTHETA + np.dot(THETAxP, THETA_skew))
    Qterm3 = (1 - theta * theta / 2 - np.cos(theta)) / (theta ** 4) \
        * (np.dot(THETA_skew, THETAxP) + np.dot(PxTHETA, THETA_skew) - 3 * np.dot(THETAxP, THETA_skew))
    Qterm4 = 1 / 2 * ((1 - theta * theta / 2 - np.cos(theta)) / theta ** 4 - 3 * (theta - np.sin(theta) - (theta ** 3) / 6) / theta ** 5) \
        * (np.dot(THETAxP, THETA_skew_squared) + np.dot(THETA_skew_squared, PxTHETA))
    Q = Qterm1 + Qterm2 - Qterm3 - Qterm4
    JrSO3 = rightJacobianS03(R)
    # Assemble [[Jr, Q], [0, Jr]].
    Jr_top = np.concatenate((JrSO3, Q), 1)
    Jr_bottom = np.concatenate((np.zeros((3, 3)), JrSO3), 1)
    Jr = np.concatenate((Jr_top, Jr_bottom), 0)
    return Jr
def normalizeRotation(R):
    """Re-orthonormalize a drifting rotation matrix via cross products,
    anchored on its third column (z axis)."""
    z_axis = R[0:3, 2]
    y_axis = R[0:3, 1]
    # Rebuild x and y so the three columns are mutually orthogonal.
    x_axis = np.cross(y_axis, z_axis)
    y_axis = np.cross(z_axis, x_axis)
    x_axis = x_axis / np.linalg.norm(x_axis)
    y_axis = y_axis / np.linalg.norm(y_axis)
    z_axis = z_axis / np.linalg.norm(z_axis)
    return np.stack((x_axis, y_axis, z_axis), axis=1)
def isclose(x, y, rtol=1.e-5, atol=1.e-8):
    """Float comparison: True when |x - y| <= atol + rtol * |y|
    (asymmetric in y, like numpy.isclose's default)."""
    tolerance = atol + rtol * abs(y)
    return abs(x - y) <= tolerance
|
{"/Quad3DGraphics.py": ["/utils.py"], "/testing.py": ["/utils.py"], "/animation.py": ["/Quad3DGraphics.py", "/utils.py", "/QuadController.py"], "/QuadController.py": ["/utils.py"]}
|
24,188
|
davidcGIThub/quadcopter_simulation
|
refs/heads/master
|
/exampleAnimation3.py
|
"""
A simple example of an animated plot... In 3D!
"""
import numpy as np
import matplotlib.pyplot as plt
import mpl_toolkits.mplot3d.axes3d as p3
import matplotlib.animation as animation
# Attaching 3D axis to the figure
fig = plt.figure()
ax = p3.Axes3D(fig)
line1, = ax.plot([], [], [], lw=2, color='b')
target1, = ax.plot([],[],[],lw=2,color='r' , marker = 'o')
circle1, = ax.plot([],[],[],lw=2,color='g')
radius = 1
circleResolution = 50
twoPiRadianArray = np.linspace(-np.pi,np.pi,circleResolution)
circleXdata = np.cos(twoPiRadianArray)*radius
circleYdata = np.sin(twoPiRadianArray)*radius
circleZdata = np.sin(twoPiRadianArray*5)/2
def update_line(num,line,target,circle):
target.set_xdata([2])
target.set_ydata([3])
target.set_3d_properties([1])
line.set_data([0,1],[0,0])
line.set_3d_properties([0,0])
circle.set_data(circleXdata,circleYdata)
circle.set_3d_properties(circleZdata)
# Setting the axes properties
ax.set_xlim3d([-5.0, 5.0])
ax.set_xlabel('X')
ax.set_ylim3d([-5.0, 5.0])
ax.set_ylabel('Y')
ax.set_zlim3d([-5.0, 5.0])
ax.set_zlabel('Z')
ax.set_title('3D Test')
# Creating the Animation object
delayBetweenFrames = 50
frames = 25
line_ani = animation.FuncAnimation(fig, update_line, fargs=[line1,target1,circle1] , interval=delayBetweenFrames, blit=False)
plt.show()
|
{"/Quad3DGraphics.py": ["/utils.py"], "/testing.py": ["/utils.py"], "/animation.py": ["/Quad3DGraphics.py", "/utils.py", "/QuadController.py"], "/QuadController.py": ["/utils.py"]}
|
24,189
|
davidcGIThub/quadcopter_simulation
|
refs/heads/master
|
/animation.py
|
"""
A simple example of an animated plot... In 3D!
"""
import numpy as np
import matplotlib.pyplot as plt
import mpl_toolkits.mplot3d.axes3d as p3
import matplotlib.animation as animation
from Quad3DGraphics import Quad3DGraphics
import utils
from QuadController import QuadController
# Attaching 3D axis to the figure
fig = plt.figure()
ax = p3.Axes3D(fig)
quad = Quad3DGraphics(ax)
kl = .5
ka = .2
controller = QuadController(kl,ka)
#target
xd = 2
yd = 3
zd = 4
psid = np.pi
target = Quad3DGraphics(ax,True,xd,yd,zd,psid)
def update_line(num,quad,controller,target,dt):
xc = quad.getTransformation()
xd = target.getTransformation()
V_body = controller.computeDesiredVelocities(xc,xd)
[rotation,translation] = controller.kinematicPropagation(V_body,xc,dt)
quad.update(rotation,translation)
quad.draw()
target.draw()
# Setting the axes properties
ax.set_xlim3d([-5.0, 5.0])
ax.set_xlabel('X')
ax.set_ylim3d([-5.0, 5.0])
ax.set_ylabel('Y')
ax.set_zlim3d([0.0, 10.0])
ax.set_zlabel('Z')
ax.set_title('3D Test')
# Creating the Animation object
delayBetweenFrames_ms = 50
dt = delayBetweenFrames_ms / 1000
frames = 25
line_ani = animation.FuncAnimation(fig, update_line, fargs=[quad,controller,target,dt] , interval=delayBetweenFrames_ms, blit=False)
plt.show()
|
{"/Quad3DGraphics.py": ["/utils.py"], "/testing.py": ["/utils.py"], "/animation.py": ["/Quad3DGraphics.py", "/utils.py", "/QuadController.py"], "/QuadController.py": ["/utils.py"]}
|
24,190
|
davidcGIThub/quadcopter_simulation
|
refs/heads/master
|
/params.py
|
# parameters file
# Quadcopter arm length (hub to rotor), same value as Quad3DGraphics uses.
armLength = 0.3
|
{"/Quad3DGraphics.py": ["/utils.py"], "/testing.py": ["/utils.py"], "/animation.py": ["/Quad3DGraphics.py", "/utils.py", "/QuadController.py"], "/QuadController.py": ["/utils.py"]}
|
24,191
|
davidcGIThub/quadcopter_simulation
|
refs/heads/master
|
/QuadController.py
|
import utils
import numpy as np


class QuadController:
    """Proportional pose controller on SE(3) plus a first-order kinematic
    integrator for the quadcopter animation."""

    def __init__(self, kl, ka):
        # Proportional gain applied to the full pose-error twist.
        self.kl = kl
        # Bug fixed: ``ka`` was accepted but silently discarded; it is now
        # stored. computeDesiredVelocities still applies only ``kl`` to the
        # whole twist, exactly as before, so existing behavior is unchanged.
        self.ka = ka

    def computeDesiredVelocities(self, xc, xd):
        """Body-frame twist (6x1, [v; w]) steering current pose xc toward
        desired pose xd (both 4x4 homogeneous transforms)."""
        alpha = np.linalg.solve(xc, xd)   # relative transform xc^-1 * xd
        E = utils.logSE3(alpha)           # se(3) error element
        e = utils.se3toCartesian(E)       # stacked [translation; rotation] error
        Jl = utils.leftJacobianSE3(E[0:3, 0:3], np.array([[E[0, 3]], [E[1, 3]], [E[2, 3]]]))
        u = self.kl * np.dot(Jl, e)
        return u

    def kinematicPropagation(self, V_body, xc, dt):
        """Integrate the body twist over one step of *dt* seconds.

        Returns [rotation, translation]: the incremental rotation (first-order
        Euler approximation I + dt*[w]x) and translation, both expressed in
        the local/world frame of pose xc.
        """
        currentRotation = xc[0:3, 0:3]
        v_body = np.array([[V_body[0, 0]], [V_body[1, 0]], [V_body[2, 0]]])
        w_body = np.array([[V_body[3, 0]], [V_body[4, 0]], [V_body[5, 0]]])
        Rbody2local = currentRotation
        v_local = np.dot(Rbody2local, v_body)
        w_local = np.dot(Rbody2local, w_body)
        translation = v_local * dt
        rotation = dt * utils.vectorToSkew(w_local) + np.eye(3)
        return [rotation, translation]
|
{"/Quad3DGraphics.py": ["/utils.py"], "/testing.py": ["/utils.py"], "/animation.py": ["/Quad3DGraphics.py", "/utils.py", "/QuadController.py"], "/QuadController.py": ["/utils.py"]}
|
24,192
|
davidcGIThub/quadcopter_simulation
|
refs/heads/master
|
/exampleAnimation2.py
|
import matplotlib.pyplot as plt
import numpy as np
from matplotlib.animation import FuncAnimation
# One y-value per animation frame for the moving end of the segment.
list_var_points = (1, 5, 4, 9, 8, 2, 6, 5, 2, 1, 9, 7, 10)
fig, ax = plt.subplots()
# Fixed anchor point of the line segment.
xfixdata, yfixdata = 14, 8
# x of the moving end; its y is taken from list_var_points each frame.
xdata, ydata = 5, None
ln, = plt.plot([], [], 'ro-', animated=True)
plt.plot([xfixdata], [yfixdata], 'bo', ms=10)
def init():
    # Called once by FuncAnimation to establish the axes limits.
    ax.set_xlim(0, 15)
    ax.set_ylim(0, 15)
    return ln,
def update(frame):
    # Redraw the segment from the fixed point to this frame's data point.
    ydata = list_var_points[frame]
    ln.set_data([xfixdata,xdata], [yfixdata,ydata])
    return ln,
ani = FuncAnimation(fig, update, frames=range(len(list_var_points)),
                    init_func=init, blit=True)
plt.show()
|
{"/Quad3DGraphics.py": ["/utils.py"], "/testing.py": ["/utils.py"], "/animation.py": ["/Quad3DGraphics.py", "/utils.py", "/QuadController.py"], "/QuadController.py": ["/utils.py"]}
|
24,197
|
natalie-woerle/Weather-Visualizer
|
refs/heads/main
|
/visualization-GUI.py
|
import tkinter
from tkinter import *
import os
from datetime import datetime
from parse_web import WeatherParser
import generate_sample as gen
import plotter
root = Tk()
root.wm_geometry("1000x800")
root.resizable(False, False)
root.title("CSV Visualizer")
class App:
    """Thin wrapper that builds the main GUI frame inside *master*."""
    def __init__(self,master):
        self.master = master
        gui = Wetterdienst(self.master)
class Wetterdienst:
    """Main weather GUI: sample-creation button, CSV list, and plot area."""
    def __init__(self,parent):
        self.parent = parent
        self.parser = WeatherParser()  # NOTE(review): performs network I/O on construction
        # ----
        self.upper = Frame(self.parent)
        self.lower_left = Frame(self.parent)
        self.lower_right = Frame(self.parent)
        self.plotter = plotter.Plotter(self.lower_right)
        self.b_create = Button(self.upper,text="Create Sample", command=self.press_create)
        self.file_view = Listbox(self.lower_left, width=15, height=24, font=("Helvetica", 12), selectmode=SINGLE)
        self.scrollbar = Scrollbar(self.lower_left, orient="vertical")
        self.scrollbar.config(command=self.file_view.yview)
        self.file_view.config(yscrollcommand=self.scrollbar.set)
        self.upper.grid(row="0")
        self.b_create.grid(row="0",column="0")
        self.lower_left.grid(row="1",column="0")
        self.file_view.pack(side=LEFT)
        self.scrollbar.pack(side=RIGHT, fill="y")
        self.lower_right.grid(row="1",column="1")
        # Double-click a filename to plot that day's data.
        self.file_view.bind("<Double-Button>", lambda event: self.plot_csv(event))
        self.update_textbox()
    def plot_csv(self, event):
        """Clear the canvas and plot the CSV currently anchored in the listbox."""
        self.plotter.clear_plot()
        self.plotter.plot_by_day(self.file_view.get(ANCHOR))
    def press_create(self):
        """Generate a random sample CSV, then refresh the file list."""
        gen.generate_sample()
        self.update_textbox()
    def update_textbox(self):
        """Re-populate the listbox with every CSV, sorted chronologically."""
        # Parse filenames as dates so '02.01' sorts before '10.01'.
        all_csvs = [datetime.strptime(filename,"%d.%m.%Y.csv") for filename in os.listdir(self.parser.csv_directory) if filename.endswith(".csv")]
        all_csvs.sort()
        self.file_view.delete(0,END)
        for filename in all_csvs:
            filename = datetime.strftime(filename,"%d.%m.%Y.csv")
            self.file_view.insert(END,filename)
app = App(root)
root.mainloop()
|
{"/visualization-GUI.py": ["/parse_web.py", "/generate_sample.py", "/plotter.py"], "/generate_sample.py": ["/parse_web.py"], "/plotter.py": ["/parse_web.py"]}
|
24,198
|
natalie-woerle/Weather-Visualizer
|
refs/heads/main
|
/parse_web.py
|
import requests
from bs4 import BeautifulSoup as BS
from datetime import datetime
import os
import csv
class WeatherParser:
    """Scrapes wetterdienst.de for Bavarian station temperatures and
    appends them to a per-day CSV file (rows: time;station;temperature).
    """

    def __init__(self):
        self.link = "https://www.wetterdienst.de/Deutschlandwetter/Wetterstationen"
        # NOTE(review): fetches the page at construction time — every
        # importer that instantiates this class performs network I/O.
        self.website = BS(requests.get(self.link).text, "html.parser")
        self.relevant_times = ["05:00", "12:00", "18:00", "00:00"]
        self.relevant_stations = ["Augsburg",
                                  "Fürstenzell",
                                  "Illesheim",
                                  "Kempten",
                                  "Lechfeld",
                                  "Rosenheim"]
        self.data_list = []
        self.csv_directory = os.path.join(os.getcwd(), 'csv')
        if not os.path.exists(self.csv_directory):
            os.mkdir(self.csv_directory)
        self.curr_date = datetime.now().strftime('%d.%m.%Y')
        self.filename = f"{self.curr_date}.csv"

    # parse website, create sublists [station, temperature, time] in data_list
    def parse_website(self):
        table_rows = [row.text.split("\n") for row in self.website.select("tbody tr.BY")]
        for row in table_rows:
            row = row[1:]  # remove empty [0]
            row[0] = row[0].strip()  # remove whitespace around station name
            if row[0] in self.relevant_stations:
                # Illesheim's table row carries an extra leading column,
                # shifting temperature/time indices by one.
                if row[0] == "Illesheim":
                    row[2] = row[2].replace("°C", "").strip()  # format temp
                    formatted_data = [row[0], row[2], row[8]]
                else:
                    row[1] = row[1].replace("°C", "").strip()
                    formatted_data = [row[0], row[1], row[7]]
                self.data_list.append(formatted_data)

    def write_to_csv(self, csv_file):
        """Append every parsed row to the open file as 'time;station;temp'."""
        for station, temperature, timestamp in self.data_list:
            new_row = f"{timestamp};{station};{temperature}\n"
            csv_file.write(new_row)

    def timestamp_is_duplicate(self):
        """Return True (with a warning) when today's CSV already holds the
        timestamp of the freshly parsed data, else False."""
        # BUG FIX: the existence check concatenated csv_directory + filename
        # without a path separator, so it almost never matched an existing
        # file and duplicate timestamps were silently re-appended.
        if os.path.isfile(os.path.join(self.csv_directory, self.filename)) and self.data_list != []:
            current_timestamp = self.data_list[0][2]
            # Opens self.filename relative to the CWD; create_csv() chdirs
            # into csv_directory first so this resolves correctly.
            with open(self.filename, "r", encoding="UTF-8") as csv_file:
                reader = csv.reader(csv_file, delimiter=";")
                for row in reader:
                    if current_timestamp in row:
                        print(f"Error: Timestamp \"{current_timestamp}\" already exists in \"{self.filename}\".")
                        return True
        return False

    #---- create/append to csv file (station,temperature,time) ----#
    def create_csv(self):
        """Fetch, parse, and append today's readings to the day's CSV,
        skipping the write when the timestamp was already recorded."""
        os.chdir(self.csv_directory)  # later relative opens hit the csv dir
        self.parse_website()
        # BUG FIX: the empty-data check used to sit in an elif AFTER the
        # duplicate check, which returns False for empty data — so the
        # "No data" message was unreachable and an empty (but "successful")
        # write happened instead.  Check for missing data first.
        if self.data_list == []:
            print("Error: No data. Check website.")
        elif not self.timestamp_is_duplicate():
            try:
                with open(self.filename, "a+", newline="", encoding="UTF-8") as csv_file:
                    self.write_to_csv(csv_file)
                print(f"\"{self.filename}\" updated successfully")
            except PermissionError:
                print(f"Error: Access denied, close \"{self.filename}\" and try again.")
# order of operations:
# get website -> parse website -> append data from table rows on website to data_list -> open csv file in read mode (if existing) ->
# check if entry for timestamp already exists -> if no -> open file in append mode -> write data from data_list to file
|
{"/visualization-GUI.py": ["/parse_web.py", "/generate_sample.py", "/plotter.py"], "/generate_sample.py": ["/parse_web.py"], "/plotter.py": ["/parse_web.py"]}
|
24,199
|
natalie-woerle/Weather-Visualizer
|
refs/heads/main
|
/generate_sample.py
|
from parse_web import WeatherParser
import os
import random
from datetime import datetime,date
# NOTE(review): constructing WeatherParser downloads the weather page, so
# importing this module performs network I/O — consider building it lazily.
parser = WeatherParser()
def generate_sample():
    """Append a full fake day of readings (uniform random 10–25 °C for every
    time/station pair) to a CSV named after a random day of this year."""
    os.chdir(parser.csv_directory)
    start_dt = date.today().replace(day=1, month=1).toordinal()
    end_dt = date.today().toordinal()
    random_day = date.fromordinal(random.randint(start_dt, end_dt))
    filename = datetime.strftime(random_day,"%d.%m.%Y.csv")
    with open(filename, "a+", encoding="UTF-8") as file:
        for rel_time in parser.relevant_times:
            for station in parser.relevant_stations:
                temp = round(random.uniform(10, 25),1)
                file.write(f"{rel_time};{station};{temp}\n")
|
{"/visualization-GUI.py": ["/parse_web.py", "/generate_sample.py", "/plotter.py"], "/generate_sample.py": ["/parse_web.py"], "/plotter.py": ["/parse_web.py"]}
|
24,200
|
natalie-woerle/Weather-Visualizer
|
refs/heads/main
|
/plotter.py
|
from tkinter import *
from matplotlib.figure import Figure
from matplotlib.backends.backend_tkagg import FigureCanvasTkAgg
import os
import csv
import sys
from parse_web import WeatherParser
class Plotter:
    """Embeds a Matplotlib figure in a Tk widget and plots one day's CSV."""
    def __init__(self, master):
        self.master = master
        self.parser = WeatherParser()  # NOTE(review): network I/O at construction
        self.stations_amount = len(self.parser.relevant_stations)
        self.times_amount = len(self.parser.relevant_times)
        self.fig = Figure(figsize = (7, 7), dpi = 100)
        self.ax = self.fig.subplots()
        pos = self.ax.get_position()
        self.canvas = FigureCanvasTkAgg(self.fig, master = self.master)
        # NOTE(review): chdir as a widget-construction side effect — the
        # relative open() in plot_by_day depends on it.
        os.chdir(self.parser.csv_directory)
        # Shrink the axes to leave room for the legend on the right.
        self.ax.set_position([pos.x0, pos.y0, pos.width * 0.8, pos.height])
    def set_legend_position(self):
        """Shift the legend box to the right of the axes."""
        leg = self.ax.legend(self.parser.relevant_stations)
        # NOTE(review): Bbox.inverse_transformed was removed in Matplotlib 3.5
        # (deprecated 3.3) — this presumably runs against an older Matplotlib;
        # confirm the pinned version.
        bb = leg.get_bbox_to_anchor().inverse_transformed(self.ax.transAxes)
        xOffset = 0.4
        bb.x0 += xOffset
        bb.x1 += xOffset
        leg.set_bbox_to_anchor(bb, transform = self.ax.transAxes)
    def plot_by_day(self, filename):
        """Plot one temperature line per station from *filename*
        (rows of 'time;station;temp')."""
        with open(filename, "r", encoding="UTF-8") as file:
            reader = csv.reader(file,delimiter=";")
            all_data = [row for row in reader]
        # Rows are grouped station-by-station within each time slot:
        # row index = station_index + stations_amount * time_index.
        for i in range(self.stations_amount):
            temperatures = []
            for j in range(self.times_amount):
                temp = float(all_data[i + self.stations_amount*j][2])
                temperatures.append(temp)
            self.ax.plot(self.parser.relevant_times, temperatures, label = all_data[i][1])
        self.ax.set(xlabel="Time of Day", ylabel="Temperature in °C",title=filename[:-4])
        self.set_legend_position()
        self.canvas.draw()
        self.canvas.get_tk_widget().pack()
    def clear_plot(self):
        """Wipe the axes before drawing a new day."""
        self.ax.clear()
|
{"/visualization-GUI.py": ["/parse_web.py", "/generate_sample.py", "/plotter.py"], "/generate_sample.py": ["/parse_web.py"], "/plotter.py": ["/parse_web.py"]}
|
24,207
|
khaled-muhammad/Django-Job-Board
|
refs/heads/main
|
/job/models.py
|
from django.db import models
# Create your models here.
JTchoices = (
("Full time job", "Full time job"),
("Part time job", "Part time job")
)
class job(models.Model):
    """A job posting (class name kept lowercase — renaming would require a
    migration and admin re-registration)."""
    title = models.CharField(max_length=100)
    #location
    job_type = models.CharField(max_length=20, choices=JTchoices)
    job_description = models.TextField(max_length=1000)
    # NOTE(review): auto_now updates this on EVERY save; for a publish
    # timestamp auto_now_add was probably intended — confirm before migrating.
    job_published_at = models.DateTimeField(auto_now=True)
    job_vacancy = models.IntegerField(default=1)
    job_salary = models.IntegerField(default=0)
    job_category = models.ForeignKey('job_category', on_delete=models.CASCADE)
    # [sic] misspelled field name kept — renaming needs a migration.
    exprience = models.IntegerField(default=1)
    def __str__(self):
        return self.title
class job_category(models.Model):
    """Category a posting belongs to (target of job.job_category FK)."""
    name = models.CharField(max_length=30)
    def __str__(self):
        return self.name
|
{"/job/admin.py": ["/job/models.py"]}
|
24,208
|
khaled-muhammad/Django-Job-Board
|
refs/heads/main
|
/job/migrations/0006_job_category.py
|
# Generated by Django 3.1.5 on 2021-01-16 16:59
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('job', '0005_auto_20210116_1642'),
]
operations = [
migrations.CreateModel(
name='job_category',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=30)),
],
),
]
|
{"/job/admin.py": ["/job/models.py"]}
|
24,209
|
khaled-muhammad/Django-Job-Board
|
refs/heads/main
|
/job/admin.py
|
from django.contrib import admin
# Register your models here.
from .models import job, job_category
# Expose both models in the Django admin with default ModelAdmin options.
admin.site.register(job)
admin.site.register(job_category)
|
{"/job/admin.py": ["/job/models.py"]}
|
24,210
|
khaled-muhammad/Django-Job-Board
|
refs/heads/main
|
/job/migrations/0004_auto_20210116_1635.py
|
# Generated by Django 3.1.5 on 2021-01-16 16:35
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('job', '0003_job_job_description'),
]
operations = [
migrations.AddField(
model_name='job',
name='exprience',
field=models.IntegerField(default=1),
),
migrations.AddField(
model_name='job',
name='job_published_at',
field=models.DateField(auto_now=True),
),
migrations.AddField(
model_name='job',
name='job_salary',
field=models.IntegerField(default=0),
),
migrations.AddField(
model_name='job',
name='job_vacancy',
field=models.IntegerField(default=1),
),
]
|
{"/job/admin.py": ["/job/models.py"]}
|
24,213
|
diegojromerolopez/hibernia
|
refs/heads/main
|
/hibernia/server/kvstore/storage.py
|
from typing import Optional, Final
from gelidum.collections import frozendict
class KVStore(object):
    """In-memory key/value store backed by an immutable frozendict.

    Every mutation rebinds self.__store to a NEW frozendict, so readers
    always observe a consistent snapshot.  No locking — suitable for a
    single-event-loop server, not for multithreaded access.
    """
    def __init__(self):
        self.__store = frozendict({})
    def set(self, key: str, val: str) -> bool:
        """Store val under key; refuses None (returns False)."""
        if val is None:
            return False
        # gelidum frozendict '+' presumably yields a merged copy — confirm
        # against the gelidum collections API.
        self.__store = self.__store + frozendict({key: val})
        return True
    def get(self, key: str) -> Optional[str]:
        """Return the value for key, or None when absent."""
        return self.__store.get(key)
    def del_item(self, key: str) -> bool:
        """Remove key; True if it existed, False otherwise."""
        if key in self.__store:
            self.__store = frozendict(
                {k: v for k, v in self.__store.items() if k != key}
            )
            return True
        return False


# Process-wide singleton shared by the request handlers.
KV_STORE: Final[KVStore] = KVStore()
|
{"/hibernia/server/kvstore/routes.py": ["/hibernia/server/kvstore/handlers.py"], "/hibernia/server/run.py": ["/hibernia/server/kvstore/routes.py"], "/hibernia/server/kvstore/handlers.py": ["/hibernia/server/kvstore/storage.py"]}
|
24,214
|
diegojromerolopez/hibernia
|
refs/heads/main
|
/hibernia/server/kvstore/routes.py
|
from typing import Final, List
from aiohttp import web
from aiohttp.web_routedef import RouteDef
from hibernia.server.kvstore.handlers import set_handler, get_handler, del_handler
# REST routes: the {key} path segment is the store key for all three verbs.
ROUTES: Final[List[RouteDef]] = [
    web.post('/{key}', set_handler),
    web.get('/{key}', get_handler),
    web.delete('/{key}', del_handler)
]
|
{"/hibernia/server/kvstore/routes.py": ["/hibernia/server/kvstore/handlers.py"], "/hibernia/server/run.py": ["/hibernia/server/kvstore/routes.py"], "/hibernia/server/kvstore/handlers.py": ["/hibernia/server/kvstore/storage.py"]}
|
24,215
|
diegojromerolopez/hibernia
|
refs/heads/main
|
/hibernia/server/run.py
|
from aiohttp import web
from hibernia.server.kvstore.routes import ROUTES
if __name__ == '__main__':
    # Assemble the aiohttp application and serve it (default 0.0.0.0:8080).
    app = web.Application()
    app.add_routes(ROUTES)
    web.run_app(app)
|
{"/hibernia/server/kvstore/routes.py": ["/hibernia/server/kvstore/handlers.py"], "/hibernia/server/run.py": ["/hibernia/server/kvstore/routes.py"], "/hibernia/server/kvstore/handlers.py": ["/hibernia/server/kvstore/storage.py"]}
|
24,216
|
diegojromerolopez/hibernia
|
refs/heads/main
|
/hibernia/server/kvstore/handlers.py
|
from aiohttp import web
from hibernia.server.kvstore.storage import KV_STORE
# POST /{key}: store the raw request body as the value for {key}.
async def set_handler(request):
    key = request.match_info.get('key')
    val = await request.text()
    stored = KV_STORE.set(key, val)
    if not stored:
        # KVStore.set only refuses None; request.text() always yields a str,
        # so this 400 is a defensive branch.
        return web.Response(status=400)
    return web.Response(status=201)
async def get_handler(request):
    """GET /{key}: return the stored value, or 404 when the key is absent.

    Previously a missing key produced 200 with an empty body, which is
    indistinguishable from a stored empty value and inconsistent with
    del_handler's 404 for missing keys.
    """
    key = request.match_info.get('key')
    value = KV_STORE.get(key)
    if value is None:
        return web.Response(status=404)
    return web.Response(body=value, status=200)
async def del_handler(request):
    """DELETE /{key}: 200 when the key existed and was removed, else 404."""
    key = request.match_info.get('key')
    if KV_STORE.del_item(key):
        return web.Response(status=200)
    return web.Response(status=404)
|
{"/hibernia/server/kvstore/routes.py": ["/hibernia/server/kvstore/handlers.py"], "/hibernia/server/run.py": ["/hibernia/server/kvstore/routes.py"], "/hibernia/server/kvstore/handlers.py": ["/hibernia/server/kvstore/storage.py"]}
|
24,240
|
md4956/pgtest
|
refs/heads/master
|
/run.py
|
from maindir import app
# set the secret key. keep this really secret:
# NOTE(review/security): the secret key is hard-coded into version control —
# load it from an environment variable or an untracked config file instead.
app.secret_key = 'A0Zr98j/3yX R~XHH!jmN]LWX/,?RT'
if __name__ == '__main__':
    # debug=True enables the Werkzeug debugger; never ship this enabled.
    app.run(debug=True)
|
{"/run.py": ["/maindir/__init__.py"], "/maindir/signup/views.py": ["/maindir/__init__.py"], "/maindir/login/views.py": ["/maindir/__init__.py"], "/maindir/__init__.py": ["/maindir/login/views.py", "/maindir/signup/views.py"]}
|
24,241
|
md4956/pgtest
|
refs/heads/master
|
/maindir/models.py
|
import sqlite3 as sql
from werkzeug.security import check_password_hash
# manage SQLite3 db, insert user
def insertUser(username, password):
    """Insert one (username, password) row into users in database.db.

    Uses a parameterized query (safe against SQL injection).  The
    connection is now closed in a finally block, so it is no longer
    leaked when the INSERT raises (e.g. missing table, locked db).
    """
    con = sql.connect("database.db")
    try:
        cur = con.cursor()
        cur.execute("INSERT INTO users (username,password) VALUES (?,?)", (username, password))
        con.commit()
    finally:
        con.close()
# manage SQLite3 db, retrieve user
def retrieveUsers():
    """Return all (username, password) tuples from the users table.

    The connection is closed in a finally block so it is not leaked when
    the SELECT raises.
    """
    con = sql.connect("database.db")
    try:
        cur = con.cursor()
        cur.execute("SELECT username, password FROM users")
        users = cur.fetchall()
    finally:
        con.close()
    return users
class User:
    """Minimal user object implementing the flask-login user interface."""

    def __init__(self, username):
        self.username = username

    def is_authenticated(self):
        # Any constructed user counts as authenticated.
        return True

    def is_active(self):
        return True

    def is_anonymous(self):
        return False

    def get_id(self):
        # flask-login reloads the user from the session using this value.
        return self.username

    @staticmethod
    def validate_login(password_hash, password):
        """True when *password* matches the stored werkzeug hash."""
        return check_password_hash(password_hash, password)
|
{"/run.py": ["/maindir/__init__.py"], "/maindir/signup/views.py": ["/maindir/__init__.py"], "/maindir/login/views.py": ["/maindir/__init__.py"], "/maindir/__init__.py": ["/maindir/login/views.py", "/maindir/signup/views.py"]}
|
24,242
|
md4956/pgtest
|
refs/heads/master
|
/maindir/signup/views.py
|
from flask import Blueprint, render_template, request, redirect, url_for, flash, get_flashed_messages
from maindir import models as db_handler
from pymongo import MongoClient
from maindir.security.models import security
# blueprint for this .py
signup_blueprint = Blueprint('signup', __name__, template_folder='templates')
client = MongoClient('mongodb://localhost:27017/')
db = client.pgco
# collection = db.verified_users
# signup page
@signup_blueprint.route('/', methods=['GET', 'POST'])
def signup():
    """Render the signup form (GET and POST both just render)."""
    return render_template('signup/signup.html')
# checking user and pass
#
#TODO have to do somthing
@signup_blueprint.route('/processing', methods=['POST'])
def signing_processing():
    """Handle the signup form: reject duplicate emails, otherwise store the
    user in Mongo with a hashed password."""
    # user_array = db_handler.retrieveUsers()
    collection = db.users
    find = collection.find_one({'email': request.form['email']})
    if find:
        flash('this email is already exists. please log in or use another email', 'signup')
        return redirect(url_for('signup.signup'))
    else:
        # Hash via the project's security helper before persisting.
        hashed_password = security.set_password(request.form['password'])
        user = {'email': request.form['email'], 'password': hashed_password}
        # NOTE(review): .inserted_id is computed and immediately discarded.
        collection.insert_one(user).inserted_id
        return 'done'
# u = collection.find({'email': request.form['email']})
# for i in u:
# if request.method == 'POST':
# mail = request.form['email']
# if request.form['email'] == u.find_one({'email': mail}): # if email was already exists
# flash('this email is already exists. please log in or use another email', 'signup')
# return redirect(url_for('signup.signup'))
#
# else:
# print('done')
# return redirect(url_for('signup.signed_up')) # if both username and password were correct
#
#
# redirect authenticated users to main page
@signup_blueprint.route('/signed_up')
def signed_up():
    """Landing page shown after a successful signup."""
    return '<h1>done !</h1>'
@signup_blueprint.route('/test')
def test():
    """Debug endpoint: inserts a hard-coded document and queries an email
    that was never inserted (prints None).  NOTE(review): leftover
    scaffolding — remove before production; 'passwrod' typo is stored data."""
    mhasani = {
        'email': "mohammad@live.com",
        'passwrod': 'pass',
        'name': 'mohammad',
        'family': 'hasani',
        'asdasdasd': 'asdasdasdasd'
    }
    collection = db.username
    x = collection.insert_one(mhasani).inserted_id
    y = collection.find_one({'email': 'mohammaasdasdasdd@live.com'})
    print(y)
    return 'ok'
|
{"/run.py": ["/maindir/__init__.py"], "/maindir/signup/views.py": ["/maindir/__init__.py"], "/maindir/login/views.py": ["/maindir/__init__.py"], "/maindir/__init__.py": ["/maindir/login/views.py", "/maindir/signup/views.py"]}
|
24,243
|
md4956/pgtest
|
refs/heads/master
|
/maindir/login/views.py
|
from flask import Blueprint, render_template, request, redirect, url_for, flash, get_flashed_messages
from maindir import models as db_handler
from pymongo import MongoClient
# blueprint for this .py
login_blueprint = Blueprint('login', __name__, template_folder='templates')
# login page
@login_blueprint.route('/', methods=['GET', 'POST'])
def login():
    """Render the login form."""
    return render_template('login/login.html')
# checking user and pass
@login_blueprint.route('/processing', methods=['POST'])
def logging_processing():
    """Validate submitted credentials against all stored users.

    BUG FIX: the original iterated the users table and returned
    'username does not exist' as soon as ANY row failed to match, so only
    the first user in the table could ever log in; an empty table fell
    through and returned None (HTTP 500).  Now the whole table is scanned
    for a username match before deciding.

    NOTE(review): passwords are compared in plaintext here even though
    models.User.validate_login (werkzeug hash check) exists — confirm
    which scheme the stored rows actually use.
    """
    user_array = db_handler.retrieveUsers()
    submitted_username = request.form['username']
    submitted_password = request.form['password']
    for stored_username, stored_password in user_array:
        if submitted_username == stored_username:
            if submitted_password == stored_password:
                # both username and password were correct
                return redirect(url_for('login.logged_in'))
            flash('password is wrong. please check and try again', 'login')
            return redirect(url_for('login.login'))
    # no row matched the username (also covers an empty users table)
    flash('this username does not exist. please check the username and try again', 'login')
    return redirect(url_for('login.login'))
# redirect authenticated users to main page
@login_blueprint.route('/logged_in')
def logged_in():
    """Landing page shown after a successful login."""
    return '<h1>welcome !</h1>'
#
# @login_blueprint.route('/test1')
# def logged1():
# flash("i'm coming from test1", 'login')
# return 'this is test'
#
#
# @login_blueprint.route('/test2')
# def logged2():
# return render_template('test.html')
|
{"/run.py": ["/maindir/__init__.py"], "/maindir/signup/views.py": ["/maindir/__init__.py"], "/maindir/login/views.py": ["/maindir/__init__.py"], "/maindir/__init__.py": ["/maindir/login/views.py", "/maindir/signup/views.py"]}
|
24,244
|
md4956/pgtest
|
refs/heads/master
|
/maindir/__init__.py
|
from flask import Flask
from maindir.login.views import login_blueprint
from maindir.signup.views import signup_blueprint
from flask_login import LoginManager
app = Flask(__name__, instance_relative_config=True)
# Register the blueprints and mount them under their URL prefixes.
# (original comment, romanized Persian: "introduce & link the blueprints to app")
app.register_blueprint(login_blueprint, url_prefix='/login')
app.register_blueprint(signup_blueprint, url_prefix='/signup')
# login_manager = LoginManager()
# login_manager.init_app(app)
# login_manager.login_view = "users.login"
|
{"/run.py": ["/maindir/__init__.py"], "/maindir/signup/views.py": ["/maindir/__init__.py"], "/maindir/login/views.py": ["/maindir/__init__.py"], "/maindir/__init__.py": ["/maindir/login/views.py", "/maindir/signup/views.py"]}
|
24,245
|
md4956/pgtest
|
refs/heads/master
|
/mongotest.py
|
from flask import Flask
from pymongo import MongoClient
import datetime
import pprint
# NOTE(review): throwaway Mongo smoke test; the Flask import above is unused.
client = MongoClient()  # defaults to localhost:27017
db = client.pgco345
post = {"author": "aaa ",
        "text": "My first blog postpostpostpostpost!",
        "tags": ["mongodb", "python", "pymongo"],
        "date": datetime.datetime.utcnow()}
posts3 = db.postssss
post_id = posts3.insert_one(post).inserted_id
print(post_id)
# "asghar" is never inserted above, so this prints None (unless pre-existing).
pprint.pprint(posts3.find_one({"author": "asghar"}))
# NOTE(review): dead code — this rebinding of client is never used afterwards.
client = MongoClient('localhost', 27017)
|
{"/run.py": ["/maindir/__init__.py"], "/maindir/signup/views.py": ["/maindir/__init__.py"], "/maindir/login/views.py": ["/maindir/__init__.py"], "/maindir/__init__.py": ["/maindir/login/views.py", "/maindir/signup/views.py"]}
|
24,361
|
tglanz/async-python-examples
|
refs/heads/master
|
/runners/readfile_threads.py
|
import io
import asyncio
import concurrent
def read_file(file_path):
    """Print a banner, then every line of *file_path* with whitespace stripped."""
    print("reading file")
    with open(file_path, mode='r') as handle:
        for raw_line in handle:
            print(raw_line.strip())
def print_loop(count, context):
    """Print *count* indexed lines tagged with *context*, between banners."""
    print("print loop start")
    for idx in range(count):
        print(f"index {context} - {idx}")
    print("print loop end")
async def run():
    """Run the file dump and two counter loops concurrently on a thread pool.

    Each blocking function gets its own worker thread via run_in_executor,
    so their output interleaves while the event loop just awaits them.
    """
    with concurrent.futures.ThreadPoolExecutor(max_workers=5) as executor:
        loop = asyncio.get_event_loop()
        await asyncio.gather(
            loop.run_in_executor(executor, read_file, "data/lorem-ipsum.txt"),
            loop.run_in_executor(executor, print_loop, 140, 'a'),
            loop.run_in_executor(executor, print_loop, 140, 'b'),
        )
def execute():
    """Entry point: run the run() coroutine on a fresh event loop.

    asyncio.get_event_loop() is deprecated outside a running loop since
    Python 3.10, so a loop is created explicitly; the finally block
    guarantees it is closed even when the coroutine raises (the original
    leaked the loop on error).
    """
    print("creating event loop")
    loop = asyncio.new_event_loop()
    print("creating coroutine")
    coro = run()
    print("dispatching coroutine to event loop")
    try:
        loop.run_until_complete(coro)
    finally:
        loop.close()
|
{"/main.py": ["/runners/first.py", "/runners/readfile_coroutines.py", "/runners/readfile_threads.py", "/runners/async_writer.py"]}
|
24,362
|
tglanz/async-python-examples
|
refs/heads/master
|
/runners/first.py
|
import asyncio
async def run():
    """Sleep/wake three times, printing progress around each await."""
    delay = .5
    for cycle in range(3):
        print(f"going to sleep {cycle}")
        await asyncio.sleep(delay)
        print(f"woke up {cycle}")
def execute():
    """Create a fresh event loop, drive run() to completion, close the loop.

    asyncio.get_event_loop() is deprecated when no loop is running
    (Python 3.10+); new_event_loop() keeps the explicit-loop structure
    while staying forward-compatible, and the finally guarantees cleanup
    even if the coroutine raises.
    """
    print("creating loop")
    loop = asyncio.new_event_loop()
    print("creating corouting")
    coroutine = run()
    print("running coroutine")
    try:
        loop.run_until_complete(coroutine)
    finally:
        print("closing loop")
        loop.close()
|
{"/main.py": ["/runners/first.py", "/runners/readfile_coroutines.py", "/runners/readfile_threads.py", "/runners/async_writer.py"]}
|
24,363
|
tglanz/async-python-examples
|
refs/heads/master
|
/runners/readfile_coroutines.py
|
import asyncio
import aiofiles
async def read_file(file_path):
    """Print each stripped line of *file_path*, awaiting between reads so the
    event loop can interleave other coroutines (aiofiles keeps the blocking
    file I/O off the loop)."""
    print("reading file")
    async with aiofiles.open(file_path, mode='r') as file:
        while True:
            line = await file.readline()
            if not line:
                # readline() returns '' at EOF
                break
            print(line.strip())
async def print_loop(count):
    """Print *count* indices between banners, yielding to the event loop
    (10 ms sleep) before each one."""
    print("print loop start")
    for idx in range(count):
        await asyncio.sleep(.01)
        print(f"index {idx}")
    print("print loop end")
async def run():
    # Interleave the file dump with the counter loop on a single event loop.
    await asyncio.gather(
        read_file("data/lorem-ipsum.txt"),
        print_loop(300)
    )
def execute():
    """Drive run() to completion.

    NOTE(review): unlike the threads variant, the loop is never closed here;
    asyncio.get_event_loop() is also deprecated outside a running loop since
    Python 3.10 — asyncio.run(run()) would address both.
    """
    print("creating event loop")
    loop = asyncio.get_event_loop()
    print("creating coroutine")
    coro = run()
    print("dispatching coroutine to event loop")
    loop.run_until_complete(coro)
|
{"/main.py": ["/runners/first.py", "/runners/readfile_coroutines.py", "/runners/readfile_threads.py", "/runners/async_writer.py"]}
|
24,364
|
tglanz/async-python-examples
|
refs/heads/master
|
/main.py
|
from argparse import ArgumentParser
import runners.first
import runners.producer_consumer
import runners.readfile_coroutines
import runners.readfile_threads
import runners.async_writer
# Maps CLI runner names to their modules; each module exposes execute().
runners_by_name = {
    'first': runners.first,
    'producer-consumer': runners.producer_consumer,
    'readfile-coroutines': runners.readfile_coroutines,
    'readfile-threads': runners.readfile_threads,
    'async_writer': runners.async_writer,
}
if __name__ == "__main__":
    parser = ArgumentParser()
    parser.add_argument('runner', type=str)
    args = parser.parse_args()
    runner_name = args.runner
    # BUG FIX: the original indexed runners_by_name[...] directly, which
    # raises KeyError for unknown names — the `is None` guard below was
    # unreachable.  .get() returns None so the friendly path actually runs.
    runner = runners_by_name.get(runner_name)
    if runner is None:
        # BUG FIX: print() was handed the literal "{}" plus a second
        # positional argument instead of formatting the name in.
        print("No such runner {}".format(runner_name))
    else:
        runner.execute()
|
{"/main.py": ["/runners/first.py", "/runners/readfile_coroutines.py", "/runners/readfile_threads.py", "/runners/async_writer.py"]}
|
24,365
|
tglanz/async-python-examples
|
refs/heads/master
|
/runners/async_writer.py
|
from queue import Queue, Empty
import time
import asyncio
import concurrent
def consume(write_file, transform, queue, should_consume):
    """Blocking consumer loop (runs on an executor thread).

    Pops items from *queue*, writes transform(item) to *write_file*
    (None results are dropped), and exits only when a 1 s poll finds the
    queue empty AND should_consume() reports False.
    """
    while True:
        try:
            item = queue.get(True, 1)  # block up to 1 s for an item
            transformed_item = transform(item)
            if transformed_item is not None:
                write_file.write(transformed_item)
            # NOTE(review): if transform/write raise, task_done() is skipped
            # and the exception ends this thread — queue.join() would then
            # block forever in stop_consuming().
            queue.task_done()
        except Empty:
            if not should_consume():
                break
            time.sleep(1)
def produce(queue, item):
    # Enqueue helper dispatched to an executor thread (see
    # AsyncWriterContext.put) so callers simply await the hand-off.
    queue.put(item)
class AsyncWriterContext:
    """Bridges an asyncio program to a blocking writer thread via a Queue.

    One of the two executor workers runs the consume() loop for its whole
    lifetime; put() dispatches enqueues through the other.  should_consume
    is a plain flag read across threads without locking (relies on the GIL
    for visibility of a single attribute write).
    """
    def __init__(self, loop):
        self.loop = loop
        self.executor = concurrent.futures.ThreadPoolExecutor(max_workers=2)
        self.queue = Queue()
        self.should_consume = False
    async def stop_consuming(self):
        """Signal the consumer to stop, then wait for the queue to drain.

        NOTE(review): queue.join() is a blocking call made directly on the
        event loop — it stalls every other coroutine until the consumer
        marks all items done; confirm this is acceptable here.
        """
        self.should_consume = False
        self.queue.join()
    async def start_consuming(self, write_file, transform_item):
        # Runs the consume() loop on the executor until stop_consuming()
        # has been called and the queue has gone quiet.
        self.should_consume = True
        await self.loop.run_in_executor(self.executor, consume, write_file, transform_item, self.queue, lambda: self.should_consume)
    async def put(self, item):
        # Hand the enqueue to the executor so callers just await it.
        await self.loop.run_in_executor(self.executor, produce, self.queue, item)
async def entry_point(async_writer_context):
    """Demo producer: two bursts of items with a pause, then shutdown.

    NOTE(review): time.sleep(3) blocks the whole event loop (the consumer
    keeps running only because it lives on an executor thread) —
    asyncio.sleep was presumably intended; confirm.
    """
    for i in range(0, 5):
        await async_writer_context.put(i)
    time.sleep(3)
    for i in range(8, 12):
        await async_writer_context.put(i)
    await async_writer_context.stop_consuming()
def execute():
    """Wire producer and consumer together over out.bin.

    bytes is the transform: each int item becomes a zero-filled bytes
    object of that length, appended to the binary file.
    """
    loop = asyncio.get_event_loop()
    async_writer_context = AsyncWriterContext(loop)
    with open('out.bin', 'wb') as file:
        loop.run_until_complete(
            asyncio.gather(
                entry_point(async_writer_context),
                async_writer_context.start_consuming(file, bytes)
            )
        )
|
{"/main.py": ["/runners/first.py", "/runners/readfile_coroutines.py", "/runners/readfile_threads.py", "/runners/async_writer.py"]}
|
24,368
|
ManivaDigital-AB/Happicard
|
refs/heads/main
|
/backend/api/orders/views.py
|
from django.conf import settings
from django.core.exceptions import ObjectDoesNotExist
from rest_framework import permissions, status, generics, views
from rest_framework.response import Response
from drf_yasg.utils import swagger_auto_schema
from drf_yasg import openapi
from uuid import UUID
import requests
import stripe
import json
from .serializers import (
OrderSerializer,
OrderItemSerializer,
OrderListSerializer,
OrderItemListSerializer,
HappicardSerializer,
PayoutSerializer,
TransferSerializer,
)
from .models import (
Order,
OrderItem,
)
from backend.tasks import send_happicard_email_task, outbound_mms_task
from backend.utils import Util
DEFAULT_FROM_NUMBER = settings.DEFAULT_FROM_NUMBER
class OrderListView(generics.ListAPIView):
    """Public, unauthenticated list of all orders."""
    permission_classes = (permissions.AllowAny,)
    authentication_classes = ()
    serializer_class = OrderListSerializer
    queryset = Order.objects.all()
class OrderItemListView(generics.ListAPIView):
    """Public, unauthenticated list of all order items."""
    permission_classes = (permissions.AllowAny,)
    authentication_classes = ()
    serializer_class = OrderItemListSerializer
    queryset = OrderItem.objects.all()
class OrderItemCreateView(generics.CreateAPIView):
    """
    Create Order Item View
    """
    permission_classes = (permissions.AllowAny,)
    serializer_class = OrderItemSerializer
    def post(self, request):
        order = request.data
        serializer = self.serializer_class(data=order)
        # raise_exception=True makes invalid payloads raise ValidationError
        # (DRF's handler renders the 400), so the else branch below is
        # unreachable as written.
        if serializer.is_valid(raise_exception=True):
            serializer.save()
            return Response(serializer.data, status=status.HTTP_200_OK)
        else:
            return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
class StripePaymentIntentView(generics.GenericAPIView):
    """
    Stripe Payment View

    Persists the Order, creates a Stripe PaymentIntent for its total, and
    returns the client_secret for the frontend to confirm the payment.
    """
    permission_classes = (permissions.AllowAny,)
    serializer_class = OrderSerializer
    def post(self, request):
        order = request.data
        serializer = self.serializer_class(data=order)
        # raise_exception=True: invalid payloads raise ValidationError (-> 400)
        if serializer.is_valid(raise_exception=True):
            serializer.save()
            order = serializer.data
            current_order = Order.objects.get(id=order["id"])
            total = current_order.get_order_total
            # Debug leftover: dumps the order's items to stdout.
            for item in current_order.items.all():
                print(item)
            try:
                # NOTE(review): the key is reassigned for every item, so the
                # Stripe account actually used is decided by the LAST item's
                # giftcard flag — confirm this is intended for mixed baskets.
                for item in current_order.items.all():
                    if item.giftcard:
                        stripe.api_key = settings.STRIPE_DEV_STORE_SK
                    else:
                        stripe.api_key = settings.STRIPE_DEV_NGO_SK
                intent = stripe.PaymentIntent.create(
                    amount=total,
                    currency="sek",
                    payment_method_types=["card"],
                    receipt_email=current_order.email,
                )
                return Response(
                    {
                        "client_secret": intent.client_secret,
                        "order": order,
                    }
                )
            except stripe.error.CardError as e:
                body = e.json_body
                err = body.get("error", {})
                print("Status is: %s" % e.http_status)
                print("Type is: %s" % err.get("type"))
                print("Code is: %s" % err.get("code"))
                print("Message is %s" % err.get("message"))
                return Response({"message": err.get("message")}, status=e.http_status)
            except stripe.error.RateLimitError as e:
                return Response({"Error": "The API was not able to respond, try again."})
            except stripe.error.InvalidRequestError as e:
                return Response({"Error": "Invalid parameters, unable to process payment."})
            except stripe.error.AuthenticationError as e:
                # NOTE(review): swallowed silently — the view then returns
                # None, which DRF turns into a 500; confirm this is intended.
                pass
            except stripe.error.APIConnectionError as e:
                return Response({"Error": "Network communication failed, try again."})
            except stripe.error.StripeError as e:
                return Response({"Error": "Internal Stripe Error, contact support."})
            except Exception as e:
                return Response({"message": "Unable to process payment, try again."})
class HappicardSendView(generics.GenericAPIView):
    """
    Happicard Send View

    Queues delivery of an order's Happicard by email and/or SMS (Celery
    tasks scheduled for the order's delivery date) and reports which
    channels were used.
    """
    permission_classes = (permissions.AllowAny,)
    serializer_class = HappicardSerializer
    def get(self, request, pk):
        """Schedule the delivery task(s) for order *pk*.

        Channel selection: both flags set -> email + SMS; SMS-only; else
        the default branch sends email only.
        """
        order = Order.objects.get(id=pk)
        sender_name = order.first_name
        recipient_name = order.happicard_recipient_name
        recipient_email_choice = order.happicard_recipient_email_choice
        recipient_sms_choice = order.happicard_recipient_sms_choice
        personal_message = order.happicard_personal_message
        email_subject = f"{sender_name} sent you a Happicard"
        # NOTE(review): these two locals are never used below — presumably
        # meant to be attached to the email/MMS payloads; confirm.
        if order.happicard_personal_image:
            personal_image = order.happicard_personal_image
        if order.happicard_personal_video:
            personal_video = order.happicard_personal_video
        # A single rebate code / redeem URL is taken from the last item.
        # (narrowed from a bare `except:` so Ctrl-C / SystemExit propagate)
        try:
            rebate_code = [
                item.match_price_choice_with_rebate for item in order.items.all()
            ].pop()
        except Exception:
            return Response(
                {"Error": "An item in your basket is missing a rebate code."}
            )
        try:
            redeem_website = [
                item.get_redeem_website for item in order.items.all()
            ].pop()
        except Exception:
            return Response(
                {
                    "Error": "An item in your basket doesn't include a website for redeeming code."
                }
            )
        if recipient_email_choice and recipient_sms_choice:
            recipient_email = order.happicard_recipient_email
            confirmation = {
                "to_email": recipient_email,
                "email_body": personal_message,
                "email_subject": email_subject,
            }
            send_happicard_email_task.apply_async(
                args=[
                    confirmation,
                    recipient_name,
                    rebate_code,
                    redeem_website,
                ],
                eta=order.happicard_delivery_date,
            )
            recipient_number = order.happicard_recipient_number
            outbound_mms_task.apply_async(
                args=[
                    recipient_number,
                    DEFAULT_FROM_NUMBER,
                    personal_message,
                    recipient_name,
                    sender_name,
                    rebate_code,
                    redeem_website,
                ],
                eta=order.happicard_delivery_date,
            )
            return Response(
                {"Success": "Happicard email and SMS successfully sent."},
                status=status.HTTP_200_OK,
            )
        elif recipient_sms_choice and not recipient_email_choice:
            recipient_number = order.happicard_recipient_number
            outbound_mms_task.apply_async(
                args=[
                    recipient_number,
                    DEFAULT_FROM_NUMBER,
                    personal_message,
                    recipient_name,
                    sender_name,
                    rebate_code,
                    redeem_website,
                ],
                eta=order.happicard_delivery_date,
            )
            return Response(
                {"Success": "Happicard SMS successfully sent."},
                status=status.HTTP_200_OK,
            )
        else:
            recipient_email = order.happicard_recipient_email
            confirmation = {
                "to_email": recipient_email,
                "email_body": personal_message,
                "email_subject": email_subject,
            }
            send_happicard_email_task.apply_async(
                args=[
                    confirmation,
                    recipient_name,
                    rebate_code,
                    redeem_website,
                ],
                eta=order.happicard_delivery_date,
            )
            return Response(
                {
                    # BUG FIX: this f-string referenced an undefined local
                    # `happicard_delivery_date`, raising NameError on the
                    # email-only branch; read it from the order instead.
                    "Success": f"Happicard email will be successfully sent on {order.happicard_delivery_date}."
                },
                status=status.HTTP_200_OK,
            )
class StripePayoutView(generics.GenericAPIView):
    """
    Stripe Payout View

    Creates a Stripe payout for 4% of an order's total (currency: SEK)
    to the destination account supplied in the request body.
    """

    permission_classes = (permissions.AllowAny,)
    serializer_class = PayoutSerializer

    def post(self, request):
        """Validate the payout request and create the Stripe payout.

        Expects ``order_id`` and ``destination`` in the request body.
        Returns the created payout on success; on Stripe failure returns
        an error payload with HTTP 400 (previously an implicit 200).
        """
        serializer = self.serializer_class(data=request.data)
        serializer.is_valid(raise_exception=True)
        payout_data = serializer.data

        current_order = Order.objects.get(id=payout_data.get("order_id"))
        # Payout amount is a 4% cut of the order total.
        # NOTE(review): int() truncates — assumes get_order_total is already
        # in the smallest currency unit; verify against the Order model.
        payout_total = (4 * current_order.get_order_total) / 100.0
        try:
            payout = stripe.Payout.create(
                amount=int(payout_total),
                currency="sek",
                destination=payout_data.get("destination"),
            )
        except Exception:
            # Was a bare `except:` — never swallow SystemExit/KeyboardInterrupt.
            return Response(
                {"Error": "Payout Could Not Be Processed"},
                status=status.HTTP_400_BAD_REQUEST,
            )
        return Response(status=status.HTTP_200_OK, data=payout)
class StripeTransferView(generics.GenericAPIView):
    """
    Stripe Transfer View

    Creates a Stripe transfer for 4% of an order's total (currency: SEK)
    from the given source to the given destination account.
    """

    permission_classes = (permissions.AllowAny,)
    serializer_class = TransferSerializer

    def post(self, request):
        """Validate the transfer request and create the Stripe transfer.

        Expects ``order_id``, ``source`` and ``destination`` in the request
        body. Returns the created transfer on success; on Stripe failure
        returns an error payload with HTTP 400 (previously an implicit 200).
        """
        serializer = self.serializer_class(data=request.data)
        serializer.is_valid(raise_exception=True)
        transfer_data = serializer.data

        current_order = Order.objects.get(id=transfer_data.get("order_id"))
        # Transfer amount is a 4% cut of the order total.
        # NOTE(review): int() truncates — assumes get_order_total is already
        # in the smallest currency unit; verify against the Order model.
        transfer_total = (4 * current_order.get_order_total) / 100.0
        try:
            # Original bound this to a variable named `payout` — renamed for
            # clarity; it is a Transfer object, not a Payout.
            transfer = stripe.Transfer.create(
                amount=int(transfer_total),
                currency="sek",
                source=transfer_data.get("source"),
                destination=transfer_data.get("destination"),
            )
        except Exception:
            # Was a bare `except:` — never swallow SystemExit/KeyboardInterrupt.
            return Response(
                {"Error": "Transfer Could Not Be Completed"},
                status=status.HTTP_400_BAD_REQUEST,
            )
        return Response(status=status.HTTP_200_OK, data=transfer)
class OrderItemDetailView(generics.RetrieveUpdateDestroyAPIView):
    """Retrieve, update, or delete a single OrderItem.

    Open endpoint: no authentication, unrestricted permissions.
    """

    serializer_class = OrderItemListSerializer
    queryset = OrderItem.objects.all()
    authentication_classes = ()
    permission_classes = (permissions.AllowAny,)
class OrderDetailView(generics.RetrieveUpdateDestroyAPIView):
    """Retrieve, update, or delete a single Order.

    Open endpoint: no authentication, unrestricted permissions.
    """

    serializer_class = OrderListSerializer
    queryset = Order.objects.all()
    authentication_classes = ()
    permission_classes = (permissions.AllowAny,)
|
{"/backend/api/orders/views.py": ["/backend/api/orders/serializers.py", "/backend/api/orders/models.py", "/backend/tasks.py", "/backend/utils.py"], "/backend/settings/dev.py": ["/backend/settings/base.py"], "/backend/settings/__init__.py": ["/backend/settings/base.py"], "/backend/api/seo/views.py": ["/backend/api/seo/serializers.py", "/backend/api/seo/models.py"], "/backend/api/profiles/admin.py": ["/backend/api/profiles/models.py"], "/backend/settings/prod.py": ["/backend/settings/base.py"], "/backend/api/accounts/admin.py": ["/backend/api/accounts/models.py", "/backend/utils.py"], "/backend/api/customizations/migrations/0001_initial.py": ["/backend/settings/storage_backends.py"], "/backend/api/customizations/serializers.py": ["/backend/api/customizations/models.py"], "/backend/api/items/migrations/0005_auto_20210322_1716.py": ["/backend/settings/storage_backends.py"], "/backend/api/seo/admin.py": ["/backend/api/seo/models.py"], "/backend/api/customizations/migrations/0003_auto_20210319_1559.py": ["/backend/settings/storage_backends.py"], "/backend/api/items/serializers.py": ["/backend/api/items/models.py"], "/backend/api/orders/serializers.py": ["/backend/api/orders/models.py", "/backend/api/items/models.py"], "/backend/api/accounts/serializers.py": ["/backend/api/accounts/models.py"], "/backend/api/customizations/models.py": ["/backend/settings/storage_backends.py"], "/backend/api/accounts/views.py": ["/backend/api/accounts/serializers.py", "/backend/api/accounts/models.py", "/backend/utils.py"], "/backend/api/customizations/migrations/0009_auto_20210322_1716.py": ["/backend/settings/storage_backends.py"], "/backend/api/customizations/migrations/0008_auto_20210322_1624.py": ["/backend/settings/storage_backends.py"], "/backend/api/customizations/views.py": ["/backend/api/customizations/serializers.py", "/backend/api/customizations/models.py"], "/backend/api/orders/admin.py": ["/backend/api/orders/models.py"], "/backend/api/customizations/admin.py": 
["/backend/api/customizations/models.py"], "/backend/api/profiles/migrations/0004_auto_20210322_1716.py": ["/backend/settings/storage_backends.py"], "/backend/api/seo/serializers.py": ["/backend/api/seo/models.py"], "/backend/api/orders/models.py": ["/backend/api/items/models.py", "/backend/settings/storage_backends.py", "/backend/tasks.py"], "/backend/api/items/views.py": ["/backend/api/items/models.py", "/backend/api/items/serializers.py"], "/backend/api/profiles/serializers.py": ["/backend/api/profiles/models.py"], "/backend/api/items/models.py": ["/backend/api/accounts/models.py", "/backend/settings/storage_backends.py"], "/backend/api/profiles/models.py": ["/backend/api/items/models.py", "/backend/api/accounts/models.py", "/backend/settings/storage_backends.py"], "/backend/tasks.py": ["/backend/utils.py"], "/backend/settings/base.py": ["/backend/settings/dev.py", "/backend/settings/prod.py"], "/backend/api/items/admin.py": ["/backend/api/items/models.py"], "/backend/api/orders/migrations/0001_initial.py": ["/backend/settings/storage_backends.py"], "/backend/api/profiles/views.py": ["/backend/api/profiles/serializers.py", "/backend/api/profiles/models.py"]}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.