id stringlengths 1 7 | text stringlengths 6 1.03M | dataset_id stringclasses 1
value |
|---|---|---|
140529 | import pandas as pd
import argparse
def writeOutFile(outputDF, outfile):
    """Write *outputDF* to *outfile* as a comma-separated, UTF-8 CSV without the index."""
    outputDF.to_csv(path_or_buf=outfile, sep=',', index=False, encoding='utf-8')
def createOutputDF(inputDF, n):
    """Group every *n* consecutive rows of *inputDF* into one output row.

    Each chunk of n rows is transposed, the first transposed row becomes the
    column header, and the remaining transposed rows become data rows. Any
    trailing rows that do not fill a complete chunk are discarded (as before).
    """
    chunks = []
    for i in range(inputDF.shape[0] // n):
        # Rows i*n .. i*n + n - 1 (``.loc`` slicing is end-inclusive).
        # The original had separate i == 0 / else branches, but i + i*(n-1)
        # equals i*n, so a single formula covers both cases.
        dfl = inputDF.loc[i * n:(i * n + n - 1)].T
        new_header = dfl.iloc[0]   # first transposed row -> column names
        dfl = dfl[1:]              # drop the header row from the data
        dfl.columns = new_header
        chunks.append(dfl)
    # DataFrame.append was removed in pandas 2.0; pd.concat is the supported API.
    return pd.concat(chunks, ignore_index=False) if chunks else pd.DataFrame()
def createInitDF(inputfile, d):
    """Read *inputfile* into a headerless DataFrame using delimiter *d*."""
    return pd.read_csv(inputfile, delimiter=d, header=None)
def main():
    """Parse CLI arguments, group every n input rows into one output row, write a CSV.

    Expects -n (group size), -i/--inputfile, -d/--delimiter and -o/--outputfile.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument("-n", help="Number of rows to be grouped", type=int)
    parser.add_argument(
        "-i", "--inputfile", help="Provide the path of the input file")
    parser.add_argument("-d", "--delimiter", help="Provide the delimiter")
    parser.add_argument(
        "-o", "--outputfile", help="Provide the path of the output file")
    args = parser.parse_args()
    n = args.n
    infile = args.inputfile
    d = args.delimiter
    outfile = args.outputfile
    initDF = createInitDF(infile, d)
    outputDF = createOutputDF(initDF, n)
    writeOutFile(outputDF, outfile)
    # Typo fixed: "putput" -> "output".
    print("Please check the output file generated!")
| StarcoderdataPython |
3320537 | <reponame>abartoha/python-snippets-ref
'''Shutdown Windows'''
import os

# WARNING: immediately shuts the machine down ('-s') with a zero-second
# timeout ('-t 0'). Windows-only command; use with care.
os.system('shutdown -s -t 0')
| StarcoderdataPython |
3384228 | #!/usr/bin/env python
"""
Copyright (c) 2016 <NAME>.
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"),
to deal in the Software without restriction, including without limitation the
rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
sell copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
from __future__ import print_function
import re
import imp
import os
import sys
from subprocess import Popen, PIPE
from time import strftime
from optparse import OptionParser
from random import sample
from os import listdir, makedirs
from os.path import isfile, join, dirname, exists
from lib.common import color
from lib.settings import BW
from lib.settings import ASK, PLUS, INFO, TEST, WARN, ERROR, DEBUG
from lib.logger import logger
sys.dont_write_bytecode = True  # do not litter the repository with .pyc files

# Program metadata shown in the startup header
NAME = "stringTransformer"
VERSION = "v0.1"
URL = "https://github.com/alevsk/stringTransformer/"

# Maximum length of left option column in help listing
MAX_HELP_OPTION_LENGTH = 20

# ASCII-art banner printed at startup
BANNER = """
_______
_____ | _ | ____
| | \ V / | |
| \ \ \_ _/ / / |
\ \ \ \ ' / / / /
\ \ | 'V' | / /
|\ \_____| \ / |_____/ /|
| \ | | / |
| | ,/|| ||\, | |
| `| ' || || ' |` |
| | | || || | | |
\ | | || || | | /
\. | | ||_|| | | ./
\ | | |___| | | /
\\' , _____ , '/
\/ ___ \/
/___\
"""

# Folder containing one transformation module per representation
REPRESENTATIONS_DIR = "representations"

# Location of the folder where results will be written to
OUTPUT_DIR = "output"

# Location of Git repository (used by --update)
GIT_REPOSITORY = "https://github.com/alevsk/stringTransformer.git"

# Usage examples appended verbatim to the --help epilog
EXAMPLES = """
Examples:
./stringTransformer.py -i [STRING]
./stringTransformer.py -i [STRING] --exclude "hexa, octal"
./stringTransformer.py -i [STRING] --only "hexa, octal"
./stringTransformer.py -i [STRING] --params "rot.cipher=13,rot.encoding=utf-8"
./stringTransformer.py --load list.txt
./stringTransformer.py --list
"""
# NOTE(review): intentionally shadows the builtin print() as a future hook point.
def print(*args, **kwargs):
    """
    Pass-through wrapper around the builtin print (currently no extra behavior).
    """
    # __builtins__ is the builtins module when this file runs as __main__, but a
    # plain dict when imported as a module -- this attribute access only works in
    # the script case. TODO confirm this file is never imported.
    return __builtins__.print(*args, **kwargs)
def update():
    """
    Update the program from the official git repository via ``git pull``.

    If local changes block the merge, interactively offers to discard them
    (``git clean -df`` for untracked files, ``git reset --hard`` otherwise)
    and then retries the update recursively.
    """
    print("%s Checking for updates..." % INFO)
    process = Popen("git pull %s HEAD" % GIT_REPOSITORY, shell=True,
                    stdout=PIPE, stderr=PIPE)
    stdout, stderr = process.communicate()
    success = not process.returncode
    if success:
        updated = "Already" not in stdout
        process = Popen("git rev-parse --verify HEAD", shell=True,
                        stdout=PIPE, stderr=PIPE)
        stdout, _ = process.communicate()
        # Short 7-character revision for display; "-" if it cannot be parsed.
        revision = (stdout[:7] if stdout and
                    re.search(r"(?i)[0-9a-f]{32}", stdout) else "-")
        print("%s the latest revision '%s'." %
              ("%s Already at" % INFO if not updated else
               "%s Updated to" % PLUS, revision))
    else:
        print("%s Problem occurred while updating program.\n" % WARN)
        # Detect the "your local changes would be overwritten by merge" error.
        _ = re.search(r"(?P<error>error:[^:]*files\swould\sbe\soverwritten"
                      r"\sby\smerge:(?:\n\t[^\n]+)*)", stderr)
        if _:
            def question():
                """Asks question until a valid answer of y or n is provided."""
                print("\n%s Would you like to overwrite your changes and set "
                      "your local copy to the latest commit?" % ASK)
                # BUGFIX: was `sys_stdout.write(...)` -- an undefined name that
                # raised NameError before the prompt could be shown.
                sys.stdout.write("%s ALL of your local changes will be deleted"
                                 " [Y/n]: " % WARN)
                _ = raw_input()
                if not _:
                    _ = "y"
                if _.lower() == "n":
                    exit()
                elif _.lower() == "y":
                    return
                else:
                    print("%s Did not understand your answer! Try again." %
                          ERROR)
                    question()
            print("%s" % _.group("error"))
            question()
            # Clean untracked files, or hard-reset tracked ones, as appropriate.
            if "untracked" in stderr:
                cmd = "git clean -df"
            else:
                cmd = "git reset --hard"
            process = Popen(cmd, shell=True, stdout=PIPE, stderr=PIPE)
            stdout, _ = process.communicate()
            if "HEAD is now at" in stdout:
                print("\n%s Local copy reset to current git branch." % INFO)
                # Typo fixed: "Attemping" -> "Attempting".
                print("%s Attempting to run update again..." % INFO)
            else:
                print("%s Unable to reset local copy to current git branch." %
                      WARN)
                exit()
            update()
        else:
            print("%s Please make sure that you have "
                  "a 'git' package installed." % INFO)
            print(stderr)
def list_representations(extension=False):
    """
    Return the names of the transformation modules found in the representations
    folder; keep the ".py" extension only when *extension* is True.
    """
    names = []
    for entry in listdir(REPRESENTATIONS_DIR):
        if isfile(join(REPRESENTATIONS_DIR, entry)):
            names.append(entry if extension else entry.replace(".py", ""))
    return names
def load_representations(list):
    """
    Load transformation modules dynamically and return one instance of each.
    """
    # NOTE(review): the parameter shadows the builtin `list` (and `file` below
    # shadows the Python 2 builtin); renaming could break keyword callers, so
    # it is only flagged here.
    modules = []
    for file in list:
        # Import representations/<name>.py; if it defines a class named after
        # the file, instantiate it and collect the instance.
        module = imp.load_source(file, "%s/%s.py" % (REPRESENTATIONS_DIR, file))
        if hasattr(module, file):
            rep = getattr(module, file)()
            modules.append(rep)
    return modules
def parseParams(params):
    """
    Parse comma-separated "section.option=value" items into a nested dict of
    the form {"section": {"option": "value"}}.
    """
    parameters = {}
    for item in params.split(','):
        pieces = item.split('=')
        section, option = pieces[0].split('.')[0], pieces[0].split('.')[1]
        parameters.setdefault(section, {})[option] = pieces[1]
    return parameters
def parse_args():
    """
    Parses the command line arguments.

    Returns the optparse options object; exits with an error message when none
    of the required mode arguments (-i, -l, --list, --update) is provided.
    """
    # Override epilog formatting so the EXAMPLES text is printed verbatim.
    OptionParser.format_epilog = lambda self, formatter: self.epilog
    parser = OptionParser(usage="usage: %prog -i INPUT_STRING | --input INPUT_STRING "
                                "| --load FILE",
                          epilog=EXAMPLES)
    parser.add_option("-i", "--input", dest="input",
                      help="set the input string to test with")
    parser.add_option("-l", "--load", dest="load_file",
                      help="load list of input strings (one per line)")
    parser.add_option("-x", "--exclude", dest="exclude",
                      help="exclude this representations")
    parser.add_option("-o", "--only", dest="only",
                      help="transform input only to this representations")
    parser.add_option("-O", "--output", dest="output",
                      help="generate an output file")
    parser.add_option("-p", "--params", dest="params",
                      help="use custom parameters on transformation functions")
    parser.add_option("--list", action="store_true", dest="list",
                      help="list available input representations")
    parser.add_option("--update", action="store_true", dest="update",
                      help="update from the official git repository")
    # Compact the left-hand option column of the help listing, then disable the
    # formatter hook so optparse does not recompute (and undo) the compaction.
    parser.formatter.store_option_strings(parser)
    parser.formatter.store_option_strings = lambda _: None
    for option, value in parser.formatter.option_strings.items():
        # Collapse "-i INPUT, --input=INPUT" into "-i/--input=INPUT".
        value = re.sub(r"\A(-\w+) (\w+), (--[\w-]+=(\2))\Z", r"\g<1>/\g<3>",
                       value)
        value = value.replace(", ", '/')
        if len(value) > MAX_HELP_OPTION_LENGTH:
            # Truncate overly long option strings and mark them with "..".
            value = ("%%.%ds.." % (MAX_HELP_OPTION_LENGTH -
                                   parser.formatter.indent_increment)) % value
        parser.formatter.option_strings[option] = value
    args = parser.parse_args()[0]
    if not any((args.input, args.update,
                args.list, args.load_file)):
        parser.error("Required argument is missing. Use '-h' for help.")
    return args
def main():
    """
    Initializes and executes the program: parses arguments, loads the selected
    representations, transforms every input string, and optionally writes the
    combined output to OUTPUT_DIR/<output file>.
    """
    print("%s\n\n%s %s (%s)\n" % (BANNER, NAME, VERSION, URL))
    args = parse_args()
    if args.update:
        update()
        exit()
    if args.list:
        representations = list_representations()
        for _ in representations:
            print("- %s" % _)
        print("\n")
        exit()
    inputs = []
    params = {}
    output = ""
    representations = list_representations()
    if args.only:
        # NOTE: substring matching by design ("hexa" also selects "hexadecimal").
        representations = [representation for representation in representations
                           if representation in args.only]
    elif args.exclude:
        representations = [representation for representation in representations
                           if representation not in args.exclude]
    # BUGFIX: both ternary arms previously said "representations".
    print("%s Loaded %d %s to apply." %
          (INFO, len(representations),
           "representation" if len(representations) == 1 else "representations"))
    if args.load_file:
        if not isfile(args.load_file):
            print("%s could not find the file \"%s\"" %
                  (WARN, color(args.load_file)))
            exit()
        # Read the file once (the original opened it twice and never closed it).
        with open(args.load_file, "r") as load_file:
            lines = [line.rstrip('\n') for line in load_file]
        _ = len(lines)
        if _ < 1:
            print("%s the file \"%s\" doesn't contain any valid input." %
                  (WARN, color(args.load_file)))
            exit()
        inputs += lines
        # BUGFIX: message previously read "input strings%s" ("stringss").
        print("%s Loaded %d input string%s from \"%s\".\n" %
              (INFO, _, "s" if _ != 1 else "", color(args.load_file)))
    if args.input:
        inputs.append(args.input)
    if args.params:
        params = parseParams(args.params)
    print("%s Starting tests at: \"%s\"\n" % (INFO, color(strftime("%X"), BW)))
    if not exists(OUTPUT_DIR):
        makedirs(OUTPUT_DIR)
    modules = load_representations(representations)
    for string in inputs:
        print("%s\n\n%s applying transformation...\n" % (string, INFO))
        for module in modules:
            module_name = module.__class__.__name__
            # Pass per-module parameters when provided (e.g. rot.cipher=13).
            transformation = module.transform(
                string, params.get(module_name, {})) + "\n"
            output += transformation
            print(module_name + ":\n")
            print(transformation)
            print("==================================\n")
    if args.output:
        with open(OUTPUT_DIR + '/' + args.output, 'w') as f:
            f.write(output)
if __name__ == "__main__":
    try:
        main()
    except KeyboardInterrupt:
        # Exit gracefully on Ctrl-C instead of dumping a traceback.
        print("\n%s Ctrl-C pressed." % INFO)
3368072 | <filename>src/aggs/ping.py<gh_stars>1-10
# -*- coding:utf-8 -*-
"""
ping.py
~~~~~~~~
网络延迟报警
:author: kerrygao, Fufu, 2021/6/21
"""
from . import AggsPlugin
from ..libs.metric import Metric
class Ping(AggsPlugin):
    """Ping network-latency alarm plugin."""

    name = 'ping'

    async def alarm(self, metric: Metric) -> Metric:
        """Emit alarm metrics when packet loss or latency exceed their thresholds.

        Thresholds are resolved per-tag first, then globally; the negative
        default (-0.1) disables a check, because every comparison below also
        requires the threshold itself to be >= 0.
        """
        alarm_conf = self.get_plugin_conf_value('alarm', {})
        if not alarm_conf:
            # Alarming is disabled when no 'alarm' section is configured.
            return metric
        target = metric.get('address')
        tag = metric.get('tag')
        average = self.get_plugin_conf_ab_value([f'alarm|target|{tag}|average', 'alarm|average'], -0.1)
        loss = self.get_plugin_conf_ab_value([f'alarm|target|{tag}|loss', 'alarm|loss'], -0.1)
        maximum = self.get_plugin_conf_ab_value([f'alarm|target|{tag}|maximum', 'alarm|maximum'], -0.1)
        # Default -1.1 keeps a missing measurement below any enabled threshold.
        # At most one alarm fires per metric: loss > average > maximum priority.
        # (Alarm message texts are runtime strings and are kept unchanged.)
        if metric.get('loss', -1.1) >= loss >= 0:
            self.put_alarm_metric(f'{tag} 丢包比例过高(%): {metric.get("loss")}>={loss}', more=target)
        elif metric.get('average', -1.1) >= average >= 0:
            self.put_alarm_metric(f'{tag} 平均延迟过高(ms): {metric.get("average")}>={average}', more=target)
        elif metric.get('maximum', -1.1) >= maximum >= 0:
            self.put_alarm_metric(f'{tag} 最大延迟过高(ms): {metric.get("maximum")}>={maximum}', more=target)
        return metric
| StarcoderdataPython |
193617 | import pdb
import copy
import numpy as np
from importlib_resources import files
import matplotlib.pyplot as plt
from matplotlib import font_manager
from matplotlib.figure import Figure
from .utilities import *
from .process_model import *
# Define font settings
fontsize = 12  # base font size used by every plot in this module
# Register the fonts bundled with the hes_off.core package so Matplotlib can
# resolve the 'Arial' family set below.
font_files = font_manager.findSystemFonts(fontpaths=[files('hes_off.core').joinpath("fonts")])
for font_file in font_files:
    font_manager.fontManager.addfont(font_file)
plt.rcParams['font.family'] = 'Arial'
class IntegratedModel:
    """
    The IntegratedModel object defines and stores parameters of the HES-OFF concept
    """

    # Names of the three production periods; the plot methods below draw one
    # subplot per stage in this order.
    stage_labels = ["Peak years", "Midlife years", "Tail years"]
    def __init__(self, IN):
        """Deep-copy the input dictionary *IN* and expose each setting as an
        attribute, converting to SI units (MW -> W, percentages -> fractions)."""
        # Declare input variables as instance variables
        self.IN = copy.deepcopy(IN)
        # Process specifications (demands given in MW, stored in W)
        self.HEAT_DEMAND = np.asarray(self.IN["HEAT_DEMAND"]) * 1e6
        self.POWER_DEMAND = np.asarray(self.IN["POWER_DEMAND"]) * 1e6
        self.STAGE_LENGTH = np.asarray(self.IN["STAGE_LENGTH"])
        # Gas turbine specifications (max H2 fraction given in %, stored as fraction)
        self.GT_MODEL = self.IN["GT_MODEL"]
        self.GT_UNITS = self.IN["GT_UNITS"]
        self.GT_MAX_H2 = self.IN["GT_MAX_H2"] / 100
        # Wind farm specifications
        self.WT_MODEL = self.IN["WT_MODEL"]
        self.WT_REF_HEIGHT = IN["WT_REF_HEIGHT"]
        self.WT_HUB_HEIGHT = IN["WT_HUB_HEIGHT"]
        self.WT_RATED_POWER = self.IN["WT_RATED_POWER"] * 1e6
        # Electrolizer system specifications
        self.EL_MODEL = self.IN["EL_MODEL"]
        self.EL_RATED_POWER = self.IN["EL_RATED_POWER"] * 1e6
        try:
            # Efficiency coefficients may be absent or [None]; fall back to zeros.
            self.EL_EFFICIENCY = np.asarray(self.IN["EL_EFFICIENCY"])
            if self.EL_EFFICIENCY[0] is None:
                self.EL_EFFICIENCY = np.zeros((1,))
        except:
            self.EL_EFFICIENCY = np.zeros((1,))
        # Fuel cell system specifications
        self.FC_MODEL = self.IN["FC_MODEL"]
        self.FC_RATED_POWER = self.IN["FC_RATED_POWER"] * 1e6
        try:
            self.FC_EFFICIENCY = np.asarray(self.IN["FC_EFFICIENCY"])
            if self.FC_EFFICIENCY[0] is None:
                self.FC_EFFICIENCY = np.zeros((1,))
        except:
            self.FC_EFFICIENCY = np.zeros((1,))
        # Hydrogen storage specifications (levels/thresholds given in %, stored as fractions)
        self.H2_CAPACITY = self.IN["H2_CAPACITY"]
        self.H2_INITIAL_LEVEL = self.IN["H2_INITIAL_LEVEL"] / 100
        self.H2_RECHARGE_THRESHOLD = self.IN["H2_RECHARGE_THRESHOLD"] / 100
        self.H2_COFIRE_THRESHOLD = self.IN["H2_COFIRE_THRESHOLD"] / 100
        # Wind data specifications
        self.WIND_FILENAME = self.IN["WIND_FILENAME"]
        self.WIND_DATA = read_wind_data(IN["WIND_FILENAME"])
        self.WIND_SPEED = self.WIND_DATA["speed"]
        self.WIND_TIME = self.WIND_DATA["time"]
        # Check the input variable values
        self.check_input_variables()
def __str__(self):
class_info = '\nHES-OFF concept specifications\n'
for key, value in self.IN.items():
class_info += "{:<24}{}\n".format(key, value)
return class_info
    def check_input_variables(self):
        """ Check that the values of the input variables are feasible.

        Raises a plain Exception naming the first offending variable; returns
        True when every check passes.
        """
        # The three stage arrays must describe the same number of stages.
        if not (self.HEAT_DEMAND.size == self.POWER_DEMAND.size == self.STAGE_LENGTH.size):
            raise Exception("The number of elements of POWER_DEMAND, HEAT_DEMAND and STAGE_LENGTH must be the same")
        if np.any(self.HEAT_DEMAND < 0.0):
            raise Exception("HEAT_DEMAND values must be positive")
        if np.any(self.POWER_DEMAND < 0.0):
            raise Exception("POWER_DEMAND values must be positive")
        if np.any(self.STAGE_LENGTH < 0.0):
            raise Exception("STAGE_LENGTH values must be positive")
        # Fractions must lie in [0, 1] (already divided by 100 in __init__).
        if self.GT_MAX_H2 < 0.0 or self.GT_MAX_H2 > 1.00:
            raise Exception("GT_MAX_H2 must be between zero and one")
        if self.GT_UNITS < 0 or not self.GT_UNITS.is_integer():
            raise Exception("GT_UNITS must be a positive integer")
        if self.WT_RATED_POWER < 0.0:
            raise Exception("WT_RATED_POWER must be positive or zero")
        if self.WT_HUB_HEIGHT < 0.0:
            raise Exception("WT_HUB_HEIGHT must be positive")
        if self.WT_REF_HEIGHT < 0.0:
            raise Exception("WT_REF_HEIGHT must be positive")
        if self.WT_HUB_HEIGHT < self.WT_REF_HEIGHT:
            raise Exception("WT_HUB_HEIGHT must be larger than WT_REF_HEIGHT")
        if self.EL_RATED_POWER < 0.0:
            raise Exception("EL_RATED_POWER must be positive or zero")
        if self.FC_RATED_POWER < 0.0:
            raise Exception("FC_RATED_POWER must be positive or zero")
        if self.H2_CAPACITY < 0.0:
            raise Exception("H2_CAPACITY must be positive or zero")
        if self.H2_INITIAL_LEVEL < 0.0 or self.H2_INITIAL_LEVEL > 1.00:
            raise Exception("H2_INITIAL_LEVEL must be between zero and one")
        if self.H2_RECHARGE_THRESHOLD < 0.0 or self.H2_RECHARGE_THRESHOLD > 1.00:
            raise Exception("H2_RECHARGE_THRESHOLD must be between zero and one")
        if self.H2_COFIRE_THRESHOLD < 0.0 or self.H2_COFIRE_THRESHOLD > 1.00:
            raise Exception("H2_COFIRE_THRESHOLD must be between zero and one")
        # Co-firing must start at a strictly higher storage level than recharging.
        if self.H2_COFIRE_THRESHOLD <= self.H2_RECHARGE_THRESHOLD:
            raise Exception("H2_RECHARGE_THRESHOLD must be lower than H2_COFIRE_THRESHOLD")
        return True
    def evaluate_process_model(self):
        """Run the yearly process simulation and store per-stage aggregated results.

        NOTE(review): the module-level evaluate_process_model function and the
        natural_gas / hydrogen fluids are expected to come from the star-imports
        at the top of the file -- TODO confirm.
        """
        # Evaluate the process model
        self.process_output = evaluate_process_model(self.HEAT_DEMAND, self.POWER_DEMAND,
                                                     self.GT_MODEL, self.GT_UNITS, self.GT_MAX_H2,
                                                     self.WT_MODEL, self.WT_RATED_POWER,
                                                     self.WT_REF_HEIGHT, self.WT_HUB_HEIGHT,
                                                     self.EL_MODEL, self.EL_RATED_POWER, self.EL_EFFICIENCY,
                                                     self.FC_MODEL, self.FC_RATED_POWER, self.FC_EFFICIENCY,
                                                     self.H2_CAPACITY, self.H2_INITIAL_LEVEL,
                                                     self.H2_RECHARGE_THRESHOLD, self.H2_COFIRE_THRESHOLD,
                                                     self.WIND_SPEED, self.WIND_TIME,
                                                     natural_gas, hydrogen)
        # output_dict = {"CO2_emissions":}
        # Aggregate each simulated time series into per-stage totals plus a grand total.
        self.GT_energy = self.create_entry("GT_power")
        self.WT_energy = self.create_entry("WT_power")
        self.FC_energy = self.create_entry("FC_power")
        self.EL_energy = self.create_entry("EL_power")
        self.H2_utilized = self.create_entry("H2_utilized")
        self.NG_utilized = self.create_entry("NG_utilized")
        self.CO2_emissions = self.create_entry("CO2_emissions")
        self.energy_deficit = self.create_entry("power_deficit")
        self.WT_energy_loss = self.create_entry("WT_energy_loss")
        self.WT_energy_available = self.create_entry("WT_energy_available")
    def create_entry(self, name):
        """Sum process_output[*name*] over each stage, weight by stage length,
        and append the grand total as the last element.

        The resulting units depend on the summed quantity (the original "(kg)"
        note only applies to the mass-flow entries) -- TODO confirm per entry.
        """
        value = np.sum(self.process_output[name], axis=1) * self.STAGE_LENGTH
        # Append the total over all stages so entry[-1] is the grand total.
        value = np.concatenate((value, np.sum(value)[np.newaxis]))
        return value
    def get_H2_depletion_time_GT(self):
        """Hours to empty the H2 storage when co-firing the gas turbines at
        maximum power with the maximum hydrogen fraction."""
        GT_fuel = create_fluid_mixture(fluids=(natural_gas, hydrogen), fractions=(1.00 - self.GT_MAX_H2, self.GT_MAX_H2), fraction_type="molar")
        GT_power_max = compute_GT_maximum_power(model=self.GT_MODEL, number_of_units=self.GT_UNITS)
        GT_efficiency = compute_GT_efficiency(model=self.GT_MODEL, number_of_units=self.GT_UNITS, power_output=GT_power_max)
        GT_fuel_flow = compute_GT_fuel_consumption(power_output=GT_power_max, conversion_efficiency=GT_efficiency, fuel=GT_fuel)[0]
        H2_mass_flow = GT_fuel_flow * GT_fuel["y"][-1]  # Hydrogen is the last component
        # Time to deplete the storage, converted from seconds to hours.
        depletion_time = self.H2_CAPACITY / H2_mass_flow / 3600
        return depletion_time  # Units (h)
    def get_H2_depletion_time_FC(self):
        """Hours to empty the H2 storage when running the fuel cells at rated power."""
        # Compute mass flow rate of hydrogen fed to the fuel cell system (kg/s)
        H2_mass_flow = compute_FC_hydrogen_consumption(model=self.FC_MODEL,
                                                       efficiency_coefficients=self.FC_EFFICIENCY,
                                                       rated_power=self.FC_RATED_POWER,
                                                       power_output=self.FC_RATED_POWER)[0]
        # Compute the time required to deplete the entire hydrogen storage
        depletion_time = self.H2_CAPACITY / H2_mass_flow / 3600
        return depletion_time  # Units (h)
    def get_H2_refilling_time_EL(self):
        """Hours to fill the H2 storage when running the electrolyzer at rated power."""
        # Compute mass flow rate of hydrogen produced by the electrolyzer system (kg/s)
        H2_mass_flow = compute_EL_hydrogen_production(model=self.EL_MODEL,
                                                      efficiency_coefficients=self.EL_EFFICIENCY,
                                                      rated_power=self.EL_RATED_POWER,
                                                      power_input=self.EL_RATED_POWER)[0]
        # Compute the time required to refill the entire hydrogen storage
        refill_time = self.H2_CAPACITY / H2_mass_flow / 3600
        return refill_time  # Units (h)
# ------------------------------------------------------------------------------------------------------------------ ##
# Plot results
# ------------------------------------------------------------------------------------------------------------------ ##
    def plot_wind_timeseries(self, is_sorted=False):
        """Plot the wind speed profile versus time; when *is_sorted* is True the
        samples are sorted descending (a wind duration curve). Returns the Figure."""
        # Plot the wind speed profile as a function of time
        fig = Figure(figsize=(6.0, 4.0))
        ax = fig.subplots(1)
        ax.set_xlabel('Time (hours)', fontsize=fontsize, color='k', labelpad=fontsize)
        ax.set_ylabel('Wind speed (m/s)', fontsize=fontsize, color='k', labelpad=fontsize)
        # ax.xaxis.set_major_formatter(mpl.ticker.FormatStrFormatter('%.0f'))
        # ax.yaxis.set_major_formatter(mpl.ticker.FormatStrFormatter('%.0f'))
        for t in ax.xaxis.get_major_ticks(): t.label1.set_fontsize(fontsize)
        for t in ax.yaxis.get_major_ticks(): t.label1.set_fontsize(fontsize)
        if is_sorted:
            # Descending sort turns the time series into a duration curve.
            ax.plot(self.WIND_TIME, np.sort(self.WIND_SPEED)[::-1], linewidth=0.75, linestyle='-', color='k',
                    marker=' ', markersize=4.5, markeredgewidth=1.25, markeredgecolor='k', markerfacecolor='w',
                    label=None)
        else:
            ax.plot(self.WIND_TIME, self.WIND_SPEED, linewidth=0.75, linestyle='-', color='k',
                    marker=' ', markersize=4.5, markeredgewidth=1.25, markeredgecolor='k', markerfacecolor='w',
                    label=None)
        fig.tight_layout()
        return fig
    def plot_carbon_dioxide_emissions(self):
        """ Plot the carbon dioxide emissions over time (one subplot per stage).
        Requires evaluate_process_model() to have been called. Returns the Figure. """
        fig = Figure(figsize=(6.0, 6.5))
        fig.suptitle('CO$_2$ emissions accumulated over a year (kton)', fontsize=fontsize+1, fontweight='normal', color='k')
        axes = fig.subplots(len(self.stage_labels))
        for index, ax in enumerate(axes):
            # Cumulative emissions; /1e3/1e3 scales to kton (assuming kg -- TODO confirm).
            x, y = self.process_output["times"][index, :] / 24, np.cumsum(self.process_output["CO2_emissions"][index, :]) / 1e3 / 1e3
            for t in ax.xaxis.get_major_ticks(): t.label1.set_fontsize(fontsize)
            for t in ax.yaxis.get_major_ticks(): t.label1.set_fontsize(fontsize)
            ax.plot(x, y, linewidth=0.75, linestyle='-', color='k', label="Period "+str(index+1), marker="")
            # ax.set_ylabel('CO$_2$ emissions (Mt)', fontsize=fontsize, color='k', labelpad=fontsize)
            ax.set_ylabel(self.stage_labels[index], fontsize=fontsize, color='k', labelpad=fontsize)
            if index + 1 == len(self.stage_labels):
                ax.set_xlabel('Time (days)', fontsize=fontsize, color='k', labelpad=fontsize)
            # ax.legend(ncol=1, loc='lower right', fontsize=fontsize-1, edgecolor='k', framealpha=1.0, handlelength=0.0)
            # Pad the y-limits by 20% of the maximum for readability.
            dy = np.max(y)
            ax.set_ylim([-dy/5, np.max(y)+dy/5])
        fig.tight_layout()
        return fig
    def plot_power_deficit(self):
        """ Plot the power deficit over time (one subplot per stage). Returns the Figure. """
        fig = Figure(figsize=(6.0, 6.5))
        fig.suptitle('Power deficit over a year (MW)', fontsize=fontsize + 1, fontweight='normal', color='k')
        axes = fig.subplots(len(self.stage_labels))
        for index, ax in enumerate(axes):
            # Convert times to days and deficit from W to MW.
            x, y = self.process_output["times"][index, :] / 24, self.process_output["power_deficit"][index, :] / 1e6
            for t in ax.xaxis.get_major_ticks(): t.label1.set_fontsize(fontsize)
            for t in ax.yaxis.get_major_ticks(): t.label1.set_fontsize(fontsize)
            ax.plot(x, y, linewidth=0.75, linestyle='-', color='k', label=self.stage_labels[index], marker="")
            # ax.set_ylabel('Deficit (MW)', fontsize=fontsize, color='k', labelpad=fontsize)
            ax.set_ylabel(self.stage_labels[index], fontsize=fontsize, color='k', labelpad=fontsize)
            if index + 1 == len(self.stage_labels):
                ax.set_xlabel('Time (days)', fontsize=fontsize, color='k', labelpad=fontsize)
            # ax.legend(ncol=1, loc='lower right', fontsize=fontsize - 1, edgecolor='k', framealpha=1.0, handlelength=0.0)
            # Keep at least a 1 MW margin so an all-zero deficit still renders.
            dy = max(1, np.max(y))
            ax.set_ylim([-dy / 5, np.max(y) + dy / 5])
        fig.tight_layout()
        return fig
    def plot_hydrogen_level(self):
        """ Plot hydrogen storage level over time (one subplot per stage). Returns the Figure. """
        fig = Figure(figsize=(6.0, 6.5))
        fig.suptitle('Hydrogen storage level over a year (kg)', fontsize=fontsize+1, fontweight='normal', color='k')
        axes = fig.subplots(len(self.stage_labels))
        for index, ax in enumerate(axes):
            x, y = self.process_output["times"][index, :] / 24, self.process_output["H2_level"][index, :]
            for t in ax.xaxis.get_major_ticks(): t.label1.set_fontsize(fontsize)
            for t in ax.yaxis.get_major_ticks(): t.label1.set_fontsize(fontsize)
            # Invisible handle kept so a legend (if re-enabled) shows the period label.
            ax.plot([0.0], [0.0], linestyle="", marker="", label="Period " + str(index + 1))
            ax.plot(x, y, linewidth=0.75, linestyle='-', color='k', label="", marker="")
            # ax.set_ylabel('H$_2$ level (kg)', fontsize=fontsize, color='k', labelpad=fontsize)
            ax.set_ylabel(self.stage_labels[index], fontsize=fontsize, color='k', labelpad=fontsize)
            if index + 1 == len(self.stage_labels):
                ax.set_xlabel('Time (days)', fontsize=fontsize, color='k', labelpad=fontsize)
            # ax.legend(ncol=1, loc='lower right', fontsize=fontsize-1, edgecolor='k', framealpha=1.0, handlelength=0.0)
            dy = np.max(y)
            dy = np.maximum(dy, 1.00)  # minimum margin so a flat-zero level still renders
            ax.set_ylim([-dy/5, np.max(y)+dy/5])
        fig.tight_layout()
        return fig
    def plot_power_balance(self):
        """ Plot the stacked power balance over time: GT + WT + FC production above
        zero, electrolyzer consumption below zero. Returns the Figure. """
        fig = Figure(figsize=(6.0, 6.5))
        fig.suptitle('Power balance (MW)', fontsize=fontsize + 1, fontweight='normal', color='k')
        axes = fig.subplots(len(self.stage_labels))
        for index, ax in enumerate(axes):
            x = self.process_output["times"][index, :] / 24
            # Convert each contribution from W to MW.
            y1 = self.process_output["GT_power"][index, :] / 1e6
            y2 = self.process_output["WT_power"][index, :] / 1e6
            y3 = self.process_output["FC_power"][index, :] / 1e6
            y4 = self.process_output["EL_power"][index, :] / 1e6
            y5 = y1 + y2 + y3 - y4  # net balance (currently not plotted)
            for t in ax.xaxis.get_major_ticks(): t.label1.set_fontsize(fontsize)
            for t in ax.yaxis.get_major_ticks(): t.label1.set_fontsize(fontsize)
            ax.stackplot(x, y1, y2, y3, labels=["GT", "WT", "FC"], colors=["orangered", "forestgreen", "black"])
            ax.stackplot(x, -y4, labels=["EL"], colors=["royalblue"])
            # ax.set_ylabel('Power (MW)', fontsize=fontsize, color='k', labelpad=fontsize)
            ax.set_ylabel(self.stage_labels[index], fontsize=fontsize, color='k', labelpad=fontsize)
            if index + 1 == len(self.stage_labels):
                ax.set_xlabel('Time (days)', fontsize=fontsize, color='k', labelpad=fontsize)
            ax.legend(ncol=1, bbox_to_anchor=(1.05, 1), loc='upper left', fontsize=fontsize - 1, edgecolor='k', framealpha=1.0)
            # dy = np.max(y)
            # dy = np.maximum(dy, 1.00)
            # ax.set_ylim([-dy / 5, np.max(y) + dy / 5])
            ax.set_xlim([0, x[-1]])
        fig.tight_layout()
        return fig
    def plot_hydrogen_balance(self):
        """ Plot the hydrogen balance over time: production (electrolyzer) stacked
        above zero, consumption (GT co-firing + fuel cell) below. Returns the Figure. """
        fig = Figure(figsize=(6.0, 6.5))
        fig.suptitle('Hydrogen balance over a year (kg/s)', fontsize=fontsize+1, fontweight='normal', color='k')
        axes = fig.subplots(len(self.stage_labels))
        for index, ax in enumerate(axes):
            x = self.process_output["times"][index, :] / 24
            y1 = self.process_output["H2_cofired"][index, :]   # burned in the GTs
            y2 = self.process_output["H2_utilized"][index, :]  # consumed by the FCs
            y3 = self.process_output["H2_produced"][index, :]  # produced by the EL
            for t in ax.xaxis.get_major_ticks(): t.label1.set_fontsize(fontsize)
            for t in ax.yaxis.get_major_ticks(): t.label1.set_fontsize(fontsize)
            ax.stackplot(x, y3, labels=["EL"], colors=["royalblue"])
            ax.stackplot(x, -y1, -y2, labels=["GT", "FC"], colors=["orangered", "black"])
            # ax.set_ylabel('Mass flow (kg/s)', fontsize=fontsize, color='k', labelpad=fontsize)
            ax.set_ylabel(self.stage_labels[index], fontsize=fontsize, color='k', labelpad=fontsize)
            if index + 1 == len(self.stage_labels):
                ax.set_xlabel('Time (days)', fontsize=fontsize, color='k', labelpad=fontsize)
            ax.legend(ncol=1, title="Period " + str(index + 1), bbox_to_anchor=(1.05, 1), loc='upper left', fontsize=fontsize - 1, edgecolor='k', framealpha=1.0)
            # Symmetric y-limits with a small floor so flat traces still render.
            dy = np.max([np.max(y1+y2), np.max(y3), 0.002])
            ax.set_ylim([-1*dy, 1*dy])
        fig.tight_layout()
        return fig
    def plot_flag(self):
        """ Plot the discrete process operation flag over time (one subplot per stage).
        Uses plt.figure (unlike the other plot methods, which use Figure). Returns the figure. """
        n_axes = self.process_output["times"].shape[0]
        fig = plt.figure(figsize=(6.0, 5.5))
        fig.suptitle('Process flag over a year', fontsize=fontsize+1, fontweight='normal', color='k')
        axes = fig.subplots(n_axes)
        for index, ax in enumerate(axes):
            x, y = self.process_output["times"][index, :] / 24, self.process_output["flag"][index, :]
            for t in ax.xaxis.get_major_ticks(): t.label1.set_fontsize(fontsize)
            for t in ax.yaxis.get_major_ticks(): t.label1.set_fontsize(fontsize)
            ax.plot(x, y, linewidth=0.75, linestyle='', color='k', label="Period "+str(index+1), marker="o",
                    markerfacecolor="w", markeredgecolor="k", markersize=3.0, markeredgewidth=0.75)
            ax.set_ylabel('Flag', fontsize=fontsize, color='k', labelpad=fontsize)
            if index + 1 == n_axes:
                ax.set_xlabel('Time (days)', fontsize=fontsize, color='k', labelpad=fontsize)
            ax.legend(ncol=1, loc='lower right', fontsize=fontsize-1, edgecolor='k', framealpha=1.0, handlelength=0.0)
            # Flag values appear to span 0..6 -- limits padded by half a unit. TODO confirm range.
            ax.set_ylim([-0.5, 6.5])
        fig.tight_layout()
        return fig
    def plot_sensitivity_analysis(self, variable, low_value, high_value):
        """Sweep input *variable* over [low_value, high_value] (21 points) and plot
        the resulting CO2 emissions and energy deficit.

        NOTE(review): reads EnergySystem.CO2_emissions_total and
        .energy_deficit_total, which are not set anywhere in this class --
        evaluate_process_model() defines CO2_emissions / energy_deficit arrays
        whose last element is the grand total, so these should likely be
        CO2_emissions[-1] / energy_deficit[-1]. TODO confirm. Note also that
        only the second (energy deficit) figure is returned.
        """
        # Create a copy of the input dictionary
        IN = copy.deepcopy(self.IN)
        # Compute the performance for a range of input variables
        values = np.linspace(low_value, high_value, 21)
        CO2_emissions, energy_deficit = [], []
        print("Starting", variable, "sensitivity analysis...")
        for i, value in enumerate(values):
            print_progress(i, len(values))
            IN[variable] = value
            # Re-build and re-simulate the whole model for each swept value.
            EnergySystem = IntegratedModel(IN)
            EnergySystem.evaluate_process_model()
            CO2_emissions.append(EnergySystem.CO2_emissions_total)
            energy_deficit.append(EnergySystem.energy_deficit_total)
        # Convert list to Numpy arrays
        CO2_emissions = np.asarray(CO2_emissions)
        energy_deficit = np.asarray(energy_deficit)
        # Plot the CO2 emission results
        fig = plt.figure(figsize=(6, 4))
        ax = fig.add_subplot(111)
        ax.set_xlabel(variable, fontsize=fontsize, color='k', labelpad=fontsize)
        ax.set_ylabel('CO$_2$ emissions (Mt)', fontsize=fontsize, color='k', labelpad=fontsize)
        # ax.xaxis.set_major_formatter(mpl.ticker.FormatStrFormatter('%.0f'))
        # ax.yaxis.set_major_formatter(mpl.ticker.FormatStrFormatter('%.0f'))
        for t in ax.xaxis.get_major_ticks(): t.label1.set_fontsize(fontsize)
        for t in ax.yaxis.get_major_ticks(): t.label1.set_fontsize(fontsize)
        ax.plot(values, CO2_emissions/1e6, linewidth=0.75, linestyle='-', color='k',
                marker=' ', markersize=4.5, markeredgewidth=1.25, markeredgecolor='k', markerfacecolor='w', label=None)
        fig.tight_layout()
        # Plot the energy deficit results (this figure is the one returned)
        fig = plt.figure(figsize=(6, 4))
        ax = fig.add_subplot(111)
        ax.set_xlabel(variable, fontsize=fontsize, color='k', labelpad=fontsize)
        ax.set_ylabel('Energy deficit (MWh)', fontsize=fontsize, color='k', labelpad=fontsize)
        # ax.xaxis.set_major_formatter(mpl.ticker.FormatStrFormatter('%.0f'))
        # ax.yaxis.set_major_formatter(mpl.ticker.FormatStrFormatter('%.0f'))
        for t in ax.xaxis.get_major_ticks(): t.label1.set_fontsize(fontsize)
        for t in ax.yaxis.get_major_ticks(): t.label1.set_fontsize(fontsize)
        ax.plot(values, energy_deficit, linewidth=0.75, linestyle='-', color='k',
                marker=' ', markersize=4.5, markeredgewidth=1.25, markeredgecolor='k', markerfacecolor='w', label=None)
        fig.tight_layout()
        return fig
    def plot_wt_power_available(self):
        """ Plot available wind power over time (one subplot per stage),
        downsampled to every 70th sample for readability. Returns the Figure. """
        fig = Figure(figsize=(6.0, 6.5))
        fig.suptitle('Available wind power over a year (MW)', fontsize=fontsize+1, fontweight='normal', color='k')
        axes = fig.subplots(len(self.stage_labels))
        for index, ax in enumerate(axes):
            # Times in days; available wind energy converted from W to MW.
            x, y = self.process_output["times"][index, :] / 24, self.process_output["WT_energy_available"][index, :] / 1e6
            for t in ax.xaxis.get_major_ticks(): t.label1.set_fontsize(fontsize)
            for t in ax.yaxis.get_major_ticks(): t.label1.set_fontsize(fontsize)
            # Invisible handle kept so a legend (if re-enabled) shows the period label.
            ax.plot([0.0], [0.0], linestyle="", marker="", label="Period " + str(index + 1))
            # [::70] downsamples the trace to keep the plot light.
            ax.plot(x[::70], y[::70], linewidth=0.75, linestyle='-', color='k', label="", marker="")
            # ax.set_ylabel('Power (MW)', fontsize=fontsize, color='k', labelpad=fontsize)
            ax.set_ylabel(self.stage_labels[index], fontsize=fontsize, color='k', labelpad=fontsize)
            if index + 1 == len(self.stage_labels):
                ax.set_xlabel('Time (days)', fontsize=fontsize, color='k', labelpad=fontsize)
            # ax.legend(ncol=1, loc='lower right', fontsize=fontsize-1, edgecolor='k', framealpha=1.0, handlelength=0.0)
            dy = np.max(y)
            dy = np.maximum(dy, 1.00)  # minimum margin so a flat-zero trace still renders
            ax.set_ylim([-dy/5, np.max(y)+dy/5])
        fig.tight_layout()
        return fig
| StarcoderdataPython |
3212390 | <filename>libs/approachAnalyser.py
import pandas as pd
from libs.utils import haversine, loadBook, getPerf, isaDiff, calcWindComponents
from configuration.units import runwayUnits
def findStop(flight):
    """Return the last index where indicated airspeed is still above 20 knots."""
    moving = flight[flight.IAS > 20]
    return moving.index.max()
def isStable(approach, modelConfig, approachType, runwayTrack):
    """Assess stabilised-approach criteria over the approach segment.

    Builds a DataFrame with columns [Actual, Book, Stability, Units]; each
    criterion row is marked "Stable"/"Unstable", and a final
    "Approach Stability" row summarises the overall verdict in its Actual
    column. IFR approaches are judged on localiser/glide deviation, VFR on
    track deviation from runwayTrack.
    """
    # Reference speed: 1.3 x published stall speed.
    VRef = 1.3 * float(modelConfig.loc["stallSpeed", "Value"])
    max_ias = approach.IAS.max()
    min_ias = approach.IAS.min()
    max_sink = approach.VSpd.min()

    # Third element is True when the limit is busted (i.e. unstable).
    rows = {}
    rows["Approach Max IAS"] = [int(max_ias), int(VRef + 20), max_ias > VRef + 20, "knots"]
    rows["Approach Min IAS"] = [int(min_ias), int(VRef), min_ias < VRef, "knots"]
    rows["Approach Max Sink Rate"] = [int(max_sink), -1000, max_sink < -1000, "fpm"]
    if approachType == "IFR":
        loc_dev = approach.HCDI.abs().max()
        glide_dev = approach.VCDI.abs().max()
        rows["Approach Loc deviation"] = [round(loc_dev, 1), 1, loc_dev > 1, "-"]
        rows["Approach Glide deviation"] = [round(glide_dev, 1), 1, glide_dev > 1, "-"]
    else:
        trk_dev = (approach.TRK - runwayTrack).abs().max()
        rows["Approach Track"] = [int(trk_dev), 10, trk_dev > 10, "degrees"]

    stableTable = pd.DataFrame.from_dict(
        rows, orient="index", columns=["Actual", "Book", "Stability", "Units"]
    )
    stableTable["Stability"] = stableTable["Stability"].apply(
        lambda unstable: "Unstable" if unstable else "Stable"
    )
    overall = "Stable" if (stableTable["Stability"] == "Stable").all() else "Unstable"
    stableTable.loc["Approach Stability"] = [overall, "True", "-", "-"]
    return stableTable
def calcLandingWeight(flight, modelConfig, takeoffWeight, threshold):
    """Estimate landing weight: takeoff weight minus the fuel burned.

    Fuel burned is the difference between peak tank quantities and the mean
    quantity over a +/-10-sample window around the threshold crossing,
    converted to weight via the model's fuelWeightPerUSG.
    """
    fuel_density = float(modelConfig.loc["fuelWeightPerUSG", "Value"])
    initial_fuel = flight["FQtyL"].max() + flight["FQtyR"].max()
    window = flight.loc[threshold - 10 : threshold + 10]
    fuel_at_threshold = window["FQtyL"].mean() + window["FQtyR"].mean()
    burned = initial_fuel - fuel_at_threshold
    return takeoffWeight - burned * fuel_density
def approachPerformance(flight, model, modelConfig, approachType, takeoffWeight):
    """Compare the flown approach and landing against book figures.

    Returns (approachTable, stableTable). Variance entries are percent
    deviations from the book value; stableTable comes from isStable().
    """
    stop = findStop(flight)
    landingAltitude = flight.loc[stop, "AltB"]
    # Runway track approximated by the mean track around the stop point.
    runwayTrack = flight.loc[stop - 10 : stop + 10, "TRK"].mean()
    # Threshold crossing: last sample more than 50 ft above landing altitude.
    threshold = flight[flight.AltB > (landingAltitude + 50)].index.max()
    thresholdIAS = flight.loc[threshold, "IAS"]
    # Stabilised-approach gate height: 1000 ft AGL for IFR, 500 ft for VFR.
    if approachType == "IFR":
        gateHeight = 1000
    else:
        gateHeight = 500
    gate = flight[
        flight.AltB > (landingAltitude + gateHeight)
    ].index.max()  # stabilised approach gate
    stableTable = isStable(
        flight.loc[gate:threshold], modelConfig, approachType, runwayTrack
    )
    thresholdIASBook = float(modelConfig.loc["thresholdIAS", "Value"])
    approachTable = pd.DataFrame(columns=["Actual", "Book", "Variance", "Units"])
    approachTable.loc["Approach IAS over threshold"] = [
        int(thresholdIAS),
        int(thresholdIASBook),
        round(100 * (thresholdIAS / thresholdIASBook - 1)),
        "knots",
    ]
    # Great-circle ground distance from threshold crossing to full stop.
    landingDistance = haversine(
        flight.loc[threshold, "Longitude"],
        flight.loc[threshold, "Latitude"],
        flight.loc[stop, "Longitude"],
        flight.loc[stop, "Latitude"],
        runwayUnits,
    )
    landingDistanceBook = loadBook("landing", model)
    # Conditions at the threshold used to look up the book landing distance.
    tempVISA = isaDiff(flight.loc[threshold, "OAT"], flight.loc[threshold, "AltPress"])
    windSpeed = flight.loc[threshold, "WndSpd"]
    windDirection = flight.loc[threshold, "WndDr"]
    headwind, crosswind = calcWindComponents(windSpeed, windDirection, runwayTrack)
    landingWeight = round(
        calcLandingWeight(flight, modelConfig, takeoffWeight, threshold)
    )
    landingWeightBook = float(modelConfig.loc["maxLandingWeight", "Value"])
    bookLandingDistance = getPerf(
        landingDistanceBook,
        [tempVISA, flight.loc[threshold, "AltPress"], landingWeight, headwind],
        runwayUnits,
    )
    approachTable.loc["Approach Landing Distance"] = [
        int(landingDistance),
        int(bookLandingDistance),
        round(100 * (landingDistance / bookLandingDistance - 1)),
        runwayUnits,
    ]
    # Headwind has no book limit, hence the "-" placeholders.
    approachTable.loc["Approach Headwind"] = [int(headwind), "-", "-", "knots"]
    bookCrosswind = float(modelConfig.loc["maxCrosswind", "Value"])
    approachTable.loc["Approach Crosswind"] = [
        int(abs(crosswind)),
        int(bookCrosswind),
        round(100 * (abs(crosswind) / bookCrosswind - 1)),
        "knots",
    ]
    approachTable.loc["Approach Landing Weight"] = [
        int(landingWeight),
        int(landingWeightBook),
        round(100 * (landingWeight / landingWeightBook - 1)),
        "lbs",
    ]
    return approachTable, stableTable
| StarcoderdataPython |
3298768 | <reponame>hariom95/cosine_similarity_tfidf_nltk
import nltk
import string
# used for looping through folders/files
from os import listdir
from os.path import isfile, join
#Calc tfidf and cosine similarity
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.metrics.pairwise import cosine_similarity
# All text entries to compare will appear here
# NOTE(review): main() hard-codes the same "./inputdata/" literal instead of
# using this constant -- confirm they are meant to stay in sync.
BASE_INPUT_DIR = "./inputdata/"
def returnListOfFilePaths(folderPath):
    """Return [fileNames, filePaths] for the regular files directly inside folderPath.

    The two lists are index-aligned. Fix over the original: the directory was
    listed twice and each entry stat-ed twice; now a single pass collects both
    lists, which also guarantees they correspond even if the directory changes
    between calls.
    """
    listOfFileNames = []
    listOfFilePaths = []
    for fileName in listdir(folderPath):
        filePath = join(folderPath, fileName)
        if isfile(filePath):
            listOfFileNames.append(fileName)
            listOfFilePaths.append(filePath)
    return [listOfFileNames, listOfFilePaths]
def create_docContentDict(filePaths):
    """Read every file in filePaths and map its path to its full text content."""
    contents = {}
    for path in filePaths:
        with open(path, "r") as handle:
            contents[path] = handle.read()
    return contents
def tokenizeContent(contentsRaw):
    """Split raw document text into word tokens via NLTK's default tokenizer."""
    return nltk.tokenize.word_tokenize(contentsRaw)
def removeStopWordsFromTokenized(contentsTokenized):
    """Drop NLTK English stop words from a token list, preserving order."""
    stop_words = set(nltk.corpus.stopwords.words("english"))
    return [token for token in contentsTokenized if token not in stop_words]
def performPorterStemmingOnContents(contentsTokenized):
    """Reduce each token to its Porter stem."""
    stemmer = nltk.stem.PorterStemmer()
    return [stemmer.stem(token) for token in contentsTokenized]
def removePunctuationFromTokenized(contentsTokenized):
    """Remove punctuation tokens from a token list, preserving order.

    string.punctuation covers single characters; the NLTK tokenizer also
    emits a few multi-character punctuation tokens, so those are excluded
    explicitly as well.
    """
    excluded = set(string.punctuation) | {"''", "--", "``"}
    return [token for token in contentsTokenized if token not in excluded]
def convertItemsToLower(contentsRaw):
    """Return a new list with every term lower-cased."""
    return [item.lower() for item in contentsRaw]
# process data without writing inspection file information to file
def processData(rawContents):
    """Full cleaning pipeline: tokenize, drop stop words, stem, strip
    punctuation, then lower-case."""
    pipeline = (
        tokenizeContent,
        removeStopWordsFromTokenized,
        performPorterStemmingOnContents,
        removePunctuationFromTokenized,
        convertItemsToLower,
    )
    cleaned = rawContents
    for step in pipeline:
        cleaned = step(cleaned)
    return cleaned
# print TFIDF values in 'table' format
def print_TFIDF_for_all(term, values, fileNames):
    """Print a term-by-document TFIDF table to stdout.

    term: feature names; values: TFIDF matrix (docs x terms); fileNames:
    document labels, one per column of the printed table.
    """
    values = values.transpose() # files along 'x-axis', terms along 'y-axis'
    numValues = len(values[0])
    print(' ', end="") #bank space for formatting output
    for n in range(len(fileNames)):
        print('{0:18}'.format(fileNames[n]), end="") #file names
    print()
    for i in range(len(term)):
        print('{0:8}'.format(term[i]), end='\t| ') #the term
        for j in range(numValues):
            print('{0:.12f}'.format(values[i][j]), end=' ') #the value, corresponding to the file name, for the term
        print()
# write TFIDF values in 'table' format
def write_TFIDF_for_all(term, values, fileNames, filePath="../results/tfid.txt"):
    """Append a term-by-document TFIDF table to filePath.

    term: feature names; values: TFIDF matrix (docs x terms); fileNames:
    document labels. Fixes over the original: the file is managed by a
    ``with`` block so the handle is closed even on error, and the output
    path is a keyword parameter (defaulting to the original hard-coded
    location) so callers/tests can redirect it.
    """
    with open(filePath, 'a') as outFile:
        outFile.write("TFIDF\n")
        values = values.transpose() # files along 'x-axis', terms along 'y-axis'
        numValues = len(values[0])
        outFile.write(' \t') #bank space for formatting output
        for fileName in fileNames:
            outFile.write('{0:18}'.format(fileName)) #file names
        outFile.write("\n")
        for i in range(len(term)):
            outFile.write('{0:15}'.format(term[i])) #the term
            outFile.write('\t| ')
            for j in range(numValues):
                outFile.write('{0:.12f}'.format(values[i][j])) #the value, corresponding to the file name, for the term
                outFile.write(' ')
            outFile.write("\n")
# TODO: modify this to build matrix then print from matrix form
def calc_and_print_CosineSimilarity_for_all(tfs, fileNames):
    """Print the pairwise cosine-similarity matrix of the TFIDF rows.

    tfs: TFIDF matrix (one row per document); fileNames: row/column labels.
    Note: similarities are recomputed per cell rather than via one matrix
    call (see TODO above), and `names` accumulates labels without being used.
    """
    #print(cosine_similarity(tfs[0], tfs[1]))
    print("\n\n\n========COSINE SIMILARITY====================================================================\n")
    numFiles = len(fileNames)
    names = []
    print(' ', end="") #formatting
    for i in range(numFiles):
        if i == 0:
            # Header row of file names, emitted once before the first data row.
            for k in range(numFiles):
                print(fileNames[k], end=' ')
            print()
        print(fileNames[i], end=' ')
        for n in range(numFiles):
            #print(fileNames[n], end='\t')
            matrixValue = cosine_similarity(tfs[i], tfs[n])
            numValue = matrixValue[0][0]
            #print(numValue, end='\t')
            names.append(fileNames[n])
            print(" {0:.8f}".format(numValue), end=' ')
            #(cosine_similarity(tfs[i], tfs[n]))[0][0]
        print()
    print("\n\n=============================================================================================\n")
def calc_and_write_CosineSimilarity_for_all(tfs, fileNames):
    """Append the pairwise cosine-similarity matrix to ../results/cosine_similarity.txt.

    Same table layout as the print variant.
    NOTE(review): the output directory is a hard-coded relative path and the
    file is opened in append mode -- the call fails if ../results does not
    exist; confirm the expected working directory.
    """
    filePath = "../results/cosine_similarity.txt"
    outFile = open(filePath, 'a')
    title = "COSINE SIMILARITY\n"
    outFile.write(title)
    numFiles = len(fileNames)
    names = []
    outFile.write(' ')
    for i in range(numFiles):
        if i == 0:
            # Header row of file names, written once before the first data row.
            for k in range(numFiles):
                outFile.write(fileNames[k])
                outFile.write(' ')
            outFile.write("\n")
        outFile.write(fileNames[i])
        outFile.write(' ')
        for n in range(numFiles):
            matrixValue = cosine_similarity(tfs[i], tfs[n])
            numValue = matrixValue[0][0]
            names.append(fileNames[n])
            outFile.write('{0:.8f}'.format(numValue))
            outFile.write(' ')
            #(cosine_similarity(tfs[i], tfs[n]))[0][0]
        outFile.write("\n")
    outFile.close()
def main(printResults=True):
    """Compute TFIDF and cosine similarity for every file under the input dir.

    printResults=True prints the tables to stdout; False appends them to the
    ../results files instead. Consistency fix: the input folder now comes
    from the module constant BASE_INPUT_DIR instead of a duplicated
    "./inputdata/" literal (same value, single source of truth).
    """
    baseFolderPath = BASE_INPUT_DIR
    fileNames, filePathList = returnListOfFilePaths(baseFolderPath)
    rawContentDict = create_docContentDict(filePathList)
    # calculate tfidf
    tfidf = TfidfVectorizer(tokenizer=processData, stop_words='english')
    tfs = tfidf.fit_transform(rawContentDict.values())
    tfs_Values = tfs.toarray()
    tfs_Term = tfidf.get_feature_names()
    if printResults:
        # print results
        print_TFIDF_for_all(tfs_Term, tfs_Values, fileNames)
        calc_and_print_CosineSimilarity_for_all(tfs, fileNames)
    else:
        # write results to file
        write_TFIDF_for_all(tfs_Term, tfs_Values, fileNames)
        calc_and_write_CosineSimilarity_for_all(tfs, fileNames)
# Script entry point: prints the tables; call main(printResults=False) to
# write them to the ../results files instead.
if __name__ == "__main__":
    main()
191009 | # Generated by Django 3.1.3 on 2021-02-09 11:15
import django.core.validators
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated Django migration (3.1.3).

    Fixes the plural name of Community and widens the ASN/Community `number`
    fields to the full public 32-bit range (1..4294967294), nullable/blank.
    """

    dependencies = [
        ('netbox_bgp', '0001_initial'),
    ]

    operations = [
        migrations.AlterModelOptions(
            name='community',
            options={'verbose_name_plural': 'Communities'},
        ),
        migrations.AlterField(
            model_name='asn',
            name='number',
            field=models.PositiveIntegerField(blank=True, null=True, validators=[django.core.validators.MinValueValidator(1), django.core.validators.MaxValueValidator(4294967294)]),
        ),
        migrations.AlterField(
            model_name='community',
            name='number',
            field=models.PositiveIntegerField(blank=True, null=True, validators=[django.core.validators.MinValueValidator(1), django.core.validators.MaxValueValidator(4294967294)]),
        ),
    ]
| StarcoderdataPython |
3322037 | from django.contrib import admin
from .models import Portfolio, Contact, ProjectTech
class PortfolioAdmin(admin.ModelAdmin):
    """Admin options for Portfolio: auto-fill the slug from the title field."""
    prepopulated_fields = {"slug": ["title",]}
# Register the models with the Django admin; Portfolio uses the custom
# options class above, the other two use the default ModelAdmin.
admin.site.register(Portfolio, PortfolioAdmin)
admin.site.register(ProjectTech)
admin.site.register(Contact)
| StarcoderdataPython |
179735 | <reponame>dxwayne/SAS_3DSpectrograps
# NOTE(review): this looks like it was meant to be `__all__` (the public
# export list); binding a module-level name `__init__` has no special
# meaning to Python -- confirm before renaming in case something reads it.
__init__ = ["SpectrographUI_savejsondict","SpectrographUI_loadjsondict"]
def SpectrographUI_savejsondict(self,jdict):
    '''Autogenerated code from ui's make/awk trick.

    Snapshot every UI widget's state into jdict (checkboxes as bools, text
    fields as strings, sliders/spinboxes as numbers, the log as plain text)
    so the dict can be serialised to JSON and later restored by
    SpectrographUI_loadjsondict. Keys must match that loader exactly.
    '''
    # Toggle switches (bool).
    jdict['compLampSwitch'] = self.compLampSwitch.isChecked()
    jdict['flatLampSwitch'] = self.flatLampSwitch.isChecked()
    jdict['heater1Switch'] = self.heater1Switch.isChecked()
    jdict['heater2Switch'] = self.heater2Switch.isChecked()
    jdict['slitLampSwitch'] = self.slitLampSwitch.isChecked()
    # Text fields (str).
    jdict['DecPosition'] = self.DecPosition.text()
    jdict['PAPosition'] = self.PAPosition.text()
    jdict['RAPosition'] = self.RAPosition.text()
    jdict['airmassValue'] = self.airmassValue.text()
    jdict['focusAbsolute'] = self.focusAbsolute.text()
    jdict['focusOffset'] = self.focusOffset.text()
    jdict['gratingAbsolute'] = self.gratingAbsolute.text()
    jdict['gratingOffset'] = self.gratingOffset.text()
    jdict['heater1Delta'] = self.heater1Delta.text()
    jdict['heater1SetPoint'] = self.heater1SetPoint.text()
    jdict['heater2Delta'] = self.heater2Delta.text()
    jdict['heater2SetPoint'] = self.heater2SetPoint.text()
    jdict['rotatorAbsolute'] = self.rotatorAbsolute.text()
    jdict['rotatorOffset'] = self.rotatorOffset.text()
    jdict['rotatorPosition'] = self.rotatorPosition.text()
    jdict['temp1Value'] = self.temp1Value.text()
    jdict['temp2Value'] = self.temp2Value.text()
    jdict['temp3Value'] = self.temp3Value.text()
    jdict['temp4Value'] = self.temp4Value.text()
    jdict['temp5Value'] = self.temp5Value.text()
    jdict['temp6Value'] = self.temp6Value.text()
    jdict['temp7Value'] = self.temp7Value.text()
    # Log contents.
    jdict['spectroLog'] = self.spectroLog.toPlainText()
    # NOTE(review): TickPosition() returns the slider's tick-mark policy,
    # while the loader calls setValue() -- these look mismatched; confirm
    # whether value() was intended here.
    jdict['horizontalSlider'] = self.horizontalSlider.TickPosition()
    # Spinbox/dial values (numeric).
    jdict['focusSteps'] = self.focusSteps.value()
    jdict['gratingSteps'] = self.gratingSteps.value()
    jdict['rotatorAngle'] = self.rotatorAngle.value()
    jdict['rotatorSteps'] = self.rotatorSteps.value()
# def SpectrographUI_savejsondict
def SpectrographUI_loadjsondict(self,jdict):
    '''Autogenerated code from ui's make/awk trick.

    Restore every UI widget's state from jdict, the inverse of
    SpectrographUI_savejsondict. Raises KeyError if any expected key is
    missing from the dict.
    '''
    # Toggle switches.
    self.compLampSwitch.setChecked(jdict['compLampSwitch'])
    self.flatLampSwitch.setChecked(jdict['flatLampSwitch'])
    self.heater1Switch.setChecked(jdict['heater1Switch'])
    self.heater2Switch.setChecked(jdict['heater2Switch'])
    self.slitLampSwitch.setChecked(jdict['slitLampSwitch'])
    # Text fields.
    self.DecPosition.setText(jdict['DecPosition'])
    self.PAPosition.setText(jdict['PAPosition'])
    self.RAPosition.setText(jdict['RAPosition'])
    self.airmassValue.setText(jdict['airmassValue'])
    self.focusAbsolute.setText(jdict['focusAbsolute'])
    self.focusOffset.setText(jdict['focusOffset'])
    self.gratingAbsolute.setText(jdict['gratingAbsolute'])
    self.gratingOffset.setText(jdict['gratingOffset'])
    self.heater1Delta.setText(jdict['heater1Delta'])
    self.heater1SetPoint.setText(jdict['heater1SetPoint'])
    self.heater2Delta.setText(jdict['heater2Delta'])
    self.heater2SetPoint.setText(jdict['heater2SetPoint'])
    self.rotatorAbsolute.setText(jdict['rotatorAbsolute'])
    self.rotatorOffset.setText(jdict['rotatorOffset'])
    self.rotatorPosition.setText(jdict['rotatorPosition'])
    self.temp1Value.setText(jdict['temp1Value'])
    self.temp2Value.setText(jdict['temp2Value'])
    self.temp3Value.setText(jdict['temp3Value'])
    self.temp4Value.setText(jdict['temp4Value'])
    self.temp5Value.setText(jdict['temp5Value'])
    self.temp6Value.setText(jdict['temp6Value'])
    self.temp7Value.setText(jdict['temp7Value'])
    # Appends to the log widget rather than replacing its contents.
    self.spectroLog.insertPlainText(jdict['spectroLog'])
    # NOTE(review): the saver stored TickPosition() for this key, not a
    # value -- confirm the intended round-trip semantics.
    self.horizontalSlider.setValue(jdict['horizontalSlider'])
    # Spinbox/dial values.
    self.focusSteps.setValue(jdict['focusSteps'])
    self.gratingSteps.setValue(jdict['gratingSteps'])
    self.rotatorAngle.setValue(jdict['rotatorAngle'])
    self.rotatorSteps.setValue(jdict['rotatorSteps'])
# def SpectrographUI_loadjsondict
| StarcoderdataPython |
89191 | import logging
import pytest
from pyspark.sql import SparkSession
from pyspark.context import SparkContext
from pyspark.conf import SparkConf
def quiet_py4j():
    """Suppress spark logging for the test context."""
    logging.getLogger('py4j').setLevel(logging.WARN)
@pytest.fixture(scope="session")
def spark_session(request):
    """Fixture for creating a spark context.

    Session-scoped local[2] SparkSession with Hive support and the
    spark-avro package; the session is stopped when the pytest session
    ends, and py4j logging is silenced.
    """
    spark = (SparkSession
             .builder
             .master('local[2]')
             .config('spark.jars.packages', 'com.databricks:spark-avro_2.11:3.0.1')
             .appName('pytest-pyspark-local-testing')
             .enableHiveSupport()
             .getOrCreate())
    # Stop the session when the pytest session finishes.
    request.addfinalizer(lambda: spark.stop())
    quiet_py4j()
    return spark
@pytest.fixture(scope="session")
def spark_context(request):
    """ fixture for creating a spark context
    Args:
        request: pytest.FixtureRequest object

    Session-scoped local[2] SparkContext; stopped at session teardown,
    with py4j logging silenced.
    """
    conf = (SparkConf().setMaster("local[2]").setAppName("pytest-pyspark-local-testing"))
    sc = SparkContext(conf=conf)
    request.addfinalizer(lambda: sc.stop())
    quiet_py4j()
    return sc
| StarcoderdataPython |
3244814 | <filename>clubsuite/suite/tests/test_mdl_user.py<gh_stars>10-100
from django.test import TestCase
from suite.models import UserManager
from django.contrib.auth import get_user_model
from django.contrib.auth.hashers import check_password
from suite.models import Club
class UserTestCase(TestCase):
    """Unit tests for the custom user model and its club-membership helpers.

    BUG FIX: the original class defined two methods both named
    `test_get_clubs`; the second definition silently shadowed the first, so
    the member-membership test never ran. The shadowed method is renamed
    `test_member_get_clubs`; the surviving one keeps its original name.
    """

    def setUp(self):
        """Create a plain user, a public club, and a second user who owns it."""
        # create user
        self.user = get_user_model().objects.create(first_name="First", last_name="Last", email="<EMAIL>")
        self.user.set_password("<PASSWORD>")
        self.user.save()
        # create club
        self.club = Club.objects.create(club_name="club1", club_type="PUB", club_description="a club")
        self.club._create_permissions()
        self.owner = get_user_model().objects.create(first_name="Officer", last_name="Last", email="<EMAIL>")
        self.owner.set_password("<PASSWORD>")
        self.owner.save()
        self.club._set_owner(self.owner)

    def test_user(self):
        """Basic field and accessor checks on the user model."""
        self.assertEqual(self.user.email, "<EMAIL>")
        self.assertEqual(self.user.get_short_name(), "First")
        self.assertEqual(self.user.get_full_name(), "First Last")
        self.assertEqual(self.user.last_name, "Last")
        self.assertTrue(check_password("<PASSWORD>", self.user.password))

    def test_member_get_clubs(self):
        """A user added to two clubs sees both, in creation order."""
        # create club2
        club2 = Club.objects.create(club_name="club2", club_type="PUB", club_description="a club")
        club2._create_permissions()
        club2._set_owner(self.owner)
        # add user to both clubs
        self.club.add_member(self.owner, self.user)
        club2.add_member(self.owner, self.user)
        # test get clubs
        club_list = self.user.get_clubs()
        self.assertEqual(club_list[0], self.club)
        self.assertEqual(club_list[1], club2)

    def test_get_clubs(self):
        """The owner of two clubs gets both back from get_clubs()."""
        # create club
        club2 = Club.objects.create(club_name="club2", club_type="PUB", club_description="something")
        club2._create_permissions()
        club2._set_owner(self.owner)
        self.assertEqual(len(self.owner.get_clubs()), 2)
| StarcoderdataPython |
35590 | <filename>src/py_call.py<gh_stars>0
#/bin/python3
import os
import subprocess
# Const:
with open("config",'r') as conf:
VENV_A = conf.read()
PYTHON="python"
PYTHON3_VENV_A = os.path.join(VENV_A, "bin", "python3")
PIP=""
PIP_VENV_A= os.path.join(VENV_A, "bin", "pip3")
# Functions:
def python_call(argv):
    """Run the system interpreter with `argv` as its single argument and wait."""
    cmd = [PYTHON, argv]
    subprocess.call(cmd)
def python_vcall(argv):
    """Run virtualenv A's python3 with `argv`, capturing (and discarding) stdout."""
    subprocess.check_output((PYTHON3_VENV_A, argv))
def pip_vinstall(argv):
    """Run virtualenv A's pip3 with `argv`, capturing stdout.

    Despite the name, no "install" subcommand is added here -- the caller
    presumably passes it inside argv (confirm against call sites).
    """
    subprocess.check_output((PIP_VENV_A, argv))
| StarcoderdataPython |
1768483 | <gh_stars>0
from typing import Any, Callable, List, Mapping, Optional
from snorkel.preprocess import BasePreprocessor
from snorkel.types import DataPoint
class LabelingFunction:
    """Base class for labeling functions.

    A labeling function (LF) maps a data point to an integer class label,
    or to ``-1`` to abstain. This wrapper adds optional preprocessing,
    keyword resources forwarded to the wrapped function, and an optional
    fault-tolerant mode that converts exceptions into abstentions.

    Parameters
    ----------
    name
        Name of the LF
    f
        Function that implements the core LF logic
    resources
        Labeling resources passed in to ``f`` via ``kwargs``
    pre
        Preprocessors to run on data points before LF execution
    fault_tolerant
        Output ``-1`` if LF execution fails?

    Raises
    ------
    ValueError
        Calling incorrectly defined preprocessors

    Attributes
    ----------
    name
        See above
    fault_tolerant
        See above
    """

    def __init__(
        self,
        name: str,
        f: Callable[..., int],
        resources: Optional[Mapping[str, Any]] = None,
        pre: Optional[List[BasePreprocessor]] = None,
        fault_tolerant: bool = False,
    ) -> None:
        self.name = name
        self.fault_tolerant = fault_tolerant
        self._f = f
        self._resources = resources if resources else {}
        self._pre = pre if pre else []

    def _preprocess_data_point(self, x: DataPoint) -> DataPoint:
        # Thread x through each preprocessor in order; a None result is a
        # contract violation on the preprocessor's part.
        for pp in self._pre:
            x = pp(x)
            if x is None:
                raise ValueError("Preprocessor should not return None")
        return x

    def __call__(self, x: DataPoint) -> int:
        """Label data point.

        Runs all preprocessors, then passes the result to the wrapped
        function. In fault-tolerant mode any exception raised by the
        function yields an abstention (``-1``).

        Parameters
        ----------
        x
            Data point to label

        Returns
        -------
        int
            Label for data point
        """
        x = self._preprocess_data_point(x)
        if not self.fault_tolerant:
            return self._f(x, **self._resources)
        try:
            return self._f(x, **self._resources)
        except Exception:
            return -1

    def __repr__(self) -> str:
        return f"{type(self).__name__} {self.name}, Preprocessors: {self._pre}"
class labeling_function:
    """Decorator to define a LabelingFunction object from a function.

    Parameters
    ----------
    name
        Name of the LF
    resources
        Labeling resources passed in to ``f`` via ``kwargs``
    pre
        Preprocessors to run on data points before LF execution
    fault_tolerant
        Output ``-1`` if LF execution fails?

    Examples
    --------
    >>> @labeling_function()
    ... def f(x):
    ...     return 0 if x.a > 42 else -1
    >>> f
    LabelingFunction f, Preprocessors: []
    >>> from types import SimpleNamespace
    >>> x = SimpleNamespace(a=90, b=12)
    >>> f(x)
    0

    >>> @labeling_function(name="my_lf")
    ... def g(x):
    ...     return 0 if x.a > 42 else -1
    >>> g
    LabelingFunction my_lf, Preprocessors: []
    """

    def __init__(
        self,
        name: Optional[str] = None,
        resources: Optional[Mapping[str, Any]] = None,
        pre: Optional[List[BasePreprocessor]] = None,
        fault_tolerant: bool = False,
    ) -> None:
        # A callable here means the decorator was applied without (),
        # e.g. ``@labeling_function`` instead of ``@labeling_function()``.
        if callable(name):
            raise ValueError("Looks like this decorator is missing parentheses!")
        self.name = name
        self.resources = resources
        self.pre = pre
        self.fault_tolerant = fault_tolerant

    def __call__(self, f: Callable[..., int]) -> LabelingFunction:
        """Wrap a function to create a ``LabelingFunction``.

        Parameters
        ----------
        f
            Function that implements the core LF logic

        Returns
        -------
        LabelingFunction
            New ``LabelingFunction`` executing logic in wrapped function
        """
        return LabelingFunction(
            name=self.name or f.__name__,
            f=f,
            resources=self.resources,
            pre=self.pre,
            fault_tolerant=self.fault_tolerant,
        )
| StarcoderdataPython |
4800324 | <reponame>hesslink111/neuralnetworknibbles<filename>util/xorshift.py
class XorShift:
    """Deterministic pseudo-random generator based on xorshift+ (31-bit state).

    Every instance starts from the same hard-coded state, so two instances
    always replay the identical sequence -- useful for reproducing the same
    games. Based on xorshift+ from https://en.wikipedia.org/wiki/Xorshift
    """

    def __init__(self):
        # Fixed two-word seed state; both words fit in 31 bits.
        self.state = [1921003817, 838877476]

    def random(self):
        """Advance the state and return the next pseudo-random integer."""
        x, y = self.state
        x = x ^ ((x << 23) & 0x7FFFFFFF)
        nxt = x ^ y ^ (x >> 17) ^ (y >> 26)
        self.state = [y, nxt]
        # Kept exactly as the original expression parsed: the mask binds to
        # y only, so the sum itself is returned unmasked.
        return nxt + (y & 0x7FFFFFFF)

    def randrange(self, start, stop):
        """Return a pseudo-random integer in [start, stop) (modulo-biased)."""
        span = stop - start
        return start + self.random() % span
| StarcoderdataPython |
147458 | <filename>output/models/nist_data/atomic/nmtoken/schema_instance/nistschema_sv_iv_atomic_nmtoken_enumeration_4_xsd/__init__.py
from output.models.nist_data.atomic.nmtoken.schema_instance.nistschema_sv_iv_atomic_nmtoken_enumeration_4_xsd.nistschema_sv_iv_atomic_nmtoken_enumeration_4 import (
NistschemaSvIvAtomicNmtokenEnumeration4,
NistschemaSvIvAtomicNmtokenEnumeration4Type,
)
# Public API of this generated-schema package: re-export the two NIST
# NMTOKEN-enumeration classes imported above.
__all__ = [
    "NistschemaSvIvAtomicNmtokenEnumeration4",
    "NistschemaSvIvAtomicNmtokenEnumeration4Type",
]
| StarcoderdataPython |
4819986 | <reponame>katajakasa/aetherguild4
from django.apps import AppConfig
from django.db.models.signals import post_save
class MainsiteConfig(AppConfig):
    """App config for aether.main_site; wires model signals at startup."""
    # Dotted path of the Django app this configuration applies to.
    name = 'aether.main_site'

    def ready(self):
        """Connect post_save postprocessing for NewsItem.

        Imports are local because models/signals cannot safely be imported
        before the Django app registry is fully loaded.
        """
        from .signals import postprocess_newsitem
        from .models import NewsItem
        post_save.connect(postprocess_newsitem, sender=NewsItem)
| StarcoderdataPython |
1724027 | from covid.utils.viz import plotly_wordcloud
| StarcoderdataPython |
3349112 | <filename>Calculator/Multiplication.py<gh_stars>0
def multiplication(a, b):
    """Return the product of a and b after coercing both to float."""
    return float(a) * float(b)
3246752 | <filename>activitysim/abm/test/test_misc/setup_utils.py
# ActivitySim
# See full license in LICENSE.txt.
import os
import logging
import pkg_resources
import openmatrix as omx
import numpy as np
import numpy.testing as npt
import pandas as pd
import pandas.testing as pdt
import pytest
import yaml
from activitysim.core import random
from activitysim.core import tracing
from activitysim.core import pipeline
from activitysim.core import inject
from activitysim.core import config
# set the max households for all tests (this is to limit memory use on travis)
HOUSEHOLDS_SAMPLE_SIZE = 50
HOUSEHOLDS_SAMPLE_RATE = 0.01  # HOUSEHOLDS_SAMPLE_RATE / 5000 households

# household with mandatory, non mandatory, atwork_subtours, and joint tours
HH_ID = 257341

# Other candidate household ids with the same tour mix:
# [ 257341 1234246 1402915 1511245 1931827 1931908 2307195 2366390 2408855
#  2518594 2549865  982981 1594365 1057690 1234121 2098971]

# SKIP_FULL_RUN = True
SKIP_FULL_RUN = False
def example_path(dirname):
    """Absolute path of examples/example_mtc/<dirname> inside the installed activitysim package."""
    rel = os.path.join('examples', 'example_mtc', dirname)
    return pkg_resources.resource_filename('activitysim', rel)
def setup_dirs(ancillary_configs_dir=None, data_dir=None):
    """Inject configs/output/data directories and reset tracing state.

    ancillary_configs_dir, when given, is prepended so its settings take
    precedence over the test and example configs. data_dir defaults to the
    bundled example data. Also clears the injection cache, configures
    logging, and deletes stale output files from previous runs.
    """
    # ancillary_configs_dir is used by run_mp to test multiprocess
    test_pipeline_configs_dir = os.path.join(os.path.dirname(__file__), 'configs')
    example_configs_dir = example_path('configs')
    configs_dir = [test_pipeline_configs_dir, example_configs_dir]
    if ancillary_configs_dir is not None:
        # Prepended: earlier entries win when settings collide.
        configs_dir = [ancillary_configs_dir] + configs_dir
    inject.add_injectable('configs_dir', configs_dir)
    output_dir = os.path.join(os.path.dirname(__file__), 'output')
    inject.add_injectable('output_dir', output_dir)
    if not data_dir:
        data_dir = example_path('data')
    inject.add_injectable('data_dir', data_dir)
    inject.clear_cache()
    tracing.config_logger()
    # Remove leftover outputs so each test starts clean.
    tracing.delete_output_files('csv')
    tracing.delete_output_files('txt')
    tracing.delete_output_files('yaml')
    tracing.delete_output_files('omx')
def teardown_function(func):
    """Pytest hook: reset the injection cache after every test function."""
    inject.clear_cache()
    inject.reinject_decorated_tables()
def close_handlers():
    """Detach handlers from every registered logger and reset level/propagation."""
    for logger_name in logging.Logger.manager.loggerDict:
        logger = logging.getLogger(logger_name)
        logger.handlers = []
        logger.propagate = True
        logger.setLevel(logging.NOTSET)
def inject_settings(**kwargs):
    """Read settings.yaml, override entries with kwargs, inject, and return the dict."""
    settings = config.read_settings_file('settings.yaml', mandatory=True)
    for k in kwargs:
        settings[k] = kwargs[k]
    inject.add_injectable("settings", settings)
    return settings
| StarcoderdataPython |
1688989 | <reponame>Chikiyaflix/server<filename>src/routes/streammap.py<gh_stars>0
import os
import urllib
import flask
import requests
import src.functions.config
# Blueprint exposing the stream-map endpoint; registered by the app factory.
streammapBP = flask.Blueprint("streammap", __name__)


@streammapBP.route("/api/v1/streammap")
async def streammapFunction():
    """Build the playable-stream list (plus optional subtitle URL) for a file.

    Query args: a (auth token), id (Drive file id), name (file name),
    server (base URL used to build redirect-download links).
    Responses: 200 with {default_quality, sources, subtitle} on success,
    a kill-switch placeholder when disabled, or 401 on bad credentials.
    """
    a = flask.request.args.get("a")  # AUTH
    id = flask.request.args.get("id")  # ID
    name = flask.request.args.get("name")  # NAME
    server = flask.request.args.get("server")  # SERVER
    config = src.functions.config.readConfig()
    # Kill switch: respond 200 with a single dummy "UNAVAILABLE" source.
    if config.get("kill_switch") == True:
        return flask.jsonify(
            {
                "code": 200,
                "content": [{"name": "UNAVAILABLE", "url": "", "type": "normal"}],
                "message": "Stream list generated successfully.",
                "success": True,
            }
        )
    # Accept if the token matches any account, or auth is disabled entirely.
    if (
        any(a == account["auth"] for account in config["account_list"])
        or config.get("auth") == False
    ):
        # The untranscoded original is always offered first.
        stream_list = [
            {
                "name": "Original",
                "url": "%s/api/v1/redirectdownload/%s?a=%s&id=%s"
                % (server, urllib.parse.quote(name), a, id),
                "type": "normal",
            }
        ]
        if config.get("transcoded") == True:
            # Ask Drive for transcoded variants; fmt_list entries look like
            # "itag/label/..." -- itag goes in the URL, label is shown.
            req = requests.get(
                "https://drive.google.com/get_video_info?docid=%s" % (id),
                headers={"Authorization": "Bearer %s" % (config.get("access_token"))},
            )
            parsed = urllib.parse.parse_qs(urllib.parse.unquote(req.text))
            if parsed.get("status") == ["ok"]:
                for fmt in parsed["fmt_list"][0].split(","):
                    fmt_data = fmt.split("/")
                    stream_list.append(
                        {
                            "name": fmt_data[1],
                            "url": "%s/api/v1/redirectdownload/%s?a=%s&id=%s&itag=%s"
                            % (server, urllib.parse.quote(name), a, id, fmt_data[0]),
                            "type": "auto",
                        }
                    )
        subtitle = {"url": ""}
        if config.get("subtitles") == True:
            # Search the file's parent folder for .srt/.vtt files whose name
            # contains the video's base name; last match wins.
            # NOTE(review): src.functions.credentials is used here but only
            # src.functions.config is imported at the top of this module --
            # confirm the credentials submodule is imported transitively.
            config, drive = src.functions.credentials.refreshCredentials(
                src.functions.config.readConfig()
            )
            params = {
                "supportsAllDrives": True,
                "fields": "parents",
                "fileId": id,
            }
            parent = drive.files().get(**params).execute()["parents"][0]
            params = {
                "pageToken": None,
                "supportsAllDrives": True,
                "includeItemsFromAllDrives": True,
                "fields": "files(id,name,mimeType,parents), incompleteSearch, nextPageToken",
                "q": "'%s' in parents and trashed = false and (name contains '.srt' or name contains '.vtt')"
                % (parent),
                "orderBy": "name",
            }
            while True:
                response = drive.files().list(**params).execute()
                for file in response["files"]:
                    name_split = os.path.splitext(name)[0]
                    if name_split in file["name"]:
                        subtitle = {
                            "url": "%s/api/v1/subtitledownload/%s?a=%s&id=%s"
                            % (server, file["name"], a, file["id"])
                        }
                # Page through results until nextPageToken disappears.
                try:
                    params["pageToken"] = response["nextPageToken"]
                except KeyError:
                    break
        # Pick the default source: index 0 is the original, index 1 the first
        # transcode; container-preference flags bias toward the original.
        if (
            config.get("prefer_mkv") == False
            and config.get("prefer_mp4") == False
            and len(stream_list) > 1
        ):
            default_quality = 1
        elif config.get("prefer_mp4", True) == True and name.endswith(".mp4"):
            default_quality = 0
        elif config.get("prefer_mkv", False) == True and name.endswith(".mkv"):
            default_quality = 0
        elif len(stream_list) > 1:
            default_quality = 1
        else:
            default_quality = 0
        return flask.jsonify(
            {
                "code": 200,
                "content": {
                    "default_quality": default_quality,
                    "sources": stream_list,
                    "subtitle": subtitle,
                },
                "message": "Stream list generated successfully!",
                "success": True,
            }
        )
    else:
        return (
            flask.jsonify(
                {
                    "code": 401,
                    "content": None,
                    "message": "Your credentials are invalid.",
                    "success": False,
                }
            ),
            401,
        )
| StarcoderdataPython |
1604026 | from __future__ import annotations
__all__ = ['lock_seed', 'trace', 'trace_module', 'whereami']
import gc
import inspect
import os
import random
import types
from collections.abc import Iterator
from contextlib import suppress
from itertools import islice
from types import FrameType
import numpy as np
import wrapt
from ._import_hook import register_post_import_hook
def _get_module(frame: FrameType) -> str:
if (module := inspect.getmodule(frame)) and module.__spec__:
return module.__spec__.name
return '__main__'
def _get_function(frame: FrameType) -> str:
function = frame.f_code.co_name
function = next(
(f.__qualname__
for f in gc.get_referrers(frame.f_code) if inspect.isfunction(f)),
function)
return '' if function == '<module>' else function
def _stack(frame: FrameType | None) -> Iterator[str]:
while frame:
yield f'{_get_module(frame)}:{_get_function(frame)}:{frame.f_lineno}'
if frame.f_code.co_name == '<module>': # Stop on module-level scope
return
frame = frame.f_back
def stack(skip: int = 0, limit: int | None = None) -> Iterator[str]:
    """Returns iterator of FrameInfos, stopping on module-level scope"""
    # skip + 1 also drops this function's own frame.
    calls = islice(_stack(inspect.currentframe()), skip + 1, None)
    # With a limit, keep at most `limit` outer frames; otherwise all.
    return islice(calls, limit) if limit else calls
def whereami(skip: int = 0, limit: int | None = None) -> str:
    """Human-readable call chain, outermost frame first, joined by ' -> '."""
    frames = list(stack(skip + 1, limit))
    frames.reverse()
    return ' -> '.join(frames)
@wrapt.decorator
def trace(fn, _, args, kwargs):
    # Log "<(caller chain)> : module.qualname" before delegating to the
    # wrapped callable. whereami(3) skips 3 inner frames -- presumably the
    # wrapt plumbing so the chain starts at the real caller (confirm).
    print(
        f'<({whereami(3)})> : {fn.__module__ or ""}.{fn.__qualname__}',
        flush=True)
    return fn(*args, **kwargs)
def _set_trace(obj, seen=None, prefix=None, module=None):
    """Recursively wrap every callable reachable from *obj* with trace().

    For a module, recurses into all attributes (restricted to submodules
    whose name shares the root module's prefix, each visited once via
    `seen`). Plain callables without a __dict__ are replaced on their
    owning module; callables with a __dict__ (e.g. classes) get each of
    their callable members wrapped in place.
    """
    # TODO: rewrite using unittest.mock
    if isinstance(obj, types.ModuleType):
        if seen is None:
            # First call: remember the root package prefix.
            seen = set()
            prefix = obj.__name__
        if not obj.__name__.startswith(prefix) or obj.__name__ in seen:
            return
        seen.add(obj.__name__)
        for name in dir(obj):
            _set_trace(
                getattr(obj, name), module=obj, seen=seen, prefix=prefix)
    if not callable(obj):
        return
    if not hasattr(obj, '__dict__'):
        # Builtin-style callable: rebind the traced wrapper on the module.
        setattr(module, obj.__qualname__, trace(obj))
        print(f'wraps "{module.__name__}:{obj.__qualname__}"')
        return
    for name in obj.__dict__:
        with suppress(AttributeError, TypeError):
            member = getattr(obj, name)
            if not callable(member):
                continue
            decorated = trace(member)
            # Propagate a usable __module__ from the first of these that
            # has one; fall back to the owning module's name.
            for m in (decorated, member, obj):
                with suppress(AttributeError):
                    decorated.__module__ = m.__module__
                    break
            else:
                decorated.__module__ = getattr(module, '__name__', '')
            setattr(obj, name, decorated)
            print(f'wraps "{module.__name__}:{obj.__qualname__}.{name}"')
def trace_module(name):
    """Enables call logging for each callable inside module name"""
    register_post_import_hook(_set_trace, name)
# ---------------------------------------------------------------------------
def lock_seed(seed: int) -> None:
    """Set seed for all modules: random/numpy/torch.

    torch seeding is registered as a post-import hook, so torch is only
    configured if/when it gets imported.

    Args:
        seed: Seed value applied to every RNG.
    """
    random.seed(seed)
    os.environ['PYTHONHASHSEED'] = str(seed)
    np.random.seed(seed)

    def _torch_seed(_module):
        # The hook argument used to be named `torch` and was immediately
        # shadowed by the import below; keep the explicit imports (they
        # guarantee torch.backends.cudnn is loaded) and ignore the argument.
        import torch
        import torch.backends.cudnn

        torch.manual_seed(seed)
        torch.backends.cudnn.deterministic = True
        torch.backends.cudnn.benchmark = False

    register_post_import_hook(_torch_seed, 'torch')
| StarcoderdataPython |
53638 | <filename>detect_mcdonald_logo.py
#!/usr/bin/python
import sys, getopt
import copy
import matplotlib.pyplot as plt
import cv2 as cv
import modules.data_backend_operations as db
import modules.preprocessing as pr
import modules.segmentation as seg
import modules.classification as cls
def detect_logo(base_img, show_step_pictures=False, save=False):
    """Detect McDonald's logo candidates in `base_img` and annotate them.

    Pipeline: resize -> HSV threshold -> binary closing -> segmentation ->
    per-segment shape features -> classification against stored features.

    Args:
        base_img: BGR image (as returned by cv.imread — see caller in main).
        show_step_pictures: If True, display each intermediate image.
        save: If True, save the annotated result when a logo was found.

    Returns:
        0 on a successful save, -1 on failure cases; returns None when
        classification succeeded but save is False (NOTE(review): likely
        an oversight — confirm callers only rely on the == 0 comparison).
    """
    classifier_data = db.import_vector_for_cls()
    if classifier_data is not None:
        img_ = db.resize_picture(base_img)
        img_rgb = pr.convert_BGR2RGB(img_)
        if show_step_pictures:
            db.print_img(img_rgb, title="Image after resizing")
        img = pr.convert_BGR2HSV(img_)
        # Threshold parameters 22/8 — presumably hue bounds for the yellow
        # "M"; TODO confirm against pr.get_treshold's signature.
        img_tresholded = pr.get_treshold(img, 22, 8)
        if show_step_pictures:
            db.print_img(img_tresholded, title="Image after tresholding", gray_scale_flag=True)
        img_closed = pr.make_binary_operations(img_tresholded)
        if show_step_pictures:
            db.print_img(img_closed, title="Image after closing", gray_scale_flag=True)
        img_segmented, seg_no, segments = seg.get_segments(img_closed)
        img__ = copy.deepcopy(img_rgb)
        save_flag = False
        for segment in segments:
            # Red box on every candidate segment (debug image only).
            point_min, point_max = seg.determine_extreme_points_seg(segment["cordinates"])
            img__ = db.create_bbox(img__, point_min, point_max, color=(255, 0, 0), thickness=2)
            segm_bbox = seg.crop_segment(img_segmented, segment["cordinates"])
            segm_bbox_border, perimeter, _ = seg.get_Moore_Neighborhood_countour(segm_bbox, segment["key"])
            img_segmented[point_min[1]:point_max[1], point_min[0]:point_max[0]] = segm_bbox
            # Shape features; slot 0 is replaced by the Malinowska ratio.
            features = cls.calculate_invariants(segment)
            area = cls.calculate_area(segment)
            features[0] = cls.calculate_Malinowska_ratio(area, perimeter)
            is_M_logo = cls.check_segment(features, classifier_data["feature value"], classifier_data["standard deviation"])
            if is_M_logo is True:
                save_flag = True
                # Green box on segments classified as the logo.
                img_rgb = db.create_bbox(img_rgb, point_min, point_max, color=(0, 255, 0), thickness=2)
        print(f"possible segments found: {seg_no}")
        if show_step_pictures:
            db.print_img(img__, "Annotaded bboxes for all segments")
            db.print_img(img_rgb, "Founded logos")
        if save:
            if save_flag:
                # NOTE(review): "save\d..." relies on '\d' not being a string
                # escape and on Windows path separators — confirm portability.
                plt.imsave("save\detection_result.png", img_rgb)
                return 0
            else:
                print("No detected logos. Save skipped")
                return -1
    else:
        print("No data for clasification uploaded")
        return -1
def main(argv):
    """Command-line entry point: parse options and run logo detection.

    Supported flags: -i <image path>, -n <dataset index>, -d (display
    intermediate steps), -s (save result), -h (help).
    """
    argumentList = sys.argv[1:]
    options = "hi:n:ds"
    long_options = ["Help", "Image", "Dataset_number", ]
    display_steps = False
    img = None
    n = -1  # Sentinel: "no dataset index given".
    img_loaded = False
    save_flag = False
    try:
        arguments, values = getopt.getopt(argumentList, options, long_options)
        for currentArgument, currentValue in arguments:
            if currentArgument in ("-h", "--Help"):
                print ("\
    -i <inputimage> \n\
    -n <number_from_dataset> - make detection from exaples \n\
    -d <bool> - display steps during detection \n\
    -h -display help \n\
    -s -save detection result to .png\n")
                sys.exit()
            elif currentArgument in ("-i", "--Image"):
                print ("Loading image: ", currentValue)
                img = cv.imread(currentValue)
                img_loaded = True
            elif currentArgument in ("-n", "--Dataset_number"):
                print ("Loading image from dataset number: ", currentValue)
                n = int(currentValue)
                # Bug fix: original used `and`, which can never be true, so
                # out-of-range indices were silently accepted. Valid dataset
                # indices are -1 (sentinel) through 15.
                if n > 15 or n < -1:
                    print("wrong number")
                    sys.exit()
            elif currentArgument in ("-d", "--Display"):
                display_steps = True
            elif currentArgument in ("-s", "--Save"):
                save_flag = True
        if img_loaded:
            # Explicit image path takes precedence over the dataset index.
            if detect_logo(img, show_step_pictures=display_steps, save=save_flag) == 0:
                input("Processing finished. Press Enter to continue...")
            else:
                input("No config file. Press Enter to continue...")
            sys.exit()
        img = db.get_img_from_dataset(n)
        if img is not None:
            if detect_logo(img, show_step_pictures=display_steps, save=save_flag) == 0:
                input("Processing finished. Press Enter to continue...")
            else:
                input("No config file. Press Enter to continue...")
            sys.exit()
        else:
            print("No image loaded")
            input("Processing finished. Press Enter to continue...")
    except getopt.error as err:
        print (str(err))
        input("Press Enter to continue...")
if __name__ == "__main__":
main(sys.argv[1:]) | StarcoderdataPython |
1651583 | import tweepy
import wget
import os
from PIL import Image
from pytesseract import *
from kafka import KafkaProducer
from kafka.errors import KafkaError
from collections import OrderedDict
import json
# twitter api auth
# NOTE(review): credentials are hardcoded in source — move them to
# environment variables or a secrets store before deployment.
twitter_consumer_key = "pTHQmNMSbD6BJnZ1XtA8sJXY8"
twitter_consumer_secret = "<KEY>"
twitter_access_token = "<KEY>"
twitter_access_secret = "<KEY>"
auth = tweepy.OAuthHandler(twitter_consumer_key, twitter_consumer_secret)
auth.set_access_token(twitter_access_token, twitter_access_secret)
api = tweepy.API(auth)
# count=1
# Declare the Kafka producer (large max_request_size for OCR payloads).
producer = KafkaProducer(bootstrap_servers=['localhost:9092'], max_request_size=5242880)
class MyStreamListener(tweepy.StreamListener):
    """Tweepy stream listener: OCRs tweet images and publishes to Kafka."""
    def on_status(self, status):
        """Handle one streamed tweet: extract text, OCR media, produce."""
        # global count
        # Tweet text.
        text = status.text
        # Build the JSON payload.
        file_data = OrderedDict()
        file_data["text"] = str(text.encode('utf-8'))
        file_data["OCRtext"]=""
        if 'media' in status.entities:
            for image in status.entities['media']:
                # Tweet image: download to a temporary local file.
                # print(image['media_url'])
                # f='photo'+str(count)+'.jpg'
                f = 'photo.jpg'
                wget.download(image['media_url'], out=f)
                # Extract text with OCR (Korean + English).
                img = Image.open(f)
                OCRtext = pytesseract.image_to_string(img, lang='kor+eng')
                print(text)
                print(OCRtext)
                print(str(text.encode('utf-8')))
                # NOTE(review): this stores bytes, which json.dumps below
                # cannot serialize — confirm; plain OCRtext (str) may be
                # what was intended.
                file_data["OCRtext"]=OCRtext.encode('utf-8')
                file_name = './' + f
                if os.path.isfile(file_name):
                    os.remove(file_name)
                # count+=1
        # Produce the message to Kafka.
        future = producer.send('testTopic', json.dumps(file_data).encode('utf-8'))
        try:
            recorde_metadata = future.get(timeout=10)
        except KafkaError:
            pass
        # print(status.text)
myStreamListener = MyStreamListener()
myStream = tweepy.Stream(auth = api.auth, listener=myStreamListener)
myStream.filter(track=['출장'])
def digitDifferenceSort(a):
    """Sort numbers by (max digit - min digit), ascending.

    Ties are broken by descending original index, i.e. of two numbers
    with the same digit difference, the one appearing later in `a`
    comes first.
    """
    def digit_spread(value):
        digits = [int(ch) for ch in str(value)]
        return max(digits) - min(digits)

    indexed = list(enumerate(a))
    indexed.sort(key=lambda pair: (digit_spread(pair[1]), -pair[0]))
    return [value for _, value in indexed]
| StarcoderdataPython |
94669 | # Copyright (c) 2013- The Spyder Development Team and Docrepr Contributors
#
# Distributed under the terms of the BSD BSD 3-Clause License
"""Simple tests of docrepr's output."""
# Standard library imports
import copy
import subprocess
import sys
import tempfile
from pathlib import Path
# Third party imports
import numpy as np
import pytest
from IPython.core.oinspect import Inspector, object_info
# Local imports
import docrepr
import docrepr.sphinxify
# ---- Test data
# A sample function to test
def get_random_ingredients(kind=None):
    """
    Return a list of random ingredients as strings.

    :param kind: Optional "kind" of ingredients.
    :type kind: list[str] or None
    :raise ValueError: If the kind is invalid.
    :return: The ingredients list.
    :rtype: list[str]
    """
    # Bug fix: guard against kind=None (the documented default), which
    # previously raised "TypeError: argument of type 'NoneType' is not
    # iterable" on the `in` test.
    if kind is not None and 'spam' in kind:
        return ['spam', 'spam', 'eggs', 'spam']
    return ['eggs', 'bacon', 'spam']
# A sample class to test
class SpamCans:
    """
    Cans of spam.

    :param n_cans: Number of cans of spam.
    :type n_cans: int
    :raise ValueError: If spam is negative.
    """

    def __init__(self, n_cans=1):
        """Initialize the pantry with ``n_cans`` cans of spam."""
        if n_cans < 0:
            raise ValueError('Spam must be non-negative!')
        self.n_cans = n_cans

    def eat_one(self):
        """
        Eat one can of spam.

        :raise ValueError: If we're all out of spam.
        :return: The number of cans of spam left.
        :rtype: int
        """
        remaining = self.n_cans - 1
        if remaining < 0:
            raise ValueError('All out of spam!')
        self.n_cans = remaining
        return remaining
PLOT_DOCSTRING = """
.. plot::
>>> import matplotlib.pyplot as plt
>>> plt.plot([1,2,3], [4,5,6])
"""
# Test cases
TEST_CASES = {
'empty_oinfo': {
'obj': None,
'oinfo': {},
'options': {},
},
'basic': {
'obj': None,
'oinfo': {
'name': 'Foo',
'docstring': 'A test',
'type_name': 'Function',
},
'options': {},
},
'function_nosphinx_python_docs': {
'obj': subprocess.run,
'oinfo': {'name': 'run'},
'options': {},
},
'class_nosphinx_python_docs': {
'obj': tempfile.TemporaryDirectory,
'oinfo': {'name': 'TemporaryDirectory'},
'options': {},
},
'method_nosphinx_thirdparty': {
'obj': Inspector().info,
'oinfo': {'name': 'Inspector.info'},
'options': {},
},
'function_sphinx': {
'obj': get_random_ingredients,
'oinfo': {'name': 'get_random_ingredients'},
'options': {},
},
'class_sphinx': {
'obj': SpamCans,
'oinfo': {'name': 'SpamCans'},
'options': {},
},
'method_sphinx': {
'obj': SpamCans().eat_one,
'oinfo': {'name': 'SpamCans.eat_one'},
'options': {},
},
'render_math': {
'obj': None,
'oinfo': {
'name': 'Foo',
'docstring': 'This is some math :math:`a^2 = b^2 + c^2`',
},
'options': {},
},
'no_render_math': {
'obj': None,
'oinfo': {
'name': 'Foo',
'docstring': 'This is a rational number :math:`\\frac{x}{y}`',
},
'options': {'render_math': False},
},
'numpy_module': {
'obj': np,
'oinfo': {'name': 'NumPy'},
'options': {},
},
'numpy_sin': {
'obj': np.sin,
'oinfo': {'name': 'sin'},
'options': {},
},
'collapse': {
'obj': np.sin,
'oinfo': {'name': 'sin'},
'options': {'collapse_sections': True},
},
'outline': {
'obj': np.sin,
'oinfo': {'name': 'sin'},
'options': {'outline': True},
},
'plot': {
'obj': None,
'oinfo': {
'name': 'Foo',
'docstring': PLOT_DOCSTRING
},
'options': {},
},
'no_docstring': {
'obj': None,
'oinfo': {'docstring': '<no docstring>'},
'options': {},
},
}
# ---- Helper functions
def _test_cases_to_params(test_cases):
return [
(test_id, *test_case.values())
for test_id, test_case in test_cases.items()
]
# ---- Fixtures
@pytest.fixture(name='build_oinfo')
def fixture_build_oinfo():
    """Generate object information for tests.

    Returns a factory: given an object (inspected via IPython's
    Inspector) or None (empty object_info), merge in keyword overrides
    and return the resulting oinfo dict.
    """
    def _build_oinfo(obj=None, **oinfo_data):
        if obj is not None:
            oinfo = Inspector().info(obj)
        else:
            oinfo = object_info()
        # Keyword overrides take precedence over inspected values.
        oinfo = {**oinfo, **oinfo_data}
        return oinfo
    return _build_oinfo
@pytest.fixture(name='set_docrepr_options')
def fixture_set_docrepr_options():
    """Set docrepr's rendering options and restore them after.

    Yields a setter that updates the global docrepr.options; teardown
    restores the deep-copied defaults captured before the test ran.
    """
    default_options = copy.deepcopy(docrepr.options)
    def _set_docrepr_options(**docrepr_options):
        docrepr.options.update(docrepr_options)
    yield _set_docrepr_options
    # Teardown: wipe any test-applied options and restore the defaults.
    docrepr.options.clear()
    docrepr.options.update(default_options)
# ---- Tests
@pytest.mark.asyncio
@pytest.mark.parametrize(
    ('test_id', 'obj', 'oinfo_data', 'docrepr_options'),
    _test_cases_to_params(TEST_CASES),
    ids=list(TEST_CASES.keys()),
)
async def test_sphinxify(
        build_oinfo, set_docrepr_options, open_browser, compare_screenshots,
        test_id, obj, oinfo_data, docrepr_options,
):
    """Test the operation of the Sphinxify module on various docstrings."""
    # Known-bad combination: the plot case fails on Python 3.6 + Windows.
    if (oinfo_data.get('docstring', None) == PLOT_DOCSTRING
            and sys.version_info.major == 3
            and sys.version_info.minor == 6
            and sys.platform.startswith('win')):
        pytest.skip(
            'Plot fails on Py3.6 on Windows; older version of Matplotlib?')
    oinfo = build_oinfo(obj, **oinfo_data)
    set_docrepr_options(**docrepr_options)
    url = docrepr.sphinxify.rich_repr(oinfo)
    # Sanity checks on the rendered HTML: file exists, right suffix, and
    # is non-trivially sized (512 bytes is an arbitrary lower bound).
    output_file = Path(url)
    assert output_file.is_file()
    assert output_file.suffix == '.html'
    assert output_file.stat().st_size > 512
    file_text = output_file.read_text(encoding='utf-8', errors='strict')
    assert len(file_text) > 512
    await compare_screenshots(test_id, url)
    open_browser(url)
| StarcoderdataPython |
1642073 | import json
from docker.types import RestartPolicy
from wotemu.enums import NetworkConditions
from wotemu.topology.models import (Broker, BuiltinApps, Network, Node,
NodeApp, NodeResources, Service, Topology)
# Location identifiers and WoT Thing IDs used throughout the topology.
_ID_1 = "loc1"
_ID_2 = "loc2"
_THING_ID_DETECTOR = "urn:org:fundacionctic:thing:wotemu:detector"
_THING_ID_HISTORIAN = "urn:org:fundacionctic:thing:historian"
def topology():
    """Build the WoTemu emulation topology for the camera/detector demo.

    Layout: two edge networks of camera nodes (2G and 3G conditions)
    publishing through a shared MQTT broker, a detector bridging the edge
    and cloud networks, a Mongo-backed historian in the cloud, and a set
    of user nodes calling the historian.
    """
    # Edge networks with degraded link conditions; cloud links use CABLE.
    network_edge_1 = Network(
        name=f"edge_2g_{_ID_1}",
        conditions=NetworkConditions.GPRS)
    network_edge_2 = Network(
        name=f"edge_3g_{_ID_2}",
        conditions=NetworkConditions.REGULAR_3G)
    network_cloud = Network(
        name="cloud",
        conditions=NetworkConditions.CABLE)
    network_cloud_user = Network(
        name="cloud_user",
        conditions=NetworkConditions.CABLE)
    network_edge_2 = Network(
        name=f"edge_3g_{_ID_2}",
        conditions=NetworkConditions.REGULAR_3G)
    broker = Broker(
        name=f"broker",
        networks=[network_edge_1, network_edge_2])
    # Cameras are resource-constrained (CPU/memory limited) edge nodes.
    camera_resources = NodeResources(
        target_cpu_speed=200,
        mem_limit="256M")
    nodes_camera_1 = [
        Node(
            name=f"camera_{_ID_1}_{idx}",
            app=NodeApp(path=BuiltinApps.CAMERA, mqtt=True),
            networks=[network_edge_1],
            resources=camera_resources,
            broker=broker,
            broker_network=network_edge_1)
        for idx in range(2)
    ]
    nodes_camera_2 = [
        Node(
            name=f"camera_{_ID_2}_{idx}",
            app=NodeApp(path=BuiltinApps.CAMERA, mqtt=True),
            networks=[network_edge_2],
            resources=camera_resources,
            broker=broker,
            broker_network=network_edge_2)
        for idx in range(6)
    ]
    # The detector addresses each camera by "<node>.<network>" hostname.
    camera_hostnames_1 = [
        f"{item.name}.{network_edge_1.name}"
        for item in nodes_camera_1
    ]
    camera_hostnames_2 = [
        f"{item.name}.{network_edge_2.name}"
        for item in nodes_camera_2
    ]
    param_cameras = json.dumps([
        {"servient_host": cam_name}
        for cam_name in camera_hostnames_1 + camera_hostnames_2
    ])
    app_detector = NodeApp(
        path=BuiltinApps.DETECTOR,
        params={"cameras": param_cameras},
        http=True)
    node_detector = Node(
        name=f"detector",
        app=app_detector,
        networks=[network_edge_1, network_edge_2, network_cloud])
    mongo = Service(
        name="mongo",
        image="mongo:4",
        restart_policy=RestartPolicy(condition="on-failure"))
    historian_observed_things = [
        {
            "servient_host": f"{node_detector.name}.{network_cloud.name}",
            "thing_id": _THING_ID_DETECTOR
        }
    ]
    historian_app = NodeApp(
        path=BuiltinApps.MONGO_HISTORIAN,
        http=True,
        params={
            "mongo_uri": "mongodb://mongo",
            "observed_things": json.dumps(historian_observed_things)
        })
    node_historian = Node(
        name="cloud",
        app=historian_app,
        networks=[network_cloud, network_cloud_user])
    node_historian.link_service(mongo)
    # Users invoke the historian's write/list interactions at a Poisson
    # rate (lambd=5 — presumably events per time unit; confirm in CALLER).
    user_app = NodeApp(
        path=BuiltinApps.CALLER,
        params={
            "servient_host": f"{node_historian.name}.{network_cloud_user.name}",
            "thing_id": _THING_ID_HISTORIAN,
            "params": json.dumps({"write": None, "list": None}),
            "lambd": 5
        })
    node_user = Node(
        name="user",
        app=user_app,
        networks=[network_cloud_user],
        scale=5)
    topology = Topology(nodes=[
        *nodes_camera_1,
        *nodes_camera_2,
        node_detector,
        node_historian,
        node_user
    ])
    return topology
| StarcoderdataPython |
1794055 | # Generated by Django 2.2.5 on 2019-10-27 19:10
from django.conf import settings
from django.db import migrations
class Migration(migrations.Migration):
    """Add a unique-together constraint on Schedule (name, semester, person)."""
    dependencies = [
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
        ("plan", "0001_initial"),
    ]
    operations = [
        migrations.AlterUniqueTogether(
            name="schedule", unique_together={("name", "semester", "person")},
        ),
    ]
| StarcoderdataPython |
1745706 | <reponame>JackyCSer/WebServiceCompositionSystem<gh_stars>1-10
from __future__ import print_function
from . import pddl_types
def parse_condition(alist):
    """Parse a PDDL condition s-expression (nested lists) into a Condition."""
    condition = parse_condition_aux(alist, False)
    # TODO: The next line doesn't appear to do anything good,
    # since uniquify_variables doesn't modify the condition in place.
    # Conditions in actions or axioms are uniquified elsewhere, but
    # it looks like goal conditions are never uniquified at all
    # (which would be a bug).
    condition.uniquify_variables({})
    return condition
"""Parse a PDDL condition. The condition is translated into NNF on the fly."""
tag = alist[0]
if tag in ("and", "or", "not", "imply"):
args = alist[1:]
if tag == "imply":
assert len(args) == 2
if tag == "not":
assert len(args) == 1
return parse_condition_aux(args[0], not negated)
elif tag in ("forall", "exists"):
parameters = pddl_types.parse_typed_list(alist[1])
args = alist[2:]
assert len(args) == 1
elif negated:
return NegatedAtom(alist[0], alist[1:])
else:
return Atom(alist[0], alist[1:])
if tag == "imply":
parts = [parse_condition_aux(args[0], not negated),
parse_condition_aux(args[1], negated)]
tag = "or"
else:
parts = [parse_condition_aux(part, negated) for part in args]
if tag == "and" and not negated or tag == "or" and negated:
return Conjunction(parts)
elif tag == "or" and not negated or tag == "and" and negated:
return Disjunction(parts)
elif tag == "forall" and not negated or tag == "exists" and negated:
return UniversalCondition(parameters, parts)
elif tag == "exists" and not negated or tag == "forall" and negated:
return ExistentialCondition(parameters, parts)
def parse_literal(alist):
    """Parse a single, possibly "not"-wrapped, literal s-expression."""
    if alist[0] != "not":
        return Atom(alist[0], alist[1:])
    assert len(alist) == 2
    inner = alist[1]
    return NegatedAtom(inner[0], inner[1:])
# Conditions (of any type) are immutable, because they need to
# be hashed occasionally. Immutability also allows more efficient comparison
# based on a precomputed hash value.
#
# Careful: Most other classes (e.g. Effects, Axioms, Actions) are not!
class Condition(object):
    """Base class for (immutable) condition trees.

    Instances precompute a hash from (class, parts); subclasses that
    define __eq__ must re-expose __hash__ explicitly.
    """
    def __init__(self, parts):
        self.parts = tuple(parts)
        # Precomputed hash enables cheap comparison and dict/set use.
        self.hash = hash((self.__class__, self.parts))
    def __hash__(self):
        return self.hash
    def __ne__(self, other):
        return not self == other
    def __lt__(self, other):
        return self.hash < other.hash
    def __le__(self, other):
        return self.hash <= other.hash
    def dump(self, indent=" "):
        """Print the condition tree, one node per line, indented by depth."""
        print("%s%s" % (indent, self._dump()))
        for part in self.parts:
            part.dump(indent + " ")
    def _dump(self):
        return self.__class__.__name__
    def _postorder_visit(self, method_name, *args):
        # Visit children first, then apply `method_name` (or the default
        # _propagate) to this node with the transformed children.
        part_results = [part._postorder_visit(method_name, *args)
                        for part in self.parts]
        method = getattr(self, method_name, self._propagate)
        return method(part_results, *args)
    def _propagate(self, parts, *args):
        return self.change_parts(parts)
    def simplified(self):
        """Return an equivalent condition with constant parts folded away."""
        return self._postorder_visit("_simplified")
    def relaxed(self):
        """Return the delete-relaxed condition (negated atoms become Truth)."""
        return self._postorder_visit("_relaxed")
    def untyped(self):
        """Return the condition with typed quantifiers compiled to atoms."""
        return self._postorder_visit("_untyped")
    def uniquify_variables(self, type_map, renamings={}):
        # Cannot used _postorder_visit because this requires preorder
        # for quantified effects.
        if not self.parts:
            return self
        else:
            return self.__class__([part.uniquify_variables(type_map, renamings)
                                   for part in self.parts])
    def to_untyped_strips(self):
        raise ValueError("Not a STRIPS condition: %s" % self.__class__.__name__)
    def instantiate(self, var_mapping, init_facts, fluent_facts, unknown_facts, result):
        raise ValueError("Cannot instantiate condition: not normalized")
    def free_variables(self):
        """Return the set of variable names not bound by any quantifier."""
        result = set()
        for part in self.parts:
            result |= part.free_variables()
        return result
    def has_disjunction(self):
        for part in self.parts:
            if part.has_disjunction():
                return True
        return False
    def has_existential_part(self):
        for part in self.parts:
            if part.has_existential_part():
                return True
        return False
    def has_universal_part(self):
        for part in self.parts:
            if part.has_universal_part():
                return True
        return False
class ConstantCondition(Condition):
    """Base for the constant conditions Truth and Falsity (no parts)."""
    # Defining __eq__ blocks inheritance of __hash__, so must set it explicitly.
    __hash__ = Condition.__hash__
    parts = ()
    def __init__(self):
        self.hash = hash(self.__class__)
    def change_parts(self, parts):
        return self
    def __eq__(self, other):
        # Constants of the same class are interchangeable.
        return self.__class__ is other.__class__
class Impossible(Exception):
    """Raised during instantiation when a condition can never hold."""
    pass
class Falsity(ConstantCondition):
    """The constantly-false condition."""
    def instantiate(self, var_mapping, init_facts, fluent_facts, unknown_facts, result):
        raise Impossible()
    def negate(self):
        return Truth()
class Truth(ConstantCondition):
    """The constantly-true condition."""
    def to_untyped_strips(self):
        return []
    def instantiate(self, var_mapping, init_facts, fluent_facts, unknown_facts, result):
        # Always satisfied; contributes nothing to the result.
        pass
    def negate(self):
        return Falsity()
class JunctorCondition(Condition):
    """Base for Conjunction and Disjunction (n-ary connectives)."""
    # Defining __eq__ blocks inheritance of __hash__, so must set it explicitly.
    __hash__ = Condition.__hash__
    def __eq__(self, other):
        # Compare hash first for speed reasons.
        return (self.hash == other.hash and
                self.__class__ is other.__class__ and
                self.parts == other.parts)
    def change_parts(self, parts):
        return self.__class__(parts)
class Conjunction(JunctorCondition):
    """Logical AND over its parts."""
    def _simplified(self, parts):
        # Flatten nested conjunctions, drop Truth, short-circuit on Falsity.
        result_parts = []
        for part in parts:
            if isinstance(part, Conjunction):
                result_parts += part.parts
            elif isinstance(part, Falsity):
                return Falsity()
            elif not isinstance(part, Truth):
                result_parts.append(part)
        if not result_parts:
            return Truth()
        if len(result_parts) == 1:
            return result_parts[0]
        return Conjunction(result_parts)
    def to_untyped_strips(self):
        result = []
        for part in self.parts:
            result += part.to_untyped_strips()
        return result
    def instantiate(self, var_mapping, init_facts, fluent_facts, unknown_facts, result):
        assert not result, "Condition not simplified"
        for part in self.parts:
            part.instantiate(var_mapping, init_facts, fluent_facts, unknown_facts, result)
    def negate(self):
        # De Morgan: not(and ...) == or(not ...).
        return Disjunction([p.negate() for p in self.parts])
class Disjunction(JunctorCondition):
    """Logical OR over its parts."""
    def _simplified(self, parts):
        # Flatten nested disjunctions, drop Falsity, short-circuit on Truth.
        result_parts = []
        for part in parts:
            if isinstance(part, Disjunction):
                result_parts += part.parts
            elif isinstance(part, Truth):
                return Truth()
            elif not isinstance(part, Falsity):
                result_parts.append(part)
        if not result_parts:
            return Falsity()
        if len(result_parts) == 1:
            return result_parts[0]
        return Disjunction(result_parts)
    def negate(self):
        # De Morgan: not(or ...) == and(not ...).
        return Conjunction([p.negate() for p in self.parts])
    def has_disjunction(self):
        return True
class QuantifiedCondition(Condition):
    """Base for forall/exists conditions: typed parameters plus one body."""
    # Defining __eq__ blocks inheritance of __hash__, so must set it explicitly.
    __hash__ = Condition.__hash__
    def __init__(self, parameters, parts):
        self.parameters = tuple(parameters)
        self.parts = tuple(parts)
        self.hash = hash((self.__class__, self.parameters, self.parts))
    def __eq__(self, other):
        # Compare hash first for speed reasons.
        return (self.hash == other.hash and
                self.__class__ is other.__class__ and
                self.parameters == other.parameters and
                self.parts == other.parts)
    def _dump(self, indent=" "):
        arglist = ", ".join(map(str, self.parameters))
        return "%s %s" % (self.__class__.__name__, arglist)
    def _simplified(self, parts):
        # Quantifying over a constant body yields the constant itself.
        if isinstance(parts[0], ConstantCondition):
            return parts[0]
        else:
            return self._propagate(parts)
    def uniquify_variables(self, type_map, renamings={}):
        renamings = dict(renamings) # Create a copy.
        new_parameters = [par.uniquify_name(type_map, renamings)
                          for par in self.parameters]
        new_parts = (self.parts[0].uniquify_variables(type_map, renamings),)
        return self.__class__(new_parameters, new_parts)
    def free_variables(self):
        # Bound parameters are not free in the quantified condition.
        result = Condition.free_variables(self)
        for par in self.parameters:
            result.discard(par.name)
        return result
    def change_parts(self, parts):
        return self.__class__(self.parameters, parts)
class UniversalCondition(QuantifiedCondition):
    """forall-condition over typed parameters."""
    def _untyped(self, parts):
        # Compile types away: forall x:T. P  ==>  forall x. (not T(x) or P).
        type_literals = [NegatedAtom(par.type, [par.name]) for par in self.parameters]
        return UniversalCondition(self.parameters,
                                  [Disjunction(type_literals + parts)])
    def negate(self):
        return ExistentialCondition(self.parameters, [p.negate() for p in self.parts])
    def has_universal_part(self):
        return True
class ExistentialCondition(QuantifiedCondition):
    """exists-condition over typed parameters."""
    def _untyped(self, parts):
        # Compile types away: exists x:T. P  ==>  exists x. (T(x) and P).
        type_literals = [Atom(par.type, [par.name]) for par in self.parameters]
        return ExistentialCondition(self.parameters,
                                    [Conjunction(type_literals + parts)])
    def negate(self):
        return UniversalCondition(self.parameters, [p.negate() for p in self.parts])
    def instantiate(self, var_mapping, init_facts, fluent_facts, unknown_facts, result):
        assert not result, "Condition not simplified"
        self.parts[0].instantiate(var_mapping, init_facts, fluent_facts, unknown_facts, result)
    def has_existential_part(self):
        return True
class Literal(Condition):
    """Base for Atom/NegatedAtom: a predicate applied to arguments."""
    # Defining __eq__ blocks inheritance of __hash__, so must set it explicitly.
    __hash__ = Condition.__hash__
    # NOTE(review): class-level mutable default shared by all literals;
    # appears intentional since literals have no sub-parts to mutate.
    parts = []
    def __init__(self, predicate, args):
        self.predicate = predicate
        self.args = tuple(args)
        self.hash = hash((self.__class__, self.predicate, self.args))
    def __eq__(self, other):
        # Compare hash first for speed reasons.
        return (self.hash == other.hash and
                self.__class__ is other.__class__ and
                self.predicate == other.predicate and
                self.args == other.args)
    def __ne__(self, other):
        return not self == other
    @property
    def key(self):
        # Sort key: literals order by predicate name, then arguments.
        return str(self.predicate), self.args
    def __lt__(self, other):
        return self.key < other.key
    def __le__(self, other):
        return self.key <= other.key
    def __str__(self):
        return "%s %s(%s)" % (self.__class__.__name__, self.predicate,
                              ", ".join(map(str, self.args)))
    def __repr__(self):
        return '<%s>' % self
    def _dump(self):
        return str(self)
    def change_parts(self, parts):
        return self
    def uniquify_variables(self, type_map, renamings={}):
        return self.rename_variables(renamings)
    def rename_variables(self, renamings):
        """Return a copy with arguments substituted via `renamings`."""
        new_args = tuple(renamings.get(arg, arg) for arg in self.args)
        return self.__class__(self.predicate, new_args)
    def replace_argument(self, position, new_arg):
        """Return a copy with the argument at `position` replaced."""
        new_args = list(self.args)
        new_args[position] = new_arg
        return self.__class__(self.predicate, new_args)
    def free_variables(self):
        # PDDL variables are the arguments starting with '?'.
        return set(arg for arg in self.args if arg[0] == "?")
class Atom(Literal):
    """A positive literal."""
    negated = False
    def to_untyped_strips(self):
        return [self]
    def instantiate(self, var_mapping, init_facts, fluent_facts, unknown_facts, result):
        args = [var_mapping.get(arg, arg) for arg in self.args]
        atom = Atom(self.predicate, args)
        if atom in fluent_facts:
            result.append(atom)
        elif atom in unknown_facts:
            result.append(atom)
        # POND case: else it is a static or an observation fact which
        # is always false.
    def negate(self):
        return NegatedAtom(self.predicate, self.args)
    def positive(self):
        return self
class NegatedAtom(Literal):
    """A negative literal."""
    negated = True
    def _relaxed(self, parts):
        # Delete relaxation: negative conditions are assumed satisfied.
        return Truth()
    def instantiate(self, var_mapping, init_facts, fluent_facts, unknown_facts,result):
        args = [var_mapping.get(arg, arg) for arg in self.args]
        atom = Atom(self.predicate, args)
        if atom in fluent_facts:
            result.append(NegatedAtom(self.predicate, args))
        elif atom in init_facts:
            # The positive fact is statically true, so its negation can
            # never hold.
            raise Impossible()
    def negate(self):
        return Atom(self.predicate, self.args)
    positive = negate
| StarcoderdataPython |
108936 | #!/usr/bin/env python
# coding: utf-8
# In[1]:
# Month lengths; February is 29, so this assumes a leap year — the
# start_week values below are consistent with 2020 (TODO confirm).
days_dict = {1:31, 2:29, 3:31, 4:30, 5:31, 6:30, 7:31,8:31, 9:30, 10:31, 11:30, 12:31}
# Weekday of the 1st of each month, Sunday-based (0 = Sunday).
start_week = {1:3, 2:6, 3:0, 4:3, 5:5, 6:1, 7:3, 8:6, 9:2, 10:4, 11:0, 12:2}
with open('Q3-output.txt', 'w') as f:
    for i in range(1,13):
        # Month title and weekday header (Chinese labels, Sun..Sat).
        print('\n\n\t\t\t%d月'%i, file = f)
        print('日\t一\t二\t三\t四\t五\t六', file = f)
        # Pad the first row up to the month's starting weekday.
        for k in range(start_week[i]):
            print('\t', end = '', file = f)
        for j in range(1, days_dict[i]+1):
            print(j, '\t', end = '', file = f)
            # Wrap after Saturday's column.
            if (start_week[i]+j) % 7 == 0:
                print('\n', file = f)
# In[ ]:
| StarcoderdataPython |
1660937 | """@file cluster_toolkit.py
Modeling using cluster_toolkit
"""
# Functions to model halo profiles
import numpy as np
import cluster_toolkit as ct
from . import func_layer
from . func_layer import *
from .parent_class import CLMModeling
from .. utils import _patch_rho_crit_to_cd2018
from .. cosmology.cluster_toolkit import AstroPyCosmology
Cosmology = AstroPyCosmology
__all__ = ['CTModeling', 'Modeling', 'Cosmology']+func_layer.__all__
def _assert_correct_type_ct(arg):
""" Convert the argument to a type compatible with cluster_toolkit
cluster_toolkit does not handle correctly scalar arguments that are
not float or numpy array and others that contain non-float64 elements.
It only convert lists to the correct type. To circumvent this we
pre-convert all arguments going to cluster_toolkit to the appropriated
types.
Parameters
----------
arg : array_like or scalar
Returns
-------
scale_factor : array_like
Scale factor
"""
if np.isscalar(arg):
return float(arg)
return np.array(arg).astype(np.float64, order='C', copy=False)
class CTModeling(CLMModeling):
    r"""Object with functions for halo mass modeling

    Attributes
    ----------
    backend: str
        Name of the backend being used
    massdef : str
        Profile mass definition (`mean`, `critical`, `virial` - letter case independent)
    delta_mdef : int
        Mass overdensity definition.
    halo_profile_model : str
        Profile model parameterization (`nfw`, `einasto`, `hernquist` - letter case independent)
    cosmo: Cosmology
        Cosmology object
    hdpm: Object
        Backend object with halo profiles
    mdef_dict: dict
        Dictionary with the definitions for mass
    hdpm_dict: dict
        Dictionary with the definitions for profile
    """
    # pylint: disable=too-many-instance-attributes

    def __init__(self, massdef='mean', delta_mdef=200, halo_profile_model='nfw',
                 validate_input=True):
        """Initialize the cluster_toolkit backend (only 'mean'/'nfw' supported)."""
        CLMModeling.__init__(self, validate_input)
        # Update class attributes
        self.backend = 'ct'
        self.mdef_dict = {'mean': 'mean'}
        self.hdpm_dict = {'nfw': 'nfw'}
        self.cosmo_class = AstroPyCosmology
        # Attributes exclusive to this class
        # Correction from cluster_toolkit's internal rho_crit to the
        # CODATA-2018-patched value (see _patch_rho_crit_to_cd2018).
        self.cor_factor = _patch_rho_crit_to_cd2018(2.77533742639e+11)
        # Set halo profile and cosmology
        self.set_halo_density_profile(halo_profile_model, massdef, delta_mdef)
        self.set_cosmo(None)

    def _set_halo_density_profile(self, halo_profile_model='nfw', massdef='mean', delta_mdef=200):
        """Set the halo density profile parameterization and mass definition."""
        # Update values
        self.halo_profile_model = halo_profile_model
        self.massdef = massdef
        self.delta_mdef = delta_mdef

    def _set_concentration(self, cdelta):
        """Set the halo concentration."""
        self.cdelta = cdelta

    def _set_mass(self, mdelta):
        """Set the halo mass."""
        self.mdelta = mdelta

    def _eval_3d_density(self, r3d, z_cl):
        """Evaluate the 3D NFW density profile at radii r3d (h factors applied)."""
        h = self.cosmo['h']
        Omega_m = self.cosmo.get_E2Omega_m(z_cl)*self.cor_factor
        return ct.density.rho_nfw_at_r(
            _assert_correct_type_ct(r3d)*h, self.mdelta*h,
            self.cdelta, Omega_m, delta=self.delta_mdef)*h**2

    def _eval_surface_density(self, r_proj, z_cl):
        """Evaluate the projected NFW surface density at radii r_proj."""
        h = self.cosmo['h']
        Omega_m = self.cosmo.get_E2Omega_m(z_cl)*self.cor_factor
        return ct.deltasigma.Sigma_nfw_at_R(
            _assert_correct_type_ct(r_proj)*h, self.mdelta*h,
            self.cdelta, Omega_m, delta=self.delta_mdef)*h*1.0e12 # pc**-2 to Mpc**-2

    def _eval_mean_surface_density(self, r_proj, z_cl):
        r''' Computes the mean value of surface density inside radius r_proj

        Parameters
        ----------
        r_proj : array_like
            Projected radial position from the cluster center in :math:`M\!pc`.
        z_cl: float
            Redshift of the cluster

        Returns
        -------
        array_like, float
            Excess surface density in units of :math:`M_\odot\ Mpc^{-2}`.

        Note
        ----
        This function just adds eval_surface_density+eval_excess_surface_density
        '''
        return (self.eval_surface_density(r_proj, z_cl)
                +self.eval_excess_surface_density(r_proj, z_cl))

    def _eval_excess_surface_density(self, r_proj, z_cl):
        """Evaluate DeltaSigma by integrating Sigma on an extended radial grid."""
        if np.min(r_proj) < 1.e-11:
            raise ValueError(
                f"Rmin = {np.min(r_proj):.2e} Mpc!"
                " This value is too small and may cause computational issues.")
        Omega_m = self.cosmo.get_E2Omega_m(z_cl)*self.cor_factor
        h = self.cosmo['h']
        r_proj = _assert_correct_type_ct(r_proj)*h
        # Computing sigma on a larger range than the radial range requested,
        # with at least 1000 points.
        sigma_r_proj = np.logspace(np.log10(np.min(
            r_proj))-1, np.log10(np.max(r_proj))+1, np.max([1000, 10*np.array(r_proj).size]))
        sigma = self.eval_surface_density(
            sigma_r_proj/h, z_cl)/(h*1e12) # rm norm for ct
        # ^ Note: Let's not use this naming convention when transfering ct to ccl....
        return ct.deltasigma.DeltaSigma_at_R(
            r_proj, sigma_r_proj, sigma, self.mdelta*h,
            self.cdelta, Omega_m, delta=self.delta_mdef)*h*1.0e12 # pc**-2 to Mpc**-2
# Backend-agnostic alias: callers import `Modeling` regardless of backend.
Modeling = CTModeling
| StarcoderdataPython |
1729002 | <reponame>Keneral/atools
#!/usr/bin/env python3.4
#
# Copyright 2016 - The Android Open Source Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
__author__ = "<EMAIL>"
from future import standard_library
standard_library.install_aliases()
import copy
import importlib
import inspect
import os
import pkgutil
import sys
from acts import keys
from acts import logger
from acts import records
from acts import signals
from acts import utils
class USERError(Exception):
    """Raised when a problem is caused by a user mistake, e.g. a wrong
    command, a malformed config, bad test info, wrong test paths, etc.
    """
class TestRunner(object):
    """The class that instantiates test classes, executes test cases, and
    reports results.

    Attributes:
        self.test_run_info: A dictionary containing the information needed by
                            test classes for this test run, including params,
                            controllers, and other objects. All of these will
                            be passed to test classes.
        self.test_configs: A dictionary that is the original test configuration
                           passed in by user.
        self.id: A string that is the unique identifier of this test run.
        self.log_path: A string representing the path of the dir under which
                       all logs from this test run should be written.
        self.log: The logger object used throughout this test run.
        self.controller_registry: A dictionary that holds the controller
                                  objects used in a test run.
        self.controller_destructors: A dictionary that holds the controller
                                     destructors. Keys are controllers' names.
        self.test_classes: A dictionary where we can look up the test classes
                           by name to instantiate.
        self.run_list: A list of tuples specifying what tests to run.
        self.results: The test result object used to record the results of
                      this test run.
        self.running: A boolean signifies whether this test run is ongoing or
                      not.
    """

    def __init__(self, test_configs, run_list):
        self.test_run_info = {}
        self.test_configs = test_configs
        self.testbed_configs = self.test_configs[keys.Config.key_testbed.value]
        self.testbed_name = self.testbed_configs[
            keys.Config.key_testbed_name.value]
        start_time = logger.get_log_file_timestamp()
        # Unique identifier of this run: "<testbed name>@<start timestamp>".
        self.id = "{}@{}".format(self.testbed_name, start_time)
        # log_path should be set before parsing configs.
        l_path = os.path.join(
            self.test_configs[keys.Config.key_log_path.value],
            self.testbed_name,
            start_time)
        self.log_path = os.path.abspath(l_path)
        self.log = logger.get_test_logger(self.log_path,
                                          self.id,
                                          self.testbed_name)
        self.controller_registry = {}
        self.controller_destructors = {}
        self.run_list = run_list
        self.results = records.TestResult()
        self.running = False

    def import_test_modules(self, test_paths):
        """Imports test classes from test scripts.

        1. Locate all .py files under test paths.
        2. Import the .py files as modules.
        3. Find the module members that are test classes.
        4. Categorize the test classes by name.

        Args:
            test_paths: A list of directory paths where the test files reside.

        Returns:
            A dictionary where keys are test class name strings, values are
            actual test classes that can be instantiated.

        Raises:
            USERError: If a test class listed on the run list fails to import.
        """
        def is_testfile_name(name, ext):
            # Test scripts are .py files named either "XxxTest" or "xxx_test".
            if ext == ".py":
                if name.endswith("Test") or name.endswith("_test"):
                    return True
            return False
        file_list = utils.find_files(test_paths, is_testfile_name)
        test_classes = {}
        for path, name, _ in file_list:
            sys.path.append(path)
            try:
                module = importlib.import_module(name)
            except Exception:
                # Narrowed from a bare "except:" so KeyboardInterrupt and
                # SystemExit are not swallowed by the import fallback below.
                for test_cls_name, _ in self.run_list:
                    alt_name = name.replace('_', '').lower()
                    alt_cls_name = test_cls_name.lower()
                    # Only block if a test class on the run list causes an
                    # import error. We need to check against both naming
                    # conventions: AaaBbb and aaa_bbb.
                    if name == test_cls_name or alt_name == alt_cls_name:
                        msg = ("Encountered error importing test class %s, "
                               "abort.") % test_cls_name
                        # This exception is logged here to help with debugging
                        # under py2, because "raise X from Y" syntax is only
                        # supported under py3.
                        self.log.exception(msg)
                        raise USERError(msg)
                continue
            for member_name in dir(module):
                if not member_name.startswith("__"):
                    if member_name.endswith("Test"):
                        test_class = getattr(module, member_name)
                        if inspect.isclass(test_class):
                            test_classes[member_name] = test_class
        return test_classes

    @staticmethod
    def verify_controller_module(module):
        """Verifies a module object follows the required interface for
        controllers.

        Args:
            module: An object that is a controller module. This is usually
                    imported with import statements or loaded by importlib.

        Raises:
            ControllerError is raised if the module does not match the ACTS
            controller interface, or one of the required members is null.
        """
        required_attributes = ("create",
                               "destroy",
                               "ACTS_CONTROLLER_CONFIG_NAME")
        for attr in required_attributes:
            if not hasattr(module, attr):
                raise signals.ControllerError(
                    ("Module %s missing required "
                     "controller module attribute %s.") % (module.__name__,
                                                           attr))
            if not getattr(module, attr):
                raise signals.ControllerError(
                    ("Controller interface %s in %s "
                     "cannot be null.") % (attr, module.__name__))

    def register_controller(self, module, required=True):
        """Registers a controller module for a test run.

        This declares a controller dependency of this test class. If the target
        module exists and matches the controller interface, the controller
        module will be instantiated with corresponding configs in the test
        config file. The module should be imported first.

        Params:
            module: A module that follows the controller module interface.
            required: A bool. If True, failing to register the specified
                      controller module raises exceptions. If False, returns
                      None upon failures.

        Returns:
            A list of controller objects instantiated from controller_module,
            or None.

        Raises:
            When required is True, ControllerError is raised if no
            corresponding config can be found.
            Regardless of the value of "required", ControllerError is raised
            if the controller module has already been registered or any other
            error occurred in the registration process.
        """
        TestRunner.verify_controller_module(module)
        try:
            # If this is a builtin controller module, use the default ref name.
            module_ref_name = module.ACTS_CONTROLLER_REFERENCE_NAME
            builtin = True
        except AttributeError:
            # Or use the module's name
            builtin = False
            module_ref_name = module.__name__.split('.')[-1]
        if module_ref_name in self.controller_registry:
            raise signals.ControllerError(
                ("Controller module %s has already "
                 "been registered. It can not be "
                 "registered again.") % module_ref_name)
        # Create controller objects.
        create = module.create
        module_config_name = module.ACTS_CONTROLLER_CONFIG_NAME
        if module_config_name not in self.testbed_configs:
            if required:
                raise signals.ControllerError(
                    "No corresponding config found for %s" %
                    module_config_name)
            self.log.warning(
                "No corresponding config found for optional controller %s",
                module_config_name)
            return None
        try:
            # Make a deep copy of the config to pass to the controller module,
            # in case the controller module modifies the config internally.
            original_config = self.testbed_configs[module_config_name]
            controller_config = copy.deepcopy(original_config)
            objects = create(controller_config, self.log)
        except Exception:
            self.log.exception(("Failed to initialize objects for controller "
                                "%s, abort!"), module_config_name)
            raise
        if not isinstance(objects, list):
            # Fixed: this previously raised the undefined name
            # "ControllerError"; the class only exists in the signals module.
            raise signals.ControllerError(
                ("Controller module %s did not return a list"
                 " of objects, abort.") % module_ref_name)
        self.controller_registry[module_ref_name] = objects
        # TODO(angli): After all tests move to register_controller, stop
        # tracking controller objs in test_run_info.
        if builtin:
            self.test_run_info[module_ref_name] = objects
        self.log.debug("Found %d objects for controller %s", len(objects),
                       module_config_name)
        destroy_func = module.destroy
        self.controller_destructors[module_ref_name] = destroy_func
        return objects

    def unregister_controllers(self):
        """Destroy controller objects and clear internal registry.

        This will be called at the end of each TestRunner.run call.
        """
        for name, destroy in self.controller_destructors.items():
            try:
                self.log.debug("Destroying %s.", name)
                destroy(self.controller_registry[name])
            except Exception:
                # Best-effort teardown: log and continue with the remaining
                # controllers instead of aborting cleanup halfway.
                self.log.exception("Exception occurred destroying %s.", name)
        self.controller_registry = {}
        self.controller_destructors = {}

    def parse_config(self, test_configs):
        """Parses the test configuration and unpacks objects and parameters
        into a dictionary to be passed to test classes.

        Args:
            test_configs: A json object representing the test configurations.
        """
        self.test_run_info[
            keys.Config.ikey_testbed_name.value] = self.testbed_name
        # Instantiate builtin controllers
        for ctrl_name in keys.Config.builtin_controller_names.value:
            if ctrl_name in self.testbed_configs:
                module_name = keys.get_module_name(ctrl_name)
                module = importlib.import_module("acts.controllers.%s" %
                                                 module_name)
                self.register_controller(module)
        # Unpack other params.
        self.test_run_info["register_controller"] = self.register_controller
        self.test_run_info[keys.Config.ikey_logpath.value] = self.log_path
        self.test_run_info[keys.Config.ikey_logger.value] = self.log
        cli_args = test_configs[keys.Config.ikey_cli_args.value]
        self.test_run_info[keys.Config.ikey_cli_args.value] = cli_args
        user_param_pairs = []
        for item in test_configs.items():
            if item[0] not in keys.Config.reserved_keys.value:
                user_param_pairs.append(item)
        self.test_run_info[keys.Config.ikey_user_param.value] = copy.deepcopy(
            dict(user_param_pairs))

    def set_test_util_logs(self, module=None):
        """Sets the log object to each test util module.

        This recursively include all modules under acts.test_utils and sets
        the main test logger to each module.

        Args:
            module: A module under acts.test_utils.
        """
        # Initial condition of recursion.
        if not module:
            module = importlib.import_module("acts.test_utils")
        # Somehow pkgutil.walk_packages is not working for me.
        # Using iter_modules for now.
        pkg_iter = pkgutil.iter_modules(module.__path__, module.__name__ + '.')
        for _, module_name, ispkg in pkg_iter:
            m = importlib.import_module(module_name)
            if ispkg:
                self.set_test_util_logs(module=m)
            else:
                self.log.debug("Setting logger to test util module %s",
                               module_name)
                setattr(m, "log", self.log)

    def run_test_class(self, test_cls_name, test_cases=None):
        """Instantiates and executes a test class.

        If test_cases is None, the test cases listed by self.tests will be
        executed instead. If self.tests is empty as well, no test case in this
        test class will be executed.

        Args:
            test_cls_name: Name of the test class to execute.
            test_cases: List of test case names to execute within the class.

        Raises:
            USERError: If the test class cannot be found in the test paths.
        """
        try:
            test_cls = self.test_classes[test_cls_name]
        except KeyError:
            raise USERError(("Unable to locate class %s in any of the test "
                             "paths specified.") % test_cls_name)
        with test_cls(self.test_run_info) as test_cls_instance:
            try:
                cls_result = test_cls_instance.run(test_cases)
                self.results += cls_result
            except signals.TestAbortAll as e:
                # Record whatever completed before the abort, then propagate
                # so the caller stops running subsequent classes.
                self.results += e.results
                raise e

    def run(self):
        """Executes test cases.

        This will instantiate controller and test classes, and execute test
        classes. This can be called multiple times to repeatedly execute the
        requested test cases.

        A call to TestRunner.stop should eventually happen to conclude the
        life cycle of a TestRunner.
        """
        if not self.running:
            self.running = True
            # Initialize controller objects and pack appropriate objects/params
            # to be passed to test class.
            self.parse_config(self.test_configs)
            t_configs = self.test_configs[keys.Config.key_test_paths.value]
            self.test_classes = self.import_test_modules(t_configs)
            self.log.debug("Executing run list %s.", self.run_list)
        try:
            for test_cls_name, test_case_names in self.run_list:
                if not self.running:
                    break
                if test_case_names:
                    self.log.debug("Executing test cases %s in test class %s.",
                                   test_case_names,
                                   test_cls_name)
                else:
                    self.log.debug("Executing test class %s", test_cls_name)
                try:
                    self.run_test_class(test_cls_name, test_case_names)
                except signals.TestAbortAll as e:
                    self.log.warning(("Abort all subsequent test classes. "
                                      "Reason: %s"), e)
                    raise
        finally:
            # Controllers are torn down even when a class aborts the run.
            self.unregister_controllers()

    def stop(self):
        """Releases resources from test run. Should always be called after
        TestRunner.run finishes.

        This function concludes a test run and writes out a test report.
        """
        if self.running:
            msg = "\nSummary for test run %s: %s\n" % (
                self.id, self.results.summary_str())
            self._write_results_json_str()
            self.log.info(msg.strip())
            logger.kill_test_logger(self.log)
            self.running = False

    def _write_results_json_str(self):
        """Writes out a json file with the test result info for easy parsing.

        TODO(angli): This should be replaced by standard log record mechanism.
        """
        path = os.path.join(self.log_path, "test_run_summary.json")
        with open(path, 'w') as f:
            f.write(self.results.json_str())
if __name__ == "__main__":
    # This module is import-only; there is no standalone CLI entry point.
    pass
| StarcoderdataPython |
3286380 | import pytest
import time
import json
from datetime import datetime
from kubernetes.client.rest import ApiException
import backupstore
from backupstore import set_random_backupstore # NOQA
import common
from common import client, core_api, apps_api, batch_v1_beta_api # NOQA
from common import random_labels, volume_name # NOQA
from common import storage_class, statefulset, pvc # NOQA
from common import make_deployment_with_pvc # NOQA
from common import get_self_host_id
from common import create_storage_class
from common import create_pv_for_volume
from common import create_pvc_for_volume
from common import create_and_check_volume
from common import read_volume_data
from common import wait_for_volume_detached
from common import wait_for_volume_healthy
from common import wait_for_volume_healthy_no_frontend
from common import wait_for_volume_recurring_job_update
from common import wait_volume_kubernetes_status
from common import write_pod_volume_random_data
from common import write_volume_random_data
from common import create_and_wait_deployment
from common import wait_deployment_replica_ready
from common import create_and_wait_statefulset
from common import get_statefulset_pod_info
from common import update_statefulset_manifests
from common import check_pod_existence
from common import crash_engine_process_with_sigkill
from common import wait_for_backup_volume
from common import wait_for_backup_completion
from common import wait_for_backup_count
from common import wait_for_backup_to_start
from common import wait_for_snapshot_count
from common import check_recurring_jobs
from common import cleanup_all_recurring_jobs
from common import create_recurring_jobs
from common import update_recurring_job
from common import wait_for_recurring_jobs_cleanup
from common import wait_for_cron_job_count
from common import wait_for_cron_job_create
from common import wait_for_cron_job_delete
from common import JOB_LABEL
from common import KUBERNETES_STATUS_LABEL
from common import LONGHORN_NAMESPACE
from common import RETRY_BACKUP_COUNTS
from common import RETRY_BACKUP_INTERVAL
from common import SETTING_RECURRING_JOB_WHILE_VOLUME_DETACHED
from common import SIZE, Mi, Gi
# Backup label key added by Longhorn to backups created by a recurring job,
# and the job name used by these tests.
RECURRING_JOB_LABEL = "RecurringJob"
RECURRING_JOB_NAME = "recurring-test"

# Key names used when building recurring-job spec dicts for the Longhorn API.
NAME = "name"
ISGROUP = "isGroup"
TASK = "task"
GROUPS = "groups"
CRON = "cron"
RETAIN = "retain"
SNAPSHOT = "snapshot"
BACKUP = "backup"
CONCURRENCY = "concurrency"
LABELS = "labels"
DEFAULT = "default"

# Cron expression that fires every minute.
SCHEDULE_1MIN = "* * * * *"
# Seconds subtracted from sleep intervals to account for the time spent
# writing random data -- presumably an empirical budget; verify if writes grow.
WRITE_DATA_INTERVAL = 10
def wait_until_begin_of_a_minute():
    """Block until the UTC wall clock reaches the start of a minute.

    Polls once per second and returns as soon as the seconds field is 0.
    """
    while datetime.utcnow().second != 0:
        time.sleep(1)
def wait_until_begin_of_an_even_minute():
    """Block until the start of an even-numbered minute (UTC).

    Polls once per second; returns when seconds == 0 and the minute is even.
    """
    now = datetime.utcnow()
    while now.second != 0 or now.minute % 2 != 0:
        time.sleep(1)
        now = datetime.utcnow()
# wait for backup progress created by recurring job to
# exceed the minimum_progress percentage.
def wait_for_recurring_backup_to_start(client, core_api, volume_name, expected_snapshot_count, minimum_progress=0):  # NOQA
    """Wait until a recurring-job backup of ``volume_name`` is in progress.

    Waits for the job pod to exist, finds the snapshot currently being backed
    up (the parent of volume-head), then blocks until the backup for that
    snapshot has progressed past ``minimum_progress`` percent.

    Returns:
        The name of the snapshot being backed up.
    """
    # Job pod naming: "<volume>-backup-c" -- presumably the cron job pod
    # created by Longhorn for the backup task; verify against the manager.
    job_pod_name = volume_name + '-backup-c'
    snapshot_name = ''
    snapshots = []
    check_pod_existence(core_api, job_pod_name, namespace=LONGHORN_NAMESPACE)

    # Find the snapshot which is being backed up
    for _ in range(RETRY_BACKUP_COUNTS):
        volume = client.by_id_volume(volume_name)
        try:
            snapshots = volume.snapshotList()
            # +1 accounts for the implicit volume-head snapshot.
            assert len(snapshots) == expected_snapshot_count + 1
            for snapshot in snapshots:
                if snapshot.children['volume-head']:
                    snapshot_name = snapshot.name
                    break
            if len(snapshot_name) != 0:
                break
        except (AttributeError, ApiException, AssertionError):
            # Volume/snapshot list may be transiently unavailable; retry.
            time.sleep(RETRY_BACKUP_INTERVAL)
    assert len(snapshot_name) != 0

    # To ensure the progress of backup
    wait_for_backup_to_start(client, volume_name,
                             snapshot_name=snapshot_name,
                             chk_progress=minimum_progress)
    return snapshot_name
@pytest.mark.recurring_job  # NOQA
def test_recurring_job(set_random_backupstore, client, volume_name):  # NOQA
    """
    Scenario : test recurring job (S3/NFS)

    Given `snapshot1` recurring job created and cron at 1 min and retain 2.
          `backup1` recurring job created and cron at 2 min and retain 1.
          `backup2` recurring job created and cron at 1 min and retain 2.
    And a volume created and attached.

    When label volume with recurring job `snapshot1`.
         label volume with recurring job `backup1`.
    And wait until the 20th second since the beginning of an even minute.
    And write data to volume.
        wait for 2 minutes.
    And write data to volume.
        wait for 2 minutes.
    Then volume have 4 snapshots.
         (2 from `snapshot1`, 1 from `backup1`, 1 from `volume-head`)

    When label volume with recurring job `backup2`
    And write data to volume.
        wait for 2 minutes.
    And write data to volume.
        wait for 2 minutes.
    Then volume have 5 snapshots.
         (2 from `snapshot1`, 1 from `backup1`, 1 from `backup2`,
          1 from `volume-head`)

    When wait until backups complete.
    Then `backup1` completed 2 backups.
         `backup2` completed 3 backups.
    """
    '''
    The timeline looks like this:
    0   1   2   3   4   5   6   7   8   9   10 (minute)
    |W  |   | W |   |   |W  |   | W |   |   |  (write data)
    | S   | S |   | S   | S |   |              (snapshot1)
    |   |   B |   B |   |   |   |   |          (backup1)
    |   |   |   |   |   |   B   B   B |   |    (backup2)
    '''
    snap1 = SNAPSHOT + "1"
    back1 = BACKUP + "1"
    back2 = BACKUP + "2"
    recurring_jobs = {
        snap1: {
            TASK: SNAPSHOT,
            GROUPS: [],
            CRON: SCHEDULE_1MIN,
            RETAIN: 2,
            CONCURRENCY: 1,
            LABELS: {},
        },
        back1: {
            TASK: BACKUP,
            GROUPS: [],
            CRON: "*/2 * * * *",
            RETAIN: 1,
            CONCURRENCY: 1,
            LABELS: {},
        },
        back2: {
            TASK: BACKUP,
            GROUPS: [],
            CRON: SCHEDULE_1MIN,
            RETAIN: 2,
            CONCURRENCY: 1,
            LABELS: {},
        },
    }
    create_recurring_jobs(client, recurring_jobs)
    check_recurring_jobs(client, recurring_jobs)

    volume = client.create_volume(name=volume_name, size=SIZE,
                                  numberOfReplicas=2)
    volume = wait_for_volume_detached(client, volume_name)
    volume = volume.attach(hostId=get_self_host_id())
    volume = wait_for_volume_healthy(client, volume_name)

    volume.recurringJobAdd(name=snap1, isGroup=False)
    volume.recurringJobAdd(name=back1, isGroup=False)
    wait_for_volume_recurring_job_update(volume,
                                         jobs=[snap1, back1],
                                         groups=[DEFAULT])

    wait_until_begin_of_an_even_minute()
    # wait until the 20th second of an even minute
    # make sure that snapshot job happens before the backup job
    time.sleep(20)

    write_volume_random_data(volume)
    time.sleep(60 * 2)
    write_volume_random_data(volume)
    time.sleep(60 * 2)
    wait_for_snapshot_count(volume, 4)

    volume.recurringJobAdd(name=back2, isGroup=False)
    wait_for_volume_recurring_job_update(volume,
                                         jobs=[snap1, back1, back2],
                                         groups=[DEFAULT])

    write_volume_random_data(volume)
    time.sleep(60 * 2)
    write_volume_random_data(volume)
    time.sleep(60 * 2)

    # 2 from job_snap, 1 from job_backup, 1 from job_backup2, 1 volume-head
    wait_for_snapshot_count(volume, 5)

    complete_backup_1_count = 0
    complete_backup_2_count = 0
    volume = client.by_id_volume(volume_name)
    wait_for_backup_completion(client, volume_name)
    # Snapshot names are prefixed with the job that created them, so the
    # per-job backup counts can be recovered from the snapshot names.
    for b in volume.backupStatus:
        if "backup1-" in b.snapshot:
            complete_backup_1_count += 1
        elif "backup2-" in b.snapshot:
            complete_backup_2_count += 1

    # 2 completed backups from backup1
    # 2 or more completed backups from backup2
    # NOTE: NFS backup can be slow sometimes and error prone
    assert complete_backup_1_count == 2
    assert complete_backup_2_count >= 2
    assert complete_backup_2_count < 4
@pytest.mark.recurring_job  # NOQA
def test_recurring_job_in_volume_creation(client, volume_name):  # NOQA
    """
    Scenario: test create volume with recurring jobs

    Given 2 recurring jobs created.
    And volume create and a attached.

    When label recurring job to volume.
    And write data to volume.
        wait 2.5 minutes.
    And write data to volume.
        wait 2.5 minutes.

    Then volume have 4 snapshots.
    """
    recurring_jobs = {
        SNAPSHOT: {
            TASK: SNAPSHOT,
            GROUPS: [],
            CRON: SCHEDULE_1MIN,
            RETAIN: 2,
            CONCURRENCY: 1,
            LABELS: {},
        },
        BACKUP: {
            TASK: BACKUP,
            GROUPS: [],
            CRON: "*/2 * * * *",
            RETAIN: 1,
            CONCURRENCY: 1,
            LABELS: {},
        },
    }
    create_recurring_jobs(client, recurring_jobs)
    check_recurring_jobs(client, recurring_jobs)

    client.create_volume(name=volume_name, size=SIZE,
                         numberOfReplicas=2)
    volume = wait_for_volume_detached(client, volume_name)
    volume.attach(hostId=get_self_host_id())
    volume = wait_for_volume_healthy(client, volume_name)

    volume.recurringJobAdd(name=SNAPSHOT, isGroup=False)
    volume.recurringJobAdd(name=BACKUP, isGroup=False)
    wait_for_volume_recurring_job_update(volume,
                                         jobs=[SNAPSHOT, BACKUP],
                                         groups=[DEFAULT])

    wait_until_begin_of_an_even_minute()
    # wait until the 10th second of an even minute
    # to avoid writing data at the same time backup is taking
    time.sleep(10)

    write_volume_random_data(volume)
    time.sleep(150)  # 2.5 minutes
    write_volume_random_data(volume)
    time.sleep(150)  # 2.5 minutes

    wait_for_snapshot_count(volume, 4)
@pytest.mark.recurring_job  # NOQA
def test_recurring_job_duplicated(client):  # NOQA
    """
    Scenario: test create duplicated recurring jobs

    Given recurring job created.

    When create same recurring job again.
    Then should fail.
    """
    job_spec = {
        TASK: BACKUP,
        GROUPS: [],
        CRON: SCHEDULE_1MIN,
        RETAIN: 1,
        CONCURRENCY: 2,
        LABELS: {},
    }
    recurring_jobs = {RECURRING_JOB_NAME: job_spec}

    create_recurring_jobs(client, recurring_jobs)
    check_recurring_jobs(client, recurring_jobs)

    # A second creation with the same name must be rejected by the API.
    with pytest.raises(Exception) as e:
        create_recurring_jobs(client, recurring_jobs)
    assert "already exists" in str(e.value)
@pytest.mark.recurring_job  # NOQA
def test_recurring_job_in_storageclass(set_random_backupstore, client, core_api, storage_class, statefulset):  # NOQA
    """
    Test create volume with StorageClass contains recurring jobs

    1. Create a StorageClass with recurring jobs
    2. Create a StatefulSet with PVC template and StorageClass
    3. Verify the recurring jobs run correctly.
    """
    recurring_jobs = {
        SNAPSHOT: {
            TASK: SNAPSHOT,
            GROUPS: [],
            CRON: SCHEDULE_1MIN,
            RETAIN: 2,
            CONCURRENCY: 1,
            LABELS: {},
        },
        BACKUP: {
            TASK: BACKUP,
            GROUPS: [],
            CRON: "*/2 * * * *",
            RETAIN: 1,
            CONCURRENCY: 1,
            LABELS: {},
        },
    }
    create_recurring_jobs(client, recurring_jobs)
    check_recurring_jobs(client, recurring_jobs)

    # Attach both jobs to volumes provisioned from this StorageClass via the
    # recurringJobSelector parameter (JSON-encoded list of job references).
    recurring_job_selector = [
        {
            NAME: SNAPSHOT,
            ISGROUP: False,
        },
        {
            NAME: BACKUP,
            ISGROUP: False,
        },
    ]
    storage_class["parameters"]["recurringJobSelector"] = \
        json.dumps(recurring_job_selector)
    create_storage_class(storage_class)

    # wait until the beginning of an even minute
    wait_until_begin_of_an_even_minute()

    statefulset_name = 'recurring-job-in-storageclass-test'
    update_statefulset_manifests(statefulset, storage_class, statefulset_name)
    start_time = datetime.utcnow()
    create_and_wait_statefulset(statefulset)
    statefulset_creating_duration = datetime.utcnow() - start_time

    # The timing below only works if creation stayed within one 2.5-minute
    # scheduling window.
    assert 150 > statefulset_creating_duration.seconds
    # We want to write data exactly at the 150th second since the start_time
    time.sleep(150 - statefulset_creating_duration.seconds)

    pod_info = get_statefulset_pod_info(core_api, statefulset)
    volume_info = [p['pv_name'] for p in pod_info]
    pod_names = [p['pod_name'] for p in pod_info]

    # write random data to volume to trigger recurring snapshot and backup job
    volume_data_path = "/data/test"
    for pod_name in pod_names:
        write_pod_volume_random_data(core_api, pod_name, volume_data_path, 2)

    time.sleep(150)  # 2.5 minutes

    for volume_name in volume_info:  # NOQA
        volume = client.by_id_volume(volume_name)
        wait_for_snapshot_count(volume, 4)
@pytest.mark.recurring_job  # NOQA
def test_recurring_job_labels(set_random_backupstore, client, random_labels, volume_name):  # NOQA
    """
    Scenario: test a recurring job with labels (S3/NFS)

    Given a recurring job created,
          with `default` in groups,
          with random labels.
    And volume created and attached.
    And write data to volume.

    When add another label to the recurring job.
    And write data to volume.
    And wait after scheduled time.

    Then should have 2 snapshots.
    And backup should have correct labels.
    """
    # Thin wrapper: the shared helper below runs the scenario with the
    # default size and no backing image.
    recurring_job_labels_test(client, random_labels, volume_name)  # NOQA
def recurring_job_labels_test(client, labels, volume_name, size=SIZE, backing_image=""):  # NOQA
    """Shared scenario body for the recurring-job label tests.

    Creates a backup recurring job carrying ``labels``, runs it against a
    fresh volume, adds one more label mid-run, and verifies the resulting
    backup carries every user label plus the RecurringJob label.
    """
    recurring_jobs = {
        RECURRING_JOB_NAME: {
            TASK: BACKUP,
            GROUPS: [DEFAULT],
            CRON: SCHEDULE_1MIN,
            RETAIN: 1,
            CONCURRENCY: 2,
            LABELS: labels,
        },
    }
    create_recurring_jobs(client, recurring_jobs)
    check_recurring_jobs(client, recurring_jobs)

    client.create_volume(name=volume_name, size=size,
                         numberOfReplicas=2, backingImage=backing_image)
    volume = wait_for_volume_detached(client, volume_name)
    volume.attach(hostId=get_self_host_id())
    volume = wait_for_volume_healthy(client, volume_name)
    write_volume_random_data(volume)
    time.sleep(75 - WRITE_DATA_INTERVAL)  # 1 minute 15 second

    # NOTE: this mutates the caller's ``labels`` dict in place.
    labels["we-added-this-label"] = "definitely"
    update_recurring_job(client, RECURRING_JOB_NAME,
                         recurring_jobs[RECURRING_JOB_NAME][GROUPS],
                         labels)
    write_volume_random_data(volume)
    time.sleep(135)  # 2 minute 15 second

    # 1 from Backup, 1 from Volume Head.
    wait_for_snapshot_count(volume, 2)

    # Verify the Labels on the actual Backup.
    bv = client.by_id_backupVolume(volume_name)
    wait_for_backup_count(bv, 1)

    backups = bv.backupList().data
    b = bv.backupGet(name=backups[0].name)

    for key, val in iter(labels.items()):
        assert b.labels.get(key) == val
    assert b.labels.get(RECURRING_JOB_LABEL) == RECURRING_JOB_NAME
    # One extra Label from RecurringJob.
    assert len(b.labels) == len(labels) + 1
    wait_for_backup_volume(client, volume_name, backing_image)
@pytest.mark.csi  # NOQA
@pytest.mark.recurring_job
def test_recurring_job_kubernetes_status(set_random_backupstore, client, core_api, volume_name):  # NOQA
    """
    Scenario: test recurringJob properly backs up the KubernetesStatus (S3/NFS)

    Given volume created and detached.
    And PV from volume created and verified.

    When create backup recurring job to run every 2 minutes.
    And attach volume.
    And write some data to volume.
    And wait 5 minutes.

    Then volume have 2 snapshots.
         volume have 1 backup.
    And backup have the Kubernetes Status labels.
    """
    client.create_volume(name=volume_name, size=SIZE, numberOfReplicas=2)
    volume = wait_for_volume_detached(client, volume_name)

    pv_name = "pv-" + volume_name
    create_pv_for_volume(client, core_api, volume, pv_name)
    # Expected Kubernetes status for a PV that is not yet claimed by a PVC.
    ks = {
        'pvName': pv_name,
        'pvStatus': 'Available',
        'namespace': '',
        'pvcName': '',
        'lastPVCRefAt': '',
        'lastPodRefAt': '',
    }
    wait_volume_kubernetes_status(client, volume_name, ks)

    recurring_jobs = {
        RECURRING_JOB_NAME: {
            TASK: BACKUP,
            GROUPS: [DEFAULT],
            CRON: "*/2 * * * *",
            RETAIN: 1,
            CONCURRENCY: 1,
            LABELS: {},
        },
    }
    create_recurring_jobs(client, recurring_jobs)
    check_recurring_jobs(client, recurring_jobs)

    volume.attach(hostId=get_self_host_id())
    volume = wait_for_volume_healthy(client, volume_name)
    write_volume_random_data(volume)
    time.sleep(60 * 5)

    # 1 from Backup, 1 from Volume Head.
    wait_for_snapshot_count(volume, 2)

    # Verify the Labels on the actual Backup.
    bv = client.by_id_backupVolume(volume_name)
    backups = bv.backupList().data
    assert len(backups) == 1

    b = bv.backupGet(name=backups[0].name)
    # The KubernetesStatus label is stored as a JSON-encoded string.
    status = json.loads(b.labels.get(KUBERNETES_STATUS_LABEL))
    assert b.labels.get(RECURRING_JOB_LABEL) == RECURRING_JOB_NAME
    assert status == {
        'lastPodRefAt': '',
        'lastPVCRefAt': '',
        'namespace': '',
        'pvcName': '',
        'pvName': pv_name,
        'pvStatus': 'Available',
        'workloadsStatus': None
    }
    # Two Labels: KubernetesStatus and RecurringJob.
    assert len(b.labels) == 2
def test_recurring_jobs_maximum_retain(client, core_api, volume_name):  # NOQA
    """
    Scenario: test recurring jobs' maximum retain

    Given set a recurring job retain to 51.

    When create recurring job.
    Then should fail.

    When set recurring job retain to 50.
    And create recurring job.
    Then recurring job created with retain equals to 50.

    When update recurring job retain to 51.
    Then should fail.
    """
    # set max total number of retain to exceed 50
    recurring_jobs = {
        RECURRING_JOB_NAME: {
            TASK: BACKUP,
            GROUPS: [],
            CRON: SCHEDULE_1MIN,
            RETAIN: 51,
            CONCURRENCY: 1,
            LABELS: {},
        },
    }

    with pytest.raises(Exception) as e:
        create_recurring_jobs(client, recurring_jobs)
    # The server-side error message contains an escaped apostrophe.
    assert "Job Can\\'t retain more than 50 snapshots" in str(e.value)

    recurring_jobs[RECURRING_JOB_NAME][RETAIN] = 50
    create_recurring_jobs(client, recurring_jobs)

    recurring_job = client.by_id_recurring_job(RECURRING_JOB_NAME)
    assert recurring_job.retain == 50

    with pytest.raises(Exception) as e:
        update_recurring_job(client, RECURRING_JOB_NAME,
                             groups=[], labels={}, retain=51)
    assert "Job Can\\'t retain more than 50 snapshots" in str(e.value)
@pytest.mark.recurring_job  # NOQA
def test_recurring_job_detached_volume(client, batch_v1_beta_api, volume_name):  # NOQA
    """
    Scenario: test recurring job while volume is detached

    Given a volume created, and attached.
    And write some data to the volume.
    And detach the volume.

    When create a recurring job running at 1 minute interval,
         and with `default` in groups,
         and with `retain` set to `2`.
    And 1 cron job should be created.
    And wait for 2 minutes.
    And attach volume and wait until healthy.
    Then the volume should have 1 snapshot.

    When wait for 1 minute.
    Then then volume should have only 2 snapshots.
    """
    client.create_volume(name=volume_name, size=SIZE)
    volume = wait_for_volume_detached(client, volume_name)

    self_host = get_self_host_id()
    volume.attach(hostId=self_host)
    volume = wait_for_volume_healthy(client, volume_name)
    write_volume_random_data(volume)
    volume.detach()

    recurring_jobs = {
        RECURRING_JOB_NAME: {
            TASK: BACKUP,
            GROUPS: [DEFAULT],
            CRON: SCHEDULE_1MIN,
            RETAIN: 2,
            CONCURRENCY: 1,
            LABELS: {},
        },
    }
    create_recurring_jobs(client, recurring_jobs)
    check_recurring_jobs(client, recurring_jobs)
    wait_for_cron_job_count(batch_v1_beta_api, 1)

    # While detached no snapshots should be taken; the offset compensates
    # for the time spent writing data above.
    time.sleep(60 * 2 - WRITE_DATA_INTERVAL)

    volume.attach(hostId=self_host)
    volume = wait_for_volume_healthy(client, volume_name)
    wait_for_snapshot_count(volume, 1)

    time.sleep(60)
    wait_for_snapshot_count(volume, 2)
def test_recurring_jobs_allow_detached_volume(set_random_backupstore, client, core_api, apps_api, volume_name, make_deployment_with_pvc):  # NOQA
    """
    Scenario: test recurring jobs for detached volume with
              `allow-recurring-job-while-volume-detached` set to true

    Context: In the current Longhorn implementation, users cannot do recurring
             backup when volumes are detached.
             This feature gives the users an option to do recurring backup
             even when volumes are detached.
             longhorn/longhorn#1509

    Given `allow-recurring-job-while-volume-detached` set to `true`.
    And volume created and attached.
    And 50MB data written to volume.
    And volume detached.

    When a recurring job created runs every minute.
    And wait for backup to complete.
    Then volume have 1 backup in 2 minutes retry loop.

    When delete the recurring job.
    And create a PV from volume.
    And create a PVC from volume.
    And create a deployment from PVC.
    And write 400MB data to the volume from the pod.
    And scale deployment replicas to 0.
        wait until the volume is detached.
    And create a recurring job runs every 2 minutes.
    And wait for backup to start.
    And scale deployment replicas to 1.
    Then volume's frontend is disabled.
    And pod cannot start.

    When wait until backup complete.
    And delete the recurring job.
    Then pod can start in 10 minutes retry loop.
    """
    common.update_setting(client,
                          SETTING_RECURRING_JOB_WHILE_VOLUME_DETACHED, "true")

    volume = create_and_check_volume(client, volume_name, size=str(1 * Gi))
    volume.attach(hostId=get_self_host_id())
    volume = wait_for_volume_healthy(client, volume.name)

    data = {
        'pos': 0,
        'content': common.generate_random_data(50 * Mi),
    }
    common.write_volume_data(volume, data)

    # Give sometimes for data to flush to disk
    time.sleep(15)

    volume.detach(hostId="")
    volume = wait_for_volume_detached(client, volume.name)

    recurring_jobs = {
        RECURRING_JOB_NAME: {
            TASK: BACKUP,
            GROUPS: [DEFAULT],
            CRON: SCHEDULE_1MIN,
            RETAIN: 1,
            CONCURRENCY: 1,
            LABELS: {},
        },
    }
    create_recurring_jobs(client, recurring_jobs)
    check_recurring_jobs(client, recurring_jobs)

    wait_for_backup_completion(client, volume.name)
    # Re-check over ~2 minutes that the count stays at exactly 1 (retain=1).
    for _ in range(4):
        bv = client.by_id_backupVolume(volume.name)
        wait_for_backup_count(bv, 1)
        time.sleep(30)

    cleanup_all_recurring_jobs(client)

    pv_name = volume_name + "-pv"
    create_pv_for_volume(client, core_api, volume, pv_name)
    pvc_name = volume_name + "-pvc"
    create_pvc_for_volume(client, core_api, volume, pvc_name)
    deployment_name = volume_name + "-dep"
    deployment = make_deployment_with_pvc(deployment_name, pvc_name)
    create_and_wait_deployment(apps_api, deployment)

    size_mb = 400
    pod_names = common.get_deployment_pod_names(core_api, deployment)
    write_pod_volume_random_data(core_api, pod_names[0], "/data/test",
                                 size_mb)

    # Scale to 0 so the volume detaches while the next recurring backup runs.
    deployment['spec']['replicas'] = 0
    apps_api.patch_namespaced_deployment(body=deployment,
                                         namespace='default',
                                         name=deployment["metadata"]["name"])
    volume = wait_for_volume_detached(client, volume.name)

    recurring_jobs = {
        RECURRING_JOB_NAME: {
            TASK: BACKUP,
            GROUPS: [DEFAULT],
            CRON: "*/2 * * * *",
            RETAIN: 1,
            CONCURRENCY: 1,
            LABELS: {},
        },
    }
    create_recurring_jobs(client, recurring_jobs)
    check_recurring_jobs(client, recurring_jobs)

    wait_for_backup_to_start(client, volume.name)

    deployment['spec']['replicas'] = 1
    apps_api.patch_namespaced_deployment(body=deployment,
                                         namespace='default',
                                         name=deployment["metadata"]["name"])

    deployment_label_name = deployment["metadata"]["labels"]["name"]
    common.wait_pod_auto_attach_after_first_backup_completion(
        client, core_api, volume.name, deployment_label_name)

    cleanup_all_recurring_jobs(client)

    pod_names = common.get_deployment_pod_names(core_api, deployment)
    common.wait_for_pod_phase(core_api, pod_names[0], pod_phase="Running")
def test_recurring_jobs_when_volume_detached_unexpectedly(set_random_backupstore, client, core_api, apps_api, volume_name, make_deployment_with_pvc):  # NOQA
    """
    Scenario: test recurring jobs when volume detached unexpectedly

    Context: If the volume is automatically attached by the recurring backup
             job, make sure that workload pod eventually is able to use the
             volume when volume is detached unexpectedly during the backup
             process.

    Given `allow-recurring-job-while-volume-detached` set to `true`.
    And volume created and detached.
    And PV created from volume.
    And PVC created from volume.
    And deployment created from PVC.
    And 500MB data written to the volume.
    And deployment replica scaled to 0.
    And volume detached.

    When create a backup recurring job runs every 2 minutes.
    And wait for backup to start.
        wait for backup progress > 50%.
    And kill the engine process of the volume.
    Then volume is attached and healthy.

    When backup completed.
    Then volume is detached with `frontendDisabled=false`.

    When deployment replica scaled to 1.
    Then the data exist in the deployment pod.
    """
    # Allow the recurring job to auto-attach and back up a detached volume.
    common.update_setting(client,
                          SETTING_RECURRING_JOB_WHILE_VOLUME_DETACHED, "true")
    volume = create_and_check_volume(client, volume_name, size=str(1 * Gi))
    volume = wait_for_volume_detached(client, volume.name)
    # Expose the volume to Kubernetes through a PV/PVC pair and a deployment.
    pv_name = volume_name + "-pv"
    create_pv_for_volume(client, core_api, volume, pv_name)
    pvc_name = volume_name + "-pvc"
    create_pvc_for_volume(client, core_api, volume, pvc_name)
    deployment_name = volume_name + "-dep"
    deployment = make_deployment_with_pvc(deployment_name, pvc_name)
    create_and_wait_deployment(apps_api, deployment)
    # Seed the volume with data and remember it for the final comparison.
    size_mb = 500
    pod_names = common.get_deployment_pod_names(core_api, deployment)
    write_pod_volume_random_data(core_api, pod_names[0], "/data/test",
                                 size_mb)
    data = read_volume_data(core_api, pod_names[0], 'default')
    # Scale the workload down so the volume detaches.
    deployment['spec']['replicas'] = 0
    apps_api.patch_namespaced_deployment(body=deployment,
                                         namespace='default',
                                         name=deployment["metadata"]["name"])
    volume = wait_for_volume_detached(client, volume_name)
    # Backup job that will auto-attach the detached volume when it fires.
    recurring_jobs = {
        RECURRING_JOB_NAME: {
            TASK: BACKUP,
            GROUPS: [],
            CRON: "*/2 * * * *",
            RETAIN: 1,
            CONCURRENCY: 1,
            LABELS: {},
        },
    }
    create_recurring_jobs(client, recurring_jobs)
    check_recurring_jobs(client, recurring_jobs)
    volume.recurringJobAdd(name=RECURRING_JOB_NAME, isGroup=False)
    wait_for_volume_recurring_job_update(volume,
                                         jobs=[RECURRING_JOB_NAME],
                                         groups=[DEFAULT])
    # Give the cron job time to trigger, then wait until the backup is
    # well underway (>50% progress) before injecting the failure.
    time.sleep(60)
    wait_for_recurring_backup_to_start(client, core_api, volume_name,
                                       expected_snapshot_count=1,
                                       minimum_progress=50)
    # Kill the engine mid-backup to simulate an unexpected detach.
    crash_engine_process_with_sigkill(client, core_api, volume_name)
    time.sleep(10)
    wait_for_volume_healthy_no_frontend(client, volume_name)
    # Since the backup state is removed after the backup complete and it
    # could happen quickly. Checking for the both in-progress and complete
    # state could be hard to catch, thus we only check the complete state
    wait_for_backup_completion(client, volume_name)
    wait_for_volume_detached(client, volume_name)
    # Scale the workload back up; the pod must reattach and see the data.
    deployment['spec']['replicas'] = 1
    apps_api.patch_namespaced_deployment(body=deployment,
                                         namespace='default',
                                         name=deployment["metadata"]["name"])
    wait_deployment_replica_ready(apps_api, deployment["metadata"]["name"], 1)
    pod_names = common.get_deployment_pod_names(core_api, deployment)
    assert read_volume_data(core_api, pod_names[0], 'default') == data
    # Use fixture to cleanup the backupstore and since we
    # crashed the engine replica initiated the backup, it's
    # backupstore lock will still be present, so we need
    # to wait till the lock is expired, before we can delete
    # the backups
    volume.recurringJobDelete(name=RECURRING_JOB_NAME, isGroup=False)
    backupstore.backupstore_wait_for_lock_expiration()
@pytest.mark.skip(reason="TODO")
def test_recurring_jobs_on_nodes_with_taints():  # NOQA
    """
    Test recurring jobs on nodes with taints

    Context:

    Test the prevention of creation of multiple pods due to
    recurring job's pod being rejected by Taint controller
    on nodes with taints

    Steps:

    1. Set taint toleration for Longhorn components
       `persistence=true:NoExecute`
    2. Taint `node-1` with `persistence=true:NoExecute`
    3. Create a volume, vol-1.
       Attach vol-1 to node-1
       Write some data to vol-1
    4. Create a recurring backup job which:
       Has retain count 10
       Runs every minute
    5. Wait for 3 minutes.
       Verify that the there is 1 backup created
       Verify that the total number of pod in longhorn-system namespace < 50
       Verify that the number of pods of the cronjob is <= 2
    6. Taint all nodes with `persistence=true:NoExecute`
    7. Write some data to vol-1
    8. Wait for 3 minutes.
       Verify that the there are 2 backups created in total
       Verify that the total number of pod in longhorn-system namespace < 50
       Verify that the number of pods of the cronjob is <= 2
    9. Remove `persistence=true:NoExecute` from all nodes and Longhorn setting
       Clean up backups, volumes
    """
    # Placeholder: the scenario documented above is not implemented yet.
    pass
@pytest.mark.recurring_job  # NOQA
def test_recurring_job_groups(set_random_backupstore, client, batch_v1_beta_api):  # NOQA
    """
    Scenario: test recurring job groups (S3/NFS)

    Given volume `test-job-1` created, attached, and healthy.
          volume `test-job-2` created, attached, and healthy.
    And create `snapshot` recurring job with `group-1, group-2` in groups.
        set cron job to run every 2 minutes.
        set retain to 1.
        create `backup` recurring job with `group-1` in groups.
        set cron job to run every 3 minutes.
        set retain to 1

    When set `group1` recurring job in volume `test-job-1` label.
         set `group2` recurring job in volume `test-job-2` label.
    And write some data to volume `test-job-1`.
        write some data to volume `test-job-2`.
    And wait for 2 minutes.
    And write some data to volume `test-job-1`.
        write some data to volume `test-job-2`.
    And wait for 1 minute.

    Then volume `test-job-1` should have 3 snapshots after scheduled time.
         volume `test-job-2` should have 2 snapshots after scheduled time.
    And volume `test-job-1` should have 1 backup after scheduled time.
        volume `test-job-2` should have 0 backup after scheduled time.
    """
    volume1_name = "test-job-1"
    volume2_name = "test-job-2"
    client.create_volume(name=volume1_name, size=SIZE)
    client.create_volume(name=volume2_name, size=SIZE)
    volume1 = wait_for_volume_detached(client, volume1_name)
    volume2 = wait_for_volume_detached(client, volume2_name)

    self_id = get_self_host_id()
    volume1.attach(hostId=self_id)
    volume2.attach(hostId=self_id)
    volume1 = wait_for_volume_healthy(client, volume1_name)
    volume2 = wait_for_volume_healthy(client, volume2_name)

    # Snapshot job targets both groups; backup job targets group-1 only.
    group1 = "group-1"
    group2 = "group-2"
    recurring_jobs = {
        SNAPSHOT: {
            TASK: SNAPSHOT,
            GROUPS: [group1, group2],
            CRON: "*/2 * * * *",
            RETAIN: 1,
            CONCURRENCY: 2,
            LABELS: {},
        },
        BACKUP: {
            TASK: BACKUP,
            GROUPS: [group1],
            CRON: "*/3 * * * *",
            RETAIN: 1,
            CONCURRENCY: 2,
            LABELS: {},
        },
    }
    create_recurring_jobs(client, recurring_jobs)
    check_recurring_jobs(client, recurring_jobs)

    volume1.recurringJobAdd(name=group1, isGroup=True)
    volume2.recurringJobAdd(name=group2, isGroup=True)
    wait_for_cron_job_count(batch_v1_beta_api, 2)

    # Two write/wait rounds so both the 2-minute snapshot job and the
    # 3-minute backup job get a chance to fire.
    write_volume_random_data(volume1)
    write_volume_random_data(volume2)
    time.sleep(60 * 2 - WRITE_DATA_INTERVAL)
    write_volume_random_data(volume1)
    write_volume_random_data(volume2)
    time.sleep(60)

    wait_for_snapshot_count(volume1, 3)  # volume-head,snapshot,backup-snapshot
    wait_for_snapshot_count(volume2, 2)  # volume-head,snapshot
    wait_for_backup_count(client.by_id_backupVolume(volume1_name), 1)

    # The backup job only targets `group-1`, so volume 2 (in `group-2`)
    # must never get a backup: the count check timing out with an
    # AssertionError is exactly the expected outcome.
    with pytest.raises(AssertionError):
        wait_for_backup_count(client.by_id_backupVolume(volume2_name), 1,
                              retry_counts=60)
@pytest.mark.recurring_job  # NOQA
def test_recurring_job_default(client, batch_v1_beta_api, volume_name):  # NOQA
    """
    Scenario: test recurring job set with default in groups

    Given 1 volume created, attached, and healthy.

    # Setting recurring job in volume label should not remove the defaults.
    When set `snapshot` recurring job in volume label.
    Then should contain `default` job-group in volume labels.
         should contain `snapshot` job in volume labels.

    # Should be able to remove the default label.
    When delete recurring job-group `default` in volume label.
    Then volume should have `snapshot` job in job label.
         volume should not have `default` group in job label.

    # Removing all volume recurring job labels should bring in the default.
    When delete all recurring jobs in volume label.
    Then volume should not have `snapshot` job in job label.
         volume should have `default` group in job label.
    """
    client.create_volume(name=volume_name, size=SIZE)
    vol = wait_for_volume_detached(client, volume_name)
    vol.attach(hostId=get_self_host_id())
    vol = wait_for_volume_healthy(client, volume_name)

    # Labeling an explicit job keeps the implicit `default` group in place.
    vol.recurringJobAdd(name=SNAPSHOT, isGroup=False)
    wait_for_volume_recurring_job_update(vol,
                                         jobs=[SNAPSHOT], groups=[DEFAULT])

    # The `default` group label can be removed on its own.
    vol.recurringJobDelete(name=DEFAULT, isGroup=True)
    wait_for_volume_recurring_job_update(vol,
                                         jobs=[SNAPSHOT], groups=[])

    # Removing the last explicit job label restores the `default` group.
    vol.recurringJobDelete(name=SNAPSHOT, isGroup=False)
    wait_for_volume_recurring_job_update(vol,
                                         jobs=[], groups=[DEFAULT])
@pytest.mark.recurring_job  # NOQA
def test_recurring_job_delete(client, batch_v1_beta_api, volume_name):  # NOQA
    """
    Scenario: test delete recurring job

    Given 1 volume created, attached, and healthy.

    When create `snapshot1` recurring job with `default, group-1` in groups.
         create `snapshot2` recurring job with `default` in groups.
         create `snapshot3` recurring job with `` in groups.
         create `backup1` recurring job with `default, group-1` in groups.
         create `backup2` recurring job with `default` in groups.
         create `backup3` recurring job with `` in groups.
    Then default `snapshot1` cron job should exist.
         default `snapshot2` cron job should exist.
         `snapshot3` cron job should exist.
         default `backup1` cron job should exist.
         default `backup2` cron job should exist.
         `backup3` cron job should exist.

    # Delete `snapshot2` recurring job should delete the cron job
    When delete `snapshot2` recurring job.
    Then default `snapshot1` cron job should exist.
         default `snapshot2` cron job should not exist.
         `snapshot3` cron job should exist.
         default `backup1` cron job should exist.
         default `backup2` cron job should exist.
         `backup3` cron job should exist.

    # Delete multiple recurring jobs should reflect on the cron jobs.
    When delete `backup1` recurring job.
         delete `backup2` recurring job.
         delete `backup3` recurring job.
    Then default `snapshot1` cron job should exist.
         default `snapshot2` cron job should not exist.
         `snapshot3` cron job should exist.
         default `backup1` cron job should not exist.
         default `backup2` cron job should not exist.
         `backup3` cron job should not exist.

    # Should be able to delete recurring job while existing in volume label
    When add `snapshot1` recurring job to volume label.
         add `snapshot3` recurring job to volume label.
    And default `snapshot1` cron job should exist.
        default `snapshot2` cron job should not exist.
        `snapshot3` cron job should exist.
    And delete `snapshot1` recurring job.
        delete `snapshot3` recurring job.
    Then default `snapshot1` cron job should not exist.
         default `snapshot2` cron job should not exist.
         `snapshot3` cron job should not exist.
    And `snapshot1` job should exist in volume recurring job label.
        `snapshot3` job should exist in volume recurring job label.
    """
    client.create_volume(name=volume_name, size=SIZE)
    vol = wait_for_volume_detached(client, volume_name)
    vol.attach(hostId=get_self_host_id())
    vol = wait_for_volume_healthy(client, volume_name)

    snap1, snap2, snap3 = SNAPSHOT + "1", SNAPSHOT + "2", SNAPSHOT + "3"
    back1, back2, back3 = BACKUP + "1", BACKUP + "2", BACKUP + "3"
    group1 = "group-1"

    # (name, task, groups) for the six jobs; everything else is shared.
    job_specs = (
        (snap1, SNAPSHOT, [DEFAULT, group1]),
        (snap2, SNAPSHOT, [DEFAULT]),
        (snap3, SNAPSHOT, []),
        (back1, BACKUP, [DEFAULT, group1]),
        (back2, BACKUP, [DEFAULT]),
        (back3, BACKUP, []),
    )
    recurring_jobs = {
        name: {
            TASK: task,
            GROUPS: groups,
            CRON: SCHEDULE_1MIN,
            RETAIN: 1,
            CONCURRENCY: 2,
            LABELS: {},
        }
        for name, task, groups in job_specs
    }
    create_recurring_jobs(client, recurring_jobs)
    check_recurring_jobs(client, recurring_jobs)

    wait_for_cron_job_count(batch_v1_beta_api, 6)
    for job in (snap1, snap2, snap3, back1, back2, back3):
        wait_for_cron_job_create(batch_v1_beta_api, JOB_LABEL + "=" + job)

    # Deleting `snapshot2` should delete its cron job only.
    client.delete(client.by_id_recurring_job(snap2))
    wait_for_cron_job_count(batch_v1_beta_api, 5)
    wait_for_cron_job_delete(batch_v1_beta_api, JOB_LABEL + "=" + snap2)
    for job in (snap1, snap3, back1, back2, back3):
        wait_for_cron_job_create(batch_v1_beta_api, JOB_LABEL + "=" + job)

    # Deleting multiple recurring jobs should reflect on the cron jobs.
    for job in (back1, back2, back3):
        client.delete(client.by_id_recurring_job(job))
    wait_for_cron_job_count(batch_v1_beta_api, 2)
    for job in (snap1, snap3):
        wait_for_cron_job_create(batch_v1_beta_api, JOB_LABEL + "=" + job)
    for job in (snap2, back1, back2, back3):
        wait_for_cron_job_delete(batch_v1_beta_api, JOB_LABEL + "=" + job)

    # A recurring job can be deleted while still referenced in a volume label.
    vol.recurringJobAdd(name=snap1, isGroup=False)
    vol.recurringJobAdd(name=snap3, isGroup=False)
    wait_for_volume_recurring_job_update(vol,
                                         jobs=[snap1, snap3],
                                         groups=[DEFAULT])
    wait_for_cron_job_count(batch_v1_beta_api, 2)
    for job in (snap1, snap3):
        wait_for_cron_job_create(batch_v1_beta_api, JOB_LABEL + "=" + job)
    wait_for_cron_job_delete(batch_v1_beta_api, JOB_LABEL + "=" + snap2)

    for job in (snap1, snap3):
        client.delete(client.by_id_recurring_job(job))
    wait_for_cron_job_count(batch_v1_beta_api, 0)
    # The volume label keeps referencing the now-deleted jobs.
    wait_for_volume_recurring_job_update(vol,
                                         jobs=[snap1, snap3],
                                         groups=[DEFAULT])
@pytest.mark.recurring_job  # NOQA
def test_recurring_job_volume_labeled_none_existing_recurring_job(client, batch_v1_beta_api, volume_name):  # NOQA
    """
    Scenario: test volume with a none-existing recurring job label
              and later on added back.

    Given create `snapshot` recurring job.
          create `backup` recurring job.
    And 1 volume created, attached, and healthy.
    And add `snapshot` recurring job to volume label.
        add `backup` recurring job to volume label.
    And `snapshot1` cron job exist.
        `backup1` cron job exist.

    When delete `snapshot` recurring job.
         delete `backup` recurring job.
    Then `snapshot` cron job should not exist.
         `backup` cron job should not exist.
    And `snapshot` job should exist in volume recurring job label.
        `backup` job should exist in volume recurring job label.
        `default` group should exist in volume recurring job label.

    # Add back the recurring jobs.
    When create `snapshot` recurring job.
         create `backup` recurring job.
    Then `snapshot` cron job should exist.
         `backup` cron job should exist.
    """
    # Both jobs share every setting except the task name.
    recurring_jobs = {
        task: {
            TASK: task,
            GROUPS: [],
            CRON: SCHEDULE_1MIN,
            RETAIN: 1,
            CONCURRENCY: 2,
            LABELS: {},
        }
        for task in (SNAPSHOT, BACKUP)
    }
    create_recurring_jobs(client, recurring_jobs)
    check_recurring_jobs(client, recurring_jobs)

    client.create_volume(name=volume_name, size=SIZE)
    vol = wait_for_volume_detached(client, volume_name)
    vol.attach(hostId=get_self_host_id())
    vol = wait_for_volume_healthy(client, volume_name)

    for job in (SNAPSHOT, BACKUP):
        vol.recurringJobAdd(name=job, isGroup=False)
    for job in (SNAPSHOT, BACKUP):
        wait_for_cron_job_create(batch_v1_beta_api, JOB_LABEL + "=" + job)

    # Deleting the jobs removes the cron jobs but the volume labels remain.
    for job in (SNAPSHOT, BACKUP):
        client.delete(client.by_id_recurring_job(job))
    wait_for_cron_job_count(batch_v1_beta_api, 0)
    wait_for_volume_recurring_job_update(vol,
                                         jobs=[SNAPSHOT, BACKUP],
                                         groups=[DEFAULT])
    wait_for_recurring_jobs_cleanup(client)

    # Add back the recurring jobs.
    create_recurring_jobs(client, recurring_jobs)
    check_recurring_jobs(client, recurring_jobs)
    wait_for_cron_job_count(batch_v1_beta_api, 2)
    for job in (SNAPSHOT, BACKUP):
        wait_for_cron_job_create(batch_v1_beta_api, JOB_LABEL + "=" + job)
@pytest.mark.recurring_job  # NOQA
def test_recurring_job_multiple_volumes(set_random_backupstore, client, batch_v1_beta_api):  # NOQA
    """
    Scenario: test recurring job with multiple volumes

    Given volume `test-job-1` created, attached and healthy.
    And create `backup1` recurring job with `default` in groups.
        create `backup2` recurring job with `` in groups.
    And `default` group exist in `test-job-1` volume recurring job label.
    And `backup1` cron job exist.
        `backup2` cron job exist.
    And write data to `test-job-1` volume.
    And 2 snapshot exist in `test-job-1` volume.
    And 1 backup exist in `test-job-1` volume.

    When create and attach volume `test-job-2`.
         wait for volume `test-job-2` to be healthy.
    And `default` group exist in `test-job-2` volume recurring job label.
    And write data to `test-job-2` volume.
    Then 2 snapshot exist in `test-job-2` volume.
         1 backup exist in `test-job-2` volume.

    When add `backup2` in `test-job-2` volume label.
    And `default` group exist in `test-job-1` volume recurring job label.
        `default` group exist in `test-job-2` volume recurring job label.
        `backup2` group exist in `test-job-2` volume recurring job label.
    And write data to `test-job-1`.
        write data to `test-job-2`.
    Then wait for schedule time.
    And 2 backup exist in `test-job-2` volume.
        1 backup exist in `test-job-1` volume.
    """
    vol1_name = "test-job-1"
    client.create_volume(name=vol1_name, size=SIZE)
    vol1 = wait_for_volume_detached(client, vol1_name)
    vol1.attach(hostId=get_self_host_id())
    vol1 = wait_for_volume_healthy(client, vol1_name)

    # backup1 runs against the `default` group, backup2 against no group.
    back1, back2 = BACKUP + "1", BACKUP + "2"
    recurring_jobs = {
        name: {
            TASK: BACKUP,
            GROUPS: groups,
            CRON: SCHEDULE_1MIN,
            RETAIN: 1,
            CONCURRENCY: 2,
            LABELS: {},
        }
        for name, groups in ((back1, [DEFAULT]), (back2, []))
    }
    create_recurring_jobs(client, recurring_jobs)
    check_recurring_jobs(client, recurring_jobs)

    wait_for_volume_recurring_job_update(vol1, jobs=[], groups=[DEFAULT])
    wait_for_cron_job_count(batch_v1_beta_api, 2)
    for job in (back1, back2):
        wait_for_cron_job_create(batch_v1_beta_api, JOB_LABEL + "=" + job)

    write_volume_random_data(vol1)
    wait_for_snapshot_count(vol1, 2)
    wait_for_backup_count(client.by_id_backupVolume(vol1_name), 1)

    # A second volume picks up the `default` group job automatically.
    vol2_name = "test-job-2"
    client.create_volume(name=vol2_name, size=SIZE)
    vol2 = wait_for_volume_detached(client, vol2_name)
    vol2.attach(hostId=get_self_host_id())
    vol2 = wait_for_volume_healthy(client, vol2_name)
    wait_for_volume_recurring_job_update(vol2, jobs=[], groups=[DEFAULT])

    write_volume_random_data(vol2)
    wait_for_snapshot_count(vol2, 2)
    wait_for_backup_count(client.by_id_backupVolume(vol2_name), 1)

    # Explicitly labeling backup2 on volume 2 gives it a second backup job.
    vol2.recurringJobAdd(name=back2, isGroup=False)
    wait_for_volume_recurring_job_update(vol1,
                                         jobs=[], groups=[DEFAULT])
    wait_for_volume_recurring_job_update(vol2,
                                         jobs=[back2], groups=[DEFAULT])

    write_volume_random_data(vol1)
    write_volume_random_data(vol2)
    time.sleep(70 - WRITE_DATA_INTERVAL)
    wait_for_backup_count(client.by_id_backupVolume(vol2_name), 2)
    wait_for_backup_count(client.by_id_backupVolume(vol1_name), 1)
@pytest.mark.recurring_job  # NOQA
def test_recurring_job_snapshot(client, batch_v1_beta_api):  # NOQA
    """
    Scenario: test recurring job snapshot

    Given volume `test-job-1` created, attached, and healthy.
          volume `test-job-2` created, attached, and healthy.

    When create a recurring job with `default` in groups.
    Then should have 1 cron job.
    And volume `test-job-1` should have volume-head 1 snapshot.
        volume `test-job-2` should have volume-head 1 snapshot.

    When write some data to volume `test-job-1`.
         write some data to volume `test-job-2`.
    And wait for cron job scheduled time.
    Then volume `test-job-1` should have 2 snapshots after scheduled time.
         volume `test-job-2` should have 2 snapshots after scheduled time.

    When write some data to volume `test-job-1`.
         write some data to volume `test-job-2`.
    And wait for cron job scheduled time.
    Then volume `test-job-1` should have 3 snapshots after scheduled time.
         volume `test-job-2` should have 3 snapshots after scheduled time.
    """
    vol_names = ("test-job-1", "test-job-2")
    for name in vol_names:
        client.create_volume(name=name, size=SIZE)
    vols = [wait_for_volume_detached(client, name) for name in vol_names]

    host_id = get_self_host_id()
    for vol in vols:
        vol.attach(hostId=host_id)
    vols = [wait_for_volume_healthy(client, name) for name in vol_names]

    recurring_jobs = {
        RECURRING_JOB_NAME: {
            TASK: SNAPSHOT,
            GROUPS: [DEFAULT],
            CRON: SCHEDULE_1MIN,
            RETAIN: 2,
            CONCURRENCY: 2,
            LABELS: {},
        },
    }
    create_recurring_jobs(client, recurring_jobs)
    check_recurring_jobs(client, recurring_jobs)
    wait_for_cron_job_count(batch_v1_beta_api, 1)

    # Only the volume-head exists before the first scheduled run.
    for vol in vols:
        wait_for_snapshot_count(vol, 1)

    # Each scheduled run adds one snapshot per volume.
    for expected_snapshots in (2, 3):
        for vol in vols:
            write_volume_random_data(vol)
        time.sleep(60)
        for vol in vols:
            wait_for_snapshot_count(vol, expected_snapshots)
@pytest.mark.recurring_job  # NOQA
def test_recurring_job_backup(set_random_backupstore, client, batch_v1_beta_api):  # NOQA
    """
    Scenario: test recurring job backup (S3/NFS)

    Given volume `test-job-1` created, attached, and healthy.
          volume `test-job-2` created, attached, and healthy.

    When create a recurring job with `default` in groups.
    Then should have 1 cron job.

    When write some data to volume `test-job-1`.
         write some data to volume `test-job-2`.
    And wait for `backup1` cron job scheduled time.
    Then volume `test-job-1` should have 1 backups.
         volume `test-job-2` should have 1 backups.

    When write some data to volume `test-job-1`.
         write some data to volume `test-job-2`.
    And wait for `backup1` cron job scheduled time.
    Then volume `test-job-1` should have 2 backups.
         volume `test-job-2` should have 2 backups.
    """
    vol_names = ("test-job-1", "test-job-2")
    for name in vol_names:
        client.create_volume(name=name, size=SIZE)
    vols = [wait_for_volume_detached(client, name) for name in vol_names]

    host_id = get_self_host_id()
    for vol in vols:
        vol.attach(hostId=host_id)
    vols = [wait_for_volume_healthy(client, name) for name in vol_names]

    recurring_jobs = {
        RECURRING_JOB_NAME: {
            TASK: BACKUP,
            GROUPS: [DEFAULT],
            CRON: SCHEDULE_1MIN,
            RETAIN: 2,
            CONCURRENCY: 2,
            LABELS: {},
        },
    }
    create_recurring_jobs(client, recurring_jobs)
    check_recurring_jobs(client, recurring_jobs)
    wait_for_cron_job_count(batch_v1_beta_api, 1)

    # Each scheduled run adds one backup per volume.
    for expected_backups in (1, 2):
        for vol in vols:
            write_volume_random_data(vol)
        time.sleep(60 - WRITE_DATA_INTERVAL)
        for name in vol_names:
            wait_for_backup_count(client.by_id_backupVolume(name),
                                  expected_backups)
| StarcoderdataPython |
3340062 | <filename>backend/assets/create_users.py
import csv
import os

import django

# Django must be configured *before* any models are imported: importing
# django.contrib.auth.models (or an app's models) without a configured
# settings module raises ImproperlyConfigured/AppRegistryNotReady.
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "blog_test.settings")
django.setup()

from django.contrib.auth.models import User  # noqa: E402
from blog_service.models import Entry  # noqa: E402

# Number of Entry rows push_entries() creates.
AMOUNT_OF_ENTRIES = 3000

# Placeholder body text used for every generated blog entry.
LOREM = """
Lorem ipsum dolor sit amet, consectetur adipiscing elit, sed do eiusmod tempor incididunt ut labore et dolore magna aliqua. Ut enim ad minim veniam, quis nostrud exercitation ullamco laboris nisi ut aliquip ex ea commodo consequat. Duis aute irure dolor in reprehenderit in voluptate velit esse cillum dolore eu fugiat nulla pariatur. Excepteur sint occaecat cupidatat non proident, sunt in culpa qui officia deserunt mollit anim id est laborum.
"""
def push_users():
    """Load rows from users.csv (';'-delimited) and save each as an auth User."""
    with open('users.csv', newline='', encoding='utf-8-sig') as csvfile:
        for row in csv.DictReader(csvfile, delimiter=';'):
            User(email=str(row['email']),
                 username=str(row['username']),
                 password="<PASSWORD> :D").save()
def push_entries():
    """Create AMOUNT_OF_ENTRIES lorem-ipsum entries, each by a random user."""
    for _ in range(AMOUNT_OF_ENTRIES):
        Entry(blog_entry=LOREM,
              author_name=User.objects.order_by('?').first()).save()
if __name__ == '__main__':
    # Seed users first; push_entries() attributes entries to existing users.
    push_users()
    push_entries()
| StarcoderdataPython |
3375739 | <filename>snucovery/aws.py
import json
import boto3
from datetime import date, datetime
from snucovery.errors import (
UnknownServiceCall, InvalidServiceMappings,
)
def json_serial(obj):
    """JSON serializer for objects not serializable by default json code"""
    if not isinstance(obj, (datetime, date)):
        raise TypeError("Type %s not serializable" % type(obj))
    return obj.isoformat()
class AwsServices:

    def __init__(self, profile):
        """Object for collecting Aws Assets

        Describe multiple services within an Aws profile.

        Args:
            profile (str): Aws profile name found in `~/.aws/credentials`

        Self Params:
            profile (str): profile name
            session (obj): Aws Session
            active_service (str): default('ec2')
            client (obj): Aws Service Client
            service_mappings (dict): {service: {client_method: jq_filter}}

        Examples:
            >>> from aws import AwsService
            >>> aws = AwsServices(profile='test')
            >>> print(aws.scan_services())
        """
        self.profile = profile
        self.session = self.get_session()
        self.active_service = 'ec2'
        self.client = self.get_service_client()
        # Maps an AWS service name to the client methods to call, and for
        # each method the jq filter later used to trim its output.
        self.service_mappings = {
            'ec2': {
                'describe_instances': ".Reservations[].Instances[]? | {Name: (.Tags[]?|select(.Key==\"Name\")|.Value), InstanceId, InstanceType, Region: .Placement.AvailabilityZone, LaunchTime, PrivateDnsName, PrivateIpAddresses: [.NetworkInterfaces[].PrivateIpAddresses[].PrivateIpAddress], PublicIpAddress}",
                'describe_vpcs': ".Vpcs[]? | {Name: (.Tags[]|select(.Key==\"Name\")|.Value), VpcId, CidrBlock}"
            },
            'elb': {
                'describe_load_balancers': ".LoadBalancerDescriptions[]? | {LoadBalancerName, DNSName}"
            },
            'rds': {
                'describe_db_instances': ".DBInstances[]? | {DBName, AvailabilityZone, DBInstanceIdentifier, DBInstanceClass, Engine}"
            },
            'elasticache': {
                'describe_cache_clusters': ".CacheClusters[]? | {CacheClusterId, CacheNodeType, Engine, PreferredAvailabilityZone}"
            }
        }

    def set_session(self, profile_name):
        """Set Aws Session

        Instantiates an Aws Session based on the `profile_name`. The
        `profile_name` must exist within the `~/.aws/credentials` file.

        Args:
            profile_name (str): Aws profile name

        Returns:
            Object :: Boto3.session ( profile_name )
        """
        self.session = boto3.session.Session(profile_name=profile_name)
        if self.session:
            return self.session

    def set_service_client(self, service):
        """Instantiate a boto3.client based on the current session

        Args:
            service (str): Valid boto3.client() parameters.
                eg: ['ec2', 'elb', 'rds']

        Returns:
            Object :: boto3.client object
        """
        self.client = self.get_session().client(service)
        return self.client

    def get_session(self):
        """Get the current boto3 session

        Attempts to return the current session; if none has been created
        yet, sets a new session based on the current `profile`.

        Returns:
            Object :: boto3.session object
        """
        try:
            return self.session
        except AttributeError:
            # First access: the session attribute does not exist yet.
            return self.set_session(self.profile)

    def get_service_client(self):
        """Get the current Aws service client object

        Attempts to return the current boto3 client. If it's not set, it
        will create a new service client based on the `active_service`.

        Returns:
            Object :: boto3.client object
        """
        try:
            return self.client
        except AttributeError:
            # First access: the client attribute does not exist yet.
            return self.set_service_client(self.active_service)

    def get_service_mappings(self):
        """Get the aws service mappings

        Returns the configured service mappings, i.e. which client method
        to call per service:

            boto3.client(<service>).<service_attr>()

        Raises:
            InvalidServiceMappings: if the mappings are empty/unset.

        Returns:
            dict :: {service: {service_attr: jq_filter}}
        """
        if self.service_mappings:
            return self.service_mappings
        raise InvalidServiceMappings

    def scan_services(self):
        """Scan all aws services based on service_mappings

        Returns an ordered dictionary of each mapped service.

        Returns:
            Dict :: Dictionary of all mapped aws services
        """
        service_response = dict()
        for service in self.get_service_mappings():
            self.set_service_client(service)
            for service_attr in self.service_mappings[service]:
                service_items = self.scan_service(service_attr)
                if service_items:
                    # setdefault replaces the previous try/except KeyError
                    # duplication: create the per-service dict on demand.
                    service_response.setdefault(service, dict())[
                        service_attr] = service_items
        return service_response

    def scan_service(self, service_attr, service=None):
        """Scan a specific aws service and service_attr

        Scan service based on the service_attr. The service attr is the
        service client's method. eg: boto3.client('ec2').describe_instances()

        Args:
            Required:
                service_attr (str): String version of a boto3.client('ec2')
                    method
            Optional:
                service (str): default(None): If passed, will set a new
                    session client

        Raises:
            UnknownServiceCall: if the client has no such method.

        Returns:
            dict :: the method's response, round-tripped through JSON so
            datetimes become ISO-8601 strings (see json_serial).
        """
        if service:
            self.set_service_client(service)
        # Resolve the method outside the call so an AttributeError raised
        # *inside* the AWS call is not mislabeled as an unknown service call.
        try:
            describe = getattr(self.get_service_client(), service_attr)
        except AttributeError:
            raise UnknownServiceCall
        return json.loads(
            json.dumps(describe(), default=json_serial)
        )
| StarcoderdataPython |
3238456 |
"""
TODO:
- Verify the search-result URL XPath still matches the site layout.
- Verify the article-content XPaths.
- Consider parallelising the crawl.
- Rotate IP addresses and request headers to avoid blocking.
"""
import re
from scrapy.spiders import CrawlSpider, Rule
from scrapy.linkextractors.lxmlhtml import LxmlLinkExtractor
from scrapy.selector import Selector
from newscrawler.items import NewsItem
class HurriyetSpider(CrawlSpider):
    """
    Spider class to get the urls from search results
    """
    name = "hurriyet"
    allowed_domains = ["hurriyet.com.tr"]
    # One search-results URL per page (1..100); the query string pins the
    # date range and the content types to include.
    start_urls = ["https://www.hurriyet.com.tr/arama/#/?page=%d&order=Yeniden Eskiye&where=/&how=Article,NewsVideo," \
                  "NewsPhotoGallery,Column,Recipe&startDate=01/01/2000&finishDate=11/02/2021&isDetail=true" % i for i
                  in range(1, 101)]
    # Follow article links found inside the search-result list container.
    # NOTE(review): absolute XPaths are brittle; they match the site layout
    # as of the crawl date — re-verify if the site markup changes.
    rules = (
        Rule(LxmlLinkExtractor(allow=(), restrict_xpaths='/html/body/div[3]/div[1]/div[1]/div/div[1]/div/div['
                                                         '4]/div/div/div/div/div/div/div/div'),
             callback="parse", follow=True),
    )

    def parse(self, response, **kwargs):
        """
        :param response: crawler response of the article url
        :return: parsed doc pushed to elastic
        """
        hxs = Selector(response)
        item = NewsItem()
        item["link"] = response.request.url
        item["lang"] = "tr"
        item["source"] = "hurriyet"
        # Extract article metadata with absolute XPaths into the page DOM.
        date_time = hxs.xpath(
            "/html/body/article/div[12]/div/section[1]/header/div[1]/div[2]/div[2]/span[2]/time").extract()
        author = hxs.xpath(
            "/html/body/article/div[12]/div/section[1]/header/section[1]/div[1]/div/div[2]/a[1]/h6").extract()
        title = hxs.xpath("/html/body/article/div[12]/div/section[1]/header/div[2]/div/h1").extract()
        intro = hxs.xpath("/html/body/article/div[12]/div/section[3]/div/h2").extract()
        new_content = hxs.xpath("/html/body/article/div[12]/div/section[3]/div/div[4]").extract()
        new_content = ' '.join(new_content)
        #
        # Processing outputs
        item["intro"] = ' '.join(intro)
        item["title"] = ' '.join(title)
        # Collapse runs of whitespace left over from joining HTML fragments.
        item["content"] = re.sub(r'\s{2,}', ' ', new_content)
        item["date_time"] = " ".join(date_time)
        item["author"] = " ".join(author)
        return item
| StarcoderdataPython |
1785833 | <reponame>drobilc/nadlogar
from .generator_nalog import GeneratorNalogSolskiSlovar
from lxml import etree
import random
class NalogaGlasVsiljivec(GeneratorNalogSolskiSlovar):
    IME = 'Poišči vsiljivca – glas'
    NAVODILA = 'Med navedenimi glasovi (samoglasniki in soglasniki) v vsaki vrstici obkroži tisti glas, ki je drugačne vrste.'

    def privzeti_podatki(self):
        """Default task data: the word whose sounds seed the exercise."""
        return { 'beseda': 'glasovi' }

    def generiraj_nalogo(self):
        """Attach one intruder example per sound of the configured word."""
        self.podatki['primeri'] = self.generiraj_primere(self.podatki['beseda'])
        return self.podatki

    def generiraj_primere(self, beseda):
        """Build an example for every sound (letter) of *beseda*."""
        return [self.generiraj_primer(glas) for glas in beseda]

    def generiraj_primer(self, glas):
        """Mix one sound with four random sounds of the opposite class."""
        glas = glas.lower()
        samoglasniki, soglasniki = 'aeiou', 'bcčdfghjklmnprsštvzž'
        # The four distractors come from the opposite class of the given
        # sound (vowel -> consonants, consonant -> vowels).
        nabor = soglasniki if glas in samoglasniki else samoglasniki
        glasovi = [glas] + random.sample(nabor, k=4)
        random.shuffle(glasovi)
        return { 'glasovi': glasovi }
27548 | <filename>backend/accounts/serializers.py
from rest_framework_gis.serializers import GeoFeatureModelSerializer, GeometrySerializerMethodField
from django.contrib.gis.geos import Point
from accounts.models import UserProfile
import datetime
from rest_framework_gis.pagination import GeoJsonPagination
from django.contrib.auth import get_user_model
from django.utils import timezone
from rest_framework import serializers
from rest_framework_simplejwt.settings import api_settings
from rest_framework.reverse import reverse as api_reverse
from rest_framework_gis import serializers as geo_serializers
User = get_user_model()
class UserPublicSerializer(serializers.ModelSerializer):
    """Read-only public representation of a user: id, username and the
    absolute API URI of their detail endpoint."""
    # Hyperlink to the user's detail view, computed per request.
    uri = serializers.SerializerMethodField(read_only=True)

    class Meta:
        model = User
        fields = [
            'id',
            'username',
            'uri'
        ]

    def get_uri(self, obj):
        """Return the absolute URL for this user's detail endpoint."""
        request = self.context.get('request')
        return api_reverse("api-user:detail", kwargs={"username": obj.username}, request=request)
class UserRegisterSerializer(serializers.ModelSerializer):
    """Sign-up serializer: validates matching passwords and unique
    username/email (case-insensitive), then creates an inactive user who
    must verify their email before logging in.

    The dataset copy of this file had several fields corrupted by a
    '<PASSWORD>' redaction placeholder; they are restored to 'password2' /
    'write_only' here, which is the evident original intent.
    """
    # Confirmation field; never serialised back to the client.
    password2 = serializers.CharField(
        style={'input_type': 'password'}, write_only=True)
    # NOTE(review): no get_token() is defined in this chunk — serialising
    # 'token' will fail unless it exists elsewhere; confirm.
    token = serializers.SerializerMethodField(read_only=True)
    expires = serializers.SerializerMethodField(read_only=True)
    message = serializers.SerializerMethodField(read_only=True)

    class Meta:
        model = User
        fields = [
            'username',
            'email',
            'password',
            'password2',
            'token',
            'expires',
            'message',
        ]
        # Never echo the raw password back in responses.
        extra_kwargs = {'password': {'write_only': True}}

    def get_message(self, obj):
        """Static onboarding message shown after registration."""
        return "Thank you for registering. Please verify your email before continuing."

    def get_expires(self, obj):
        """Expiry timestamp for the freshly issued token.

        NOTE(review): the original referenced an undefined ``expire_delta``
        (NameError at runtime); the simplejwt access-token lifetime is used
        here — confirm it matches the token actually issued.
        """
        expire_delta = api_settings.ACCESS_TOKEN_LIFETIME
        return timezone.now() + expire_delta - datetime.timedelta(seconds=200)

    def validate_email(self, value):
        """Reject emails that are already registered (case-insensitive)."""
        qs = User.objects.filter(email__iexact=value)
        if qs.exists():
            raise serializers.ValidationError(
                "User with this email already exists")
        return value

    def validate_username(self, value):
        """Reject usernames that are already registered (case-insensitive)."""
        qs = User.objects.filter(username__iexact=value)
        if qs.exists():
            raise serializers.ValidationError(
                "User with this username already exists")
        return value

    def validate(self, data):
        """Ensure both password fields match; drop the confirmation field
        so it never reaches ``create``."""
        pw = data.get('password')
        pw2 = data.pop('password2')
        if pw != pw2:
            raise serializers.ValidationError("Passwords must match")
        return data

    def create(self, validated_data):
        """Create the user with a hashed password; inactive until the
        email address is verified."""
        user_obj = User(
            username=validated_data.get('username'),
            email=validated_data.get('email'))
        user_obj.set_password(validated_data.get('password'))
        user_obj.is_active = False
        user_obj.save()
        return user_obj
| StarcoderdataPython |
1631868 | <filename>tests/easy/test_valid_parentheses_20.py
from src.easy import valid_parentheses_20
def test_valid_parentheses():
    """Exercise Solution.valid_parentheses on matched and mismatched input."""
    solver = valid_parentheses_20.Solution()
    cases = [
        ("()[]{}", True),
        ("({[]})", True),
        ("{[}]()", False),
    ]
    for text, expected in cases:
        assert solver.valid_parentheses(text) is expected
154069 | <reponame>htetmyet/keyword-extraction
__author__ = "<NAME>"
from refo import finditer, Predicate, Plus
from collections import Counter
from StringIO import StringIO
from get_summary import summary
##from nltk.corpus import stopwords
import math
import nltk
import re
import copy
import glob
import numpy as np
import time
start_time = time.time()
##cachedStopWords = stopwords.words("english")
def convert_to_string(text):
    """Return the part of *text* before the first ':' separator.

    Used to strip POS-tag suffixes from 'word:TAG' strings. Raises
    ValueError (via str.index) when no ':' is present, matching the
    original behaviour. The original's unused locals are removed.
    """
    return text[:text.index(':')]
def remov_stopword(text):
    """Drop every whitespace-separated token of *text* found in the
    stop-word file.

    The stop-word list is re-read on every call, as in the original, but
    the file handle is now closed (the original leaked it) and a set makes
    each membership test O(1) instead of O(n).
    """
    with open('nothesmartstoplist.txt', 'r') as fh:
        stopwords = set(fh.read().splitlines())
    return ' '.join(word for word in text.split() if word not in stopwords)
def get_title(text):
    """The first line of *text* is treated as the document title."""
    return text.splitlines()[0]
def get_first_sen(text):
    """Return the second line of *text* — the first line after the title."""
    return text.splitlines()[1]
class Word(object):
    """A token paired with its part-of-speech tag."""

    def __init__(self, token, pos):
        self.token = token  # surface form of the word
        self.pos = pos      # POS tag, e.g. 'NN', 'JJ'
class W(Predicate):
    # Regex-based refo predicate over Word objects: holds when BOTH the
    # token pattern and the POS pattern match (each anchored at the end
    # with a trailing '$'; defaults match anything).
    def __init__(self, token = ".*", pos = ".*"):
        self.token = re.compile(token + "$")
        self.pos = re.compile(pos + "$")
        super(W, self).__init__(self.match)
    def match(self,word):
        # Both patterns must match from the start of the string.
        m1 = self.token.match(word.token)
        m2 = self.pos.match(word.pos)
        return m1 and m2
def term_frequency(w_tf, max_scr):
    """Augmented term frequency: 0.5 + 0.5 * (count / max count).

    float() guards against Python 2 integer division, which truncated the
    ratio to 0 for every non-maximal term (so almost all terms scored a
    flat 0.5 in the original).
    """
    return 0.5 + 0.5 * (float(w_tf) / max_scr)
def count_total_corpus():
    """Count the doc*.txt files in the ./dataset corpus directory.

    Uses the documented glob.glob instead of the private glob.glob1
    helper (undocumented, and removed in Python 3.13). The forward-slash
    path matches the style used elsewhere in this module.
    """
    return len(glob.glob("dataset/doc*.txt"))
def count_nterm_doc(word):
    """Document frequency: how many corpus documents contain *word*.

    Substring (not token) matching, as in the original. The original's
    while-loop over total..1 becomes an equivalent for-loop, and each
    document handle is now closed (the original leaked one per document).
    """
    num_count = 0
    for doc_idx in range(count_total_corpus(), 0, -1):
        with open('dataset/doc%d.txt' % doc_idx, 'r') as get_doc:
            if word in get_doc.read():
                num_count += 1
    return num_count
def inverse_df(tot_doc, num_of_x_doc):
    """Smoothed inverse document frequency: log10(1 + N/df).

    float() guards against Python 2 integer division inside the log; a
    zero document frequency yields 0, as in the original.
    """
    try:
        return math.log10(1 + float(tot_doc) / num_of_x_doc)
    except ZeroDivisionError:
        return 0
def chk_frs_sen(word, file_name):##1 or 0 (binary)
    """1 if *word* occurs in the document's first-sentence line, else 0.

    The file handle is now closed (the original leaked it) and the
    verbose if/else collapses to a conditional expression.
    """
    with open(file_name, 'r') as test_file:
        rawtext = test_file.read()
    first_sen = get_first_sen(rawtext)
    return 1 if word in first_sen else 0
def involve_in_title(word, get_title):
    """1 if *word* occurs in the title string, else 0.

    NOTE(review): the second parameter's name shadows the module-level
    get_title() function; kept unchanged for interface compatibility.
    """
    return 1 if word in get_title else 0
def get_bi_tfidf(bi_dict, bigrams):
    # Debug helper (not called from main): prints the tf-idf value of each
    # bigram member term found in bi_dict.
    # NOTE(review): `for bigrams[i] in bigrams` rebinds bigrams[0] itself on
    # every pass (i stays 0), mutating the input list — this looks
    # unintentional; verify before reusing this helper.
    i=0
    for bigrams[i] in bigrams:
        for w in bigrams[i]:
            if w in bi_dict:
                print bi_dict[w]
def get_val_bipairs(bi_dict, bigrams):
    """Sum the per-term tf-idf scores for each (x, y) bigram."""
    return [bi_dict[first] + bi_dict[second] for first, second in bigrams]
def get_val_tripairs(tri_dict, trigrams):
    """Sum the per-term tf-idf scores for each (x, y, z) trigram."""
    return [sum(tri_dict[term] for term in gram) for gram in trigrams]
def get_val_fpairs(fgram_dict, fourgrams):
    """Sum the per-term tf-idf scores for each (a, b, c, d) four-gram."""
    return [sum(fgram_dict[term] for term in gram) for gram in fourgrams]
def cal_matrix(ngrams, uni_avgs, name_tfidf, name_fs, name_tit):
    """Classify each n-gram as keyword (K) or non-keyword (NK).

    Multiplies a 1x2 vector [tfidf, avg_tfidf] through three 2x2 weight
    matrices loaded from text files under matrices/ and compares the two
    resulting components. Prints a [K]/[NK] verdict per n-gram and returns
    the list of keyword strings.
    """
    file_tfidf = 'matrices/'+name_tfidf
    file_fs = 'matrices/'+name_fs
    file_tit = 'matrices/'+name_tit
    total_vals = []
    key_words = []
    # Load the three trained weight matrices (space-delimited text).
    with open(file_tfidf, 'r') as f1:
        data1 = f1.read()
        get_tfidf = np.genfromtxt(StringIO(data1), delimiter=" ")
        ##print get_matx
    with open(file_fs, 'r') as f2:
        data2 = f2.read()
        get_fs = np.genfromtxt(StringIO(data2), delimiter=" ")
    with open(file_tit, 'r') as f3:
        data3 = f3.read()
        get_tit = np.genfromtxt(StringIO(data3), delimiter=" ")
    for each in ngrams:
        tfidf_val = str(each[1])
        avg_val = str(uni_avgs)
        # Build the 1x2 input vector via np.matrix's string constructor.
        ini_matx = np.matrix('"'+tfidf_val+' '+avg_val+'"')
        compute_first = ini_matx * get_tfidf
        compute_second = compute_first * get_fs
        final_result = compute_second * get_tit
        ##Decide K or NK
        get_fir_res = final_result.item((0, 0))
        get_sec_res = final_result.item((0, 1))
        if (float(get_fir_res) > float(get_sec_res)):
            print each[0]+'[K]'
            key_words.append(each[0])
        else:
            print each[0]+'[NK]'
            pass
    return key_words
def cal_tri_matrix(ngrams, uni_avgs, name_tfidf, name_fs, name_tit):
    """Classify each trigram as keyword (K) or non-keyword (NK).

    Like cal_matrix, but the second component of the matrix product is
    rescaled (/10 and /uni_avgs) and subtracted from the first before the
    final comparison. Prints a [K]/[NK] verdict per trigram and returns
    the list of keyword strings.
    """
    file_tfidf = 'matrices/'+name_tfidf
    file_fs = 'matrices/'+name_fs
    file_tit = 'matrices/'+name_tit
    total_vals = []
    key_words = []
    # Min/max tf-idf over the candidate set (norm_val/fir_norm are
    # computed but not used in the decision below).
    get_max = max(ngrams, key=lambda x:x[1])
    get_min = min(ngrams, key=lambda x:x[1])
    max_val = float(get_max[1])
    min_val = float(get_min[1])
    all_value = []
    norm_val = max_val - min_val
    fir_norm = math.log10(uni_avgs)
    # Load the three trained weight matrices (space-delimited text).
    with open(file_tfidf, 'r') as f1:
        data1 = f1.read()
        get_tfidf = np.genfromtxt(StringIO(data1), delimiter=" ")
    with open(file_fs, 'r') as f2:
        data2 = f2.read()
        get_fs = np.genfromtxt(StringIO(data2), delimiter=" ")
    with open(file_tit, 'r') as f3:
        data3 = f3.read()
        get_tit = np.genfromtxt(StringIO(data3), delimiter=" ")
    for each in ngrams:
        tfidf_val = str(each[1])
        avg_val = str(uni_avgs)
        ini_matx = np.matrix('"'+tfidf_val+' '+avg_val+'"')
        compute_first = ini_matx * get_tfidf
        compute_second = compute_first * get_fs
        final_result = compute_second * get_tit
        ## GET ITEMS IN MATRIX
        get_fir_res = final_result.item((0, 0))
        get_sec_res = final_result.item((0, 1))
        get_fin_fir = float(get_fir_res)
        # Rescale the second component before comparing.
        get_fin_sec = float((get_sec_res/10)/uni_avgs)
        get_fir_val = float(get_fin_fir - get_fin_sec)
        get_val = np.matrix('"'+str(get_fir_val)+' '+str(get_fin_sec)+'"')
        ##GET MAX & MIN
        ##Decide K or NK
        get_fir_res = get_val.item((0, 0))
        get_sec_res = get_val.item((0, 1))
        if (float(get_fir_res) > float(get_sec_res)):
            print each[0]+'[K]'
            key_words.append(each[0])
        else:
            print each[0]+'[NK]'
            pass
    return key_words
def cal_four_matrix(ngrams, uni_avgs, name_tfidf, name_fs, name_tit):
    """Classify each four-gram as keyword (K) or non-keyword (NK).

    Same rescaled-comparison scheme as cal_tri_matrix, applied to the
    four-gram candidates. Prints a [K]/[NK] verdict per four-gram and
    returns the list of keyword strings.
    """
    file_tfidf = 'matrices/'+name_tfidf
    file_fs = 'matrices/'+name_fs
    file_tit = 'matrices/'+name_tit
    total_vals = []
    key_words = []
    # Min/max tf-idf over the candidate set (norm_val/fir_norm are
    # computed but not used in the decision below).
    get_max = max(ngrams, key=lambda x:x[1])
    get_min = min(ngrams, key=lambda x:x[1])
    max_val = float(get_max[1])
    min_val = float(get_min[1])
    norm_val = max_val - min_val
    fir_norm = math.log10(uni_avgs)
    # Load the three trained weight matrices (space-delimited text).
    with open(file_tfidf, 'r') as f1:
        data1 = f1.read()
        get_tfidf = np.genfromtxt(StringIO(data1), delimiter=" ")
        ##print get_matx
    with open(file_fs, 'r') as f2:
        data2 = f2.read()
        get_fs = np.genfromtxt(StringIO(data2), delimiter=" ")
    with open(file_tit, 'r') as f3:
        data3 = f3.read()
        get_tit = np.genfromtxt(StringIO(data3), delimiter=" ")
    for each in ngrams:
        tfidf_val = str(each[1])
        avg_val = str(uni_avgs)
        ini_matx = np.matrix('"'+tfidf_val+' '+avg_val+'"')
        compute_first = ini_matx * get_tfidf
        compute_second = compute_first * get_fs
        final_result = compute_second * get_tit
        ## GET ITEMS IN MATRIX
        get_fir_res = final_result.item((0, 0))
        get_sec_res = final_result.item((0, 1))
        get_fin_fir = float(get_fir_res)
        # Rescale the second component before comparing.
        get_fin_sec = float((get_sec_res/10)/uni_avgs)
        get_fir_val = float(get_fin_fir - get_fin_sec)
        get_val = np.matrix('"'+str(get_fir_val)+' '+str(get_fin_sec)+'"')
        ##Decide K or NK
        get_fir_res = get_val.item((0, 0))
        get_sec_res = get_val.item((0, 1))
        if (float(get_fir_res) > float(get_sec_res)):
            print each[0]+'[K]'
            key_words.append(each[0])
        else:
            ##pass
            print each[0]+'[NK]'
    return key_words
def main():
    """End-to-end keyword-extraction pipeline (Python 2).

    Prompts for a document path, tokenises and POS-tags it with NLTK,
    collects noun-phrase candidates of 1..4 words via refo pattern rules,
    computes tf-idf / first-sentence / in-title feature matrices for each
    n-gram size, and classifies candidates as keywords with the trained
    weight matrices under matrices/.
    """
    ##set_file_name = raw_input('Enter a file name: ')
    file_name = raw_input('Enter a file name: ')
    test_file = open(file_name, 'r')
    rawtext = test_file.read()
    ##GET ALL KEYWORDS
    get_all_keywords = []
    #Extract title from text
    title = get_title(rawtext)
    first_sen = get_first_sen(rawtext)
    #Get paragraph without title
    para_list = rawtext.splitlines()[1:] #in list
    para_string = ''.join(para_list) #convert to string
    #Prettify paragraph
    prettify_txt = re.sub(r'[^\w.]', ' ', para_string)
    mod_txt = remov_stopword(prettify_txt)
    #Tokenizing & POS Tagging
    token_txt = nltk.sent_tokenize(mod_txt) #Line Segment
    num_sent = len(token_txt) #Number of sentences
    token_word = [nltk.word_tokenize(sent) for sent in token_txt]
    pos_tag = [nltk.pos_tag(sent) for sent in token_word]
    ##print title
    print "Sentence: ", num_sent
    print '\n'
    #Chunk and print NP
    get_nouns = [[Word(*x) for x in sent] for sent in pos_tag]
    #NNP Rules
    # refo predicates over POS tags; rule_0 matches single nouns, the
    # numbered rules match 2-, 3- and 4-word noun-phrase shapes.
    rule_0 = W(pos = "NNS")| W(pos = "NN") | W(pos = "NNP")
    rule_05 = W(pos = "NNP") + W(pos = "NNS")
    rule_1 = W(pos = "WP$") + W(pos = "NNS")
    rule_2 = W(pos = "CD") + W(pos = "NNS")
    rule_3 = W(pos = "NN") + W(pos = "NN")
    rule_4 = W(pos = "NN") + W(pos = "NNS")
    rule_5 = W(pos = "NNP") + W(pos = "CD")
    rule_6 = W(pos = "NNP") + W(pos = "NNP")
    rule_7 = W(pos = "NNP") + W(pos = "NNPS")
    rule_8 = W(pos = "NNP") + W(pos = "NN")
    rule_9 = W(pos = "NNP") + W(pos = "VBZ")
    rule_10 = W(pos = "DT") + W(pos = "NNS")
    rule_11 = W(pos = "DT") + W(pos = "NN")
    rule_12 = W(pos = "DT") + W(pos = "NNP")
    rule_13 = W(pos = "JJ") + W(pos = "NN")
    rule_14 = W(pos = "JJ") + W(pos = "NNS")
    rule_15 = W(pos = "PRP$") + W(pos = "NNS")
    rule_16 = W(pos = "PRP$") + W(pos = "NN")
    rule_02 = W(pos = "NN") + W(pos = "NN") + W(pos = "NN")
    rule_17 = W(pos = "NN") + W(pos = "NNS") + W(pos = "NN")
    rule_18 = W(pos = "NNP") + W(pos = "NNP") + W(pos = "NNP")
    rule_19 = W(pos = "JJ") + W(pos = "NN") + W(pos = "NNS")
    rule_20 = W(pos = "PRP$") + W(pos = "NN") + W(pos = "NN")
    rule_21 = W(pos = "DT") + W(pos = "JJ") + W(pos = "NN")
    rule_22 = W(pos = "DT") + W(pos = "CD") + W(pos = "NNS")
    rule_23 = W(pos = "DT") + W(pos = "VBG") + W(pos = "NN")
    rule_24 = W(pos = "DT") + W(pos = "NN") + W(pos = "NN")
    rule_25 = W(pos = "NNP") + W(pos = "NNP") + W(pos = "VBZ")
    rule_26 = W(pos = "DT") + W(pos = "NNP") + W(pos = "NN")
    rule_27 = W(pos = "DT") + W(pos = "NNP") + W(pos = "NNP")
    rule_28 = W(pos = "DT") + W(pos = "JJ") + W(pos = "NN")
    rule_29 = W(pos = "DT") + W(pos = "NNP") + W(pos = "NNP") + W(pos = "NNP")
    rule_30 = W(pos = "DT") + W(pos = "NNP") + W(pos = "NNP") + W(pos = "NN") if False else W(pos = "DT") + W(pos = "NNP") + W(pos = "NN") + W(pos = "NN")
    NP_bi_gram_set = (rule_05)|(rule_1)|(rule_2)|(rule_3)|(rule_4)|(rule_5)|(rule_6)|(rule_7)|(rule_8)|(rule_9)|(rule_10)|(rule_11)|(rule_12)|(rule_13)|(rule_14)|(rule_15)|(rule_16)
    NP_tri_gram_set = (rule_02)|(rule_17)|(rule_18)|(rule_19)|(rule_20)|(rule_21)|(rule_22)|(rule_23)|(rule_24)|(rule_25)|(rule_26)|(rule_27)|(rule_28)
    NP_quard_gram_set = (rule_29)|(rule_30)
    #Rule set function
    get_uni_gram = (rule_0)
    get_bi_gram = NP_bi_gram_set
    get_tri_gram = NP_tri_gram_set
    get_quard_gram = NP_quard_gram_set
    bag_of_NP = []
    bag_of_biNP = []
    bag_of_triNP = []
    bag_of_fourNP = []
    total__tfidf = 0
    ###################################GET UNIGRAMS###################################
    ##print "UNIGRAM -->"
    for k, s in enumerate(get_nouns):
        for match in finditer(get_uni_gram, s):
            x, y = match.span() #the match spans x to y inside the sentence s
            #print pos_tag[k][x:y]
            bag_of_NP += pos_tag[k][x:y]
    ###############
    #Term Frequency for unigrams
    ##print "\nUnigram Feature Matrices:"
    total__tfidf = 0
    uni_tfidf_values = ''
    str_uni_grams = ''
    total_docs = count_total_corpus()
    fdist = nltk.FreqDist(bag_of_NP)
    print fdist
    ##STORE UNIGRAMS
    unzipped_uni = zip(*bag_of_NP)
    str_unigrams = list(unzipped_uni[0])
    get_unigrams = zip(str_unigrams,str_unigrams[1:])[::1]
    ###############
    ##UNI MAXIMUM TermScore##
    scores = []
    for word in fdist:
        score = fdist[word]
        scores.append(score)
    max_uni = max(scores)
    ######################
    for word in fdist:
        fq_word = fdist[word]
        ##print '%s->%d' % (word, fq_word)
        get_tf = term_frequency(fq_word, max_uni)
        ### FEATURES ###
        ##Tuple to String##
        to_string = ':'.join(word)
        get_this_string = convert_to_string(to_string)
        ##DF Score
        num_of_doc_word = count_nterm_doc(get_this_string)
        ##
        ##TF.IDF Score
        idf_score = inverse_df(total_docs, num_of_doc_word)
        tf_idf_scr = get_tf * idf_score
        total__tfidf += tf_idf_scr
        ##GET EACH UNIGRAMS TFIDF
        uni_tfidf_scr = repr(tf_idf_scr)+' '
        uni_tfidf_values += uni_tfidf_scr
        str_uni_grams += get_this_string+','
    ##BUILD DICT FOR EACH TERMS
    get_uni_float = [float(x) for x in uni_tfidf_values.split()]
    get_uni_list = str_uni_grams.split(',')
    unigram_dict = dict(zip(get_unigrams, get_uni_float))
    ###########################
    ##GET TFIDF FOR UNIGRAMS##
    ############
    uni_avg_tfidf = (sum(map(float,get_uni_float)))/(len(get_uni_float))
    ###########################
    get_zip_str = [''.join(item) for item in str_unigrams]
    ###Unigrams string with TFIDF###
    unigrams_list = zip(get_zip_str, get_uni_float)
    ###########################
    ##print '===============***==============='
    ##    print 'Total Unigrams: ', len(fdist)
    ##    print 'Total tfidf', total__tfidf
    ##print 'Average TF.IDF: ', uni_avg_tfidf
    ##print '===============***==============='
    ###########################
    ##### TFIDF FEATURE MATRIX #####
    uni_feat_tfidf = []
    for x in unigrams_list:
        if float(x[1]) > uni_avg_tfidf:
            uni_feat_tfidf.append(1)
        else:
            uni_feat_tfidf.append(0)
    zip_tfidf_feat = zip(get_zip_str, get_uni_float, uni_feat_tfidf)
    ##print zip_tfidf_feat
    ###############################
    ##### First Sentence Feat #####
    uni_fir_sen = []
    for x in unigrams_list:
        get_res = chk_frs_sen(x[0], file_name)
        if get_res == 1:
            uni_fir_sen.append(1)
        else:
            uni_fir_sen.append(0)
    zip_fir_sen_feat = zip(get_zip_str, get_uni_float, uni_feat_tfidf, uni_fir_sen)
    ############################
    ##### Involve in Title #####
    uni_title_feat = []
    for x in unigrams_list:
        get_res = involve_in_title(x[0], title)
        if get_res == 1:
            uni_title_feat.append(1)
        else:
            uni_title_feat.append(0)
    zip_uni_feats = zip(get_zip_str, get_uni_float, uni_feat_tfidf, uni_fir_sen, uni_title_feat)
    ################################
    ##print "\n\n"
    ###################################GET BIGRAMS###################################
    ##print "BIGRAM -->"
    for k, s in enumerate(get_nouns):
        for match in finditer(get_bi_gram, s):
            x, y = match.span()
            ##print pos_tag[k][x:y]
            bag_of_biNP += pos_tag[k][x:y]
    ##Term Frequency for bigrams##
    total__tfidf = 0
    bi_tfidf_values = ''
    str_bi_grams = ''
    ###############
    ##STORE BIGRAMS
    unzipped = zip(*bag_of_biNP)
    str_bigrams = list(unzipped[0])
    get_bigrams = zip(str_bigrams,str_bigrams[1:])[::2]
    ###############
    ##print "\nBigram Feature Matrices:"
    bi_dist = nltk.FreqDist(bag_of_biNP)
    ##BI MAXIMUM TermScore##
    bi_scores = []
    for word in bi_dist:
        score = bi_dist[word]
        bi_scores.append(score)
    max_bi = max(bi_scores)
    ######################
    for word in bi_dist:
        tq_word = bi_dist[word]
        ##print '%s-->%d' % (word, tq_word)
        get_tf = term_frequency(tq_word, max_bi)
        ### FEATURES ###
        ##Tuple to String##
        to_string = ':'.join(word)
        get_this_string = convert_to_string(to_string)
        ##DF Score
        num_of_doc_word = count_nterm_doc(get_this_string)
        ##TF.IDF Score
        idf_score = inverse_df(total_docs, num_of_doc_word)
        tf_idf_scr = get_tf*idf_score
        total__tfidf += tf_idf_scr
        ##GET EACH BIGRAMS TFIDF
        get_tfidf_scr = repr(tf_idf_scr)+' '
        bi_tfidf_values += get_tfidf_scr
        str_bi_grams += get_this_string+','
    ##BUILD DICT FOR EACH TERMS
    get_float = [float(x) for x in bi_tfidf_values.split()]
    get_bi_list = str_bi_grams.split(',')
    bigram_dict = dict(zip(get_bi_list, get_float))
    ###########################
    ##GET TFIDF FOR BIGRAMS##
    get_bi_floats = get_val_bipairs(bigram_dict, get_bigrams)
    get_zip = dict(zip(get_bigrams, get_bi_floats))
    ############
    real_avg_tfidf = (sum(map(float,get_bi_floats)))/(len(get_bi_floats))
    ###########################
    get_zip_str = [' '.join(item) for item in get_bigrams]
    ###Bigrams string with TFIDF###
    bigrams_list = zip(get_zip_str, get_bi_floats)
    ###########################
    ##print '===============***==============='
    ##print 'Total Bigrams: ', len(get_bi_floats)
    ##print 'total tfidf: ', sum(map(float,get_bi_floats))
    ##print 'Average TF.IDF: ', real_avg_tfidf
    ##print '===============***==============='
    ##print len(bi_str2_float(bi_tfidf_values))
    ##print type(bag_of_biNP)
    ##### TFIDF FEATURE MATRIX #####
    feat_tfidf_matx = []
    for x in bigrams_list:
        if float(x[1]) > real_avg_tfidf:
            feat_tfidf_matx.append(1)
        else:
            feat_tfidf_matx.append(0)
    tfidf_feat = zip(get_zip_str, get_bi_floats, feat_tfidf_matx)
    #################################
    #### FIRST SENTENCE FEATURE ####
    feat_fir_sen = []
    for x in tfidf_feat:
        get_res = chk_frs_sen(x[0], file_name)
        if get_res == 1:
            feat_fir_sen.append(1)
        else:
            feat_fir_sen.append(0)
    fir_sen_feat = zip (get_zip_str, get_bi_floats, feat_tfidf_matx, feat_fir_sen)
    ##print fir_sen_feat
    #################################
    #### INVOLVE IN TITLE FEATURE ###
    feat_invol_tit = []
    for x in fir_sen_feat:
        get_res = involve_in_title(x[0], title)
        if get_res == 1:
            feat_invol_tit.append(1)
        else:
            feat_invol_tit.append(0)
    invol_tit_feat = zip (get_zip_str, get_bi_floats, feat_tfidf_matx, feat_fir_sen, feat_invol_tit)
    invol_tit_feat
    #################################
    ##print "\n\n"
    ###################################GET TRIGRAMS###################################
    ##print "TRIGRAM -->"
    for k, s in enumerate(get_nouns):
        for match in finditer(get_tri_gram, s):
            x, y = match.span()
            ##print pos_tag[k][x:y]
            bag_of_triNP += pos_tag[k][x:y]
    #Term Frequency for trigrams
    total__tfidf = 0
    tri_tfidf_values = ''
    str_tri_grams = ''
    ###############
    ##STORE TRIGRAMS
    unzipped_tri = zip(*bag_of_triNP)
    str_trigrams = list(unzipped_tri[0])
    get_trigrams = zip(str_trigrams,str_trigrams[1:],str_trigrams[2:])[::3]
    ###############
    ##print "\nTrigram Feature Matrices:"
    tri_dist = nltk.FreqDist(bag_of_triNP)
    ##TRI MAXIMUM TermScore##
    tri_scores = []
    for word in tri_dist:
        score = tri_dist[word]
        tri_scores.append(score)
    max_tri = max(tri_scores)
    ######################
    for word in tri_dist:
        tr_fq = tri_dist[word]
        ##print '%s-->%d' % (word, tr_fq)
        get_tf = term_frequency(tr_fq, max_tri)
        ### FEATURES ###
        ##Tuple to String##
        to_string = ':'.join(word)
        get_this_string = convert_to_string(to_string)
        ##DF Score
        num_of_doc_word = count_nterm_doc(get_this_string)
        ##
        ##TF.IDF Score
        idf_score = inverse_df(total_docs, num_of_doc_word)
        tf_idf_scr = get_tf * idf_score
        total__tfidf += tf_idf_scr
        ##GET EACH TRIGRAMS TFIDF
        get_tfidf_scr = repr(tf_idf_scr)+' '
        tri_tfidf_values += get_tfidf_scr
        str_tri_grams += get_this_string+','
    ##BUILD DICT FOR EACH TERMS
    get_tri_float = [float(x) for x in tri_tfidf_values.split()]
    get_tri_list = str_tri_grams.split(',')
    trigram_dict = dict(zip(get_tri_list, get_tri_float))
    ###########################
    ##GET TFIDF FOR TRIGRAMS##
    get_tri_floats = get_val_tripairs(trigram_dict, get_trigrams)
    get_tri_zip = dict(zip(get_trigrams, get_tri_floats))
    ############
    tri_avg_tfidf = (sum(map(float,get_tri_floats)))/(len(get_tri_floats))
    ###########################
    get_ziptri_str = [' '.join(item) for item in get_trigrams]
    ###Bigrams string with TFIDF###
    trigrams_list = zip(get_ziptri_str, get_tri_floats)
    ###########################
    ##print '===============***==============='
    ##print 'Total Trigrams: ', len(get_tri_floats)
    ##print 'Total tfidf', sum(map(float,get_tri_floats))
    ##print 'Average TF.IDF: ', tri_avg_tfidf
    ##print '===============***==============='
    ##### TFIDF FEATURE MATRIX #####
    tri_tfidf_matx = []
    for x in trigrams_list:
        if float(x[1]) > tri_avg_tfidf:
            tri_tfidf_matx.append(1)
        else:
            tri_tfidf_matx.append(0)
    tri_tfidf_feat = zip(get_ziptri_str, get_tri_floats, tri_tfidf_matx)
    ################################
    #### FIRST SENTENCE FEATURE ####
    tri_fir_sen = []
    for x in tri_tfidf_feat:
        get_res = chk_frs_sen(x[0], file_name)
        if get_res == 1:
            tri_fir_sen.append(1)
        else:
            tri_fir_sen.append(0)
    tri_sen_feat = zip (get_ziptri_str, get_tri_floats, tri_tfidf_matx, tri_fir_sen)
    #################################
    #### INVOLVE IN TITLE FEATURE ###
    tri_invol_tit = []
    for x in tri_sen_feat:
        get_res = involve_in_title(x[0], title)
        if get_res == 1:
            tri_invol_tit.append(1)
        else:
            tri_invol_tit.append(0)
    tri_tit_feat = zip (get_ziptri_str, get_tri_floats, tri_tfidf_matx, tri_fir_sen, tri_invol_tit)
    #################################
    ##print "\n\n"
    ###################################GET 4-GRAMS###################################
    ##print "4th GRAM -->"
    for k, s in enumerate(get_nouns):
        for match in finditer(get_quard_gram, s):
            x,y = match.span()
            ##print pos_tag[k][x:y]
            bag_of_fourNP += pos_tag[k][x:y]
    #Term Frequency for 4-grams
    total__tfidf = 0
    four_tfidf_values = ''
    str_four_grams = ''
    ###############
    if (len(bag_of_fourNP)>0):
        ##STORE 4-GRAMS
        unzipped_four = zip(*bag_of_fourNP)
        str_fourgrams = list(unzipped_four[0])
        get_fourgrams = zip(str_fourgrams,str_fourgrams[1:],str_fourgrams[2:],str_fourgrams[3:])[::4]
        ###############
        #Term Frequency for 4-grams
        total__tfidf = 0
        ##print "\n4-grams Feature Matrices:"
        f_dist = nltk.FreqDist(bag_of_fourNP)
        ##4 MAXIMUM TermScore##
        four_scores = []
        for word in f_dist:
            score = f_dist[word]
            four_scores.append(score)
        max_four = max(four_scores)
        ######################
        for word in f_dist:
            fr_fq = f_dist[word]
            ##print '%s-->%d' % (word, fr_fq)
            get_tf = term_frequency(fr_fq, max_four)
            ### FEATURES ###
            ##Tuple to String##
            to_string = ':'.join(word)
            get_this_string = convert_to_string(to_string)
            ##DF Score
            num_of_doc_word = count_nterm_doc(get_this_string)
            ##
            ##TF.IDF Score
            idf_score = inverse_df(total_docs, num_of_doc_word)
            tf_idf_scr = get_tf * idf_score
            total__tfidf += tf_idf_scr
            ##GET EACH FOURGRAMS TFIDF
            get_tfidf_scr = repr(tf_idf_scr)+' '
            four_tfidf_values += get_tfidf_scr
            str_four_grams += get_this_string+','
        ##BUILD DICT FOR EACH TERMS
        get_four_float = [float(x) for x in four_tfidf_values.split()]
        get_four_list = str_four_grams.split(',')
        fourgram_dict = dict(zip(get_four_list, get_four_float))
        ###########################
        ##GET TFIDF FOR 4-GRAMS##
        get_four_floats = get_val_fpairs(fourgram_dict, get_fourgrams)
        get_four_zip = dict(zip(get_fourgrams, get_four_floats))
        ############
        four_avg_tfidf = (sum(map(float,get_four_floats)))/(len(get_four_floats))
        ###########################
        get_zipfour_str = [' '.join(item) for item in get_fourgrams]
        ###Bigrams string with TFIDF###
        fourgrams_list = zip(get_zipfour_str, get_four_floats)
        ###########################
        ##print '===============***==============='
        ##print 'Total 4-grams: ', len(get_four_floats)
        ##print 'Total tfidf', sum(map(float,get_four_floats))
        ##print 'Average TF.IDF: ', four_avg_tfidf
        ##print '===============***==============='
        ##### TFIDF FEATURE MATRIX #####
        four_tfidf_matx = []
        for x in fourgrams_list:
            if float(x[1]) > four_avg_tfidf:
                four_tfidf_matx.append(1)
            else:
                four_tfidf_matx.append(0)
        four_tfidf_feat = zip(get_zipfour_str, get_four_floats, four_tfidf_matx)
        #################################
        #### FIRST SENTENCE FEATURE ####
        four_fir_sen = []
        for x in four_tfidf_feat:
            get_res = chk_frs_sen(x[0], file_name)
            if get_res == 1:
                four_fir_sen.append(1)
            else:
                four_fir_sen.append(0)
        four_sen_feat = zip (get_zipfour_str, get_four_floats, four_tfidf_matx, four_fir_sen)
        #################################
        #### INVOLVE IN TITLE FEATURE ###
        # NOTE(review): this loop iterates tri_sen_feat (trigrams), not
        # four_sen_feat — looks like a copy/paste slip; verify intent.
        four_invol_tit = []
        for x in tri_sen_feat:
            get_res = involve_in_title(x[0], title)
            if get_res == 1:
                four_invol_tit.append(1)
            else:
                four_invol_tit.append(0)
        four_tit_feat = zip (get_zipfour_str, get_four_floats,four_tfidf_matx, four_fir_sen, four_invol_tit)
        #################################
    else:
        four_tit_feat = ''
        print 'Zero Fourgram\n'
    ##print zip_uni_feats, invol_tit_feat, tri_tit_feat, four_tit_feat
    ##print uni_avg_tfidf,real_avg_tfidf, tri_avg_tfidf,four_avg_tfidf
    # Classify every candidate set with its trained weight matrices.
    key_unigram = cal_matrix(zip_uni_feats, uni_avg_tfidf,'uni_tf.txt','uni_fs.txt','uni_tit.txt')
    print '\n'
    key_bigram = cal_matrix(invol_tit_feat, real_avg_tfidf,'bi_tf.txt','bi_fs.txt','bi_tit.txt')
    print '\n'
    key_trigram = cal_tri_matrix(tri_tit_feat, tri_avg_tfidf,'tri_tf.txt','tri_fs.txt','tri_tit.txt')
    print '\n'
    if not four_tit_feat:
        print 'No 4-grams in document.'
        get_all_keywords = key_unigram + key_bigram + key_trigram
        print len(get_all_keywords),' keywords for total n-grams.'
        get_time = (time.time() - start_time)
        get_milli = get_time*1000
        print("--- %s seconds ---" % get_time)
    else:
        key_four = cal_four_matrix(four_tit_feat, four_avg_tfidf,'four_tf.txt','four_fs.txt','four_tit.txt')
        ##get_all_keywords = key_unigram + key_bigram + key_trigram + key_four
        get_all_keywords = key_unigram + key_bigram + key_trigram + key_four
        print len(get_all_keywords),' keywords for total n-grams.'
        get_time = (time.time() - start_time)
        get_milli = get_time*1000
        print("--- %s seconds ---" % get_time)
    ##GET SUMMARY##
    ##summary(key_unigram, title, prettify_txt)
if __name__ == '__main__':
    main()
    # Report total wall-clock runtime of the extraction run.
    get_time = (time.time() - start_time)
    get_milli = get_time*1000  # milliseconds (currently unused)
    print("--- %s seconds ---" % get_time)
| StarcoderdataPython |
136741 | <gh_stars>1-10
# Users: Make a class called User. Create two attributes called first_name and last_name,
# and then create several other attributes that are typically stored in a user profile.
# Make a method called describe_user() that prints a summary of the user’s information.
# Make another method called greet_user() that prints a personalized greeting to the user.
# Create several instances representing different users, and call both methods for each user.
class User:
"""Creating a user"""
def __init__(self, first_name, last_name, username, email, location):
self.first_name = first_name
self.last_name = last_name
self.email = email
self.location = location
self.username = username
def describe_user(self):
"""Describing a user"""
print(f' First Name: {self.first_name.title()}')
print(f' Last Name: {self.last_name.title()}')
print(f" Username: {self.username}")
print(f" Email: {self.email}")
print(f" Location: {self.location}")
def greet_user(self):
print(f'\n Hello {self.username}!')
user1 = User('carolina', 'rolo', 'c_rolo', '<EMAIL>', 'floripa')
user1.describe_user()
user1.greet_user()
user2 = User('carla', 'rolo', 'carlinha', '<EMAIL>', 'sao paulo')
user2.describe_user()
user2.greet_user()
| StarcoderdataPython |
11706 | <filename>regtests/webclgl/call_external_method.py
"""external method"""
class myclass:
def __init__(self, i):
self.index = i
def get_index(self):
return self.index
def run(self, n):
self.intarray = new(Int16Array(n))
self.intarray[ self.index ] = 99
@returns( array=n )
@gpu.main
def gpufunc():
int* A = self.intarray
## GLSL compile error: `Index expression must be constant`
#int idx = self.get_index()
#return float( A[idx] )
return float( A[self.get_index()] )
return gpufunc()
def main():
m = myclass(10)
r = m.run(64)
print(r)
TestError( int(r[10])==99 ) | StarcoderdataPython |
3302056 | from api.cases.models import Advice
from api.cases.enums import AdviceLevel, AdviceType
from api.users.tests.factories import GovUserFactory
from test_helpers.clients import DataTestClient
from django.urls import reverse
class DeleteUserAdviceTests(DataTestClient):
    """Tests for the user-level advice DELETE endpoint on a case."""

    def setUp(self):
        super().setUp()
        # A submitted standard application owned by self.organisation.
        self.application = self.create_draft_standard_application(self.organisation)
        self.case = self.submit_application(self.application)
        # Second gov user on the same team, used to check that only the
        # requesting user's advice gets deleted.
        self.gov_user_2 = GovUserFactory(baseuser_ptr__email="<EMAIL>", team=self.team)
        self.standard_case_url = reverse("cases:user_advice", kwargs={"pk": self.case.id})

    def test_delete_current_user_advice(self):
        """DELETE removes only the advice belonging to the requesting user."""
        self.create_advice(self.gov_user, self.application, "end_user", AdviceType.APPROVE, AdviceLevel.USER)
        self.create_advice(self.gov_user_2, self.application, "good", AdviceType.REFUSE, AdviceLevel.USER)
        self.create_advice(self.gov_user, self.application, "good", AdviceType.PROVISO, AdviceLevel.USER)
        resp = self.client.delete(self.standard_case_url, **self.gov_headers)
        self.assertEqual(resp.status_code, 200)
        # Only the other user's advice should survive.
        remaining_records = Advice.objects.all()
        self.assertEqual(remaining_records.count(), 1)
        self.assertEqual(remaining_records[0].user, self.gov_user_2)

    def test_creates_audit_trail(self):
        """Deleting advice writes an entry to the case activity feed."""
        self.create_advice(self.gov_user, self.application, "end_user", AdviceType.APPROVE, AdviceLevel.USER)
        self.client.delete(self.standard_case_url, **self.gov_headers)
        response = self.client.get(reverse("cases:activity", kwargs={"pk": self.application.id}), **self.gov_headers)
        audit_entries = response.json()["activity"]
        self.assertEqual(len(audit_entries), 2)  # one entry for case creation, one entry for advice deletion
        self.assertEqual(len([a for a in audit_entries if a["text"] == "cleared user advice."]), 1)
| StarcoderdataPython |
3357672 | <reponame>glwagner/doublyPeriodicModels
import time, sys
import numpy as np
import matplotlib.pyplot as plt
sys.path.append('../../')
from py2Periodic.physics import twoLayerQG
from numpy import pi
# Model parameters for the two-layer quasi-geostrophic run.
# NOTE(review): units are presumably SI (m, s) -- confirm against the
# py2Periodic twoLayerQG documentation.
qgParams = {
    'f0'         : 1e-4,
    'Lx'         : 1e6,
    'beta'       : 1.5e-11,
    'defRadius'  : 1.5e4,
    'H1'         : 500.0,
    'H2'         : 2000.0,
    'U1'         : 5e-2,
    'U2'         : 1e-2,
    'bottomDrag' : 1e-7,
    'nx'         : 128,
    'dt'         : 1e3,
    'visc'       : 1e9,
    'viscOrder'  : 4.0,
    'timeStepper': 'AB3',
    'nThreads'   : 4,
    'useFilter'  : False,
}

# Create the two-layer model
qg = twoLayerQG.model(**qgParams)
qg.describe_model()

# Initial condition to seed baroclinic instability: small random vorticity
# perturbations of magnitude Ro*f0 in both layers.
Ro = 1.0e-3
f0 = 1.0e-4
q1 = Ro*f0*np.random.standard_normal(qg.physVarShape)
q2 = Ro*f0*np.random.standard_normal(qg.physVarShape)
qg.set_q1_and_q2(q1, q2)

# Gaussian hill topography centred in the domain, width Lx/20, height 10% of H2.
(x0, y0, r) = (qg.Lx/2.0, qg.Ly/2.0, qg.Lx/20.0)
h = 0.1*qg.H2*np.exp( -( (qg.x-x0)**2.0 + (qg.y-y0)**2.0 )/(2.0*r**2.0) )
qg.set_topography(h)

# Run a loop: 10 chunks of nt steps, replotting the layer vorticities after each.
nt = 1e4
for ii in np.arange(0, 1e1):
    qg.step_nSteps(nSteps=nt, dnLog=nt)
    qg.update_state_variables()
    fig = plt.figure('Perturbation vorticity', figsize=(8, 6)); plt.clf()
    plt.subplot(121); plt.imshow(qg.q1)
    plt.subplot(122); plt.imshow(qg.q2)
    plt.pause(0.01), plt.draw()

# Save the result in an npz archive
np.savez('sampleQGTurb.npz', q1=qg.q1, q2=qg.q2, qgParams=qgParams,
         h=h)
| StarcoderdataPython |
4829458 | import datetime
import functools
import logging
import os
import sys
import click
import pkg_resources
from appdirs import AppDirs
from click._termui_impl import Editor
from .. import __version__
from ..aliases import aliases_database
from ..plugins import plugins_registry
from ..projects import ProjectsDb
from ..settings import Settings
from ..timesheet import TimesheetCollection, TimesheetParser
from ..ui.tty import TtyUi
from .types import Date, ExpandedPath, Hostname
logger = logging.getLogger(__name__)

# Platform-appropriate config/data directories for the "taxi" app by "sephii".
xdg_dirs = AppDirs("taxi", "sephii")

# Disable click 5.0 unicode_literals warnings. See
# http://click.pocoo.org/5/python3/
click.disable_unicode_literals_warning = True
def get_timesheet_collection_for_context(ctx, entries_file=None):
    """
    Return a :class:`~taxi.timesheet.TimesheetCollection` object with the current timesheet(s). Since this depends on
    the settings (to get the entries files path, the number of previous files, etc) this uses the settings object from
    the current command context. If `entries_file` is set, this forces the path of the file to be used.
    """
    if not entries_file:
        # Fall back to the configured entries file.
        # NOTE(review): confirm the meaning of the boolean argument against
        # Settings.get_entries_file_path -- it is not visible from here.
        entries_file = ctx.obj['settings'].get_entries_file_path(False)

    # Parser configured from user settings (date format, insertion position,
    # flag representations).
    parser = TimesheetParser(
        date_format=ctx.obj['settings']['date_format'],
        add_date_to_bottom=ctx.obj['settings'].get_add_to_bottom(),
        flags_repr=ctx.obj['settings'].get_flags(),
    )

    return TimesheetCollection.load(entries_file, ctx.obj['settings']['nb_previous_files'], parser)
def populate_aliases(aliases):
    """Replace the contents of the global aliases database with `aliases`."""
    aliases_database.reset()
    aliases_database.update(aliases)
def populate_backends(backends, context):
    """Register the configured backends in the global plugins registry."""
    plugins_registry.populate_backends(dict(backends), context)
def create_config_file(filename):
    """
    Create main configuration file if it doesn't exist.

    Three cases are handled:

    * a legacy ``.tksrc`` file exists next to `filename`: offer to convert it;
    * no configuration exists at all: run the interactive first-run wizard
      and render ``etc/taxirc.sample`` with the user's answers;
    * `filename` exists: apply any pending settings conversions in place.
    """
    import textwrap
    from urllib import parse

    if not os.path.exists(filename):
        # Legacy pre-4.0 configuration file lived next to the new location.
        old_default_config_file = os.path.join(os.path.dirname(filename),
                                               '.tksrc')
        if os.path.exists(old_default_config_file):
            upgrade = click.confirm("\n".join(textwrap.wrap(
                "It looks like you recently updated Taxi. Some "
                "configuration changes are required. You can either let "
                "me upgrade your configuration file or do it "
                "manually.")) + "\n\nProceed with automatic configuration "
                "file upgrade?", default=True
            )

            if upgrade:
                settings = Settings(old_default_config_file)
                settings.convert_to_4()
                with open(filename, 'w') as config_file:
                    settings.config.write(config_file)
                os.remove(old_default_config_file)
                return
            else:
                print("Ok then.")
                sys.exit(0)

        # First-run wizard: greet the user and collect connection details.
        welcome_msg = "Welcome to Taxi!"
        click.secho(welcome_msg, fg='green', bold=True)
        click.secho('=' * len(welcome_msg) + '\n', fg='green', bold=True)
        click.echo(click.wrap_text(
            "It looks like this is the first time you run Taxi. You will need "
            "a configuration file ({}) in order to proceed. Please answer a "
            "few questions to create your configuration file.".format(
                filename
            )
        ) + '\n')

        # Template config shipped with the package; rendered with str.format
        # using the answers gathered below.
        config = pkg_resources.resource_string('taxi', 'etc/taxirc.sample').decode('utf-8')
        context = {}

        available_backends = plugins_registry.get_available_backends()
        context['backend'] = click.prompt(
            "Backend you want to use (choices are %s)" %
            ', '.join(available_backends),
            type=click.Choice(available_backends)
        )
        context['username'] = click.prompt("Username or token")
        # URL-quote the password since it ends up in a backend URI.
        context['password'] = parse.quote(
            click.prompt("Password (leave empty if you're using"
                         " a token)", hide_input=True, default=''),
            safe=''
        )
        # Password can be empty in case of token auth so the ':' separator
        # is not included in the template config, so we add it if the user
        # has set a password
        if context['password']:
            context['password'] = ':' + context['password']
        context['hostname'] = click.prompt(
            "Hostname of the backend (eg. timesheets.example.com)",
            type=Hostname()
        )

        editor = Editor().get_editor()
        context['editor'] = click.prompt(
            "Editor command to edit your timesheets", default=editor
        )

        templated_config = config.format(**context)
        directory = os.path.dirname(filename)

        if not os.path.exists(directory):
            os.makedirs(directory)

        with open(filename, 'w') as f:
            f.write(templated_config)
    else:
        # Config exists: run any pending format conversions and persist them.
        settings = Settings(filename)
        conversions = settings.needed_conversions

        if conversions:
            for conversion in conversions:
                conversion()

            settings.write_config()
class AliasedCommand(click.Command):
    """Click command that can be invoked under alternative names.

    Accepts an extra ``aliases`` keyword argument (an iterable of strings);
    the names are stored as a set in :attr:`aliases` and consulted by
    :class:`AliasedGroup` when resolving command names.
    """
    def __init__(self, *args, **kwargs):
        alias_names = kwargs.pop('aliases', [])
        self.aliases = set(alias_names)
        super(AliasedCommand, self).__init__(*args, **kwargs)
class AliasedGroup(click.Group):
    """
    Command group that supports both custom aliases and prefix-matching. The
    commands are checked in this order:

    * Exact command name
    * Command aliases
    * Command prefix
    """
    def get_command(self, ctx, cmd_name):
        rv = super(AliasedGroup, self).get_command(ctx, cmd_name)

        # Exact command exists, go with this
        if rv is not None:
            return rv

        # Check in aliases
        for name, command in self.commands.items():
            if (isinstance(command, AliasedCommand)
                    and cmd_name in command.aliases):
                return super(AliasedGroup, self).get_command(ctx, name)

        # Check in prefixes
        matches = [x for x in self.list_commands(ctx)
                   if x.startswith(cmd_name)]

        if not matches:
            return None
        elif len(matches) == 1:
            return click.Group.get_command(self, ctx, matches[0])

        # Ambiguous prefix: abort with the list of candidate commands.
        ctx.fail('Too many matches: %s' % ', '.join(sorted(matches)))
        return None
def date_options(func):
    """
    Decorator to add support for `--today/--not-today`, `--from` and `--to` options to the given command. The
    calculated date is then passed as a parameter named `date`.
    """
    @click.option(
        '--until', type=Date(), help="Only show entries until the given date."
    )
    @click.option(
        '--since', type=Date(), help="Only show entries starting at the given date.",
    )
    @click.option(
        '--today/--not-today', default=None, help="Only include today's entries (same as --since=today --until=today)"
                                                  " or ignore today's entries (same as --until=yesterday)"
    )
    @functools.wraps(func)
    def wrapper(*args, **kwargs):
        since, until, today = kwargs.pop('since'), kwargs.pop('until'), kwargs.pop('today')

        # --today/--not-today takes precedence over --since/--until; `date`
        # ends up either a single date, a (since, until) tuple, or None.
        if today is not None:
            if today:
                date = datetime.date.today()
            else:
                date = (None, datetime.date.today() - datetime.timedelta(days=1))
        elif since is not None or until is not None:
            date = (since, until)
        else:
            date = None

        kwargs['date'] = date

        return func(*args, **kwargs)
    return wrapper
def print_version(ctx, param, value):
    """Eager click callback for ``--version``: print the version and exit."""
    if not value or ctx.resilient_parsing:
        return

    click.echo('Taxi %s' % __version__)
    ctx.exit()
def get_config_file():
    """Return the path of the configuration file to use.

    A legacy ``~/.taxirc`` file takes precedence when it exists; otherwise
    the platform's XDG config directory is used.
    """
    legacy_path = os.path.join(os.path.expanduser('~'), '.taxirc')
    if os.path.isfile(legacy_path):
        return legacy_path
    return os.path.join(xdg_dirs.user_config_dir, 'taxirc')
def get_data_dir():
    """Return the directory used for Taxi's internal files.

    A legacy ``~/.taxi`` directory takes precedence when present; otherwise
    the platform's XDG data directory is used.
    """
    legacy_dir = os.path.join(os.path.expanduser('~'), '.taxi')
    return legacy_dir if os.path.isdir(legacy_dir) else xdg_dirs.user_data_dir
@click.group(cls=AliasedGroup)
@click.option('--config', '-c', default=get_config_file(),
              type=ExpandedPath(dir_okay=False),
              help="Path to the configuration file to use.")
@click.option('--taxi-dir', default=get_data_dir(),
              type=ExpandedPath(file_okay=False), help="Path to the directory "
              "that will be used for internal files.")
@click.option('--version', is_flag=True, callback=print_version,
              expose_value=False, is_eager=True,
              help="Print version number and exit.")
@click.option('--verbose', '-v', count=True,
              help="Verbose mode, repeat to increase verbosity (-v = warning,"
                   " -vv = info, -vvv = debug).")
@click.pass_context
def cli(ctx, config, taxi_dir, verbose):
    # No docstring on purpose: click would display it as the group help text.
    # Map repeated -v flags to logging levels (1 = -v ... 3 = -vvv).
    verbosity_mapping = {
        1: logging.WARNING,
        2: logging.INFO,
        3: logging.DEBUG,
    }

    if verbose > 0:
        try:
            logging.basicConfig(level=verbosity_mapping[verbose])
        except KeyError:
            raise click.ClickException("Max verbosity is -vvv")

    logging.debug("Using configuration file in %s", config)
    logging.debug("Using data directory %s", taxi_dir)

    # Create/upgrade the config file before loading settings from it.
    create_config_file(config)
    settings = Settings(config)

    if not os.path.exists(taxi_dir):
        os.makedirs(taxi_dir)

    # Shared state for subcommands, accessed through ctx.obj.
    ctx.obj = {}
    ctx.obj['settings'] = settings
    ctx.obj['view'] = TtyUi()
    ctx.obj['projects_db'] = ProjectsDb(os.path.expanduser(taxi_dir))

    populate_aliases(settings.get_aliases())
    populate_backends(settings.get_backends(), ctx.obj)
    # This can't be called from inside a command because Click will already have built its commands list
    plugins_registry.register_commands()
| StarcoderdataPython |
26404 | import numpy as np
import matplotlib.pyplot as plt
import cv2
class LaneIdentifier:
    """Finds left/right lane-line pixels in a binary warped (bird's-eye)
    image and fits a second-order polynomial to each line, smoothing the
    fit over the most recent ``smooth_factor`` accumulated pixels.

    NOTE(review): ``np.int`` was removed in NumPy 1.24; plain ``int()`` is
    needed on modern NumPy -- confirm the pinned NumPy version.
    """

    def __init__(self, smooth_factor, filter):
        # Pixel indices assigned to each lane by the latest search.
        self.left_lane_inds = []
        self.right_lane_inds = []
        self.lane_gap = []          # NOTE(review): never used in this class
        self.binary_warped = None   # most recent binary input image
        self.window_height = None   # sliding-window height in pixels
        self.leftx_current = 0
        self.rightx_current = 0
        self.nonzeroy = None        # y coords of all non-zero pixels
        self.nonzerox = None        # x coords of all non-zero pixels
        self.left_fit = None        # polyfit coefficients, left lane
        self.right_fit = None       # polyfit coefficients, right lane
        self.margin = 100           # half-width of each search window (px)
        self.nwindows = 9           # number of vertical sliding windows
        self.minpix = 50            # min hits needed to recenter a window
        # Accumulated pixel coordinates used for temporal smoothing.
        self.leftx = []
        self.lefty = []
        self.rightx = []
        self.righty = []
        self.smooth_factor = smooth_factor  # how many recent pixels to keep
        self.filter = filter                # outlier distance threshold (px)
        return

    def identify_lanes(self, binary):
        """Search `binary` for lanes; return (ok, left_fit, right_fit).

        A blind histogram/sliding-window search is used until both fits
        exist; afterwards a faster search around the previous fits is used.
        """
        self.binary_warped = binary
        self.window_height = np.int(self.binary_warped.shape[0] // self.nwindows)
        nonzero = binary.nonzero()
        self.nonzeroy = np.array(nonzero[0])
        self.nonzerox = np.array(nonzero[1])
        if self.left_fit is None or self.right_fit is None:
            self.blind_sliding_window_search()
        else:
            self.selective_window_search()
        ret = self.extract_lane_lines()
        if ret is False:
            return False, None, None
        return True, self.left_fit, self.right_fit

    def blind_sliding_window_search(self):
        """Locate lane pixels from scratch with histogram + sliding windows."""
        # Column histogram of the lower image half: peaks locate lane bases.
        histogram = np.sum(self.binary_warped[self.binary_warped.shape[0] // 2:, :], axis=0)
        midpoint = np.int(histogram.shape[0] // 2)
        leftx_current = np.argmax(histogram[:midpoint])
        rightx_current = np.argmax(histogram[midpoint:]) + midpoint
        l_lane_inds = []
        r_lane_inds = []
        for window in range(self.nwindows):
            # Window bounds, stepping from the image bottom upwards.
            win_y_low = self.binary_warped.shape[0] - (window + 1) * self.window_height
            win_y_high = self.binary_warped.shape[0] - window * self.window_height
            win_xleft_low = leftx_current - self.margin
            win_xleft_high = leftx_current + self.margin
            win_xright_low = rightx_current - self.margin
            win_xright_high = rightx_current + self.margin
            good_left_inds = ((self.nonzeroy >= win_y_low) &
                              (self.nonzeroy < win_y_high) &
                              (self.nonzerox >= win_xleft_low) &
                              (self.nonzerox < win_xleft_high)).nonzero()[0]
            good_right_inds = ((self.nonzeroy >= win_y_low) &
                               (self.nonzeroy < win_y_high) &
                               (self.nonzerox >= win_xright_low) &
                               (self.nonzerox < win_xright_high)).nonzero()[0]
            l_lane_inds.append(good_left_inds)
            r_lane_inds.append(good_right_inds)
            # Recenter the next window on the mean x when enough pixels hit.
            if len(good_left_inds) > self.minpix:
                leftx_current = np.int(np.mean(self.nonzerox[good_left_inds]))
            if len(good_right_inds) > self.minpix:
                rightx_current = np.int(np.mean(self.nonzerox[good_right_inds]))
        self.left_lane_inds = np.concatenate(l_lane_inds)
        self.right_lane_inds = np.concatenate(r_lane_inds)
        return

    def selective_window_search(self):
        """Select pixels within +/- margin of the previous frame's fits."""
        self.left_lane_inds = ((self.nonzerox >
                                (self.left_fit[0]*(self.nonzeroy**2) + self.left_fit[1]*self.nonzeroy +
                                 self.left_fit[2] - self.margin)) &
                               (self.nonzerox <
                                (self.left_fit[0] * (self.nonzeroy ** 2) + self.left_fit[1]*self.nonzeroy +
                                 self.left_fit[2] + self.margin)))
        self.right_lane_inds = ((self.nonzerox >
                                 (self.right_fit[0] * (self.nonzeroy ** 2) + self.right_fit[1] * self.nonzeroy +
                                  self.right_fit[2] - self.margin)) &
                                (self.nonzerox <
                                 (self.right_fit[0] * (self.nonzeroy ** 2) + self.right_fit[1] * self.nonzeroy +
                                  self.right_fit[2] + self.margin)))
        return

    def extract_lane_lines(self):
        """Filter outliers, smooth over recent frames and refit both lanes."""
        # Extract left and right line pixel positions
        leftx = self.nonzerox[self.left_lane_inds]
        lefty = self.nonzeroy[self.left_lane_inds]
        rightx = self.nonzerox[self.right_lane_inds]
        righty = self.nonzeroy[self.right_lane_inds]
        if leftx.size == 0 or rightx.size == 0:
            # No pixels found and no previous fit to fall back on: give up.
            if self.left_fit is None or self.right_fit is None:
                return False
        # Outliers filter, delete those that far away from previous
        # recognized lane curve.
        # NOTE(review): the local `range` shadows the builtin of that name.
        if self.left_fit is not None:
            leftx_trend = self.left_fit[0]*lefty*lefty + self.left_fit[1]*lefty + self.left_fit[2]
            range = abs(leftx - leftx_trend)
            indices = (range > self.filter).nonzero()
            leftx = np.delete(leftx, indices)
            lefty = np.delete(lefty, indices)
        if self.right_fit is not None:
            rightx_trend = self.right_fit[0]*righty*righty + self.right_fit[1]*righty + self.right_fit[2]
            range = abs(rightx - rightx_trend)
            indices = (range > self.filter).nonzero()
            rightx = np.delete(rightx, indices)
            righty = np.delete(righty, indices)
        # Take previous identified pixels into 2nd order polynomial
        # calculation, in order to alleviate oscillation.
        self.leftx = np.append(self.leftx, leftx)
        self.lefty = np.append(self.lefty, lefty)
        self.rightx = np.append(self.rightx, rightx)
        self.righty = np.append(self.righty, righty)
        self.leftx = self.leftx[-self.smooth_factor:]
        self.lefty = self.lefty[-self.smooth_factor:]
        self.rightx = self.rightx[-self.smooth_factor:]
        self.righty = self.righty[-self.smooth_factor:]
        # Fit a second order polynomial to each
        self.left_fit = np.polyfit(self.lefty, self.leftx, 2)
        self.right_fit = np.polyfit(self.righty, self.rightx, 2)
        return True

    def visualization(self):
        """Plot detected lane pixels and the fitted search corridors."""
        # Generate x and y values for plotting
        ploty = np.linspace(0, self.binary_warped.shape[0] - 1, self.binary_warped.shape[0])
        left_fitx = self.left_fit[0] * ploty ** 2 + self.left_fit[1] * ploty + self.left_fit[2]
        right_fitx = self.right_fit[0] * ploty ** 2 + self.right_fit[1] * ploty + self.right_fit[2]
        # Create an image to draw on and an image to show the selection window
        out_img = np.dstack((self.binary_warped, self.binary_warped, self.binary_warped)) * 255
        fit_img = np.zeros_like(out_img)
        window_img = np.zeros_like(out_img)
        # Color in left and right line pixels
        out_img[self.nonzeroy[self.left_lane_inds], self.nonzerox[self.left_lane_inds]] = [255, 0, 0]
        out_img[self.nonzeroy[self.right_lane_inds], self.nonzerox[self.right_lane_inds]] = [0, 0, 255]
        # Generate a polygon to illustrate the search window area
        # And recast the x and y points into usable format for cv2.fillPoly()
        left_line_window1 = np.array([np.transpose(np.vstack([left_fitx - self.margin, ploty]))])
        left_line_window2 = np.array([np.flipud(np.transpose(np.vstack([left_fitx + self.margin,
                                                                        ploty])))])
        left_line_pts = np.hstack((left_line_window1, left_line_window2))
        right_line_window1 = np.array([np.transpose(np.vstack([right_fitx - self.margin, ploty]))])
        right_line_window2 = np.array([np.flipud(np.transpose(np.vstack([right_fitx + self.margin,
                                                                         ploty])))])
        right_line_pts = np.hstack((right_line_window1, right_line_window2))
        # Draw the lane onto the warped blank image
        cv2.fillPoly(window_img, np.int_([left_line_pts]), (0, 255, 0))
        cv2.fillPoly(window_img, np.int_([right_line_pts]), (0, 255, 0))
        result = cv2.addWeighted(fit_img, 1, window_img, 0.3, 0)
        f, (ax1, ax2) = plt.subplots(1, 2, figsize=(20, 10))
        ax1.imshow(out_img)
        ax1.set_title('Detected Lane Points', fontsize=30)
        ax2.imshow(result)
        ax2.set_title('Lane Lines', fontsize=30)
        plt.plot(left_fitx, ploty, color='yellow')
        plt.plot(right_fitx, ploty, color='yellow')
        plt.xlim(0, 1280)
        plt.ylim(720, 0)
1763940 | <gh_stars>0
import numpy as np
import tensorflow as tf
# NOTE(review): this whole script uses the TensorFlow 1.x graph/session API
# (tf.Session, tf.placeholder, tf.assign); it does not run on TF 2.x without
# tf.compat.v1. The `numpy` import above is unused.

# ------ Say hello
hello = tf.constant('Hello, TensorFlow!')
sess = tf.Session()
print(sess.run(hello))

# ------ Assign the constants
node1 = tf.constant(3.0, dtype=tf.float32)
node2 = tf.constant(4.0) # also tf.float32 implicitly
print(node1, node2)
sess = tf.Session()
print(sess.run([node1, node2]))

# ------ Test the additional function
node3 = tf.add(node1, node2)
print("node3: ", node3)
print("sess.run(node3): ", sess.run(node3))

# ------ Define the variable without constant
n1 = tf.placeholder(tf.float32)
n2 = tf.placeholder(tf.float32)
adder_node = n1 + n2
print("adder_node: ", adder_node)
print("add: ", sess.run(adder_node, {n1: 5, n2: 7}))
print("add array: ", sess.run(adder_node, {n1: [1, 2], n2: [3, 4]}))

# ------ Multiple to 3
add_and_triple = adder_node * 3.
print(sess.run(add_and_triple, {n1: 3, n2:4.5}))

# ------ Initial Values
# Linear model W*x + b with trainable parameters, fitted below by gradient
# descent on a sum-of-squares loss.
W = tf.Variable([.3], dtype=tf.float32)
b = tf.Variable([-.3], dtype=tf.float32)
x = tf.placeholder(tf.float32)
linear_model = W * x + b
init = tf.global_variables_initializer()
sess.run(init)
print(sess.run(linear_model, {x:[1,2,3,4]}))
y = tf.placeholder(tf.float32)
squared_deltas = tf.square(linear_model - y)
loss = tf.reduce_sum(squared_deltas)
print(sess.run(loss, {x:[1,2,3,4], y:[0,-1,-2,-3]}))
file_writer = tf.summary.FileWriter('./logs/sample_adder_node', sess.graph)
# Manually assign the known-optimal parameters: loss becomes 0.
fixW = tf.assign(W, [-1.])
fixb = tf.assign(b, [1.])
sess.run([fixW, fixb])
print(sess.run(loss, {x:[1,2,3,4], y:[0,-1,-2,-3]}))
optimizer = tf.train.GradientDescentOptimizer(0.01)
train = optimizer.minimize(loss)
sess.run(init) # reset values to incorrect defaults.
for i in range(1000):
    sess.run(train, {x:[1,2,3,4], y:[0,-1,-2,-3]})
print(sess.run([W, b]))
| StarcoderdataPython |
1611654 | import os
import collections
import lamnfyc.context_managers
import lamnfyc.settings
import lamnfyc.packages.base
import subprocess
def three_two_installer(package, temp, env):
    """Build and install libffi 3.2.x into the environment prefix.

    Runs ``./configure && make && make install`` inside the unpacked
    ``libffi-<version>`` directory under `temp`.  `package` supplies the
    version; `env` is accepted for installer-signature compatibility.
    """
    with lamnfyc.context_managers.chdir(os.path.join(temp, 'libffi-{}'.format(package.version))):
        # Bug fix: the target was previously misspelled 'make instal', so
        # make aborted with "No rule to make target" and nothing was
        # installed into the prefix.
        command = './configure --prefix={} --enable-shared && make && make install'
        subprocess.call(command.format(lamnfyc.settings.environment_path), shell=True)
# Registry of installable libffi versions, keyed by version string.
VERSIONS = collections.OrderedDict()
VERSIONS['3.2.1'] = lamnfyc.packages.base.TarPacket('ftp://sourceware.org/pub/libffi/libffi-3.2.1.tar.gz',
                                                    installer=three_two_installer,
                                                    md5_signature='83b89587607e3eb65c70d361f13bab43')

# Stamp each packet with its package name and version.
# NOTE(review): dict.iteritems() exists only on Python 2 -- if this codebase
# targets Python 3 this must become .items(); confirm the supported
# interpreter before changing.
for version, item in VERSIONS.iteritems():
    item.name = 'libffi'
    item.version = version
| StarcoderdataPython |
3306546 | <reponame>fvaleye/metadata-guardian
from metadata_guardian.source import ColumnMetadata
def test_column_metadata():
    """as_list() must yield the column name followed by its comment."""
    name = "column_name"
    comment = "column_comment"
    metadata = ColumnMetadata(column_name=name, column_comment=comment)
    assert list(metadata.as_list()) == [name, comment]
| StarcoderdataPython |
1714878 | # -*- coding: utf-8 -*-
from xml.etree import ElementTree
import sys
import csv
import subprocess
import os
def name_conversion(target):
    """Normalize a fully-qualified method signature to its short form.

    ``'hoge(foo.var.fuga, piyo)'`` becomes ``'hoge(fuga,piyo)'``: each
    parameter keeps only the final component after ``.`` (package path) and
    ``$`` (inner-class separator), with all spaces removed.

    Returns ``'no_name'`` for ``None`` and the input unchanged when it
    contains no ``'('`` at all.
    """
    if target is None:  # was `== None`; identity test is the correct idiom
        return 'no_name'
    try:
        paren = target.index("(")
    except ValueError:
        # No parameter list at all: return the raw name untouched.
        return target
    name = target[:paren]
    parameters = target[paren + 1:-1]
    if parameters == '()':
        return name + '()'
    shortened = []
    for param in parameters.split(','):
        param = param.replace(' ', '')   # drop ALL spaces, not just the edges
        param = param.split('.')[-1]     # strip package/owner path
        param = param.split('$')[-1]     # strip Java inner-class prefix
        shortened.append(param)
    return name + '(' + ','.join(shortened) + ')'
# --- Command-line driver -------------------------------------------------
# Usage: script.py <dependency-xml> <output-csv>
# Walks a package/class/feature/outbound XML dependency report and writes
# one '|'-separated row per (package, class, feature, outbound dependency).
argvs = sys.argv
xml_file = argvs[1]
output_file = argvs[2]

# Parse the input before creating the output file, so a bad XML file does
# not leave an empty CSV behind.
tree = ElementTree.parse(xml_file)
root = tree.getroot()

# `with` guarantees the CSV is closed/flushed even if a row write fails
# (the previous version only called f.close() on the happy path).  The
# dead triple-quoted copy of name_conversion that lived inside the loop
# has been removed.
with open(output_file, 'w') as f:
    csvWriter = csv.writer(f, lineterminator='\n', delimiter='|')
    for e_package in root.findall('package'):
        e_package_name = e_package.find('name').text
        if e_package_name is None:
            e_package_name = 'no_name_package'
        for e_class in e_package.findall('class'):
            e_class_name = e_class.find('name').text
            if e_class_name is None:
                e_class_name = 'no_class_name'
            for e_feature in e_class.findall('feature'):
                e_feature_name = name_conversion(e_feature.find('name').text)
                outbound_list = [name_conversion(outbound.text)
                                 for outbound in e_feature.findall('outbound')]
                if not outbound_list:
                    # Features without outbound dependencies still get a row.
                    csvWriter.writerow([e_package_name, e_class_name, e_feature_name, ""])
                else:
                    for outbound in outbound_list:
                        csvWriter.writerow([e_package_name, e_class_name, e_feature_name, outbound])
| StarcoderdataPython |
67406 | #!/usr/bin/env python
# encoding: utf-8
class Solution:
    def singleNumber(self, nums: List[int]) -> int:
        """Return the one element of `nums` that appears exactly once.

        XOR is commutative, associative and self-inverse (a ^ a == 0,
        a ^ 0 == a), so folding every element through XOR cancels the
        paired values and leaves only the unique one.
        """
        result = 0
        for value in nums:
            result = result ^ value
        return result
| StarcoderdataPython |
1726720 | # -*- coding: utf-8 -*-
import scrapy
from scrapy_demo.items import AosunItem
class AosunSpider(scrapy.Spider):
    """Spider that pages through the aosun.cloud article-list JSON API."""
    # Spider name: identifies this spider and must be unique across spiders.
    name = 'aosun_spider'
    # Only URLs under these domains are crawled; anything else is dropped.
    allowed_domains = ['aosun.cloud']
    custom_settings = {
        'ITEM_PIPELINES': {'scrapy_demo.pipelines.AosunPipeline': 303},
    }
    # Paging state for the POST body; `page` is mutated as pages are fetched.
    form_data = {
        'page': '1',
        'rows': '4',
        'isPrivate': 'false'
    }
    total = int(form_data['page']) * int(form_data['rows'])
    base_url = 'http://aosun.cloud/api/article/getArticleList'
    # Initial URL(s); follow-up page requests are generated from parse().
    start_urls = [base_url]
    # headers = {
    #     'Host': 'aosun.cloud',
    #     'Origin': 'http://aosun.cloud',
    #     'Referer': 'http://aosun.cloud/',
    #     'User-Agent:': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/86.0.4240.183 Safari/537.36',
    #     'Content-Type': 'application/x-www-form-urlencoded',
    #     'Content-Length': '1149',
    #     'Connection': 'keep-alive',
    #     'Accept': 'application/json, text/plain, */*',
    #     'Accept-Encoding': 'gzip, deflate',
    #     'Accept-Language': 'zh-CN,zh;q=0.9'
    # }
    def start_requests(self):
        # Issue the listing request as a POST carrying the paging form data.
        for url in self.start_urls:
            yield scrapy.FormRequest(url=url, method='POST', formdata=self.form_data, callback=self.parse)

    def parse(self, response):
        # Callback: turn each JSON article into an AosunItem, then request
        # the next page until `page * rows` reaches the reported total.
        total = response.json()['total']
        articles = response.json()['info']
        for article in articles:
            item = AosunItem()
            item['id'] = article['id']
            item['title'] = article['title']
            item['modifyrq'] = article['modifyrq']
            item['publish_time'] = article['publishTime']
            item['info'] = article['info']
            item['views'] = article['views']
            item['type_id'] = article['typeId']
            item['is_private'] = article['isPrivate']
            item['state'] = article['state']
            item['info_text'] = article['infoText']
            item['menu_info'] = article['menuInfo']
            item['type'] = article['type']
            yield item
        if int(self.form_data['page']) * int(self.form_data['rows']) < total:
            self.form_data['page'] = str(int(self.form_data['page']) + 1)
            yield scrapy.FormRequest(url=self.start_urls[0], method='POST', formdata=self.form_data, callback=self.parse)
143870 | """
Load volumes into vpv from a toml config file. Just load volumes and no overlays
Examples
--------
Example toml file
orientation = 'sagittal'
[top]
specimens = [
'path1.nrrd',
'path2.nrrd',
'path3.nrrd']
[bottom]
specimens = [
'path1.nrrd',
'path2.nrrd',
'path3.nrrd']
"""
import sys
from pathlib import Path
from itertools import chain
import toml
from PyQt5 import QtGui
from vpv.vpv import Vpv
from vpv.common import Layers
from typing import Dict
def load(config: Dict):
    """Load top/bottom rows of volumes into a VPV window.

    `config` is the parsed TOML described in the module docstring
    ('orientation' plus [top]/[bottom] specimen path lists).  Starts the Qt
    event loop, so this call blocks and exits the process when the window
    is closed.
    """
    top_vols = config['top']['specimens']
    bottom = config['bottom']['specimens']
    if bottom:
        bottom_vols = config['bottom']['specimens']
    else:
        # Only the top row of views will be visible.  (The unused
        # bottom_specs/bottom_labels locals of the old version are gone.)
        bottom_vols = []

    app = QtGui.QApplication([])
    ex = Vpv()

    # Volume paths may be pathlib.Path objects; VPV wants strings.
    p2s = lambda x: [str(z) for z in x]
    ex.load_volumes(chain(p2s(top_vols), p2s(bottom_vols)), 'vol')

    # Set the top row of views
    for i in range(3):
        try:
            vol_id = Path(top_vols[i]).stem
            ex.views[i].layers[Layers.vol1].set_volume(vol_id)
        except IndexError:
            continue

    if bottom:
        # Set the bottom row of views (views 3-5)
        for i in range(3):
            try:
                vol_id = Path(bottom_vols[i]).stem
                ex.views[i + 3].layers[Layers.vol1].set_volume(vol_id)
            except IndexError:
                continue

    print('Finished loading')

    # Show two rows only when a bottom row was configured
    ex.data_manager.show2Rows(True if bottom else False)

    # Set orientation
    ex.data_manager.on_orientation(config['orientation'])

    sys.exit(app.exec_())
if __name__ == '__main__':
    # Usage: script.py <config.toml>
    file_ = sys.argv[1]
    config = toml.load(file_)
    load(config)
3393506 | <filename>Assignment_Ch03-3.py<gh_stars>1-10
print("Enter [R]ock, [P]aper, or [S]cissor")
player1 = input("Player 1: ")
print("Enter [R]ock, [P]aper, or [S]cissor")
player2 = input("Player 2: ")
try:
validInput = ["r", "R", "p", "P", "s", "S"]
if player1 not in validInput:
raise
elif player2 not in validInput:
raise
except Exception:
print("Invalid Input")
quit()
if player1 == "r" or player1 == "R":
if player2 == "r" or player2 == "R":
print("Nobody WINS!")
elif player2 == "p" or player2 == "P":
print("Paper covers rock.\nPlayer 2 WINS!")
elif player2 == "s" or player2 == "S":
print("Rock smashes scissor.\nPlayer 1 WINS")
elif player1 == "p" or player1 == "P":
if player2 == "p" or player2 == "P":
print("Nobody WINS!")
elif player2 == "r" or player2 == "R":
print("Paper covers rock.\nPlayer 1 WINS!")
elif player2 == "s" or player2 == "S":
print("Scissors cut paper.\nPlayer 2 WINS")
elif player1 == "s" or player1 == "S":
if player2 == "s" or player2 == "S":
print("Nobody WINS!")
elif player2 == "r" or player2 == "R":
print("Rock smashes scissor.\nPlayer 1 WINS!")
elif player2 == "p" or player2 == "P":
print("Paper covers rock.\nPlayer 2 WINS")
| StarcoderdataPython |
4842297 | from pathlib import Path
from typing import List
from qaz.managers import git, shell
from qaz.modules.base import Module
class Vim(Module):
    """qaz module that manages vim and its Vundle plugin bundle."""
    name = "vim"

    # Configuration files
    zshrc_file = None
    symlinks = {".vimrc": "~"}

    # Other
    vscode_extensions: List[str] = []

    @classmethod
    def install_action(cls):
        """Clone Vundle into ~/.vim/bundle and install the configured plugins."""
        repo_path = Path.home() / ".vim/bundle/Vundle.vim"
        git.clone(
            repo_url="https://github.com/VundleVim/Vundle.vim.git", repo_path=repo_path
        )
        shell.run("vim +PluginInstall +qall")

    @classmethod
    def upgrade_action(cls):
        """Pull the latest Vundle and re-run the plugin installation."""
        repo_path = Path.home() / ".vim/bundle/Vundle.vim"
        git.pull(repo_path)
        shell.run("vim +PluginInstall +qall")
| StarcoderdataPython |
3321222 | """
Unit tests for `apetest.robots`.
"""
from logging import ERROR, INFO, WARNING, getLogger
from typing import Iterable, Mapping, Sequence, Tuple
from pytest import LogCaptureFixture, mark
from apetest.robots import (
lookup_robots_rules,
parse_robots_txt,
path_allowed,
scan_robots_txt,
)
logger = getLogger(__name__)
# Modified example from the Internet-Draft.
EXAMPLE_LINES = """
User-agent: unhipbot
Disallow: /
User-agent: webcrawler
User-agent: excite # comment
Disallow:
User-agent: *
Disallow: /org/plans.html
Allow: /org/
Allow: /serv
# Comment-only lines do not end record.
Allow: /~mak
Disallow: /
""".split(
"\n"
)
EXAMPLE_RECORDS = [
[(2, "user-agent", "unhipbot"), (3, "disallow", "/")],
[(5, "user-agent", "webcrawler"), (6, "user-agent", "excite"), (7, "disallow", "")],
[
(9, "user-agent", "*"),
(10, "disallow", "/org/plans.html"),
(11, "allow", "/org/"),
(12, "allow", "/serv"),
(14, "allow", "/~mak"),
(15, "disallow", "/"),
],
]
EXAMPLE_MAP: Mapping[str, Iterable[Tuple[bool, str]]] = {
"*": [
(False, "/org/plans.html"),
(True, "/org/"),
(True, "/serv"),
(True, "/~mak"),
(False, "/"),
],
"unhipbot": [(False, "/")],
"webcrawler": [],
"excite": [],
}
@mark.parametrize(
    "lines",
    (
        [],
        [""],
        ["", ""],
        [" ", "\t"],
        ["#comment"],
    ),
)
def test_scan_robots_empty(lines: Sequence[str], caplog: LogCaptureFixture) -> None:
    """Test scanning of files that contain no records."""
    with caplog.at_level(INFO, logger=__name__):
        assert list(scan_robots_txt(lines, logger)) == []
    # Empty/comment-only input must not produce any log output.
    assert not caplog.records
def test_scan_robots_example(caplog: LogCaptureFixture) -> None:
    """Test scanning of example file."""
    with caplog.at_level(INFO, logger=__name__):
        assert list(scan_robots_txt(EXAMPLE_LINES, logger)) == EXAMPLE_RECORDS
    # A well-formed file must scan without warnings or errors.
    assert not caplog.records
def test_scan_robots_warn_leading_whitespace(caplog: LogCaptureFixture) -> None:
    """Test scanning of files with leading whitespace."""
    with caplog.at_level(INFO, logger=__name__):
        # The indented line is still scanned, but a warning is logged.
        assert list(
            scan_robots_txt(
                [
                    # Whitespace before field
                    " User-agent: *",
                    "Disallow: /",
                ],
                logger,
            )
        ) == [
            [(1, "user-agent", "*"), (2, "disallow", "/")],
        ]
    assert caplog.record_tuples == [
        ("test_robots", WARNING, "Line 1 has whitespace before field")
    ]
def test_scan_robots_error_missing_colon(caplog: LogCaptureFixture) -> None:
    """Test scanning of files with missing colon."""
    with caplog.at_level(INFO, logger=__name__):
        # The malformed line is dropped; the surrounding record survives.
        assert list(
            scan_robots_txt(
                [
                    # Non-empty line without ":"
                    "User-agent: *",
                    "Foo",
                    "Disallow: /",
                ],
                logger,
            )
        ) == [
            [(1, "user-agent", "*"), (3, "disallow", "/")],
        ]
    assert caplog.record_tuples == [
        ("test_robots", ERROR, 'Line 2 contains no ":"; ignoring line')
    ]
def test_parse_robots_empty(caplog: LogCaptureFixture) -> None:
    """Test parsing of empty record set."""
    with caplog.at_level(INFO, logger=__name__):
        assert parse_robots_txt((), logger) == {}
    # No records means nothing to complain about in the log.
    assert not caplog.records
def test_parse_robots_example(caplog: LogCaptureFixture) -> None:
    """Test parsing of example records."""
    with caplog.at_level(INFO, logger=__name__):
        assert parse_robots_txt(EXAMPLE_RECORDS, logger) == EXAMPLE_MAP
    # Well-formed records must parse without log output.
    assert not caplog.records
def test_parse_robots_unknown(caplog: LogCaptureFixture) -> None:
    """Test handling of unknown fields."""
    records = [[(1, "user-agent", "*"), (2, "foo", "bar"), (3, "disallow", "/")]]
    with caplog.at_level(INFO, logger=__name__):
        # The unknown "foo" field is skipped; the known rules are kept.
        assert parse_robots_txt(records, logger) == {"*": [(False, "/")]}
    assert caplog.record_tuples == [
        ("test_robots", INFO, 'Unknown field "foo" (line 2)')
    ]
def test_parse_robots_user_argent_after_rules(caplog: LogCaptureFixture) -> None:
    """Test handling of user agents specified after rules.

    A user-agent line that follows disallow rules is treated as the
    start of a new record, and an error is logged.
    """
    # NOTE(review): "argent" in the function name looks like a typo for
    # "agent"; kept as-is so the test ID does not change.
    records = [
        [
            (1, "user-agent", "smith"),
            (2, "disallow", "/m"),
            (3, "user-agent", "bender"),
            (4, "disallow", "/casino"),
        ]
    ]
    with caplog.at_level(INFO, logger=__name__):
        assert parse_robots_txt(records, logger) == {
            "smith": [(False, "/m")],
            "bender": [(False, "/casino")],
        }
    assert caplog.record_tuples == [
        (
            "test_robots",
            ERROR,
            "Line 3 specifies user agent after rules; assuming new record",
        )
    ]
def test_parse_robots_rules_before_user_agent(caplog: LogCaptureFixture) -> None:
    """Test handling of rules specified before user agent.

    A rule with no preceding user-agent line is dropped with an error;
    the agents named later only receive the rules that follow them.
    """
    records = [
        [
            (1, "disallow", "/m"),
            (2, "user-agent", "smith"),
            (3, "user-agent", "bender"),
            (4, "disallow", "/casino"),
        ]
    ]
    with caplog.at_level(INFO, logger=__name__):
        assert parse_robots_txt(records, logger) == {
            "smith": [(False, "/casino")],
            "bender": [(False, "/casino")],
        }
    assert caplog.record_tuples == [
        (
            "test_robots",
            ERROR,
            "Line 1 specifies disallow rule without a preceding user agent line; "
            "ignoring line",
        )
    ]
def test_parse_robots_duplicate_user_agent(caplog: LogCaptureFixture) -> None:
    """Test handling of multiple rules for the same user agent.

    Only the first record addressing a user agent is kept; a later
    record for the same agent is ignored with an error.
    """
    records = [
        [(1, "user-agent", "smith"), (2, "disallow", "/m2")],
        [
            (3, "user-agent", "smith"),
            (4, "disallow", "/m3"),
        ],
    ]
    with caplog.at_level(INFO, logger=__name__):
        assert parse_robots_txt(records, logger) == {"smith": [(False, "/m2")]}
    assert caplog.record_tuples == [
        (
            "test_robots",
            ERROR,
            'Line 3 specifies user agent "smith", which was already addressed '
            "in an earlier record; ignoring new record",
        ),
    ]
def test_parse_robots_unescape_valid(caplog: LogCaptureFixture) -> None:
    """Test unescaping of correctly escaped paths.

    Percent-escapes are decoded, including multi-byte UTF-8 sequences.
    An escaped slash ("%2f") is left encoded so it cannot be confused
    with a path separator.
    """
    records = [
        [
            (1, "user-agent", "*"),
            (2, "disallow", "/a%3cd.html"),
            (3, "disallow", "/%7Ejoe/"),
            (4, "disallow", "/a%2fb.html"),
            (5, "disallow", "/%C2%A2"),
            (6, "disallow", "/%e2%82%ac"),
            (7, "disallow", "/%F0%90%8d%88"),
        ]
    ]
    with caplog.at_level(INFO, logger=__name__):
        assert parse_robots_txt(records, logger) == {
            "*": [
                (False, "/a<d.html"),
                (False, "/~joe/"),
                (False, "/a%2fb.html"),  # %2f deliberately stays escaped
                (False, "/\u00A2"),  # 2-byte UTF-8 sequence
                (False, "/\u20AC"),  # 3-byte UTF-8 sequence
                (False, "/\U00010348"),  # 4-byte UTF-8 sequence
            ]
        }
    assert not caplog.records
# Each parameter pair is a malformed percent-escaped path plus the exact
# reason string expected in the logged error.
@mark.parametrize(
    "bad_path, reason",
    (
        ("/%", 'incomplete escape, expected 2 characters after "%"'),
        ("/%1", 'incomplete escape, expected 2 characters after "%"'),
        ("/%1x", 'incorrect escape: expected 2 hex digits after "%", got "1x"'),
        ("/%-3", 'incorrect escape: expected 2 hex digits after "%", got "-3"'),
        (
            "/%80",
            "invalid percent-encoded UTF8: expected 0xC0..0xF7 for first byte, "
            "got 0x80",
        ),
        (
            "/%e2%e3",
            "invalid percent-encoded UTF8: expected 0x80..0xBF for non-first byte, "
            "got 0xE3",
        ),
        ("/%e2%82", "incomplete escaped UTF8 character, expected 1 more escaped bytes"),
        ("/%e2%82%a", 'incomplete escape, expected 2 characters after "%"'),
        (
            "/%e2%82ac",
            "incomplete escaped UTF8 character, expected 1 more escaped bytes",
        ),
    ),
)
def test_parse_robots_unescape_invalid(
    bad_path: str, reason: str, caplog: LogCaptureFixture
) -> None:
    """Test handling of incorrect escaped paths.

    A rule with a malformed percent-escape is dropped with an error
    explaining the problem; valid rules in the same record are kept.
    """
    with caplog.at_level(INFO, logger=__name__):
        assert (
            parse_robots_txt(
                [
                    [
                        (1, "user-agent", "*"),
                        (2, "disallow", bad_path),
                        (3, "allow", "/good"),
                    ]
                ],
                logger,
            )
            == {"*": [(True, "/good")]}
        )
    assert caplog.record_tuples == [
        ("test_robots", ERROR, f"Bad escape in disallow URL on line 2: {reason}")
    ]
@mark.parametrize(
    "name, entry",
    (
        # Exact match.
        ("excite", "excite"),
        # Prefix match.
        ("web", "webcrawler"),
        # Case-insensitive match.
        ("UnHipBot", "unhipbot"),
        # Default.
        ("unknown-bot", "*"),
    ),
)
def test_parse_robots_lookup(name: str, entry: str) -> None:
    """Test lookup of rules for a specific user agent.

    Lookup matches exactly, by prefix, and case-insensitively, and
    falls back to the "*" wildcard entry for unknown agents.
    """
    assert lookup_robots_rules(EXAMPLE_MAP, name) == EXAMPLE_MAP[entry]
# `expected` is the verdict for the wildcard ("*") agent; per the
# example map, "excite" is always allowed and "unhipbot" never is.
@mark.parametrize(
    "path, expected",
    (
        ("/", False),
        ("/index.html", False),
        ("/server.html", True),
        ("/services/fast.html", True),
        ("/orgo.gif", False),
        ("/org/about.html", True),
        ("/org/plans.html", False),
        ("/~jim/jim.html", False),
        ("/~mak/mak.html", True),
    ),
)
def test_parse_robots_match_path(path: str, expected: bool) -> None:
    """Test the `path_allowed` function."""
    assert path_allowed(path, EXAMPLE_MAP["excite"])
    assert not path_allowed(path, EXAMPLE_MAP["unhipbot"])
    assert path_allowed(path, EXAMPLE_MAP["*"]) == expected
| StarcoderdataPython |
1732136 |
from PyQt5.QtWidgets import QApplication, QMainWindow, QLabel
class MyMainWindow(QMainWindow):
    """Main window demonstrating absolute positioning of QLabel widgets."""

    def __init__(self):
        # Fix: the original called super(QMainWindow, self).__init__(),
        # which starts the MRO lookup *after* QMainWindow and therefore
        # skips QMainWindow's own initialiser.  Zero-argument super()
        # resolves from MyMainWindow and runs the full chain.
        super().__init__()
        # Window title and geometry (x, y, width, height in pixels).
        self.title = 'PyQt5 Absolute positioning example'
        self.left = 100
        self.top = 100
        self.width = 640
        self.height = 480
        self.init_ui()

    def init_ui(self):
        """Configure the window and place labels at fixed pixel positions."""
        self.setWindowTitle(self.title)
        self.setGeometry(self.left, self.top, self.width, self.height)
        label1 = QLabel('python', self)
        label1.move(50, 50)
        label2 = QLabel('PyQt5', self)
        label2.move(100, 100)
        label3 = QLabel('Example', self)
        label3.move(150, 150)
        label4 = QLabel('pythonspot.com', self)
        label4.move(200, 200)
if __name__ == '__main__':
    import sys

    # Standard PyQt5 bootstrap: create the application, show the main
    # window and hand control to the Qt event loop until it exits.
    app = QApplication(sys.argv)
    w = MyMainWindow()
    w.show()
    sys.exit(app.exec_())
| StarcoderdataPython |
1635528 | <reponame>coocos/advent-of-code-2018<filename>day9.py
from collections import defaultdict, deque
from typing import DefaultDict
def play(player_count: int, last_marble: int) -> int:
    """Play the marble game and return the highest player score.

    Marbles 1..last_marble are placed in turn.  A marble that is not a
    multiple of 23 is inserted two positions clockwise of the current
    marble; a multiple of 23 is kept by the player, who also removes
    and keeps the marble seven positions counter-clockwise.

    The circle is a ``collections.deque`` rotated so that the current
    marble is always the rightmost element, making every insertion and
    removal O(1) instead of the O(n) ``list.insert``/``del`` used by
    the previous implementation.

    Raises ValueError (from ``max`` on an empty dict) when
    ``last_marble`` is 0, matching the original behaviour.
    """
    scores: DefaultDict[int, int] = defaultdict(int)
    circle = deque([0])
    for marble in range(1, last_marble + 1):
        if marble % 23 != 0:
            # Step one marble clockwise, then append so the new marble
            # lands two positions after the previous current marble.
            circle.rotate(-1)
            circle.append(marble)
        else:
            # Scoring turn: the player keeps this marble plus the one
            # seven positions counter-clockwise; the marble after the
            # removed one becomes the new current marble.
            circle.rotate(7)
            scores[marble % player_count] += marble + circle.pop()
            circle.rotate(-1)
    return max(scores.values())
if __name__ == '__main__':
    # Known answers: the small cases are the published puzzle examples;
    # the last two correspond to input 427 players / 70723 marbles and
    # the same input with 100x marbles.
    assert play(9, 25) == 32
    assert play(10, 1618) == 8317
    assert play(13, 7999) == 146373
    assert play(17, 1104) == 2764
    assert play(21, 6111) == 54718
    assert play(30, 5807) == 37305
    assert play(427, 70723) == 399745
    assert play(427, 70723 * 100) == 3349098263
| StarcoderdataPython |
185354 | from rstem.mcpi import minecraft, control, block
import time
from random import randint
# Show the Minecraft window now and hide it again when the script exits.
control.show(hide_at_exit=True)
mc = minecraft.Minecraft.create()

# The gold block is hidden within this many tiles of the player (x/z).
ARENA_WIDTH = 3
# How many blocks below the surface to bury the gold (0 = on surface).
GOLD_DEPTH = 0

# Pick a random column near the player and place a gold block at the
# terrain surface height of that column.
gold_pos = mc.player.getTilePos()
gold_pos.x += randint(-ARENA_WIDTH, ARENA_WIDTH)
gold_pos.z += randint(-ARENA_WIDTH, ARENA_WIDTH)
gold_pos.y = mc.getHeight(gold_pos.x, gold_pos.z) - GOLD_DEPTH
mc.setBlock(gold_pos, block.GOLD_BLOCK)

# Poll until the block at gold_pos stops being gold (i.e. the player
# mined it).
gold_exists = True
while gold_exists:
    player_pos = mc.player.getTilePos()
    gold_exists = block.Block(mc.getBlock(gold_pos)) == block.GOLD_BLOCK
    time.sleep(0.01)

mc.postToChat("You found the gold!")
time.sleep(3)
| StarcoderdataPython |
48539 | from corehq.apps.commtrack.helpers import make_supply_point
from corehq.apps.commtrack.tests.util import CommTrackTest, make_loc
from corehq.apps.commtrack.const import DAYS_IN_MONTH
from corehq.apps.locations.models import Location
from corehq.apps.locations.bulk import import_location
from mock import patch
from corehq.apps.consumption.shortcuts import get_default_consumption
from corehq.apps.commtrack.models import Product
class LocationImportTest(CommTrackTest):
    """Integration tests for ``import_location`` bulk-import behaviour."""

    def setUp(self):
        # set up a couple locations that make tests a little more DRY
        self.test_state = make_loc('sillyparentstate', type='state')
        self.test_village = make_loc('sillyparentvillage', type='village')
        return super(LocationImportTest, self).setUp()

    def names_of_locs(self):
        """Return the names of every location currently in the test domain."""
        return [loc.name for loc in Location.by_domain(self.domain.name)]

    def test_import_new_top_level_location(self):
        """A bare name creates a new top-level location."""
        data = {
            'name': 'importedloc'
        }
        import_location(self.domain.name, 'state', data)
        self.assertTrue(data['name'] in self.names_of_locs())

    def test_import_with_existing_parent_by_site_code(self):
        """`parent_site_code` attaches the new location under that parent."""
        data = {
            'name': 'importedloc',
            'parent_site_code': self.test_state.site_code
        }
        result = import_location(self.domain.name, 'district', data)
        if result['id'] is None:
            self.fail('import failed with error: %s' % result['message'])
        self.assertTrue(data['name'] in self.names_of_locs())
        new_loc = Location.get(result['id'])
        self.assertEqual(new_loc.parent_id, self.test_state._id)

    def test_id_of_invalid_parent_type(self):
        """An invalid parent/child type pairing fails and creates nothing."""
        # state can't have outlet as child
        data = {
            'name': 'oops',
            'parent_site_code': self.test_state.site_code
        }
        original_count = len(list(Location.by_domain(self.domain.name)))
        result = import_location(self.domain.name, 'village', data)
        self.assertEqual(result['id'], None)
        self.assertEqual(len(list(Location.by_domain(self.domain.name))), original_count)
        self.assertTrue('Invalid parent type' in result['message'])

    def test_invalid_parent_site_code(self):
        """An unknown parent site code is reported in the error message."""
        data = {
            'name': 'oops',
            'parent_site_code': 'banana'
        }
        result = import_location(self.domain.name, 'district', data)
        self.assertTrue(
            'Parent with site code banana does not exist' in result['message'],
            result['message']
        )

    def test_invalid_parent_domain(self):
        """A parent that belongs to a different domain is rejected."""
        parent = make_loc('someparent', domain='notright', type='village')
        data = {
            'name': '<NAME>',
            'outlet_type': 'SHG',
            'site_code': 'wat',
            'parent_site_code': parent.site_code,
        }
        original_count = len(list(Location.by_domain(self.domain.name)))
        result = import_location(self.domain.name, 'outlet', data)
        self.assertEqual(result['id'], None)
        self.assertEqual(len(list(Location.by_domain(self.domain.name))), original_count)
        self.assertTrue('does not exist in this project' in result['message'])

    def test_change_parent(self):
        """Re-importing with a new parent site code moves the location."""
        parent = make_loc('originalparent', type='village')
        existing = make_loc('existingloc', type='outlet', parent=parent)
        new_parent = make_loc('new parent', type='village')
        self.assertNotEqual(parent._id, new_parent._id)
        data = {
            'site_code': existing.site_code,
            'name': existing.name,
            'outlet_type': 'SHG',
            'parent_site_code': new_parent.site_code,
        }
        result = import_location(self.domain.name, 'outlet', data)
        new_loc = Location.get(result['id'])
        self.assertEqual(existing._id, new_loc._id)
        self.assertEqual(new_loc.parent_id, new_parent._id)

    def test_change_to_invalid_parent(self):
        """Moving under a type-incompatible parent fails, keeping the old parent."""
        parent = make_loc('original parent', type='village')
        existing = make_loc('existingloc1', type='outlet', parent=parent)
        new_parent = make_loc('new parent', type='state')
        data = {
            'site_code': existing.site_code,
            'name': existing.name,
            'outlet_type': 'SHG',
            'parent_site_code': new_parent.site_code,
        }
        result = import_location(self.domain.name, 'outlet', data)
        self.assertEqual(None, result['id'])
        self.assertTrue('Invalid parent type' in result['message'])
        new_loc = Location.get(existing._id)
        self.assertEqual(existing._id, new_loc._id)
        self.assertEqual(new_loc.parent_id, parent._id)

    def test_updating_existing_location_properties(self):
        """Re-importing an existing site code updates its properties in place."""
        existing = make_loc('existingloc2', type='state', domain=self.domain.name)
        existing.save()
        data = {
            'site_code': existing.site_code,
            'name': 'new_name',
        }
        self.assertNotEqual(existing.name, data['name'])
        result = import_location(self.domain.name, 'state', data)
        loc_id = result.get('id', None)
        self.assertIsNotNone(loc_id, result['message'])
        new_loc = Location.get(loc_id)
        self.assertEqual(existing._id, loc_id)
        self.assertEqual(new_loc.name, data['name'])

    def test_given_id_matches_type(self):
        """A site code may not be re-imported under a different location type."""
        existing = make_loc('existingloc', type='state')
        data = {
            'site_code': existing.site_code,
            'name': 'new_name',
        }
        result = import_location(self.domain.name, 'outlet', data)
        self.assertEqual(result['id'], None)
        self.assertTrue('Existing location type error' in result['message'])

    def test_shouldnt_save_if_no_changes(self):
        """An import that changes nothing must not call LocationForm.save."""
        existing = make_loc('existingloc', type='outlet', parent=self.test_village)
        existing.site_code = 'wat'
        existing.outlet_type = 'SHG'
        existing.save()
        data = {
            'site_code': existing.site_code,
            'name': existing.name,
            'outlet_type': 'SHG',
        }
        with patch('corehq.apps.locations.forms.LocationForm.save') as save:
            result = import_location(self.domain.name, 'outlet', data)
            self.assertEqual(save.call_count, 0)
            self.assertEqual(result['id'], existing._id)

    def test_should_still_save_if_name_changes(self):
        # name isn't a dynamic property so should test these still
        # get updated alone
        existing = make_loc('existingloc', type='outlet', parent=self.test_village)
        existing.site_code = 'wat'
        existing.outlet_type = 'SHG'
        existing.save()
        data = {
            'site_code': existing.site_code,
            'name': 'newname',
            'outlet_type': 'SHG',
        }
        with patch('corehq.apps.locations.forms.LocationForm.save') as save:
            result = import_location(self.domain.name, 'outlet', data)
            self.assertEqual(save.call_count, 1)
            # id isn't accurate because of the mock, but want to make
            # sure we didn't actually return with None
            self.assertTrue(result['id'] is not None)

    def test_should_import_consumption(self):
        """A `default_pp` value imports as a default consumption amount."""
        existing = make_loc('existingloc', type='state')
        sp = make_supply_point(self.loc.domain, existing)
        data = {
            'site_code': existing.site_code,
            'name': 'existingloc',
            'default_pp': 77
        }
        import_location(self.domain.name, 'state', data)
        self.assertEqual(
            float(get_default_consumption(
                self.domain.name,
                Product.get_by_code(self.domain.name, 'pp')._id,
                'state',
                sp._id,
            )),
            # stored per day: the imported monthly value divided by
            # DAYS_IN_MONTH
            77 / DAYS_IN_MONTH
        )

    def test_import_coordinates(self):
        """latitude/longitude values import onto the created location."""
        data = {
            'name': 'importedloc',
            'latitude': 55,
            'longitude': -55,
        }
        loc_id = import_location(self.domain.name, 'state', data)['id']
        loc = Location.get(loc_id)
        self.assertEqual(data['latitude'], loc.latitude)
        self.assertEqual(data['longitude'], loc.longitude)
| StarcoderdataPython |
1644377 | #!/usr/bin/python3
import argparse
import socket
# Forwarding defaults: UDP port 9 (commonly used for wake-on-lan) and
# the limited broadcast address.
DEFAULT_TARGET_PORT = 9
DEFAULT_TARGET_IP = '255.255.255.255'
def is_magic_packet(data):
    """
    Checks if a packet is a magic packet, returns
    True or False.

    Args:
        data (bytes): the payload from a packet
    """
    # Work on the lowercase hex representation of the payload.
    hexed = data.hex().lower()
    sync_stream = 'f' * 12
    # A magic packet opens with six 0xff bytes (the synchronization
    # stream).
    if not hexed.startswith(sync_stream):
        return False
    # The MAC address (12 hex characters) follows the sync stream and
    # must then appear 16 times in total.
    mac = hexed[12:24]
    expected = sync_stream + mac * 16
    if len(hexed) == len(expected):
        return hexed == expected
    # Otherwise allow a trailing SecureON password, which appends
    # another 12 hex characters to the packet.
    return hexed[:-12] == expected
def listen(listen_port,
           target_port=DEFAULT_TARGET_PORT,
           target_ip=DEFAULT_TARGET_IP):
    """
    Listens for activity on UDP port specified by listen_port and
    forwards WOL packets to (target_ip, target_port).

    Runs forever; packets that are not magic packets are silently
    discarded.

    Args:
        listen_port (int): UDP port to bind on all interfaces
        target_port (int): UDP port to forward magic packets to
        target_ip (str): address to forward magic packets to
    """
    s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
    # Bind on all interfaces.
    s.bind(('', listen_port))
    while True:
        # 1024 bytes is ample: a magic packet is at most 108 bytes
        # (102 plus an optional 6-byte SecureON password).
        data, _ = s.recvfrom(1024)
        if is_magic_packet(data):
            forward(data, target_ip, target_port)
def forward(data, ip, port):
    """
    Forwards the data to the specified IP address.

    Args:
        data (bytes): data from payload of the magic packet
        ip (str): target ip address
        port (int): target port number
    """
    # The context manager guarantees the socket is closed even if
    # connect()/send() raises; the original leaked the socket on error.
    with socket.socket(socket.AF_INET, socket.SOCK_DGRAM) as s:
        # allow it to broadcast (the default target is 255.255.255.255)
        s.setsockopt(socket.SOL_SOCKET, socket.SO_BROADCAST, 1)
        s.connect((ip, port))
        s.send(data)
def main(argv=None):
    """Parse command-line arguments and start the forwarding loop.

    Args:
        argv: argument list for argparse; None means use sys.argv[1:].
    """
    parser = argparse.ArgumentParser(
        description='Forward wake-on-lan packets to the '
        'broadcast IP address.'
    )
    parser.add_argument(
        'port',
        metavar='listen port',
        type=int,
        help='The UDP port number you want to listen to.'
    )
    parser.add_argument(
        '-p',
        metavar='forward port',
        default=DEFAULT_TARGET_PORT,
        dest='fp',
        type=int,
        help='The UDP port number you want to forward WOL '
        'packets to (default 9).'
    )
    parser.add_argument(
        '-i',
        metavar='forward IP',
        default=DEFAULT_TARGET_IP,
        dest='i',
        help='The IP address you want to forward WOL packets '
        'to (default 255.255.255.255).'
    )
    args = parser.parse_args(argv)
    # listen() blocks forever servicing packets.
    listen(listen_port=args.port, target_port=args.fp, target_ip=args.i)


if __name__ == '__main__':
    main()
| StarcoderdataPython |
1616731 | <reponame>Shiro-Nakamura/Hacking
import socket
# Address and port to listen on (loopback only).
HOST = '127.0.0.1'
PORT = 5000

s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.bind((HOST, PORT))
print("[+] start listen on {}".format((HOST,PORT)))

# Accept a single client connection.
s.listen(1)
conn, addr = s.accept()

# Greet the client, then echo back everything it sends.
banner = 'simple tcp server'
conn.send(banner.encode())
print("[+] connect from address: {}".format(addr))
while 1:
    data = conn.recv(1024)
    # recv() returns b'' once the peer closes the connection.
    if not data: break
    conn.send(data)

# NOTE(review): only the client connection is closed here; the
# listening socket `s` is never closed.
conn.close()
1788862 | from base import BaseTrain
from tqdm import trange
import numpy as np
import logging
import time
from utils.utils import denorm
import skimage.io
import os
os.environ['KMP_DUPLICATE_LIB_OK']='True'
# TODO: change tensorboard summaries to appropriate tags, generate sample of image and write to disk every _ epochs
class SAGANTrainer(BaseTrain):
    """Trainer for a Self-Attention GAN.

    Alternates discriminator and generator optimisation steps, while
    periodically checkpointing the model, writing generator samples to
    disk and logging losses via the provided logger.
    """

    def __init__(self, sess, model, config, logger):
        super(SAGANTrainer, self).__init__(sess, model, config, logger)
        # Directory where sampled generator images are written.
        self.config.sample_dir = 'SAGAN_sample'

    def train(self):
        """overrode default base function for custom function
        - logs total duration of training in seconds
        """
        tik = time.time()
        for it in trange(self.config.num_iter):
            g_loss, d_loss = self.train_step()
            # Checkpoint every `save_iter` iterations.
            if it % self.config.save_iter == 0:
                self.model.save(self.sess)
            # Dump generator samples every `sample_iter` iterations.
            if it % self.config.sample_iter == 0:
                images = self.sess.run([self.model.sample_image])
                for i, image in enumerate(images[0]):
                    image = denorm(np.squeeze(image))
                    sample_path = os.path.join(self.config.sample_dir, '{}-{}-sample.jpg'.format(i, it))
                    skimage.io.imsave(sample_path, image)
            # Log scalar losses every 100 iterations.
            if it % 100 == 0:
                summaries_dict = {}
                summaries_dict['g_loss'] = g_loss
                summaries_dict['d_loss'] = d_loss
                self.logger.summarize(it, summaries_dict=summaries_dict)
        tok = time.time()
        logging.info('Duration: {} seconds'.format(tok - tik))

    def train_epoch(self):
        # Training is driven directly by train(); per-epoch logic is
        # intentionally unused (see the retained draft below).
        pass

    # def train_epoch(self):
    #     """logging summary to tensorboard and executing training steps per epoch"""
    #     g_losses = []
    #     d_losses = []
    #     for it in trange(self.config.num_iter_per_epoch):
    #         cur_it = self.model.global_step_tensor.eval(self.sess)
    #         g_loss, d_loss = self.train_step(cur_it+it+1)
    #         g_losses.append(g_loss)
    #         d_losses.append(d_loss)
    #         if cur_it % self.config.save_iter == 0:
    #             self.model.save(self.sess)
    #         if cur_it % self.config.sample_iter == 0:
    #             image = self.sess.run([self.model.sample_image])
    #             image = denorm(np.squeeze(image))
    #             sample_path = os.path.join(self.config.sample_dir, '{}-{}sample.jpg'.format(cur_it, it))
    #             skimage.io.imsave(sample_path, image)
    #         if cur_it % 100 == 0:
    #             summaries_dict = {}
    #             summaries_dict['g_loss'] = g_loss
    #             summaries_dict['d_loss'] = d_loss
    #             self.logger.summarize(cur_it, summaries_dict=summaries_dict)
    #
    #     g_loss = np.mean(g_losses)
    #     d_loss = np.mean(d_losses)
    #
    #     cur_it = self.model.global_step_tensor.eval(self.sess)
    #
    #     summaries_dict = {}
    #     summaries_dict['average_epoch_g_loss'] = g_loss
    #     summaries_dict['average_epoch_d_loss'] = d_loss
    #     self.logger.summarize(cur_it, summaries_dict=summaries_dict)

    def train_step(self):
        """using `tf.data` API, so no feed-dict required"""
        # One discriminator update followed by one generator update.
        _, d_loss = self.sess.run([self.model.d_optim, self.model.d_loss])
        _, g_loss = self.sess.run([self.model.g_optim, self.model.g_loss])
        return g_loss, d_loss
| StarcoderdataPython |
1774375 | from .cardPoolItemConfig import CardPoolItem
from .cardPoolProbabilityConfig import CardPoolProbability
from .userInfo import UserInfo
| StarcoderdataPython |
3221473 | <reponame>xbabka01/retdec-regression-tests<filename>tools/fileinfo/bugs/sha1-hash-segfault/test.py
from regression_tests import *
class Test(Test):
    """Regression test for a fileinfo segfault while hashing (SHA1) the
    certificate table of this sample."""

    settings = TestSettings(
        tool='fileinfo',
        args='--json --verbose',
        input='08A2437BFE40329F132CD98CA1988CB13B0F32E8CE57FEBDD3CA0A6B641219D4.dat'
    )

    def test_has_certificate_table(self):
        """The first signature must expose five certificates and the
        expected Microsoft signer subject."""
        assert self.fileinfo.succeeded
        assert 'digitalSignatures' in self.fileinfo.output
        self.assertEqual(5, len(
            self.fileinfo.output['digitalSignatures']['signatures'][0]['allCertificates']))
        self.assertEqual('CN=Microsoft Corporation,O=Microsoft Corporation,L=Redmond,ST=Washington,C=US',
                         self.fileinfo.output['digitalSignatures']['signatures'][0]['signer']['chain'][0]['subject'])
| StarcoderdataPython |
1640391 | <gh_stars>0
"""
@Project : DuReader
@Module : tokenizer.py
@Author : Deco [<EMAIL>]
@Created : 5/22/18 5:00 PM
@Desc :
"""
from nltk.tokenize import word_tokenize
from nltk.tokenize import sent_tokenize
# import nltk
#
# nltk.download('punkt')
if __name__ == '__main__':
    # Word-tokenization examples (note the contraction handling).
    print(word_tokenize('Hello World.'))
    print(word_tokenize("this’s a test"))

    # Sentence-splitting example.
    text = ("this’s a sent tokenize test. this is sent two. "
            "is this sent three? sent 4 is cool! Now it’s your turn.")
    sent_tokenize_list = sent_tokenize(text)
    print(sent_tokenize_list)
| StarcoderdataPython |
1735847 | <reponame>MathiasSeguy-Android2EE/MigrationProjetsA2ee
#https://dl.google.com/dl/android/maven2/index.html
# (pattern, replacement) pairs mapping AndroidX/Jetpack version-variable
# names to the artifact versions they should be pinned to.
# NOTE(review): several keys appear more than once with different values
# (e.g. 'materialVersion', 'coreVersion'); if these pairs are applied
# sequentially, the later entry wins -- confirm that is intended.
findreplace = [
    ('materialVersion','2.0.0-rc01'),
    ('core_commonVersion','2.0.0-rc01'),
    ('coreVersion','2.0.0-rc01'),
    ('core_testingVersion','2.0.0-rc01'),
    ('core_runtimeVersion','2.0.0-rc01'),
    ('lifecycle_commonVersion','2.0.0-rc01'),
    ('lifecycle_common_java8Version','2.0.0-rc01'),
    ('lifecycle_compilerVersion','2.0.0-rc01'),
    ('lifecycle_extensionsVersion','2.0.0-rc01'),
    ('lifecycle_livedataVersion','2.0.0-rc01'),
    ('lifecycle_livedata_coreVersion','2.0.0-rc01'),
    ('lifecycle_reactivestreamsVersion','2.0.0-rc01'),
    ('lifecycle_runtimeVersion','2.0.0-rc01'),
    ('lifecycle_viewmodelVersion','2.0.0-rc01'),
    ('paging_commonVersion','2.0.0-rc01'),
    ('paging_runtimeVersion','2.0.0-rc01'),
    ('paging_rxjava2Version','2.0.0-rc01'),
    ('room_commonVersion','2.1.0-alpha07'),
    ('room_compilerVersion','2.1.0-alpha07'),
    ('room_guavaVersion','2.1.0-alpha07'),
    ('room_migrationVersion','2.1.0-alpha07'),
    ('room_runtimeVersion','2.1.0-alpha07'),
    ('room_rxjava2Version','2.1.0-alpha07'),
    ('room_testingVersion','2.1.0-alpha07'),
    ('sqliteVersion','2.0.0-rc01'),
    ('sqlite_frameworkVersion','2.0.0-rc01'),
    ('constraintlayoutVersion','1.1.2'),
    ('constraintlayout_solverVersion','1.1.2'),
    ('idling_concurrentVersion','3.1.0'),
    ('idling_netVersion','3.1.0'),
    ('espresso_accessibilityVersion','3.1.0'),
    ('espresso_contribVersion','3.1.0'),
    ('espresso_coreVersion','3.1.0'),
    ('espresso_idling_resourceVersion','3.1.0'),
    ('espresso_intentsVersion','3.1.0'),
    ('espresso_remoteVersion','3.1.0'),
    ('espresso_webVersion','3.1.0'),
    ('janktesthelperVersion','1.0.1'),
    ('test_servicesVersion','1.1.0'),
    ('uiautomatorVersion','2.2.0'),
    ('monitorVersion','1.1.0'),
    ('orchestratorVersion','1.1.0'),
    ('rulesVersion','1.1.0'),
    ('runnerVersion','1.1.0'),
    ('vectordrawable_animatedVersion','1.0.0'),
    ('appcompatVersion','1.0.0'),
    ('asynclayoutinflaterVersion','1.0.0'),
    ('cardviewVersion','1.0.0'),
    ('carVersion','1.0.0-alpha5'),
    ('collectionVersion','1.0.0'),
    ('coordinatorlayoutVersion','1.0.0'),
    ('cursoradapterVersion','1.0.0'),
    ('browserVersion','1.0.0'),
    ('customviewVersion','1.0.0'),
    ('materialVersion','1.0.0-rc01'),
    ('documentfileVersion','1.0.0'),
    ('drawerlayoutVersion','1.0.0'),
    ('exifinterfaceVersion','1.0.0'),
    ('gridlayoutVersion','1.0.0'),
    ('heifwriterVersion','1.0.0'),
    ('interpolatorVersion','1.0.0'),
    ('leanbackVersion','1.0.0'),
    ('loaderVersion','1.0.0'),
    ('localbroadcastmanagerVersion','1.0.0'),
    ('media2Version','1.0.0-alpha03'),
    ('media2_exoplayerVersion','1.0.0-alpha01'),
    ('mediarouterVersion','1.0.0'),
    ('multidexVersion','2.0.0'),
    ('multidex_instrumentationVersion','2.0.0'),
    ('paletteVersion','1.0.0'),
    ('percentlayoutVersion','1.0.0'),
    ('leanback_preferenceVersion','1.0.0'),
    ('legacy_preference_v14Version','1.0.0'),
    ('preferenceVersion','1.0.0'),
    ('printVersion','1.0.0'),
    ('recommendationVersion','1.0.0'),
    ('recyclerview_selectionVersion','1.0.0'),
    ('recyclerviewVersion','1.0.0'),
    ('slice_buildersVersion','1.0.0'),
    ('slice_coreVersion','1.0.0'),
    ('slice_viewVersion','1.0.0'),
    ('slidingpanelayoutVersion','1.0.0'),
    ('annotationVersion','1.0.0'),
    ('coreVersion','1.0.0'),
    ('contentpagerVersion','1.0.0'),
    ('legacy_support_core_uiVersion','1.0.0'),
    ('legacy_support_core_utilsVersion','1.0.0'),
    ('dynamicanimationVersion','1.0.0'),
    ('emojiVersion','1.0.0'),
    ('emoji_appcompatVersion','1.0.0'),
    ('emoji_bundledVersion','1.0.0'),
    ('fragmentVersion','1.0.0'),
    ('mediaVersion','1.0.0'),
    ('tvproviderVersion','1.0.0'),
    ('legacy_support_v13Version','1.0.0'),
    ('legacy_support_v4Version','1.0.0'),
    ('vectordrawableVersion','1.0.0'),
    ('swiperefreshlayoutVersion','1.0.0'),
    ('textclassifierVersion','1.0.0'),
    ('transitionVersion','1.0.0'),
    ('versionedparcelableVersion','1.0.0'),
    ('viewpagerVersion','1.0.0'),
    ('wearVersion','1.0.0'),
    ('webkitVersion','1.0.0'),
    ('over:over:over:choupy','job:is:done')
]
| StarcoderdataPython |
3302559 | <reponame>tombh/deis
# -*- coding: utf-8 -*-
"""
Django admin app configuration for Deis API models.
"""
from __future__ import unicode_literals
from django.contrib import admin
from guardian.admin import GuardedModelAdmin
from .models import App
from .models import Build
from .models import Config
from .models import Container
from .models import Flavor
from .models import Formation
from .models import Key
from .models import Layer
from .models import Node
from .models import Provider
from .models import Release
class AppAdmin(GuardedModelAdmin):
    """Set presentation options for :class:`~api.models.App` models
    in the Django admin.
    """
    # Drill down by creation date; list and filter on owner/formation.
    date_hierarchy = 'created'
    list_display = ('id', 'owner', 'formation')
    list_filter = ('owner', 'formation')
admin.site.register(App, AppAdmin)
class BuildAdmin(admin.ModelAdmin):
    """Set presentation options for :class:`~api.models.Build` models
    in the Django admin.
    """
    # Builds are identified by git sha, owner and app.
    date_hierarchy = 'created'
    list_display = ('sha', 'owner', 'app')
    list_filter = ('owner', 'app')
admin.site.register(Build, BuildAdmin)
class ConfigAdmin(admin.ModelAdmin):
    """Set presentation options for :class:`~api.models.Config` models
    in the Django admin.
    """
    # Configs are versioned per app.
    date_hierarchy = 'created'
    list_display = ('version', 'owner', 'app')
    list_filter = ('owner', 'app')
admin.site.register(Config, ConfigAdmin)
class ContainerAdmin(admin.ModelAdmin):
    """Set presentation options for :class:`~api.models.Container` models
    in the Django admin.
    """
    # Containers are listed by short name with their current status.
    date_hierarchy = 'created'
    list_display = ('short_name', 'owner', 'formation', 'app', 'status')
    list_filter = ('owner', 'formation', 'app', 'status')
admin.site.register(Container, ContainerAdmin)
class FlavorAdmin(admin.ModelAdmin):
    """Set presentation options for :class:`~api.models.Flavor` models
    in the Django admin.
    """
    # Flavors belong to a cloud provider.
    date_hierarchy = 'created'
    list_display = ('id', 'owner', 'provider')
    list_filter = ('owner', 'provider')
admin.site.register(Flavor, FlavorAdmin)
class FormationAdmin(admin.ModelAdmin):
    """Set presentation options for :class:`~api.models.Formation` models
    in the Django admin.
    """
    date_hierarchy = 'created'
    list_display = ('id', 'owner')
    list_filter = ('owner',)
admin.site.register(Formation, FormationAdmin)
class KeyAdmin(admin.ModelAdmin):
    """Set presentation options for :class:`~api.models.Key` models
    in the Django admin.
    """
    # __str__ shows the key itself alongside id and owner.
    date_hierarchy = 'created'
    list_display = ('id', 'owner', '__str__')
    list_filter = ('owner',)
admin.site.register(Key, KeyAdmin)
class LayerAdmin(admin.ModelAdmin):
    """Set presentation options for :class:`~api.models.Layer` models
    in the Django admin.
    """
    # Layers expose their role flags (proxy/runtime) and config.
    date_hierarchy = 'created'
    list_display = ('id', 'owner', 'formation', 'flavor', 'proxy', 'runtime', 'config')
    list_filter = ('owner', 'formation', 'flavor')
admin.site.register(Layer, LayerAdmin)
class NodeAdmin(admin.ModelAdmin):
    """Set presentation options for :class:`~api.models.Node` models
    in the Django admin.
    """
    # Nodes are listed with their fully-qualified domain name.
    date_hierarchy = 'created'
    list_display = ('id', 'owner', 'formation', 'fqdn')
    list_filter = ('owner', 'formation')
admin.site.register(Node, NodeAdmin)
class ProviderAdmin(admin.ModelAdmin):
    """Set presentation options for :class:`~api.models.Provider` models
    in the Django admin.
    """
    date_hierarchy = 'created'
    list_display = ('id', 'owner', 'type')
    list_filter = ('owner', 'type')
admin.site.register(Provider, ProviderAdmin)
class ReleaseAdmin(admin.ModelAdmin):
    """Set presentation options for :class:`~api.models.Release` models
    in the Django admin.
    """
    # Releases are versioned per app.
    date_hierarchy = 'created'
    list_display = ('owner', 'app', 'version')
    list_filter = ('owner', 'app')
admin.site.register(Release, ReleaseAdmin)
| StarcoderdataPython |
1627456 | <reponame>wemerson-henrique/kivy<filename>kivyeExemplosDocumentacao/view/exemplo1.py
# Workaround: force the ANGLE/SDL2 OpenGL backend so Kivy can render on
# machines with problematic OpenGL drivers.  Must run before any other
# kivy import.  (Original comments: "codigo ganbiarra" = "hack code".)
import os
os.environ['KIVY_GL_BACKEND'] = 'angle_sdl2'
# end of workaround
from kivy.app import App
from kivy.uix.gridlayout import GridLayout
from kivy.uix.label import Label
from kivy.uix.textinput import TextInput
class LoginScreen(GridLayout):
    """Single-column grid showing three (Portuguese) design questions.

    Despite the name, only labels are added -- there are no input
    widgets in this example.
    """
    def __init__(self, **kwargs):
        super(LoginScreen, self).__init__(**kwargs)
        # One column: the labels stack vertically.
        self.cols = 1
        self.add_widget(Label(text='Quais dados meu aplicativo processa?'))
        self.add_widget(Label(text='Como eu represento visualmente esses dados?'))
        self.add_widget(Label(text='Como o usuário interage com esses dados?'))
class MyApp(App):
    """Kivy application whose root widget is a LoginScreen."""
    def build(self):
        return LoginScreen()
if __name__ == '__main__':
    # Start the Kivy event loop.
    MyApp().run()
3273255 | import jax.numpy as np
def chebval(x, c, tensor=True):
    """
    Evaluate a Chebyshev series at points x.
    If `c` is of length `n + 1`, this function returns the value:
    .. math:: p(x) = c_0 * T_0(x) + c_1 * T_1(x) + ... + c_n * T_n(x)
    The parameter `x` is converted to an array only if it is a tuple or a
    list, otherwise it is treated as a scalar. In either case, either `x`
    or its elements must support multiplication and addition both with
    themselves and with the elements of `c`.
    If `c` is a 1-D array, then `p(x)` will have the same shape as `x`. If
    `c` is multidimensional, then the shape of the result depends on the
    value of `tensor`. If `tensor` is true the shape will be c.shape[1:] +
    x.shape. If `tensor` is false the shape will be c.shape[1:]. Note that
    scalars have shape (,).
    Trailing zeros in the coefficients will be used in the evaluation, so
    they should be avoided if efficiency is a concern.
    Parameters
    ----------
    x : array_like, compatible object
        If `x` is a list or tuple, it is converted to an ndarray, otherwise
        it is left unchanged and treated as a scalar. In either case, `x`
        or its elements must support addition and multiplication with
        with themselves and with the elements of `c`.
    c : array_like
        Array of coefficients ordered so that the coefficients for terms of
        degree n are contained in c[n]. If `c` is multidimensional the
        remaining indices enumerate multiple polynomials. In the two
        dimensional case the coefficients may be thought of as stored in
        the columns of `c`.
    tensor : boolean, optional
        If True, the shape of the coefficient array is extended with ones
        on the right, one for each dimension of `x`. Scalars have dimension 0
        for this action. The result is that every column of coefficients in
        `c` is evaluated for every element of `x`. If False, `x` is broadcast
        over the columns of `c` for the evaluation. This keyword is useful
        when `c` is multidimensional. The default value is True.
        .. versionadded:: 1.7.0
    Returns
    -------
    values : ndarray, algebra_like
        The shape of the return value is described above.
    See Also
    --------
    chebval2d, chebgrid2d, chebval3d, chebgrid3d
    Notes
    -----
    The evaluation uses Clenshaw recursion, aka synthetic division.
    Examples
    --------
    """
    c = np.array(c, ndmin=1, copy=True)
    # Promote boolean/integer coefficient arrays to floating point.
    if c.dtype.char in '?bBhHiIlLqQpP':
        c = c.astype(np.double)
    if isinstance(x, (tuple, list)):
        x = np.asarray(x)
    if isinstance(x, np.ndarray) and tensor:
        # Append singleton axes so each column of coefficients
        # broadcasts over every element of x.
        c = c.reshape(c.shape + (1,)*x.ndim)

    if len(c) == 1:
        # Constant polynomial: p(x) = c0.
        c0 = c[0]
        c1 = 0
    elif len(c) == 2:
        # Linear polynomial: p(x) = c0 + c1*T_1(x).
        c0 = c[0]
        c1 = c[1]
    else:
        # Clenshaw recursion from the highest-order coefficients down,
        # using the recurrence T_{n+1}(x) = 2x*T_n(x) - T_{n-1}(x).
        x2 = 2*x
        c0 = c[-2]
        c1 = c[-1]
        for i in range(3, len(c) + 1):
            tmp = c0
            c0 = c[-i] - c1
            c1 = tmp + c1*x2
    # Final combination: p(x) = c0 + c1*T_1(x), with T_1(x) = x.
    return c0 + c1*x
def polycalc(coef,inwave):
    """Evaluate a Chebyshev series over `inwave` after mapping the
    wavelength array onto the normalized interval [-1, 1]."""
    # define obs wave on normalized scale
    x = inwave - inwave.min()
    x = 2.0*(x/x.max())-1.0
    # build poly coef
    # c = np.insert(coef[1:],0,0)
    poly = chebval(x,coef)
    # Earlier variants applied an offset/exponential; kept for reference:
    # epoly = np.exp(coef[0]+poly)
    # epoly = coef[0]+poly
    return poly
def airtovacuum(inwave):
    """Convert air wavelengths (Angstroms) to vacuum wavelengths.

    Uses the dispersion relationship from Ciddor (1996) as transcribed
    in Shetrone et al. (2015).
    """
    # Work in microns for the Ciddor formula.
    wave_um = inwave * (1E-4)
    inv_sq = 1.0 / wave_um ** 2.0
    # Ciddor (1996) two-term dispersion with coefficients b1, b2, c1, c2.
    delta = 0.0 + (5.792105E-2 / (238.0185 - inv_sq)) + (1.67917E-3 / (57.362 - inv_sq))
    # Apply the correction factor and convert back to Angstroms.
    return (wave_um * (delta + 1)) * (1E4)
def vacuumtoair(inwave):
    """Convert vacuum wavelengths (Angstroms) to air wavelengths.

    Divides by the refractive index of air evaluated from a standard
    dispersion formula in terms of the squared vacuum wavenumber.
    """
    sigma_sq = ((10**4) / inwave) ** 2.0  # squared wavenumber (inverse microns)
    refractive_index = (1.0 + 0.0000834254
                        + 0.02406147 / (130.0 - sigma_sq)
                        + 0.00015998 / (38.9 - sigma_sq))
    return inwave / refractive_index
| StarcoderdataPython |
157284 | # Generated by Django 3.2.3 on 2021-09-15 12:33
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated migration: redefine the choice list of Entry.account.

    Do not hand-edit; change the model and re-run ``makemigrations``.
    """

    # Must be applied after the previous ledgerapp migration.
    dependencies = [
        ('ledgerapp', '0005_auto_20210909_1505'),
    ]

    operations = [
        # Re-declare Entry.account with the current set of 4-letter
        # ledger account codes and their human-readable labels.
        migrations.AlterField(
            model_name='entry',
            name='account',
            field=models.CharField(choices=[('APAY', 'Accounts Payable'), ('COGM', 'Cost of Goods Manufactured'), ('COGS', 'Cost of Goods Sold'), ('CASH', 'Cash Account'), ('IRAW', 'Inventory Raw Materials'), ('IMER', 'Inventory Merchandise'), ('ILOS', 'Inventory Loss'), ('OWEQ', 'Owner Equity')], max_length=4),
        ),
    ]
| StarcoderdataPython |
3289563 | <reponame>WTRMQDev/leer<gh_stars>1-10
import functools
from secp256k1_zkp import PublicKey, ALL_FLAGS
import hashlib, base64
class Excess:
    '''
    Excess is public key with proof that it is public key (signature).
    '''
    def __init__(self, recoverable_signature=None, message=b'', version=0, raw=None):
        # version 0: no message attached; version 1: carries an explicit message.
        self.recoverable_signature = recoverable_signature
        self.message = message
        self.version = version
        if raw:
            self.deserialize_raw(raw)
        # NOTE(review): when `raw` is given, drop_cached() below discards the
        # `serialized` cache that deserialize_raw just populated -- confirm intended.
        self.drop_cached()

    def drop_cached(self):
        # Invalidate all memoised derivations; call after any state change.
        self.cached_pubkey = None
        self.cached_nonrec = None
        self.serialized = None

    def calc_pubkey(self):
        # Recover the signer's public key from the recoverable signature.
        unrelated = PublicKey(flags=ALL_FLAGS)
        self.cached_pubkey = PublicKey(pubkey=unrelated.ecdsa_recover(self.message, self.recoverable_signature))

    @property
    def pubkey(self):
        # Lazily recovered (and cached) public key.
        if not self.cached_pubkey:
            self.calc_pubkey()
        return self.cached_pubkey

    @property
    def serialized_pubkey(self):
        # Serialized form of the recovered public key.
        return self.pubkey.serialize()

    def from_private_key(self, privkey, message=b'', version=None):
        # Sign *message* with *privkey*; version defaults to 0 for an empty
        # message and 1 otherwise. Returns self for chaining.
        self.drop_cached()
        self.message = message if message else self.message
        if version:
            self.version = version
        else:
            if self.message==b"":
                self.version = 0
            else:
                self.version = 1
        self.recoverable_signature = privkey.ecdsa_sign_recoverable(self.message)
        return self

    def calc_nonrec(self):
        # Convert the recoverable signature into a plain ECDSA signature.
        unrelated = PublicKey(flags=ALL_FLAGS)
        self.cached_nonrec = unrelated.ecdsa_recoverable_convert(self.recoverable_signature)

    @property
    def nonrec_signature(self):
        # Lazily converted (and cached) non-recoverable signature.
        if not self.cached_nonrec:
            self.calc_nonrec()
        return self.cached_nonrec

    def verify(self):
        # Valid iff the recovered pubkey verifies the signature over message.
        return self.pubkey.ecdsa_verify(self.message, self.nonrec_signature)

    def serialize(self):
        # Wire format: 65-byte recoverable signature. For version 1 the top
        # bit of the first byte is set and a 2-byte big-endian message length
        # plus the message bytes are appended.
        if self.serialized:
            return self.serialized
        if self.version==0 and len(self.message):
            # NOTE(review): bare `raise` outside an except block raises
            # RuntimeError at runtime; presumably meant as an explicit
            # "version 0 cannot carry a message" error -- confirm.
            raise
        unrelated = PublicKey(flags=ALL_FLAGS)
        if self.version==0:
            self.serialized = unrelated.ecdsa_recoverable_serialize_raw(self.recoverable_signature)
        elif self.version==1:
            rec_sig_serialized = unrelated.ecdsa_recoverable_serialize_raw(self.recoverable_signature)
            # Set the high bit of the first byte as the version-1 marker.
            rec_sig_serialized = (rec_sig_serialized[0] | 128).to_bytes(1,"big") + rec_sig_serialized[1:]
            mes_serialized = len(self.message).to_bytes(2,"big")+self.message
            self.serialized = rec_sig_serialized+mes_serialized
        return self.serialized

    def deserialize_raw(self, serialized_data):
        # Parse one serialized excess from the front of *serialized_data*
        # and return the unconsumed remainder. Raises Exception on truncation.
        self.drop_cached()
        consumed = b""
        if len(serialized_data)<65:
            raise Exception("Not enough bytes to encode recovery signature")
        rec_sig, serialized_data = serialized_data[:65], serialized_data[65:]
        consumed += rec_sig
        unrelated = PublicKey(flags=ALL_FLAGS)
        if rec_sig[0] & 128 ==0:
            # Version 0: no message follows the signature.
            self.version = 0
            self.message = b""
            self.recoverable_signature = unrelated.ecdsa_recoverable_deserialize_raw(rec_sig)
        if rec_sig[0] & 128 ==128:
            # Version 1: clear the marker bit, then read the message suffix.
            self.version = 1
            rec_sig = (rec_sig[0] - 128).to_bytes(1,"big") + rec_sig[1:]
            self.recoverable_signature = unrelated.ecdsa_recoverable_deserialize_raw(rec_sig)
            if len(serialized_data)<2:
                raise Exception("Not enough bytes to encode message len")
            mlen_ser, serialized_data = serialized_data[:2], serialized_data[2:]
            mlen = int.from_bytes(mlen_ser, 'big')
            if len(serialized_data)<mlen:
                raise Exception("Not enough bytes to encode message")
            self.message, serialized_data = serialized_data[:mlen], serialized_data[mlen:]
            consumed += mlen_ser+self.message
        self.serialized = consumed
        return serialized_data

    @property
    def index(self):
        """
        Special index which is used for building excesses merkle tree.
        Same as with tree for outputs, we want to be able to both access
        the sum of excesses as sum of points on curve and to validate tree.
        Thus index contains public key and hash of serialized excess.
        """
        m=hashlib.sha256()
        m.update(self.message)
        return self.pubkey.serialize() + m.digest()

    @classmethod
    @functools.lru_cache(maxsize=40)
    def from_serialized(cls, serialized_address):
        # Cached constructor from wire bytes (bounded LRU of 40 entries).
        address = cls()
        address.deserialize_raw(serialized_address)
        return address
class Address(Excess):
    """An Excess whose signed message is required to be empty.

    Adds a human-readable wire format: base64 of the serialized excess
    followed by a 4-byte sha256 checksum.
    """

    def verify(self):
        """Valid only if the signature checks out and the message is empty."""
        return super().verify() and (self.message==b"")

    def from_text(self, serialized):
        """Populate this address from its base64 text form.

        Raises AssertionError if the trailing 4-byte sha256 checksum does
        not match the payload. Returns True on success.
        """
        # Fixed: the original built a throwaway `_address = Address()` and
        # ended with a no-op `self = _address`; deserialization already
        # happens into `self`, so the dead object is removed.
        raw = base64.b64decode(serialized.encode())
        payload, checksum = raw[:-4], raw[-4:]
        # Deserialize first (matches historical behaviour on bad checksums),
        # then verify the checksum over the payload bytes.
        self.deserialize_raw(payload)
        m = hashlib.sha256()
        m.update(payload)
        assert m.digest()[:4] == checksum
        return True

    def to_text(self):
        """Serialize to base64 text with a 4-byte sha256 checksum suffix."""
        payload = self.serialize()
        m = hashlib.sha256()
        m.update(payload)
        return base64.b64encode(payload + m.digest()[:4]).decode()
def excess_from_private_key(private_key, apc):
    """Build an Excess committing to message *apc*, signed by *private_key*."""
    excess = Excess(message=apc)
    excess.from_private_key(private_key)
    return excess
def address_from_private_key(private_key):
    """Build an Address (empty-message Excess) signed by *private_key*."""
    address = Address(message=b"")
    address.from_private_key(private_key)
    return address
#class Excess(Address, PublicKey):
# def from_address(self, adr):
# PublicKey.__init__(self, pubkey=adr.pubkey.public_key, raw=False)
| StarcoderdataPython |
3380698 | #!/usr/bin/env python3
# via https://github.com/nicodv/kmodes/blob/master/kmodes.py
# copy-pasted on 2014-06-18
__author__ = '<NAME>'
__email__ = '<EMAIL>'
__license__ = 'MIT'
__version__ = '0.8'
import random
import numpy as np
from collections import defaultdict
class KModes(object):

    def __init__(self, k):
        """k-modes clustering algorithm for categorical data.
        See:
        Huang, Z.: Extensions to the k-modes algorithm for clustering large data sets with
        categorical values, Data Mining and Knowledge Discovery 2(3), 1998.
        Inputs:     k           = number of clusters
        Attributes: clusters    = cluster numbers [no. points]
                    centroids   = centroids [k * no. attributes]
                    membership  = membership matrix [k * no. points]
                    cost        = clustering cost, defined as the sum distance of
                                  all points to their respective clusters
        """
        assert k > 1, "Choose at least 2 clusters."
        self.k = k
        # generalized form with alpha. alpha > 1 for fuzzy k-modes
        self.alpha = 1
        # init some variables
        self.membership = self.clusters = self.centroids = self.cost = None

    def cluster(self, x, pre_runs=10, pre_pctl=20, *args, **kwargs):
        """Shell around _perform_clustering method that tries to ensure a good clustering
        result by choosing one that has a relatively low clustering cost compared to the
        costs of a number of pre-runs. (Huang [1998] states that clustering cost can be
        used to judge the clustering quality.)
        """
        # Cao initialization is deterministic, so pre-runs add nothing.
        if pre_runs and 'init_method' in kwargs and kwargs['init_method'] == 'Cao':
            print("Initialization method and algorithm are deterministic. Disabling preruns...")
            pre_runs = None
        if pre_runs:
            # Run a few cheap clusterings to estimate what a "good" cost is.
            precosts = np.empty(pre_runs)
            for pr in range(pre_runs):
                self._perform_clustering(x, *args, verbose=0, **kwargs)
                precosts[pr] = self.cost
                print("Prerun {0} / {1}, Cost = {2}".format(pr+1, pre_runs, precosts[pr]))
            goodcost = np.percentile(precosts, pre_pctl)
        else:
            goodcost = np.inf
        # Repeat until a run beats the pre-run cost threshold.
        while True:
            self._perform_clustering(x, *args, verbose=1, **kwargs)
            if self.cost <= goodcost:
                break

    def _perform_clustering(self, x, init_method='Huang', max_iters=100, verbose=1):
        """Inputs:  x           = data points [no. points * no. attributes]
                    init_method = initialization method ('Huang' for the one described in
                                  Huang [1998], 'Cao' for the one in Cao et al. [2009])
                    max_iters   = maximum no. of iterations
                    verbose     = 0 for no and 1 for normal algorithm progress information,
                                  2 for internal algorithm details
        """
        # convert to numpy array, if needed
        x = np.asanyarray(x)
        npoints, nattrs = x.shape
        assert self.k < npoints, "More clusters than data points?"
        self.initMethod = init_method
        # ----------------------
        # INIT
        # ----------------------
        if verbose:
            print("Init: initializing centroids")
        self.init_centroids(x)
        if verbose:
            print("Init: initializing clusters")
        self.membership = np.zeros((self.k, npoints), dtype='int64')
        # self._clustAttrFreq is a list of lists with dictionaries that contain the
        # frequencies of values per cluster and attribute
        self._clustAttrFreq = [[defaultdict(int) for _ in range(nattrs)] for _ in range(self.k)]
        for ipoint, curpoint in enumerate(x):
            # initial assigns to clusters
            cluster = np.argmin(self.get_dissim(self.centroids, curpoint))
            self.membership[cluster, ipoint] = 1
            # count attribute values per cluster
            for iattr, curattr in enumerate(curpoint):
                self._clustAttrFreq[cluster][iattr][curattr] += 1
        # perform an initial centroid update
        for ik in range(self.k):
            for iattr in range(nattrs):
                self.centroids[ik, iattr] = self.get_mode(self._clustAttrFreq[ik][iattr])
        # ----------------------
        # ITERATION
        # ----------------------
        if verbose:
            print("Starting iterations...")
        itr = 0
        converged = False
        while itr <= max_iters and not converged:
            itr += 1
            moves = 0
            for ipoint, curpoint in enumerate(x):
                cluster = np.argmin(self.get_dissim(self.centroids, curpoint))
                # if necessary: move point, and update old/new cluster frequencies and centroids
                if not self.membership[cluster, ipoint]:
                    moves += 1
                    oldcluster = np.argwhere(self.membership[:, ipoint])[0][0]
                    self._add_point_to_cluster(curpoint, ipoint, cluster)
                    self._remove_point_from_cluster(curpoint, ipoint, oldcluster)
                    # update new and old centroids by choosing most likely attribute
                    for iattr, curattr in enumerate(curpoint):
                        for curc in (cluster, oldcluster):
                            self.centroids[curc, iattr] = self.get_mode(
                                self._clustAttrFreq[curc][iattr])
                    if verbose == 2:
                        print("Move from cluster {0} to {1}".format(oldcluster, cluster))
                    # in case of an empty cluster, reinitialize with a random point
                    # that is not a centroid
                    if sum(self.membership[oldcluster, :]) == 0:
                        while True:
                            rindx = np.random.randint(npoints)
                            if not np.all(x[rindx] == self.centroids).any():
                                break
                        self._add_point_to_cluster(x[rindx], rindx, oldcluster)
                        fromcluster = np.argwhere(self.membership[:, rindx])[0][0]
                        self._remove_point_from_cluster(x[rindx], rindx, fromcluster)
            # all points seen in this iteration
            converged = (moves == 0)
            if verbose:
                print("Iteration: {0}/{1}, moves: {2}".format(itr, max_iters, moves))
        self.calculate_clustering_cost(x)
        self.clusters = np.array([np.argwhere(self.membership[:, pt])[0] for pt in range(npoints)])

    def init_centroids(self, x):
        # Pick k initial centroids with the configured method.
        assert self.initMethod in ('Huang', 'Cao')
        npoints, nattrs = x.shape
        self.centroids = np.empty((self.k, nattrs))
        if self.initMethod == 'Huang':
            # determine frequencies of attributes
            for iattr in range(nattrs):
                freq = defaultdict(int)
                for curattr in x[:, iattr]:
                    freq[curattr] += 1
                # sample centroids using the probabilities of attributes
                # (I assume that's what's meant in the Huang [1998] paper; it works, at least)
                # note: sampling using population in static list with as many choices as
                # frequency counts this works well since (1) we re-use the list k times here,
                # and (2) the counts are small integers so memory consumption is low
                choices = [chc for chc, wght in freq.items() for _ in range(wght)]
                for ik in range(self.k):
                    self.centroids[ik, iattr] = random.choice(choices)
            # the previously chosen centroids could result in empty clusters,
            # so set centroid to closest point in x
            for ik in range(self.k):
                ndx = np.argsort(self.get_dissim(x, self.centroids[ik]))
                # and we want the centroid to be unique
                while np.all(x[ndx[0]] == self.centroids, axis=1).any():
                    ndx = np.delete(ndx, 0)
                self.centroids[ik] = x[ndx[0]]
        elif self.initMethod == 'Cao':
            # Note: O(N * at * k**2), so watch out with k
            # determine densities points
            dens = np.zeros(npoints)
            for iattr in range(nattrs):
                freq = defaultdict(int)
                for val in x[:, iattr]:
                    freq[val] += 1
                for ipoint in range(npoints):
                    dens[ipoint] += freq[x[ipoint, iattr]] / float(nattrs)
            dens /= npoints
            # choose centroids based on distance and density
            self.centroids[0] = x[np.argmax(dens)]
            dissim = self.get_dissim(x, self.centroids[0])
            self.centroids[1] = x[np.argmax(dissim * dens)]
            # for the reamining centroids, choose max dens * dissim to the (already assigned)
            # centroid with the lowest dens * dissim
            for ik in range(2, self.k):
                dd = np.empty((ik, npoints))
                for ikk in range(ik):
                    dd[ikk] = self.get_dissim(x, self.centroids[ikk]) * dens
                self.centroids[ik] = x[np.argmax(np.min(dd, axis=0))]
        return

    def _add_point_to_cluster(self, point, ipoint, cluster):
        # Assign point to cluster and update its attribute-frequency tables.
        self.membership[cluster, ipoint] = 1
        # update frequencies of attributes in cluster
        for iattr, curattr in enumerate(point):
            self._clustAttrFreq[cluster][iattr][curattr] += 1
        return

    def _remove_point_from_cluster(self, point, ipoint, cluster):
        # Unassign point from cluster and update its attribute-frequency tables.
        self.membership[cluster, ipoint] = 0
        # update frequencies of attributes in cluster
        for iattr, curattr in enumerate(point):
            self._clustAttrFreq[cluster][iattr][curattr] -= 1
        return

    @staticmethod
    def get_dissim(a, b):
        # simple matching dissimilarity
        return (a != b).sum(axis=1)

    @staticmethod
    def get_mode(dic):
        # Fast method (supposedly) to get key for maximum value in dict.
        v = list(dic.values())
        k = list(dic.keys())
        if len(v) == 0:
            # NOTE(review): dead guard -- it falls through, and max(v) below
            # would still raise ValueError on an empty dict. Confirm intent.
            pass
        return k[v.index(max(v))]

    def calculate_clustering_cost(self, x):
        # Total cost = sum over points of dissimilarity to their cluster,
        # weighted by (membership ** alpha); alpha == 1 for hard k-modes.
        self.cost = 0
        for ipoint, curpoint in enumerate(x):
            self.cost += np.sum(self.get_dissim(self.centroids, curpoint) *
                                (self.membership[:, ipoint] ** self.alpha))
        return
class KPrototypes(KModes):

    def __init__(self, k):
        """k-protoypes clustering algorithm for mixed numeric and categorical data.
        <NAME>.: Clustering large data sets with mixed numeric and categorical values,
        Proceedings of the First Pacific Asia Knowledge Discovery and Data Mining Conference,
        Singapore, pp. 21-34, 1997.
        Inputs:     k           = number of clusters
        Attributes: clusters    = cluster numbers [no. points]
                    centroids   = centroids, two lists (num. and cat.) with [k * no. attributes]
                    membership  = membership matrix [k * no. points]
                    cost        = clustering cost, defined as the sum distance of
                                  all points to their respective clusters
                    gamma       = weighing factor that determines relative importance of
                                  num./cat. attributes (see discussion in Huang [1997])
        """
        super(KPrototypes, self).__init__(k)
        self.gamma = None

    def _perform_clustering(self, x, gamma=None, init_method='Huang', max_iters=100, verbose=1):
        """Inputs:  xnum        = numeric data points [no. points * no. numeric attributes]
                    xcat        = categorical data points [no. points * no. numeric attributes]
                    gamma       = weighing factor that determines relative importance of
                                  num./cat. attributes (see discussion in Huang [1997])
                    initMethod  = initialization method ('Huang' for the one described in
                                  Huang [1998], 'Cao' for the one in Cao et al. [2009])
                    max_iters   = maximum no. of iterations
                    verbose     = 0 for no and 1 for normal algorithm progress information,
                                  2 for internal algorithm details
        """
        # convert to numpy arrays, if needed
        xnum, xcat = x[0], x[1]
        xnum = np.asanyarray(xnum)
        xcat = np.asanyarray(xcat)
        nnumpoints, nnumattrs = xnum.shape
        ncatpoints, ncatattrs = xcat.shape
        assert nnumpoints == ncatpoints, "More numerical points than categorical?"
        npoints = nnumpoints
        assert self.k < npoints, "More clusters than data points?"
        self.initMethod = init_method
        # estimate a good value for gamma, which determines the weighing of
        # categorical values in clusters (see Huang [1997])
        if gamma is None:
            gamma = 0.5 * np.std(xnum)
        self.gamma = gamma
        # ----------------------
        # INIT
        # ----------------------
        if verbose:
            print("Init: initializing centroids")
        # list where [0] = numerical part of centroid and [1] = categorical part
        self.init_centroids(xcat)
        self.centroids = [np.mean(xnum, axis=0) + np.random.randn(self.k, nnumattrs) *
                          np.std(xnum, axis=0), self.centroids]
        if verbose:
            print("Init: initializing clusters")
        self.membership = np.zeros((self.k, npoints), dtype='int64')
        # keep track of the sum of attribute values per cluster
        self._clustAttrSum = np.zeros((self.k, nnumattrs), dtype='float')
        # self._clustAttrFreq is a list of lists with dictionaries that contain
        # the frequencies of values per cluster and attribute
        self._clustAttrFreq = [[defaultdict(int) for _ in range(ncatattrs)] for _ in range(self.k)]
        for ipoint in range(npoints):
            # initial assigns to clusters; combined num./cat. distance with gamma weight
            cluster = np.argmin(self.get_dissim_num(self.centroids[0], xnum[ipoint]) +
                                self.gamma * self.get_dissim(self.centroids[1], xcat[ipoint]))
            self.membership[cluster, ipoint] = 1
            # count attribute values per cluster
            for iattr, curattr in enumerate(xnum[ipoint]):
                self._clustAttrSum[cluster, iattr] += curattr
            for iattr, curattr in enumerate(xcat[ipoint]):
                self._clustAttrFreq[cluster][iattr][curattr] += 1
        for ik in range(self.k):
            # in case of an empty cluster, reinitialize with a random point
            # that is not a centroid
            if sum(self.membership[ik, :]) == 0:
                while True:
                    rindex = np.random.randint(npoints)
                    if not np.all(np.vstack((np.all(xnum[rindex] == self.centroids[0], axis=1),
                                             np.all(xcat[rindex] == self.centroids[1], axis=1))),
                                  axis=0).any():
                        break
                self._add_point_to_cluster(xnum[rindex], xcat[rindex], rindex, ik)
                fromcluster = np.argwhere(self.membership[:, rindex])[0][0]
                self._remove_point_from_cluster(xnum[rindex], xcat[rindex], rindex, fromcluster)
        # perform an initial centroid update
        for ik in range(self.k):
            for iattr in range(nnumattrs):
                # TODO: occasionally "invalid value encountered in double_scalars" in following line
                self.centroids[0][ik, iattr] = \
                    self._clustAttrSum[ik, iattr] / sum(self.membership[ik, :])
            for iattr in range(ncatattrs):
                self.centroids[1][ik, iattr] = self.get_mode(self._clustAttrFreq[ik][iattr])
        # ----------------------
        # ITERATION
        # ----------------------
        if verbose:
            print("Starting iterations...")
        itr = 0
        converged = False
        while itr <= max_iters and not converged:
            itr += 1
            moves = 0
            for ipoint in range(npoints):
                cluster = np.argmin(self.get_dissim_num(self.centroids[0], xnum[ipoint]) +
                                    self.gamma * self.get_dissim(self.centroids[1], xcat[ipoint]))
                # if necessary: move point, and update old/new cluster frequencies and centroids
                if not self.membership[cluster, ipoint]:
                    moves += 1
                    oldcluster = np.argwhere(self.membership[:, ipoint])[0][0]
                    self._add_point_to_cluster(xnum[ipoint], xcat[ipoint], ipoint, cluster)
                    self._remove_point_from_cluster(xnum[ipoint], xcat[ipoint], ipoint, oldcluster)
                    # update new and old centroids by choosing mean for numerical and
                    # most likely for categorical attributes
                    for iattr in range(len(xnum[ipoint])):
                        for curc in (cluster, oldcluster):
                            if sum(self.membership[curc, :]):
                                self.centroids[0][curc, iattr] = \
                                    self._clustAttrSum[curc, iattr] / sum(self.membership[curc, :])
                            else:
                                self.centroids[0][curc, iattr] = 0
                    for iattr in range(len(xcat[ipoint])):
                        for curc in (cluster, oldcluster):
                            self.centroids[1][curc, iattr] = \
                                self.get_mode(self._clustAttrFreq[curc][iattr])
                    if verbose == 2:
                        print("Move from cluster {0} to {1}".format(oldcluster, cluster))
                    # in case of an empty cluster, reinitialize with a random point
                    # that is not a centroid
                    if sum(self.membership[oldcluster, :]) == 0:
                        while True:
                            rindex = np.random.randint(npoints)
                            if not np.all(np.vstack((
                                    np.all(xnum[rindex] == self.centroids[0], axis=1),
                                    np.all(xcat[rindex] == self.centroids[1], axis=1))),
                                    axis=0).any():
                                break
                        self._add_point_to_cluster(xnum[rindex], xcat[rindex], rindex, oldcluster)
                        fromcluster = np.argwhere(self.membership[:, rindex])[0][0]
                        self._remove_point_from_cluster(
                            xnum[rindex], xcat[rindex], rindex, fromcluster)
            # all points seen in this iteration
            converged = (moves == 0)
            if verbose:
                print("Iteration: {0}/{1}, moves: {2}".format(itr, max_iters, moves))
        self.calculate_clustering_cost(xnum, xcat)
        self.clusters = np.array([np.argwhere(self.membership[:, pt])[0] for pt in range(npoints)])

    def _add_point_to_cluster(self, point_num, point_cat, ipoint, cluster):
        # Assign point to cluster; update numeric sums and categorical counts.
        self.membership[cluster, ipoint] = 1
        # update sums of attributes in cluster
        for iattr, curattr in enumerate(point_num):
            self._clustAttrSum[cluster][iattr] += curattr
        # update frequencies of attributes in cluster
        for iattr, curattr in enumerate(point_cat):
            self._clustAttrFreq[cluster][iattr][curattr] += 1
        return

    def _remove_point_from_cluster(self, point_num, point_cat, ipoint, cluster):
        # Unassign point from cluster; update numeric sums and categorical counts.
        self.membership[cluster, ipoint] = 0
        # update sums of attributes in cluster
        for iattr, curattr in enumerate(point_num):
            self._clustAttrSum[cluster][iattr] -= curattr
        # update frequencies of attributes in cluster
        for iattr, curattr in enumerate(point_cat):
            self._clustAttrFreq[cluster][iattr][curattr] -= 1
        return

    @staticmethod
    def get_dissim_num(anum, b):
        # Euclidean distance
        return np.sum((anum - b)**2, axis=1)

    def calculate_clustering_cost(self, xnum, xcat):
        # Combined cost: numeric (Euclidean) part plus gamma-weighted
        # categorical (matching) part.
        ncost = 0
        ccost = 0
        for ipoint, curpoint in enumerate(xnum):
            ncost += np.sum(self.get_dissim_num(self.centroids[0], curpoint) *
                            (self.membership[:, ipoint] ** self.alpha))
        for ipoint, curpoint in enumerate(xcat):
            ccost += np.sum(self.get_dissim(self.centroids[1], curpoint) *
                            (self.membership[:, ipoint] ** self.alpha))
        self.cost = ncost + self.gamma * ccost
        if np.isnan(self.cost):
            # NOTE(review): dead guard -- NaN costs pass through silently.
            # Probably a leftover debugging breakpoint; confirm intent.
            pass
        return
class FuzzyKModes(KModes):

    def __init__(self, k, alpha=1.5):
        """Fuzzy k-modes clustering algorithm for categorical data.
        Uses traditional, hard centroids, following <NAME>., <NAME>.:
        A fuzzy k-modes algorithm for clustering categorical data,
        IEEE Transactions on Fuzzy Systems 7(4), 1999.
        Inputs:     k           = number of clusters
                    alpha       = alpha coefficient
        Attributes: clusters    = cluster numbers with max. membership [no. points]
                    membership  = membership matrix [k * no. points]
                    centroids   = centroids [k * no. attributes]
                    cost        = clustering cost
        """
        super(FuzzyKModes, self).__init__(k)
        assert alpha > 1, "alpha should be > 1 (alpha = 1 equals regular k-modes)."
        self.alpha = alpha
        self.omega = None

    def _perform_clustering(self, x, init_method='Huang', max_iters=200, tol=1e-5,
                            cost_inter=1, verbose=1):
        """Inputs:  x           = data points [no. points * no. attributes]
                    initMethod  = initialization method ('Huang' for the one described in
                                  Huang [1998], 'Cao' for the one in Cao et al. [2009]).
                    max_iters   = maximum no. of iterations
                    tol         = tolerance for termination criterion
                    cost_inter  = frequency with which to check the total cost
                                  (for speeding things up, since it is computationally expensive)
                    verbose     = 0 for no and 1 for normal algorithm progress information,
                                  2 for internal algorithm details
        """
        # convert to numpy array, if needed
        x = np.asanyarray(x)
        npoints, nattrs = x.shape
        assert self.k < npoints, "More clusters than data points?"
        self.initMethod = init_method
        # ----------------------
        # INIT
        # ----------------------
        if verbose:
            print("Init: initializing centroids")
        self.init_centroids(x)
        # store for all attributes which points have a certain attribute value
        self._domAttrPoints = [defaultdict(list) for _ in range(nattrs)]
        for ipoint, curpoint in enumerate(x):
            for iattr, curattr in enumerate(curpoint):
                self._domAttrPoints[iattr][curattr].append(ipoint)
        # ----------------------
        # ITERATION
        # ----------------------
        if verbose:
            print("Starting iterations...")
        itr = 0
        converged = False
        lastcost = np.inf
        while itr <= max_iters and not converged:
            self.update_membership(x)
            self.update_centroids()
            # computationally expensive, only check every N steps
            if itr % cost_inter == 0:
                self.calculate_clustering_cost(x)
                converged = self.cost >= lastcost * (1-tol)
                lastcost = self.cost
                if verbose:
                    print("Iteration: {0}/{1}, cost: {2}".format(itr, max_iters, self.cost))
            itr += 1
        self.clusters = np.array([int(np.argmax(self.membership[:, pt])) for pt in range(npoints)])

    def update_membership(self, x, threshold=1e-3):
        # Fuzzy membership update; points (near-)coinciding with a centroid
        # get membership 1 there and `threshold` elsewhere to avoid div-by-zero.
        npoints = x.shape[0]
        self.membership = np.empty((self.k, npoints))
        for ipoint, curpoint in enumerate(x):
            dissim = self.get_dissim(self.centroids, curpoint)
            if np.any(dissim <= threshold):
                self.membership[:, ipoint] = np.where(dissim <= threshold, 1, threshold)
            else:
                for ik in range(len(self.centroids)):
                    factor = 1. / (self.alpha - 1)
                    self.membership[ik, ipoint] = 1 / np.sum((float(dissim[ik]) / dissim)**factor)
        return

    def update_centroids(self):
        # Hard centroid update: per attribute, pick the value whose member
        # points have the largest summed (alpha-weighted) membership.
        self.centroids = np.empty((self.k, len(self._domAttrPoints)))
        for ik in range(self.k):
            for iattr in range(len(self._domAttrPoints)):
                # return attribute that maximizes the sum of the memberships
                v = list(self._domAttrPoints[iattr].values())
                k = list(self._domAttrPoints[iattr].keys())
                memvar = [sum(self.membership[ik, x]**self.alpha) for x in v]
                self.centroids[ik, iattr] = k[np.argmax(memvar)]
        return
class FuzzyCentroidsKModes(KModes):

    def __init__(self, k, alpha=1.5):
        """Fuzzy k-modes clustering algorithm for categorical data.
        Uses fuzzy centroids, following and <NAME>., <NAME>., <NAME>.:
        Fuzzy clustering of categorical data using fuzzy centroids, Pattern
        Recognition Letters 25, 1262-1271, 2004.
        Inputs:     k           = number of clusters
                    alpha       = alpha coefficient
        Attributes: clusters    = cluster numbers with max. membership [no. points]
                    membership  = membership matrix [k * no. points]
                    omega       = fuzzy centroids [dicts with element values as keys,
                                  element memberships as values, inside lists for
                                  attributes inside list for centroids]
                    cost        = clustering cost
        """
        super(FuzzyCentroidsKModes, self).__init__(k)
        assert k > 1, "Choose at least 2 clusters."
        self.k = k
        assert alpha > 1, "alpha should be > 1 (alpha = 1 equals regular k-modes)."
        self.alpha = alpha

    def _perform_clustering(self, x, max_iters=100, tol=1e-5, cost_inter=1, verbose=1):
        """Inputs:  x           = data points [no. points * no. attributes]
                    max_iters   = maximum no. of iterations
                    tol         = tolerance for termination criterion
                    cost_inter  = frequency with which to check the total cost
                                  (for speeding things up, since it is computationally expensive)
                    verbose     = 0 for no and 1 for normal algorithm progress information,
                                  2 for internal algorithm details
        """
        # convert to numpy array, if needed
        x = np.asanyarray(x)
        npoints, nattrs = x.shape
        assert self.k < npoints, "More clusters than data points?"
        # ----------------------
        # INIT
        # ----------------------
        if verbose:
            print("Init: initializing centroids")
        # count all attributes
        freqattrs = [defaultdict(int) for _ in range(nattrs)]
        for curpoint in x:
            for iattr, curattr in enumerate(curpoint):
                freqattrs[iattr][curattr] += 1
        # omega = fuzzy set (as dict) for each attribute per cluster
        self.omega = [[{} for _ in range(nattrs)] for _ in range(self.k)]
        for ik in range(self.k):
            for iattr in range(nattrs):
                # a bit unclear form the paper, but this is how they do it in their code
                # give a random attribute 1.0 membership and the rest 0.0
                randint = np.random.randint(len(freqattrs[iattr]))
                for iVal, curVal in enumerate(freqattrs[iattr]):
                    self.omega[ik][iattr][curVal] = float(iVal == randint)
        # ----------------------
        # ITERATION
        # ----------------------
        if verbose:
            print("Starting iterations...")
        itr = 0
        converged = False
        lastcost = np.inf
        while itr <= max_iters and not converged:
            # O(k*N*at*no. of unique values)
            self.update_membership(x)
            # O(k*N*at)
            self.update_centroids(x)
            # computationally expensive, only check every N steps
            if itr % cost_inter == 0:
                self.calculate_clustering_cost(x)
                converged = self.cost >= lastcost * (1-tol)
                lastcost = self.cost
                if verbose:
                    print("Iteration: {0}/{1}, cost: {2}".format(itr, max_iters, self.cost))
            itr += 1
        self.clusters = np.array([int(np.argmax(self.membership[:, pt])) for pt in range(npoints)])

    def update_membership(self, x, threshold=1e-3):
        # Eq. 20 from Kim et al. [2004]
        npoints = x.shape[0]
        self.membership = np.empty((self.k, npoints))
        for ipoint, curpoint in enumerate(x):
            dissim = self.get_fuzzy_dissim(curpoint)
            if np.any(dissim <= threshold):
                self.membership[:, ipoint] = np.where(dissim <= threshold, 1, threshold)
            else:
                # NOTE: squaring the distances is not mentioned in the paper, but it is
                # in the code of Kim et al.; seems to improve performance
                dissim **= 2
                for ik in range(len(self.omega)):
                    factor = 1. / (self.alpha - 1)
                    self.membership[ik, ipoint] = 1 / np.sum((float(dissim[ik]) / dissim)**factor)
        return

    def update_centroids(self, x):
        # Rebuild the fuzzy centroids (omega) from the current memberships.
        self.omega = [[defaultdict(float) for _ in range(x.shape[1])] for _ in range(self.k)]
        for ik in range(self.k):
            for iattr in range(x.shape[1]):
                for ipoint, curpoint in enumerate(x[:, iattr]):
                    self.omega[ik][iattr][curpoint] += self.membership[ik, ipoint] ** self.alpha
                # normalize so that sum omegas is 1, analogous to k-means
                # (see e.g. Yang et al. [2008] who explain better than the original paper)
                sumomg = sum(self.omega[ik][iattr].values())
                for key in self.omega[ik][iattr].keys():
                    self.omega[ik][iattr][key] /= sumomg
        return

    def get_fuzzy_dissim(self, x):
        # TODO: slow, could it be faster?
        # dissimilarity = sums of all omegas for non-matching attributes
        # see Eqs. 13-15 of Kim et al. [2004]
        dissim = np.zeros(len(self.omega))
        for ik in range(len(self.omega)):
            for iattr, curattr in enumerate(self.omega[ik]):
                nonmatch = [v for k, v in curattr.items() if k != x[iattr]]
                # dissim[ik] += sum(nonmatch)
                # following the code of Kim et al., seems to work better
                dissim[ik] += sum(nonmatch) / np.sqrt(np.sum(np.array(list(curattr.values())) ** 2))
        return dissim

    def calculate_clustering_cost(self, x):
        # Total cost using the fuzzy dissimilarity and alpha-weighted memberships.
        self.cost = 0
        for ipoint, curpoint in enumerate(x):
            self.cost += np.sum(self.get_fuzzy_dissim(curpoint) *
                                (self.membership[:, ipoint] ** self.alpha))
        return
def soybean_test():
    """Smoke test: run all five clusterers on the small soybean data set
    and print a class-vs-cluster contingency table for each.

    Expects ./soybean.csv in the working directory (36 columns; last
    column is the class label of the form D1..D4).
    """
    # reproduce results on small soybean data set
    x = np.genfromtxt('./soybean.csv', dtype=int, delimiter=',')[:, :-1]
    y = np.genfromtxt('./soybean.csv', dtype=str, delimiter=',', usecols=35)
    # drop columns with single value
    x = x[:, np.std(x, axis=0) > 0.]
    kmodes_huang = KModes(4)
    kmodes_huang.cluster(x, init_method='Huang')
    kmodes_cao = KModes(4)
    kmodes_cao.cluster(x, init_method='Cao')
    kproto = KPrototypes(4)
    # k-prototypes needs [numeric, categorical]; use random numeric columns.
    kproto.cluster([np.random.randn(x.shape[0], 3), x], init_method='Huang')
    fkmodes = FuzzyKModes(4, alpha=1.1)
    fkmodes.cluster(x)
    ffkmodes = FuzzyCentroidsKModes(4, alpha=1.8)
    ffkmodes.cluster(x)
    for result in (kmodes_huang, kmodes_cao, kproto, fkmodes, ffkmodes):
        # Rows = true classes D1..D4, columns = assigned clusters.
        classtable = np.zeros((4, 4), dtype=int)
        for ii, _ in enumerate(y):
            classtable[int(y[ii][-1])-1, result.clusters[ii]] += 1
        print("\n")
        print(" | Cl. 1 | Cl. 2 | Cl. 3 | Cl. 4 |")
        print("----|-------|-------|-------|-------|")
        for ii in range(4):
            prargs = tuple([ii+1] + list(classtable[ii, :]))
            print(" D{0} | {1:>2} | {2:>2} | {3:>2} | {4:>2} |".format(*prargs))

if __name__ == "__main__":
    soybean_test()
| StarcoderdataPython |
1690779 | <reponame>PFedak/DAKompiler<filename>Cform.py
from symbolify import InstrResult as IR, Context
import basicTypes
import algebra
indent = ' '*4
def cformat(exp, spec):
    """Special-case formatter hook for algebra expressions rendered as C.

    Renders pointer member access as ``ptr->field``. For any other
    expression it falls through and implicitly returns None, which the
    caller (algebra.Expression.specialFormat) presumably treats as
    "use default formatting" -- TODO confirm. *spec* is currently unused.
    """
    if exp.op == '.' and isinstance(exp.args[0].type, basicTypes.Pointer):
        return '{}->{}'.format(exp.args[0], exp.args[1])
def dummy(*args):
    """Placeholder renderer: just stringify the whole argument tuple."""
    return '{}'.format(args)
def renderReg(state):
    """Render a register assignment, or None for an implicit state."""
    if not state.explicit:
        return None
    return '{0.name} = {0.value};'.format(state)
def renderFunc(title, args, val):
    """Render a C call statement, assigning the result unless val is untyped."""
    arg_text = ', '.join('{}'.format(v) for _, v in args)
    if val.type != basicTypes.bad:
        return '{0} = {1}({2});'.format(val, title, arg_text)
    return '{1}({2});'.format(val, title, arg_text)
def renderWrite(value, target):
    """Render a C assignment of value into target."""
    return '{0} = {1};'.format(target, value)
def renderReturn(value=None):
    """Render a C return statement, or None when there is no return value."""
    if value:
        return 'return {};'.format(value)
    return None
# Dispatch table mapping each symbolic instruction-result kind (IR enum) to
# the function that renders it as a line of C; used by renderToC().
renderList = {
IR.register : renderReg,
IR.write : renderWrite,
IR.function : renderFunc,
IR.end : renderReturn,
IR.unhandled: lambda x:'unhandled opcode: {}'.format(x)
}
def renderFunctionToC(name, codeTree, history, booleans):
    """Render a whole decompiled function as a list of C source lines."""
    # Install the C-specific formatter used by Expression rendering.
    algebra.Expression.specialFormat = cformat
    arg_names = ', '.join(history.states[arg][0].value.name
                          for arg in history.argList)
    out = ['void {}({}){{'.format(name, arg_names)]
    out.extend(renderToC(codeTree, booleans, 1))
    out.append('}')
    return out
def renderToC(codeTree, booleans, level=0):
    """Recursively render a code tree into indented C source lines.

    Children guarded by a non-trivial condition are wrapped in an
    if/else-if chain whose condition is the child's CNF expressed over the
    named boolean variables.
    """
    out = []
    # Straight-line code at this nesting level.
    for line in codeTree.code:
        rendered = renderList[line[0]](*line[1:])
        if rendered:
            out.append((indent * level) + rendered)
    previousWasShown = False
    for child in codeTree.children:
        if child.relative.isTrivial():
            # Condition always holds: emit the child inline, no braces.
            newLevel = level
            prefix = None
        else:
            newLevel = level + 1
            if child.elseRelative and previousWasShown:
                keyword = '{}else {{' if child.elseRelative.isTrivial() else '{}else if ({}) {{'
                toShow = child.elseRelative
            else:
                keyword = '{}if ({}) {{'
                toShow = child.relative
            # The condition's CNF becomes ||-joined clauses of &&-joined
            # (possibly negated, via the '{:!}' spec) boolean names.
            clauses = ' || '.join(
                ' && '.join(
                    ('{}' if val else '{:!}').format(booleans[ch]) for ch, val in br.items()
                )
                for br in toShow.cnf)
            prefix = keyword.format(indent * level, clauses)
        inner = renderToC(child, booleans, newLevel)
        if inner:
            previousWasShown = True
            if prefix:
                out.append(prefix)
            out.extend(inner)
            if prefix:
                out.append('{}}}'.format(indent * level))
        else:
            previousWasShown = False
    return out
| StarcoderdataPython |
1623019 | from random import random, shuffle
from copy import deepcopy
from math import sqrt
from utils.func import get_k_idx
class GAMutation(object):
    """Abstract base class for GA mutation operators.

    Subclasses override mutate() to alter an individual's genes in place.
    """

    def __init__(self, mutation_rate=0.02):
        # Expected fraction of genes disturbed per call to mutate().
        self.mutation_rate = mutation_rate

    def mutate(self, individual):
        """Mutate the given individual in place; must be overridden."""
        raise NotImplementedError
class FlipBitMutation(GAMutation):
    """Mutation that repeatedly swaps two randomly chosen genes."""

    def mutate(self, individual):
        gene_count = len(individual.genes)
        budget = self.mutation_rate * gene_count
        spent = 0
        while spent < budget:
            first, second = get_k_idx(0, max_val=gene_count)
            individual.swap(first, second)
            # One swap is counted as touching four gene positions.
            spent += 4
class InverseMutation(GAMutation):
    """Mutation that reverses a random contiguous slice of the genome."""

    def mutate(self, individual):
        gene_count = len(individual.genes)
        budget = self.mutation_rate * gene_count
        spent = 0
        while spent < budget:
            lo, hi = get_k_idx(0, gene_count, k=2, sort=True)
            # Reverse genes[lo..hi] in place by swapping from both ends.
            while lo < hi:
                individual.genes[lo], individual.genes[hi] = (
                    individual.genes[hi], individual.genes[lo])
                lo += 1
                hi -= 1
            spent += 2
class FlipInverseMutation(GAMutation):
    """Mutation mixing gene swaps and slice reversals, chosen at random."""

    def mutate(self, individual):
        gene_count = len(individual.genes)
        budget = self.mutation_rate * gene_count
        spent = 0
        while spent < budget:
            if random() < 0.5:
                # Swap two random genes.
                first, second = get_k_idx(0, max_val=gene_count)
                individual.swap(first, second)
                spent += 4
            else:
                # Reverse a random slice in place.
                lo, hi = get_k_idx(0, gene_count, k=2, sort=True)
                while lo < hi:
                    individual.genes[lo], individual.genes[hi] = (
                        individual.genes[hi], individual.genes[lo])
                    lo += 1
                    hi -= 1
                spent += 2
class WarmUpFlipInverseMutation(GAMutation):
    """FlipInverseMutation whose rate follows a warm-up-then-decay schedule."""

    def __init__(self, mutation_rate, warm_up=800, base=5):
        super().__init__(mutation_rate)
        self.warm_up = warm_up  # steps of linear ramp-up
        self.step = 1           # number of mutate() calls performed so far
        self.base = base        # peak scale of the schedule

    def adjust_mutation_rate(self):
        """Recompute the rate: linear warm-up, then inverse-sqrt decay."""
        i = self.step
        ramp = i / (self.warm_up * sqrt(self.warm_up))
        decay = 1. / sqrt(i)
        self.mutation_rate = self.base * min(decay, ramp)

    def mutate(self, individual):
        gene_count = len(individual.genes)
        budget = self.mutation_rate * gene_count
        spent = 0
        while spent < budget:
            if random() < 0.5:
                # Swap two random genes.
                first, second = get_k_idx(0, max_val=gene_count)
                individual.swap(first, second)
                spent += 4
            else:
                # Reverse a random slice in place.
                lo, hi = get_k_idx(0, gene_count, k=2, sort=True)
                while lo < hi:
                    individual.genes[lo], individual.genes[hi] = (
                        individual.genes[hi], individual.genes[lo])
                    lo += 1
                    hi -= 1
                spent += 2
        # Advance the schedule after each full mutation pass.
        self.adjust_mutation_rate()
        self.step += 1
class InsertMutation(GAMutation):
    """Mutation that moves one random gene to just after another position."""

    def mutate(self, individual):
        genes = individual.genes
        gene_count = len(genes)
        budget = self.mutation_rate * gene_count
        spent = 0
        while spent < budget:
            lo, hi = get_k_idx(0, gene_count, k=2, sort=True)
            # Reassemble the genome with genes[hi] inserted right after
            # genes[lo]; everything else keeps its relative order.
            reordered = (list(genes[:lo + 1]) + [genes[hi]] +
                         list(genes[lo + 1:hi]) + list(genes[hi + 1:]))
            for position, gene in enumerate(reordered):
                genes[position] = gene
            spent += 2
class ScrambleMutation(GAMutation):
    """Mutation that randomly permutes the genes inside a random slice.

    Bug fix: the original built and shuffled ``rand_idx`` but never used it,
    then copied the old genes back in their original order — so the operator
    was a silent no-op.  The shuffled index order is now actually applied to
    the slice [start_at, finish_at].
    """

    def mutate(self, individual):
        gene_count = len(individual.genes)
        budget = self.mutation_rate * gene_count
        spent = 0
        while spent < budget:
            start_at, finish_at = get_k_idx(0, gene_count, k=2, sort=True)
            rand_idx = list(range(start_at, finish_at + 1))
            shuffle(rand_idx)
            # Pull the genes out in shuffled order...
            scrambled = [individual.genes[i] for i in rand_idx]
            # ...and write them back over the slice in positional order.
            for offset, gene in enumerate(scrambled):
                individual.genes[start_at + offset] = gene
            spent += 2
# Registry of available mutation operators, keyed by the name accepted in
# the --mutation_type CLI argument; resolved by get_mutation().
name2mutation = {
'FlipBitMutation': FlipBitMutation,
'InverseMutation': InverseMutation,
'FlipInverseMutation': FlipInverseMutation,
'WarmUpFlipInverseMutation': WarmUpFlipInverseMutation,
'InsertMutation': InsertMutation,
'ScrambleMutation': ScrambleMutation,
}
def get_mutation(args):
    """Instantiate the mutation operator named by args.mutation_type.

    Raises ValueError for an unknown name.  WarmUpFlipInverseMutation also
    consumes the warm-up schedule arguments; every other operator only
    takes the mutation rate args.mr.
    """
    mutation_type = args.mutation_type
    if mutation_type not in name2mutation:
        # Bug fix: the original omitted the '%' operator, so the two string
        # literals were concatenated and used as the join separator, producing
        # a garbled message instead of interpolating the supported names.
        raise ValueError('Only support mutation type: %s'
                         % ','.join(list(name2mutation.keys())))
    print('Using Mutation: %s' % mutation_type)
    Mutation = name2mutation[mutation_type]
    if mutation_type == 'WarmUpFlipInverseMutation':
        return Mutation(mutation_rate=args.mr, warm_up=args.m_warm_up, base=args.m_base)
    return Mutation(args.mr)
| StarcoderdataPython |
3266109 | #!/usr/bin/env python
"""
createCopyNumberStatsFacetedPlot.py
5 April 2011
dent earl dearl (a) soe ucsc edu
used in the assemblathon report project to
create a plot of excess, deficient and total copy bases
from a single copy stats xml file.
"""
##############################
# Copyright (C) 2009-2011 by
# <NAME> (<EMAIL>, <EMAIL>)
# <NAME> (<EMAIL>, <EMAIL>)
# <NAME> (<EMAIL>)
# ... and other members of the Reconstruction Team of David Haussler's
# lab (BME Dept. UCSC).
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
##############################
import glob
import libAssemblySubset as las
import libGeneral as lgn
from libMafGffPlot import Data
import libPlotting as lpt
import matplotlib.lines as lines
import matplotlib.patches as patches
import matplotlib.pylab as pylab
from matplotlib.ticker import MultipleLocator, FormatStrFormatter, LogLocator, LogFormatter # minor tick marks
import numpy
from optparse import OptionParser
import os
import sys
import xml.etree.ElementTree as ET
import xml.parsers.expat as expat # exception handling for empty xml
class CopyNumberStat:
    """Per-assembly copy-number error proportions.

    Holds the deficient, excess, and summed error proportions at the upper
    (*_0.xml) and lower (*_1000.xml) thresholds; -1.0 marks a value that
    has not been loaded yet.
    """
    def __init__(self):
        self.name = ''
        self.defUpper = self.defLower = -1.0
        self.excUpper = self.excLower = -1.0
        self.sumUpper = self.sumLower = -1.0
def initOptions(parser):
    """Register this script's command-line options on the given parser."""
    parser.add_option('--dir', dest='dir', type='string',
                      help=('Location of all upper (_0.xml) and lower (_1000.xml) files.'))
    parser.add_option('--title', dest='title', type='string',
                      default='Copy Statistics',
                      help='Title placed at the top of the plot. default=%default')
    parser.add_option('--log', dest='log', default=False, action='store_true',
                      help='Turns on log scale y axes. default=%default')
    parser.add_option('--outputRanks', dest='outputRanks', default=False,
                      action='store_true',
                      help='Prints out rankings in tab delimited format. default=%default')
    parser.add_option('--markers', dest='markers', default=False,
                      action='store_true',
                      help='Turns on filled markers for lower values, open markers for uppers. default=%default')
def checkOptions(args, options, parser):
    """Validate the parsed command line; parser.error() exits on failure."""
    if args:
        parser.error('unanticipated arguments: %s.\n' % args)
    if options.dir is None:
        parser.error('specify --dir\n')
    if not os.path.exists(options.dir):
        parser.error('--dir %s does not exist!\n' % (options.dir))
    if not os.path.isdir(options.dir):
        parser.error('--dir %s is not a directory!\n' % (options.dir))
    # Normalize to an absolute path for all later file lookups.
    options.dir = os.path.abspath(options.dir)
def establishAxes(fig, options, data):
    """Create the three stacked facet axes ('def', 'exc', 'sum') on fig.

    Also records the layout geometry on options and the axes dict on data.
    (Python 2 code: dict.iteritems is intentional.)
    """
    axDict = {}
    options.axLeft = 0.09
    options.axRight = 0.97
    options.axWidth = options.axRight - options.axLeft
    options.axBottom = 0.08
    options.axTop = 0.96
    options.axHeight = options.axTop - options.axBottom
    margin = 0.11
    facetHeight = (options.axHeight - 2.0 * margin) / 3.0
    # Facets are laid out bottom-up: deficient, excess, then sum on top.
    yPos = 0.0
    for ax in ['def', 'exc', 'sum']:
        axDict[ax] = fig.add_axes([options.axLeft, options.axBottom + yPos,
                                   options.axWidth, facetHeight])
        axDict[ax].yaxis.set_major_locator(pylab.NullLocator())
        axDict[ax].xaxis.set_major_locator(pylab.NullLocator())
        yPos += facetHeight + margin
    for ax in axDict:
        for loc, spine in axDict[ax].spines.iteritems():
            if loc in ['left', 'bottom']:
                spine.set_position(('outward', 10))  # push spine outward by 10 points
            elif loc in ['right', 'top']:
                spine.set_color('none')  # don't draw this spine
            else:
                raise ValueError('unknown spine location: %s' % loc)
        # Only draw ticks where a spine remains.
        axDict[ax].xaxis.set_ticks_position('bottom')
        if options.log:
            axDict[ax].yaxis.set_ticks_position('both')
        else:
            axDict[ax].yaxis.set_ticks_position('left')
    data.axDict = axDict
    return (axDict)
def drawLegend(options, data):
    """Placeholder: no legend is drawn for this plot."""
    pass
def drawAxisLabels(axDict, cDict, options, data):
    """Placeholder: axis labels are handled inside drawData()."""
    pass
def setAxisLimits(axDict, options, data):
    """Placeholder: axis limits are set inside drawData()."""
    pass
def value(s, v):
    """Return attribute v of stat object s.

    v must name one of the six copy-error fields; anything else prints an
    error to stderr and exits the program.
    """
    known = ('sumUpper', 'sumLower', 'excUpper', 'excLower',
             'defUpper', 'defLower')
    if v in known:
        return getattr(s, v)
    sys.stderr.write('Unrecognized value: %s\n' % v)
    sys.exit(1)
def drawData(axDict, sList, options, data):
    """Plot the lower/upper error bars for every assembly on each facet.

    (Python 2 code: xrange and sys.maxint are intentional.)
    """
    lGray = (0.8, 0.8, 0.8)
    for ax in ['sum', 'exc', 'def']:
        xNames = []
        yMax = 0
        yMin = sys.maxint
        if options.log:
            axDict[ax].set_yscale('log')
        # Find this facet's value range across all assemblies.
        for s in sList:
            if yMax < float(value(s, '%sUpper' % ax)):
                yMax = float(value(s, '%sUpper' % ax))
            if yMin > float(value(s, '%sLower' % ax)):
                yMin = float(value(s, '%sLower' % ax))
        # Dotted vertical guide line at every fifth assembly column.
        for i in xrange(1, len(sList) + 1):
            if not i % 5:
                axDict[ax].add_line(lines.Line2D(xdata=[i, i],
                                                 ydata=[yMin, yMax * 1.1],
                                                 color=lGray,
                                                 linestyle='dotted'))
        i = 0
        for s in sList:
            i += 1
            if options.markers:
                # Filled marker marks the lower bound, open marker the upper.
                axDict[ax].add_line(lines.Line2D(xdata=[i],
                                                 ydata=[value(s, '%sLower' % ax)],
                                                 marker='o',
                                                 markerfacecolor='#1f77b4',
                                                 markeredgecolor='#1f77b4',
                                                 markersize=4.0))
                axDict[ax].add_line(lines.Line2D(xdata=[i],
                                                 ydata=[value(s, '%sUpper' % ax)],
                                                 markeredgecolor='#1f77b4',
                                                 marker='o',
                                                 markerfacecolor='none',
                                                 markersize=4.0))
            # Thick bar connecting the lower and upper values.
            axDict[ax].add_line(lines.Line2D(xdata=[i, i],
                                             ydata=[value(s, '%sLower' % ax),
                                                    value(s, '%sUpper' % ax)],
                                             color='#1f77b4', linewidth=4.0,
                                             solid_capstyle='round'))
            if options.subsetFile:
                xNames.append(lgn.idMap[s.name[0]])
            else:
                xNames.append(lgn.idMap[s.name[0]] + '.' + s.name[1:])
        axDict[ax].set_xlim(0, len(xNames) + 1)
        if ax != 'exc':
            # Labeled, rotated assembly names on the bottom two facets.
            axDict[ax].set_xticks(range(1, len(xNames) + 1))
            axDict[ax].set_xticklabels(xNames)
            for tick in axDict[ax].xaxis.get_major_ticks():
                if options.subsetFile:
                    tick.label1.set_fontsize(12)
                else:
                    tick.label1.set_fontsize(6)
            for label in axDict[ax].xaxis.get_ticklabels():
                label.set_rotation(45)
        else:
            axDict[ax].set_xticks(range(1, len(xNames) + 1))
            axDict[ax].set_xticklabels([])
        axDict[ax].set_ylim([yMin * 0.9, yMax * 1.1])
        # Dotted horizontal grid at the major tick positions.
        mts = axDict[ax].yaxis.get_majorticklocs()
        for m in mts:
            axDict[ax].add_line(lines.Line2D(xdata=[1, len(sList)],
                                             ydata=[m, m],
                                             linewidth=1,
                                             color=lGray,
                                             linestyle='dotted'))
    axDict['sum'].set_title('Sum of Proportional Copy Errors')
    axDict['exc'].set_title('Proportional Excess Copy Errors')
    axDict['def'].set_title('Proportional Deficient Copy Errors')
def readFiles(options):
    """Load upper (*_0.xml) and lower (*_1000.xml) stats from options.dir.

    Returns a dict of CopyNumberStat keyed by assembly name, keeping only
    assemblies for which both bounds were successfully parsed.  Empty or
    malformed XML files are silently skipped.
    """
    ups = glob.glob(os.path.join(options.dir, '*_0.xml'))
    los = glob.glob(os.path.join(options.dir, '*_1000.xml'))
    stats = {}
    for upper_path in ups:
        c = CopyNumberStat()
        c.name = os.path.basename(upper_path).split('.')[0]
        if options.subsetFile:
            if c.name not in options.assemblySubset:
                continue
        try:
            xmlTree = ET.parse(upper_path)
        except expat.ExpatError:  # empty xml file
            continue
        root = xmlTree.getroot()
        elm = root.find('excessCopyNumberCounts')
        c.excUpper = float(elm.attrib['totalProportionOfColumns'])
        elm = root.find('deficientCopyNumberCounts')
        c.defUpper = float(elm.attrib['totalProportionOfColumns'])
        c.sumUpper = c.excUpper + c.defUpper
        stats[c.name] = c
    for lower_path in los:
        name = os.path.basename(lower_path).split('.')[0]
        if name not in stats:
            continue
        try:
            xmlTree = ET.parse(lower_path)
        except expat.ExpatError:  # empty xml file
            continue
        root = xmlTree.getroot()
        elm = root.find('excessCopyNumberCounts')
        stats[name].excLower = float(elm.attrib['totalProportionOfColumns'])
        elm = root.find('deficientCopyNumberCounts')
        stats[name].defLower = float(elm.attrib['totalProportionOfColumns'])
        stats[name].sumLower = stats[name].excLower + stats[name].defLower
    # Discard assemblies missing a lower-bound value.
    validStats = {}
    for s in stats:
        if stats[s].excLower == -1.0 or stats[s].defLower == -1 or stats[s].sumLower == -1.0:
            continue
        validStats[s] = stats[s]
    return validStats
def rankings(sortedOrder, options, data):
    """Print a tab-delimited ranking table of all assemblies to stdout."""
    print ('#Assembly\tSum Errors Lower\tSum Errors Upper\t'
           'Excess Errors Lower\tExcess Errors Upper\t'
           'Deficient Errors Lower\tDeficient Errors Upper')
    for s in sortedOrder:
        sys.stdout.write('%s' % s.name)
        for v in [s.sumLower, s.sumUpper, s.excLower, s.excUpper,
                  s.defLower, s.defUpper]:
            sys.stdout.write('\t%s' % v)
        sys.stdout.write('\n')
def main():
    """Parse options, read the stats directory, and plot or print rankings."""
    usage = ('usage: %prog [options] --dir=path/to/dir/\n\n'
             '%prog takes in a copy statistics file\n'
             'and creates an image file.')
    data = Data()
    parser = OptionParser(usage=usage)
    initOptions(parser)
    las.initOptions(parser)
    lpt.initOptions(parser)
    options, args = parser.parse_args()
    checkOptions(args, options, parser)
    las.checkOptions(options, parser)
    lpt.checkOptions(options, parser)
    # Ranks-only mode never needs a figure.
    if not options.outputRanks:
        fig, pdf = lpt.initImage(8.0, 10.0, options, data)
    stats = readFiles(options)
    sortedOrder = sorted(stats.values(), key=lambda x: x.sumLower, reverse=False)
    if options.outputRanks:
        rankings(sortedOrder, options, data)
        return
    axDict = establishAxes(fig, options, data)
    drawData(axDict, sortedOrder, options, data)
    drawLegend(options, data)
    drawAxisLabels(axDict, stats, options, data)
    setAxisLimits(axDict, options, data)
    lpt.writeImage(fig, pdf, options)


if __name__ == '__main__':
    main()
| StarcoderdataPython |
3382974 | from collections import defaultdict
import csv
import json
import sys
def load_csv(file):
with open(file) as f:
yield from csv.DictReader(f)
def json_schema(schema_path):
fields = load_csv(schema_path + "field.csv")
enums = defaultdict(list)
for enum in load_csv(schema_path + "enum.csv"):
enums[enum["field"]].append(enum["value"])
output_fields = []
for field in fields:
if field["datatype"] == "uri":
type_ = "string"
format_ = "uri"
else:
type_ = field["datatype"]
format_ = None
new_row = {
"name": field["field"],
"title": field["name"],
"description": field["description"],
"type": type_,
}
if format_:
new_row["format"] = format_
if field["field"] in enums:
new_row["constraints"] = {"enum": enums[field["field"]]}
output_fields.append(new_row)
json.dump({"fields": output_fields}, sys.stdout)
| StarcoderdataPython |
170309 | <gh_stars>1-10
# Use dictionary
# 1-) Herhangi bir düğümü seçin, bitişikteki ziyaret edilmemiş köşeyi ziyaret edin,
# ziyaret edildi olarak işaretleyin, görüntüleyin ve bir sıraya ekleyin.
# 2-) Kalan bitişik tepe noktası yoksa, ilk tepe noktasını kuyruktan çıkarın
# 3-) Sıra boşalana veya istenen düğüm bulunana kadar 1. ve 2. adımları tekrarlayın.
# Time Complexity
# Since all of the nodes and vertices are visited, the time complexity for BFS on a graph is O(V + E);
# where V is the number of vertices and E is the number of edges.
# Adjacency-list representation of the example directed graph.
graph = {
    'A': ['B', 'C'],
    'B': ['D', 'E'],
    'C': ['F'],
    'D': [],
    'E': ['F'],
    'F': []
}

visited = []  # nodes seen so far, in discovery order
queue = []    # FIFO frontier of nodes awaiting expansion


def bfs(node):
    """Breadth-first traversal from node, printing nodes in visit order.

    Time complexity is O(V + E) since every vertex and edge is examined once.
    """
    visited.append(node)
    queue.append(node)
    while queue:
        current = queue.pop(0)
        print(current, end=" ")
        for neighbour in graph[current]:
            if neighbour not in visited:
                visited.append(neighbour)
                queue.append(neighbour)
# Driver Code
# Start the traversal at node "D" (lower-case input normalized to upper).
bfs("d".upper())
| StarcoderdataPython |
1618572 | '''
Created on Apr 24, 2013
@author: Chris
'''
import group, unit, trait, hexcrawl.player_type as player_type
import random
# Movement/engagement range passed to the AI group for every horde.
HORDE_RANGE = 3
# Aggression level passed to the AI group for every horde.
HORDE_AGGRESSION = 3
# Game week at which hordes of each level (index + 1) may first spawn.
HORDE_DELAY_BY_LEVEL = [3, 8, 14, 20, 24, 28, 32, 38]
# Registry of HordeType instances keyed by name; filled elsewhere.
horde_types_by_name = {}
class HordeType(object):
    """Static description of a horde variant: leader, level, and unit list."""

    def __init__(self, name, leader_name, level, unit_name_list):
        self.name = name
        self.leader_name = leader_name
        self.level = level
        self.unit_type_names = unit_name_list

    def get_goal_site_name(self):
        # NOTE(review): self.goal_name is never assigned in this class, so
        # calling this before something sets it raises AttributeError —
        # confirm intended usage with the callers.
        return self.goal_name
return self.goal_name
def horde_spawnable(level, turn):
return turn.week >= HORDE_DELAY_BY_LEVEL[level - 1]
# certain site types have chance to throw off a horde
# certain site types have chance to throw off a horde
class Horde(group.ActiveAIGroup):
    """An AI-controlled raiding group spawned from a site."""

    def __init__(self, site, game_map, actors):
        self.site = site
        owner = site.get_owner()
        level = site.get_level()
        super(Horde, self).__init__(owner, HORDE_RANGE, HORDE_AGGRESSION,
                                    owner.get_goal_funcs(),
                                    constraint_funcs=player_type.horde_constraints)
        # Pick a random horde variant matching the spawning site's level.
        candidates = [horde_type for horde_type in horde_types_by_name.values()
                      if horde_type.level == level]
        self.type = random.choice(candidates)
        self.update_goal(game_map, actors)
        self.leader = unit.Unit(unit.unit_types_by_name[self.type.leader_name])
        self.leader.set_trait(trait.MOUNTAINEER, True)
        self.add_unit(self.leader)
        self.add_unit_packet()
        self.reputation_value = self.get_level() * 2

    def add_unit_packet(self):
        """Append one batch of units, keeping the leader last in line."""
        prev_leader_index = self.num_units() - 1
        for unit_type_name in self.type.unit_type_names:
            if not self.is_full():
                recruit = unit.Unit(unit.unit_types_by_name[unit_type_name])
                recruit.set_trait(trait.MOUNTAINEER, True)
                self.add_unit(recruit)
        new_leader_index = self.num_units() - 1
        # Move the leader back to the end of the line.
        self.move_unit(prev_leader_index, new_leader_index)

    def get_center_hex(self):
        return self.curr_hex

    def has_goal(self):
        return self.goal.is_active()

    def get_site(self):
        return self.site

    def get_goal(self):
        return self.goal.hex_loc

    def update_goal(self, game_map, actors):
        """Retarget the horde at a random site owned by one of the actors."""
        owned_sites = game_map.get_sites(
            filter_func=lambda curr_site: curr_site.get_owner() in actors)
        self.goal = random.choice(owned_sites)

    def get_target_player(self):
        return self.goal.get_owner()

    def get_level(self):
        return self.type.level

    def start_turn(self, turn_number, hex_map):
        super(Horde, self).start_turn(turn_number, hex_map)
        self.clear_dead_units()
        # While the leader lives, fresh units flock to the horde each turn.
        if self.leader.is_alive():
            self.add_unit_packet()
| StarcoderdataPython |
3268624 | # -- coding:utf-8--
def hi(name,age,sex = "无性别的"):
print("hello!" + name)
print("你今年" + age + "岁了")
print("你是个" + sex + "孩子")
hi("宫逸君","15","女")
hi(age = "150" , name = '汪文博', sex = "狼")#先列出没有预设的
hi("汪文博","233333")
| StarcoderdataPython |
127214 | import _curses
from yacui import View
from fatjira import IncrementalSearch
from fatjira.views import IssueView
def extract_issue(issue):
    """Flatten a Jira issue into one searchable text line (experimental)."""
    fields = issue['fields']
    assignee = "@" + fields['assignee']['name'] if fields['assignee'] else "none"
    reporter = "rep=" + fields['reporter']['name'] if fields['reporter'] else "none"
    # Status is upper-cased with spaces removed so it matches as one token.
    status = "st=" + fields['status']['name'].upper().replace(" ", "")
    tokens = [
        issue['key'],
        "k=" + issue['key'],
        fields['summary'] or "",
        fields['description'] or "",
        assignee,
        reporter,
        status,
        "t=" + fields['issuetype']['name'],
        # Components?
    ]
    return " ".join(tokens)
class SearchView(View):
    """Incremental-search view over all locally cached Jira issues.

    Fix: removed a dead ``msg = f"..."`` local in redraw() whose value was
    never used (the key and summary are drawn with separate addstr calls).
    """

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # Index is not "sticky" when selection changes.
        self.selected_idx = 0
        self.query = ""
        self.results = []
        # TEMP
        with self.app.debug.time("Load all issues"):
            self.all_issues = self.app.jira.all_cached_issues()
        with self.app.debug.time("Initiate search"):
            self.search = IncrementalSearch(self.all_issues, extract_issue)

    def _update_search_state(self):
        """Re-run the search for the current query and clamp the selection."""
        self.search.search(self.query)
        self.results = self.search.get_results()
        if self.selected_idx > len(self.results):
            self.selected_idx = len(self.results)
        self.app.debug.log("cached queries:" + repr(list(self.search.cache.keys())))

    def redraw(self, wnd: _curses.window):
        """Refresh the view display."""
        lines, cols = wnd.getmaxyx()
        wnd.erase()
        msg = "Incremental search: " + self.query
        wnd.addstr(0, 0, msg)
        cursor_position = len(msg)
        self._update_search_state()
        # Right-aligned "matched/total" counter on the query line.
        msg = "{}/{}".format(len(self.results), len(self.all_issues))
        wnd.addstr(0, cols - len(msg), msg)
        if self.selected_idx >= len(self.results):
            self.selected_idx = max(0, len(self.results) - 1)
        line = 1
        max_summary = cols - 10 - 5
        for i, result in enumerate(self.results):
            if i == self.selected_idx:
                th_key = self.app.theme.ISSUE_KEY_SELECTED
                th_summary = self.app.theme.ISSUE_SUMMARY_SELECTED
            else:
                th_key = self.app.theme.ISSUE_KEY
                th_summary = self.app.theme.ISSUE_SUMMARY
            # TODO: Unified table generator
            j = result['fields']
            summary = j["summary"]
            if len(summary) > max_summary:
                summary = summary[:max_summary] + "…"
            summary += " " * (cols - len(summary) - 16)
            wnd.addstr(line, 0, "{:15}".format(result['key']), th_key)
            wnd.addstr(line, 15, summary[:cols - 15 - 1], th_summary)
            line += 1
            if line == lines:
                break
        wnd.move(0, cursor_position)

    def on_enter(self):
        """Install this view's key bindings and hints."""
        self.app.bindings.push()
        self.app.bindings.register("M-q", "Back", self.app.display.back)
        self.app.bindings.register("RET", "Select", self.action_select)
        self.app.bindings.register(["C-n", "DOWN"], "Next", self.action_next)
        self.app.bindings.register(["C-p", "UP"], "Previous", self.action_prev)
        self.app.bindings.add_hint("Type to search incrementally")
        self.app.bindings.add_hint("@assignee, rep=reporter, st=status")
        self.app.bindings.add_hint("k=key t=type")
        msg = "You are " + ("online" if self.app.jira.is_connected() else "offline")
        self.app.bindings.add_hint(msg)
        self.app.console.set_cursor(True)

    def on_leave(self):
        """Restore the previous bindings and hide the cursor."""
        self.app.bindings.pop()
        self.app.console.set_cursor(False)

    def action_select(self):
        """Open the issue view for the currently selected search result."""
        if not self.results:
            self.app.display.status("No issue selected.")
            return
        try:
            result = self.results[self.selected_idx]
        except IndexError:
            self.selected_idx = 0
            self.app.display.redraw()
            return
        key = result['key']
        view = IssueView(self.app, key)
        self.app.display.navigate(view)

    def action_next(self):
        self.selected_idx += 1
        self.app.display.redraw()

    def action_prev(self):
        self.selected_idx = max(0, self.selected_idx - 1)
        self.app.display.redraw()

    def keypress(self, key):
        # TODO: Move to shared editor
        if key == "BACKSPACE":
            self.query = self.query[:-1]
        elif key == "M-BACKSPACE":
            # Delete last word.
            pos = self.query.rstrip().rfind(" ")
            if pos == -1:
                pos = 0
            else:
                pos += 1
            self.query = self.query[:pos]
        elif len(key) == 1:
            self.query += key
        else:
            self.app.debug.log(f"Unhandled key: {key}")
        self.app.display.redraw()
| StarcoderdataPython |
1784235 | import os
from mkultra.soft_prompt import SoftPrompt
import torch
import json
def test_json(inference_resources):
    """Round-trip a soft prompt through its JSON representation."""
    model, tokenizer = inference_resources
    # Arrange
    original = SoftPrompt.from_string(" a b c d e f g", model=model, tokenizer=tokenizer)
    # Act
    restored = SoftPrompt.from_json(original.to_json())
    # Assert
    assert torch.equal(original._tensor, restored._tensor)
    assert original._metadata['name'] == restored._metadata['name']
    assert original._metadata['description'] == restored._metadata['description']
def test_json_embedding(inference_resources):
    """A serialized soft prompt survives embedding in a larger JSON document."""
    model, tokenizer = inference_resources
    # Arrange
    original = SoftPrompt.from_string(" a b c d e f g", model=model, tokenizer=tokenizer)
    # Act: store the prompt JSON alongside unrelated data, then reload it.
    with open("TEST.json", mode='w') as handle:
        json.dump({'additional_data': "The quick brown fox jumps over the lazy dog",
                   'sp': original.to_json()}, handle)
    with open("TEST.json", mode='r') as handle:
        document = json.load(handle)
    restored = SoftPrompt.from_json(document['sp'])
    # Assert
    assert torch.equal(original._tensor, restored._tensor)
    assert original._metadata['name'] == restored._metadata['name']
    assert original._metadata['description'] == restored._metadata['description']
    # Teardown
    os.remove("TEST.json")
def test_file_io(inference_resources):
    """Round-trip a soft prompt through to_file()/from_file()."""
    model, tokenizer = inference_resources
    # Arrange
    original = SoftPrompt.from_string(" a b c d e f g", model=model, tokenizer=tokenizer)
    # Act
    original.to_file("TEST.json")
    restored = SoftPrompt.from_file("TEST.json")
    # Assert
    assert torch.equal(original._tensor, restored._tensor)
    assert original._metadata['name'] == restored._metadata['name']
    assert original._metadata['description'] == restored._metadata['description']
    # Teardown
    os.remove("TEST.json")
def test_file_input_only(inference_resources):
    """Loading a checked-in prompt file matches freshly computed embeddings."""
    model, tokenizer = inference_resources
    # How to recreate the test file:
    # sp = SoftPrompt.from_string("TEST", model, tokenizer)
    # sp.to_file("sample_sps/testing/iotest.json")
    # Arrange: embed the same string the file was built from.
    expected = model.get_input_embeddings()(
        tokenizer("TEST", return_tensors="pt").input_ids)
    # Act
    loaded = SoftPrompt.from_file("sample_sps/testing/iotest.json")
    # Assert
    assert torch.equal(loaded._tensor, expected)
4828005 | <reponame>mil-ad/stui<filename>stui/views/nodes.py
import urwid
import stui.widgets as widgets
class NodesTab(object):
    """Placeholder tab for the nodes view (not yet implemented)."""

    def __init__(self, cluster):
        super().__init__()
        self.cluster = cluster
        placeholder = urwid.Filler(urwid.Text("Nodes: Under Construction ..."))
        self.view = widgets.FancyLineBox(placeholder, "Nodes")
| StarcoderdataPython |
89700 | <gh_stars>1-10
#coding:utf-8
#
# id: bugs.core_6144
# title: Inconsistent behaviour of the NEW context variable in AFTER UPDATE OR DELETE triggers
# decription:
# Confirmed problem on: 4.0.0.1607, 3.0.5.33171 and 2.5.9.27143: new.v was assigned to 1 in AD trigger.
# Checked on:
# build 4.0.0.1614: OK, 1.472s.
# build 3.0.5.33172: OK, 0.802s.
# build 2.5.9.27144: OK, 0.376s.
#
# tracker_id: CORE-6144
# min_versions: ['2.5.9']
# versions: 2.5.9
# qmid: None
import pytest
from firebird.qa import db_factory, isql_act, Action
# version: 2.5.9
# resources: None
# No output substitutions are needed for this test.
substitutions_1 = []
# No objects need to exist before the test script runs.
init_script_1 = """"""
db_1 = db_factory(sql_dialect=3, init=init_script_1)
# The script deletes rows and checks inside an AFTER UPDATE OR DELETE
# trigger that NEW.V is NULL for the delete path (CORE-6144).
test_script_1 = """
create table test (id integer not null primary key);
commit;
insert into test (id) values (1);
commit;
alter table test add v integer default 1 not null;
commit;
insert into test (id) values (2);
commit;
create exception exc_not_null_in_AD_trigger 'new.v is NOT null in AD trigger ?!';
commit;
set term ^;
create or alter trigger test_null for test after update or delete as
begin
if (new.v is not null) then -- new.v should be NULL if the trigger runs after DELETE statement
begin
rdb$set_context('USER_SESSION', 'AD_TRIGGER_NEW_V', new.v);
exception exc_not_null_in_AD_trigger;
end
end
^
set term ;^
commit;
delete from test where id = 2; -- no errors
delete from test where id = 1; -- trigger throws exception
set list on;
select rdb$get_context('USER_SESSION', 'AD_TRIGGER_NEW_V') as "new_v value in AD trigger:"
from rdb$database;
"""
act_1 = isql_act('db_1', test_script_1, substitutions=substitutions_1)
# Expected isql output: the context variable was never set, so it is NULL.
expected_stdout_1 = """
new_v value in AD trigger: <null>
"""
@pytest.mark.version('>=2.5.9')
def test_1(act_1: Action):
    """Run the SQL script and compare cleaned isql output to expectation."""
    act_1.expected_stdout = expected_stdout_1
    act_1.execute()
    assert act_1.clean_stdout == act_1.clean_expected_stdout
| StarcoderdataPython |
147631 | <reponame>JasonJW/SinbadCogs
import os
import discord
import asyncio
from discord.ext import commands
from cogs.utils.dataIO import dataIO
from .utils import checks
from cogs.utils.chat_formatting import box, pagify
class PermHandler:
"""
Save myself time with managing an alliance discord
"""
__author__ = "mikeshardmind"
__version__ = "1.5"
def __init__(self, bot):
    self.bot = bot
    # Per-server configuration, persisted as JSON on disk.
    self.settings = dataIO.load_json('data/permhandler/settings.json')
def save_json(self):
    """Persist the current settings dict back to disk."""
    dataIO.save_json("data/permhandler/settings.json", self.settings)
@checks.admin_or_permissions(Manage_server=True)
@commands.group(name="permhandle", aliases=["phandle"],
                pass_context=True, no_pm=True)
async def permhandle(self, ctx):
    # Top-level command group: show help when no subcommand was invoked.
    if ctx.invoked_subcommand is None:
        await self.bot.send_cmd_help(ctx)
def initial_config(self, server_id):
    """Ensure the settings entry for this server exists with every key.

    Bug fix: the 'activated' backfill previously overwrote 'chans' with
    False (clobbering the protected-channel list) instead of setting
    'activated' itself.
    """
    if server_id not in self.settings:
        self.settings[server_id] = {'chans': [],
                                    'roles': [],
                                    'activated': False,
                                    'proles': []
                                    }
    if 'chans' not in self.settings[server_id]:
        self.settings[server_id]['chans'] = []
    if 'roles' not in self.settings[server_id]:
        self.settings[server_id]['roles'] = []
    if 'activated' not in self.settings[server_id]:
        self.settings[server_id]['activated'] = False
    if 'proles' not in self.settings[server_id]:
        self.settings[server_id]['proles'] = []
    if 'floor' not in self.settings[server_id]:
        self.settings[server_id]['floor'] = None
    self.save_json()
@checks.admin_or_permissions(Manage_server=True)
@permhandle.command(name="newrole", pass_context=True, no_pm=True)
async def make_role(self, ctx, *, name):
    """makes a new role and adds it to the priveleged role list"""
    rname = str(name)
    # Only letters and spaces are accepted as guild names.
    if not all(ch.isalpha() or ch.isspace() for ch in rname):
        return await self.bot.say("Guild names must be made "
                                  "of letters and spaces")
    server = ctx.message.server
    new_role = await self.bot.create_role(server, name=rname, hoist=True,
                                          mentionable=True)
    self.settings[server.id]['roles'].append(new_role.id)
    self.save_json()
    await self.validate(server)
    await self.reorder_roles(server)
    await self.bot.say("Guild role made and configured")
@checks.admin_or_permissions(Manage_server=True)
@permhandle.command(name="roleinfo", pass_context=True, no_pm=True)
async def r_info(self, ctx, *, role):
    """get info about a role by name or ID"""
    nameID = str(role).strip().lower()
    matches = [r for r in ctx.message.server.roles
               if r.name.strip().lower() == nameID or r.id == nameID]
    if len(matches) == 1:
        only = matches[0]
        return await self.bot.say("Role name: `{}`\nRole ID: `{}`"
                                  "".format(only.name, only.id))
    if len(matches) > 1:
        output = "Multiple Matches found: "
        for r in matches:
            output += "\nRole name: `{}` | Role ID: `{}`" \
                      "".format(r.name, r.id)
        return await self.bot.say(output)
    await self.bot.say("There was not a match.")
@checks.admin_or_permissions(Manage_server=True)
@permhandle.command(name="configdump", pass_context=True,
                    no_pm=True, hidden=True)
async def configdump(self, ctx):
    """lists current config info"""
    server = ctx.message.server
    self.initial_config(server.id)
    conf = self.settings[server.id]
    # Resolve stored IDs into live channel/role objects.
    channels = [c for c in server.channels if c.id in conf['chans']]
    rls = [r.name for r in server.roles if r.id in conf['roles']]
    pcs = [r.name for r in server.roles if r.id in conf['proles']]
    vcs = [c.name for c in channels if c.type == discord.ChannelType.voice]
    tcs = [c.name for c in channels if c.type == discord.ChannelType.text]
    floor_role = [r.name for r in server.roles if r.id == conf['floor']]
    output = ""
    output += "Priveleged Roles: {}".format(rls)
    output += "\nProtected Roles: {}".format(pcs)
    output += "\nProtected Voice Chats: {}".format(vcs)
    output += "\nProtected Text Chats: {}".format(tcs)
    output += "\nFloor Role: {}".format(floor_role)
    for page in pagify(output, delims=["\n", ","]):
        await self.bot.send_message(ctx.message.author, box(page))
@checks.admin_or_permissions(Manage_server=True)
@permhandle.command(name="roledump", pass_context=True, no_pm=True)
async def roledump(self, ctx):
""" lists roles and their IDs"""
server = ctx.message.server
role_list = server.roles
output = ""
for r in role_list:
output += "\n{} : {}".format(r.name, r.id)
for page in pagify(output, delims=["\n"]):
await self.bot.send_message(ctx.message.author, box(page))
    @checks.admin_or_permissions(Manage_server=True)
    @permhandle.command(name="toggleactive", pass_context=True, no_pm=True)
    async def toggleactive(self, ctx):
        """Toggle enforcement on or off for this server and report the state."""
        server = ctx.message.server
        self.initial_config(server.id)
        self.settings[server.id]['activated'] = \
            not self.settings[server.id]['activated']
        self.save_json()
        # validate() is a no-op while deactivated, so this only reapplies
        # overwrites when the toggle just turned enforcement on.
        await self.validate(server)
        await self.bot.say(
            "Active: {}".format(self.settings[server.id]['activated']))
    @checks.admin_or_permissions(Manage_server=True)
    @permhandle.command(name="addrole", pass_context=True, no_pm=True)
    async def addrole(self, ctx, role_id: str):
        """Add an existing role (by ID) to the privileged role list."""
        server = ctx.message.server
        self.initial_config(server.id)
        # Empty list means the id does not belong to any server role.
        r = [r for r in server.roles if r.id == role_id]
        if not r:
            return await self.bot.say("No such role")
        if role_id in self.settings[server.id]['roles']:
            return await self.bot.say("Already in roles")
        self.settings[server.id]['roles'].append(role_id)
        self.save_json()
        # Reapply channel overwrites and hierarchy ordering for the new role.
        await self.validate(server)
        await self.reorder_roles(server)
        await self.bot.say("Role added.")
    @checks.admin_or_permissions(Manage_server=True)
    @permhandle.command(name="addprole", pass_context=True, no_pm=True)
    async def addprole(self, ctx, role_id: str):
        """Add a protected role: one only members with a privileged role may hold."""
        server = ctx.message.server
        self.initial_config(server.id)
        # Empty list means the id does not belong to any server role.
        r = [r for r in server.roles if r.id == role_id]
        if not r:
            return await self.bot.say("No such role")
        if role_id in self.settings[server.id]['proles']:
            return await self.bot.say("Already in roles")
        self.settings[server.id]['proles'].append(role_id)
        self.save_json()
        await self.validate(server)
        await self.bot.say("Role added.")
@checks.admin_or_permissions(Manage_server=True)
@permhandle.command(name="remrole", pass_context=True, no_pm=True)
async def remrole(self, ctx, role_id: str):
"""remove a priveleged role"""
server = ctx.message.server
self.initial_config(server.id)
r = [r for r in server.roles if r.id == role_id][0]
if not r:
return await self.bot.say("No such role")
if role_id not in self.settings[server.id]['roles']:
return await self.bot.say("Not in roles")
self.settings[server.id]['roles'].remove(role_id)
self.save_json()
await self.validate(server)
await self.bot.say("Role removed.")
if self.settings[server.id]['floor'] is not None:
if server.me.top_role > r:
await self.bot.move_role(server, r, 1)
    @checks.admin_or_permissions(Manage_server=True)
    @permhandle.command(name="remprole", pass_context=True, no_pm=True)
    async def remprole(self, ctx, role_id: str):
        """Remove a role (by ID) from the protected role list."""
        server = ctx.message.server
        self.initial_config(server.id)
        # Empty list means the id does not belong to any server role.
        r = [r for r in server.roles if r.id == role_id]
        if not r:
            return await self.bot.say("No such role")
        if role_id not in self.settings[server.id]['proles']:
            return await self.bot.say("Not in roles")
        self.settings[server.id]['proles'].remove(role_id)
        self.save_json()
        await self.validate(server)
        await self.bot.say("Role removed.")
    @checks.admin_or_permissions(Manage_server=True)
    @permhandle.command(name="addchan", pass_context=True, no_pm=True)
    async def addchan(self, ctx, chan_id: str):
        """Add a channel (by ID) to the restricted channel list."""
        server = ctx.message.server
        self.initial_config(server.id)
        # Empty list means the id does not belong to any server channel.
        c = [c for c in server.channels if c.id == chan_id]
        if not c:
            return await self.bot.say("No such channel")
        if chan_id in self.settings[server.id]['chans']:
            return await self.bot.say("Already in channels")
        self.settings[server.id]['chans'].append(chan_id)
        self.save_json()
        # Apply the privileged-role overwrites to the newly added channel.
        await self.validate(server)
        await self.bot.say("Channel added.")
@checks.admin_or_permissions(Manage_server=True)
@permhandle.command(name="remchan", pass_context=True, no_pm=True)
async def remchan(self, ctx, chan_id: str):
"""remove a restricted channel"""
server = ctx.message.server
self.initial_config(server.id)
c = [c for c in server.channels if c.id == chan_id]
if not c:
return await self.bot.say("No such role")
if chan_id not in self.settings[server.id]['chans']:
return await self.bot.say("Not in channels")
self.settings[server.id]['chans'].remove(chan_id)
self.save_json()
await self.validate(server)
await self.bot.say("Channel removed")
@checks.admin_or_permissions(Manage_server=True)
@permhandle.command(name="setfloor", pass_context=True, no_pm=True)
async def set_floor(self, ctx, role_id: str):
"""sets the role all protected and priveleged roles should be above"""
server = ctx.message.server
self.initial_config(server.id)
r = [r for r in server.roles if r.id == role_id][0]
if not r:
return await self.bot.say("No such role")
self.settings[server.id]['floor'] = r.id
self.save_json()
await self.bot.say("Floor set, I will now validate settings.")
await self.validate(server)
await self.bot.say("Settings validated: you are good to go.")
    @checks.admin_or_permissions(Manage_server=True)
    @permhandle.command(name="validate", pass_context=True, no_pm=True)
    async def manual_validate(self, ctx):
        """Manually reapply channel overwrites and role ordering."""
        await self.validate(ctx.message.server)
        await self.reorder_roles(ctx.message.server)
        await self.bot.say("Permissions Verified")
    @checks.admin_or_permissions(Manage_server=True)
    @permhandle.command(name="audit", pass_context=True,
                        no_pm=True, hidden=True)
    async def manual_audit(self, ctx):
        """Full audit: revalidate overwrites, then strip protected roles from
        members without a privileged role, then fix role ordering.

        Slow by design: the audit sleeps between members (rate limiting).
        """
        await self.bot.say("Verifying role permissions...")
        await self.validate(ctx.message.server)
        await self.bot.say("Permissions Verified")
        await self.bot.say("Checking that nobody has roles they shouldn't\n"
                           "This may take a few minutes")
        await self.audit(ctx.message.server)
        await self.reorder_roles(ctx.message.server)
        await self.bot.say("Audit complete.")
    async def validate(self, server):
        """Reapply permission overwrites on every restricted channel.

        For each restricted voice channel: privileged roles get an explicit
        connect=True overwrite, every other role-overwrite is reset to
        neutral.  Text channels get the same treatment for read_messages.
        No-op while enforcement is deactivated.
        """
        if not self.settings[server.id]['activated']:
            return
        chans = self.settings[server.id]['chans']
        channels = server.channels
        channels = [c for c in channels if c.id in chans]
        roles = self.settings[server.id]['roles']
        role_list = [r for r in server.roles if r.id in roles]
        vchans = [c for c in channels if c.type == discord.ChannelType.voice]
        tchans = [c for c in channels if c.type == discord.ChannelType.text]
        for vchan in vchans:
            # Existing overwrites are (target, overwrite) pairs; reset any
            # non-privileged role's connect permission to neutral (None).
            e_overwrites = vchan.overwrites
            e_roles = [e[0] for e in e_overwrites]
            for e_role in e_roles:
                if e_role not in role_list:
                    overwrite = discord.PermissionOverwrite()
                    overwrite.connect = None
                    await self.bot.edit_channel_permissions(vchan, e_role,
                                                            overwrite)
            for role in role_list:
                overwrite = discord.PermissionOverwrite()
                overwrite.connect = True
                await self.bot.edit_channel_permissions(vchan, role,
                                                        overwrite)
        for tchan in tchans:
            # Same pattern for text channels, keyed on read_messages.
            e_overwrites = tchan.overwrites
            e_roles = [e[0] for e in e_overwrites]
            for e_role in e_roles:
                if e_role not in role_list:
                    overwrite = discord.PermissionOverwrite()
                    overwrite.read_messages = None
                    await self.bot.edit_channel_permissions(tchan, e_role,
                                                            overwrite)
            for role in role_list:
                overwrite = discord.PermissionOverwrite()
                overwrite.read_messages = True
                await self.bot.edit_channel_permissions(tchan, role,
                                                        overwrite)
    async def audit(self, server):
        """Strip protected roles from members who hold no privileged role.

        Fetches offline members first so the whole member list is checked.
        Sleeps one second per member to stay under Discord rate limits,
        which is why callers warn that this can take minutes.
        No-op while enforcement is deactivated.
        """
        if not self.settings[server.id]['activated']:
            return
        await self.validate(server)
        roles = self.settings[server.id]['roles']
        role_list = [r for r in server.roles if r.id in roles]
        proles = self.settings[server.id]['proles']
        await self.bot.request_offline_members(server)
        members = list(server.members)
        for member in members:
            # No privileged role held -> remove every protected role.
            if set(role_list).isdisjoint(member.roles):
                rms = [r for r in member.roles if r.id in proles]
                await self.bot.remove_roles(member, *rms)
            await asyncio.sleep(1)
async def reorder_roles(self, server):
if self.settings[server.id]['floor'] is None:
return
roles = self.settings[server.id]['roles']
role_list = [r for r in server.roles if r.id in roles]
floor_role = [r for r in server.roles
if r.id == self.settings[server.id]['floor']][0]
bot_top_role = server.me.top_role
for role in role_list:
await asyncio.sleep(1)
if role < floor_role:
if floor_role < bot_top_role:
await self.bot.move_role(server, role, floor_role.position)
def check_folder():
    """Ensure the cog's data directory exists."""
    # exist_ok avoids the check-then-create race of the previous
    # os.path.exists() guard and is a no-op when the directory is present.
    os.makedirs('data/permhandler', exist_ok=True)
def check_file():
    """Initialise the settings file with an empty JSON object when needed."""
    f = 'data/permhandler/settings.json'
    # Presumably dataIO.is_valid_json is False for both a missing and a
    # corrupt file -- TODO confirm against Red's dataIO implementation.
    if dataIO.is_valid_json(f) is False:
        dataIO.save_json(f, {})
def setup(bot):
    """Red cog entry point: prepare data storage, then register the cog."""
    check_folder()
    check_file()
    bot.add_cog(PermHandler(bot))
| StarcoderdataPython |
1709167 | <filename>config.py
def hic_ame_keyword(i):
    """Return True when the message is exactly "hic" (any case) or the
    three-part hic emote sequence."""
    message = i['message']
    if message.lower() == "hic":
        return True
    return message == ":_hic1::_hic2::_hic3:"
def hic_ubye_keyword(i):
    """Return True when "hic" appears anywhere in the message, ignoring case.

    A plain substring test is sufficient for the Chinese chat this targets.
    """
    lowered = i['message'].lower()
    return lowered.find("hic") != -1
| StarcoderdataPython |
3298662 | # Generated by Django 3.1.2 on 2021-10-22 23:35
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated Django migration: adds a default (100) and Spanish
    # help_text to abastece.Producto.porcentaje_aporte.

    dependencies = [
        ('abastece', '0014_producto_porcentaje_aporte'),
    ]

    operations = [
        migrations.AlterField(
            model_name='producto',
            name='porcentaje_aporte',
            field=models.IntegerField(default=100, help_text='El porcentaje de aportes que se aplicará al producto.'),
        ),
    ]
| StarcoderdataPython |
37160 | from django.test import LiveServerTestCase
from selenium import webdriver
from selenium.webdriver.chrome.options import Options
from animais.models import Animal
class AnimaisTestCase(LiveServerTestCase):
    """End-to-end (Selenium) test of the animal search page."""

    def setUp(self):
        # Headless Chrome so the suite can run without a display.
        # NOTE(review): 'chromedriver.exe' assumes Windows and a driver in
        # the working directory -- confirm against the project's CI setup.
        chrome_options = Options()
        chrome_options.add_argument('--headless')
        self.browser = webdriver.Chrome(executable_path='chromedriver.exe', chrome_options=chrome_options)
        self.animal = Animal.objects.create(
            nome_animal='Leão',
            predador='Sim',
            venenoso='Não',
            domestico='Não'
        )

    def tearDown(self) -> None:
        self.browser.quit()

    def test_busca_animal(self):
        """Check that a user can search for an animal by name."""
        home_page = self.browser.get(self.live_server_url)
        brand_element = self.browser.find_element_by_css_selector('.navbar')
        self.assertEqual('Busca Animal', brand_element.text)
        buscar_animal_input = self.browser.find_element_by_css_selector('input#buscar-animal')
        self.assertEqual(buscar_animal_input.get_attribute('placeholder'), "Exemplo: leão, urso...")
        # Type "leão" into the search box and submit the form.
        buscar_animal_input.send_keys('leão')
        self.browser.find_element_by_css_selector('form button').click()
        caracteristicas = self.browser.find_elements_by_css_selector('.result-description')
        self.assertGreater(len(caracteristicas), 3)
| StarcoderdataPython |
35498 | <reponame>bhaving07/pyup
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (C) 2013-2017 <NAME> <<EMAIL>>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import argparse
import functools
import importlib
import re
import sys
import gitlab.config
camel_re = re.compile("(.)([A-Z])")
# custom_actions = {
# cls: {
# action: (mandatory_args, optional_args, in_obj),
# },
# }
custom_actions = {}
def register_custom_action(cls_names, mandatory=tuple(), optional=tuple()):
    """Decorator registering a function as a CLI action for the given class
    name(s).

    Populates the module-level ``custom_actions`` mapping as
    ``{final_cls_name: {action-name: (mandatory, optional, in_obj)}}``.
    The decorated function itself is returned unchanged in behaviour.
    """
    def wrap(f):
        @functools.wraps(f)
        def wrapped_f(*args, **kwargs):
            return f(*args, **kwargs)

        # in_obj defines whether the method belongs to the obj or the manager
        in_obj = True
        classes = cls_names
        if type(cls_names) != tuple:
            classes = (cls_names,)
        for cls_name in classes:
            # "FooManager" registers under "Foo" and marks the action as
            # living on the manager rather than on the object.
            final_name = cls_name
            if cls_name.endswith("Manager"):
                final_name = cls_name.replace("Manager", "")
                in_obj = False
            if final_name not in custom_actions:
                custom_actions[final_name] = {}
            # CLI action names use dashes where Python uses underscores.
            action = f.__name__.replace("_", "-")
            custom_actions[final_name][action] = (mandatory, optional, in_obj)
        return wrapped_f

    return wrap
def die(msg, e=None):
    """Write *msg* (optionally with exception detail) to stderr and exit(1)."""
    detail = "%s (%s)" % (msg, e) if e else msg
    sys.stderr.write(detail + "\n")
    sys.exit(1)
def what_to_cls(what):
    """Convert a dashed CLI name ("project-issue") to CamelCase ("ProjectIssue")."""
    return "".join(part.capitalize() for part in what.split("-"))
def cls_to_what(cls):
    """Convert a class name ("ProjectIssue") to its dashed CLI name ("project-issue")."""
    # Same pattern as the module-level camel_re, written inline.
    return re.sub("(.)([A-Z])", r"\1-\2", cls.__name__).lower()
def _get_base_parser(add_help=True):
    """Build the argparse parser holding the global (pre-subcommand) options."""
    parser = argparse.ArgumentParser(
        add_help=add_help, description="GitLab API Command Line Interface"
    )
    parser.add_argument("--version", help="Display the version.", action="store_true")
    parser.add_argument(
        "-v",
        "--verbose",
        "--fancy",
        help="Verbose mode (legacy format only)",
        action="store_true",
    )
    parser.add_argument(
        "-d", "--debug", help="Debug mode (display HTTP requests)", action="store_true"
    )
    parser.add_argument(
        "-c",
        "--config-file",
        action="append",
        help="Configuration file to use. Can be used multiple times.",
    )
    parser.add_argument(
        "-g",
        "--gitlab",
        help=(
            "Which configuration section should "
            "be used. If not defined, the default selection "
            "will be used."
        ),
        required=False,
    )
    parser.add_argument(
        "-o",
        "--output",
        help="Output format (v4 only): json|legacy|yaml",
        required=False,
        choices=["json", "legacy", "yaml"],
        default="legacy",
    )
    parser.add_argument(
        "-f",
        "--fields",
        help=(
            "Fields to display in the output (comma "
            "separated). Not used with legacy output"
        ),
        required=False,
    )
    return parser
def _get_parser(cli_module):
    """Return the base parser extended with *cli_module*'s subcommands."""
    parser = _get_base_parser()
    return cli_module.extend_parser(parser)
def _parse_value(v):
if isinstance(v, str) and v.startswith("@"):
# If the user-provided value starts with @, we try to read the file
# path provided after @ as the real value. Exit on any error.
try:
with open(v[1:]) as fl:
return fl.read()
except Exception as e:
sys.stderr.write("%s\n" % e)
sys.exit(1)
return v
def main():
    """CLI entry point: parse global options, load the API-version-specific
    command module, then dispatch the requested what/action pair."""
    if "--version" in sys.argv:
        print(gitlab.__version__)
        sys.exit(0)
    parser = _get_base_parser(add_help=False)
    # This first parsing step is used to find the gitlab config to use, and
    # load the proper module (v3 or v4) accordingly. At that point we don't
    # have any subparser setup
    (options, args) = parser.parse_known_args(sys.argv)
    try:
        config = gitlab.config.GitlabConfigParser(options.gitlab, options.config_file)
    except gitlab.config.ConfigError as e:
        # A broken/missing config shouldn't prevent plain --help from working.
        if "--help" in sys.argv or "-h" in sys.argv:
            parser.print_help()
            sys.exit(0)
        sys.exit(e)
    cli_module = importlib.import_module("gitlab.v%s.cli" % config.api_version)
    # Now we build the entire set of subcommands and do the complete parsing
    parser = _get_parser(cli_module)
    try:
        # Optional shell completion; ignore if argcomplete isn't installed.
        import argcomplete

        argcomplete.autocomplete(parser)
    except Exception:
        pass
    args = parser.parse_args(sys.argv[1:])
    config_files = args.config_file
    gitlab_id = args.gitlab
    verbose = args.verbose
    output = args.output
    fields = []
    if args.fields:
        fields = [x.strip() for x in args.fields.split(",")]
    debug = args.debug
    action = args.whaction
    what = args.what
    args = args.__dict__
    # Remove CLI behavior-related args
    for item in (
        "gitlab",
        "config_file",
        "verbose",
        "debug",
        "what",
        "whaction",
        "version",
        "output",
    ):
        args.pop(item)
    # Remaining args are the action's parameters; expand @file values.
    args = {k: _parse_value(v) for k, v in args.items() if v is not None}
    try:
        gl = gitlab.Gitlab.from_config(gitlab_id, config_files)
        if gl.private_token or gl.oauth_token or gl.job_token:
            gl.auth()
    except Exception as e:
        die(str(e))
    if debug:
        gl.enable_debug()
    cli_module.run(gl, what, action, args, verbose, output, fields)
    sys.exit(0)
| StarcoderdataPython |
1706178 | <reponame>pbellec/SUITPy
# -*- coding: utf-8 -*-
"""Functions related to the documentation.
@author: maedbhking
docdict contains the standard documentation entries
used across SUITPy (source: Nilearn and Eric Larson and MNE-python team.
https://github.com/mne-tools/mne-python/blob/main/mne/utils/docs.py
"""
import sys
###################################
# Standard documentation entries
#
docdict = dict()
NILEARN_LINKS = {"landing_page": "http://nilearn.github.io"}
NILEARN_LINKS["input_output"] = (
"{}/manipulating_images/input_output.html".format(
NILEARN_LINKS["landing_page"])
)
# Verbose
verbose = """
verbose : :obj:`int`, optional
Verbosity level (0 means no message).
Default={}."""
docdict['verbose'] = verbose.format(1)
docdict['verbose0'] = verbose.format(0)
# Resume
docdict['resume'] = """
resume : :obj:`bool`, optional
Whether to resume download of a partly-downloaded file.
Default=True."""
# Data_dir
docdict['data_dir'] = """
data_dir : :obj:`pathlib.Path` or :obj:`str`, optional
Path where data should be downloaded. By default,
files are downloaded in home directory."""
# URL
docdict['url'] = """
url : :obj:`str`, optional
URL of file to download.
Override download URL. Used for test only (or if you
setup a mirror of the data).
Default=None."""
# Smoothing_fwhm
docdict['smoothing_fwhm'] = """
smoothing_fwhm : :obj:`float`, optional.
If ``smoothing_fwhm`` is not ``None``, it gives
the full-width at half maximum in millimeters
of the spatial smoothing to apply to the signal."""
# Standardize
standardize = """
standardize : :obj:`bool`, optional.
If ``standardize`` is True, the data are centered and normed:
their mean is put to 0 and their variance is put to 1 in the
time dimension.
Default={}."""
docdict['standardize'] = standardize.format('True')
docdict['standardize_false'] = standardize.format('False')
# detrend
docdict['detrend'] = """
detrend : :obj:`bool`, optional
Whether to detrend signals or not."""
# Target_affine
docdict['target_affine'] = """
target_affine : :class:`numpy.ndarray`, optional.
If specified, the image is resampled corresponding to this new affine.
``target_affine`` can be a 3x3 or a 4x4 matrix.
Default=None."""
# Target_shape
docdict['target_shape'] = """
target_shape : :obj:`tuple` or :obj:`list`, optional.
If specified, the image will be resized to match this new shape.
``len(target_shape)`` must be equal to 3.
.. note::
If ``target_shape`` is specified, a ``target_affine`` of shape
``(4, 4)`` must also be given.
Default=None."""
# Low_pass
docdict['low_pass'] = """
low_pass : :obj:`float` or None, optional
Low cutoff frequency in Hertz.
If None, no low-pass filtering will be performed.
Default=None."""
# High pass
docdict['high_pass'] = """
high_pass : :obj:`float`, optional
High cutoff frequency in Hertz.
Default=None."""
# t_r
docdict['t_r'] = """
t_r : :obj:`float` or None, optional
Repetition time, in seconds (sampling period).
Set to ``None`` if not provided.
Default=None."""
# mask_img
docdict['mask_img'] = """
mask_img : Niimg-like object
Object used for masking the data."""
# Memory
docdict['memory'] = """
memory : instance of :class:`joblib.Memory` or :obj:`str`
Used to cache the masking process.
By default, no caching is done. If a :obj:`str` is given, it is the
path to the caching directory."""
# n_parcels
docdict['n_parcels'] = """
n_parcels : :obj:`int`, optional
Number of parcels to divide the data into.
Default=50."""
# random_state
docdict['random_state'] = """
random_state : :obj:`int` or RandomState, optional
Pseudo-random number generator state used for random sampling."""
# Memory_level
memory_level = """
memory_level : :obj:`int`, optional.
Rough estimator of the amount of memory used by caching. Higher value
means more memory for caching.
Default={}."""
docdict['memory_level'] = memory_level.format(0)
docdict['memory_level1'] = memory_level.format(1)
# n_jobs
n_jobs = """
n_jobs : :obj:`int`, optional.
The number of CPUs to use to do the computation. -1 means 'all CPUs'.
Default={}."""
docdict['n_jobs'] = n_jobs.format("1")
docdict['n_jobs_all'] = n_jobs.format("-1")
# img
docdict['img'] = """
img : Niimg-like object
See `input-output <%(input_output)s>`_.
""" % NILEARN_LINKS
# imgs
docdict['imgs'] = """
imgs : :obj:`list` of Niimg-like objects
See `input-output <%(input_output)s>`_.
""" % NILEARN_LINKS
# cut_coords
docdict['cut_coords'] = """
cut_coords : None, a :obj:`tuple` of :obj:`float`, or :obj:`int`, optional
The MNI coordinates of the point where the cut is performed.
- If ``display_mode`` is 'ortho' or 'tiled', this should
be a 3-tuple: ``(x, y, z)``
- For ``display_mode == 'x'``, 'y', or 'z', then these are
the coordinates of each cut in the corresponding direction.
- If ``None`` is given, the cuts are calculated automaticaly.
- If ``display_mode`` is 'mosaic', and the number of cuts is the same
for all directions, ``cut_coords`` can be specified as an integer.
It can also be a length 3 tuple specifying the number of cuts for
every direction if these are different.
.. note::
If ``display_mode`` is 'x', 'y' or 'z', ``cut_coords`` can be
an integer, in which case it specifies the number of
cuts to perform.
"""
# output_file
docdict['output_file'] = """
output_file : :obj:`str`, or None, optional
The name of an image file to export the plot to. Valid extensions
are .png, .pdf, .svg. If ``output_file`` is not None, the plot
is saved to a file, and the display is closed."""
# display_mode
docdict['display_mode'] = """
display_mode : {'ortho', 'tiled', 'mosaic','x',\
'y', 'z', 'yx', 'xz', 'yz'}, optional
Choose the direction of the cuts:
- 'x': sagittal
- 'y': coronal
- 'z': axial
- 'ortho': three cuts are performed in orthogonal
directions
- 'tiled': three cuts are performed and arranged
in a 2x2 grid
- 'mosaic': three cuts are performed along
multiple rows and columns
Default='ortho'."""
# figure
docdict['figure'] = """
figure : :obj:`int`, or :class:`matplotlib.figure.Figure`, or None, optional
Matplotlib figure used or its number. If ``None`` is given, a
new figure is created."""
# axes
docdict['axes'] = """
axes : :class:`matplotlib.axes.Axes`, or 4 tuple\
of :obj:`float`: (xmin, ymin, width, height), optional
The axes, or the coordinates, in matplotlib figure
space, of the axes used to display the plot.
If ``None``, the complete figure is used."""
# title
docdict['title'] = """
title : :obj:`str`, or None, optional
The title displayed on the figure.
Default=None."""
# threshold
docdict['threshold'] = """
threshold : a number, None, or 'auto', optional
If ``None`` is given, the image is not thresholded.
If a number is given, it is used to threshold the image:
values below the threshold (in absolute value) are plotted
as transparent. If 'auto' is given, the threshold is determined
magically by analysis of the image.
"""
# annotate
docdict['annotate'] = """
annotate : :obj:`bool`, optional
If ``annotate`` is ``True``, positions and left/right annotation
are added to the plot. Default=True."""
# draw_cross
docdict['draw_cross'] = """
draw_cross : :obj:`bool`, optional
If ``draw_cross`` is ``True``, a cross is drawn on the plot to indicate
the cut position. Default=True."""
# black_bg
docdict['black_bg'] = """
black_bg : :obj:`bool`, or 'auto', optional
If ``True``, the background of the image is set to be black.
If you wish to save figures with a black background, you
will need to pass facecolor='k', edgecolor='k'
to :func:`matplotlib.pyplot.savefig`."""
# colorbar
docdict['colorbar'] = """
colorbar : :obj:`bool`, optional
If ``True``, display a colorbar on the right of the plots."""
# symmetric_cbar
docdict['symmetric_cbar'] = """
symmetric_cbar : :obj:`bool`, or 'auto', optional
Specifies whether the colorbar should range from ``-vmax`` to ``vmax``
or from ``vmin`` to ``vmax``. Setting to 'auto' will select the latter
if the range of the whole image is either positive or negative.
.. note::
The colormap will always range from ``-vmax`` to ``vmax``.
"""
# cbar_tick_format
docdict['cbar_tick_format'] = """
cbar_tick_format : :obj:`str`, optional
Controls how to format the tick labels of the colorbar.
Ex: use "%%.2g" to display using scientific notation."""
# bg_img
docdict['bg_img'] = """
bg_img : Niimg-like object, optional
See `input_output <%(input_output)s>`_.
The background image to plot on top of.
""" % NILEARN_LINKS
# vmin
docdict['vmin'] = """
vmin : :obj:`float`, optional
Lower bound of the colormap. If ``None``, the min of the image is used.
Passed to :func:`matplotlib.pyplot.imshow`.
"""
# vmax
docdict['vmax'] = """
vmax : :obj:`float`, optional
Upper bound of the colormap. If ``None``, the max of the image is used.
Passed to :func:`matplotlib.pyplot.imshow`.
"""
# bg_vmin
docdict['bg_vmin'] = """
bg_vmin : :obj:`float`, optional
vmin for ``bg_img``."""
# bg_vmax
docdict['bg_vmax'] = """
bg_vmin : :obj:`float`, optional
vmax for ``bg_img``."""
# resampling_interpolation
docdict['resampling_interpolation'] = """
resampling_interpolation : :obj:`str`, optional
Interpolation to use when resampling the image to
the destination space. Can be:
- "continuous": use 3rd-order spline interpolation
- "nearest": use nearest-neighbor mapping.
.. note::
"nearest" is faster but can be noisier in some cases.
"""
# cmap
docdict['cmap'] = """
cmap : :class:`matplotlib.colors.Colormap`, or :obj:`str`, optional
The colormap to use. Either a string which is a name of
a matplotlib colormap, or a matplotlib colormap object."""
# Dimming factor
docdict['dim'] = """
dim : :obj:`float`, or 'auto', optional
Dimming factor applied to background image. By default, automatic
heuristics are applied based upon the background image intensity.
Accepted float values, where a typical span is between -2 and 2
(-2 = increase contrast; 2 = decrease contrast), but larger values
can be used for a more pronounced effect. 0 means no dimming."""
# avg_method
docdict['avg_method'] = """
avg_method : {'mean', 'median', 'min', 'max', custom function}, optional
How to average vertex values to derive the face value:
- ``mean``: results in smooth boundaries
- ``median``: results in sharp boundaries
- ``min`` or ``max``: for sparse matrices
- ``custom function``: You can also pass a custom function
which will be executed though :func:`numpy.apply_along_axis`.
Here is an example of a custom function:
.. code-block:: python
def custom_function(vertices):
return vertices[0] * vertices[1] * vertices[2]
"""
# hemi
docdict['hemi'] = """
hemi : {'left', 'right'}, optional
Hemisphere to display. Default='left'."""
# hemispheres
docdict['hemispheres'] = """
hemispheres : list of :obj:`str`, optional
Hemispheres to display. Default=['left', 'right']."""
# view
docdict['view'] = """
view : {'lateral', 'medial', 'dorsal', 'ventral',\
'anterior', 'posterior'}, optional
View of the surface that is rendered.
Default='lateral'.
"""
# bg_on_data
docdict['bg_on_data'] = """
bg_on_data : :obj:`bool`, optional
If ``True``, and a ``bg_map`` is specified,
the ``surf_data`` data is multiplied by the background
image, so that e.g. sulcal depth is visible beneath
the ``surf_data``.
.. note::
This non-uniformly changes the surf_data values according
to e.g the sulcal depth.
"""
# darkness
docdict['darkness'] = """
darkness : :obj:`float` between 0 and 1, optional
Specifying the darkness of the background image:
- '1' indicates that the original values of the background are used
- '.5' indicates that the background values are reduced by half
before being applied.
"""
# linewidth
docdict['linewidths'] = """
linewidths : :obj:`float`, optional
Set the boundary thickness of the contours.
Only reflects when ``view_type=contours``."""
# fsaverage options
docdict['fsaverage_options'] = """
- 'fsaverage3': the low-resolution fsaverage3 mesh (642 nodes)
- 'fsaverage4': the low-resolution fsaverage4 mesh (2562 nodes)
- 'fsaverage5': the low-resolution fsaverage5 mesh (10242 nodes)
- 'fsaverage5_sphere': the low-resolution fsaverage5 spheres
.. deprecated:: 0.8.0
This option has been deprecated and will be removed in v0.9.0.
fsaverage5 sphere coordinates can now be accessed through
attributes sphere_{left, right} using mesh='fsaverage5'
- 'fsaverage6': the medium-resolution fsaverage6 mesh (40962 nodes)
- 'fsaverage7': same as 'fsaverage'
- 'fsaverage': the high-resolution fsaverage mesh (163842 nodes)
.. note::
The high-resolution fsaverage will result in more computation
time and memory usage
"""
docdict_indented = {}
def _indentcount_lines(lines):
"""Minimum indent for all lines in line list.
>>> lines = [' one', ' two', ' three']
>>> _indentcount_lines(lines)
1
>>> lines = []
>>> _indentcount_lines(lines)
0
>>> lines = [' one']
>>> _indentcount_lines(lines)
1
>>> _indentcount_lines([' '])
0
"""
indentno = sys.maxsize
for line in lines:
stripped = line.lstrip()
if stripped:
indentno = min(indentno, len(line) - len(stripped))
if indentno == sys.maxsize:
return 0
return indentno
def fill_doc(f):
    """Fill a docstring with docdict entries.

    Parameters
    ----------
    f : callable
        The function to fill the docstring of. Will be modified in place.

    Returns
    -------
    f : callable
        The function, potentially with an updated ``__doc__``.
    """
    docstring = f.__doc__
    if not docstring:
        return f
    lines = docstring.splitlines()
    # Find the minimum indent of the main docstring, after first line
    if len(lines) < 2:
        icount = 0
    else:
        icount = _indentcount_lines(lines[1:])
    # Insert this indent to dictionary docstrings
    try:
        # docdict_indented memoizes a re-indented copy of docdict per
        # indent level, so each level is computed only once.
        indented = docdict_indented[icount]
    except KeyError:
        indent = ' ' * icount
        docdict_indented[icount] = indented = {}
        for name, dstr in docdict.items():
            lines = dstr.splitlines()
            try:
                # First line keeps its position; subsequent lines get the
                # docstring's base indent prepended.
                newlines = [lines[0]]
                for line in lines[1:]:
                    newlines.append(indent + line)
                indented[name] = '\n'.join(newlines)
            except IndexError:
                # Empty entry: keep as-is.
                indented[name] = dstr
    try:
        # %-substitution expands %(key)s placeholders in the docstring.
        f.__doc__ = docstring % indented
    except (TypeError, ValueError, KeyError) as exp:
        funcname = f.__name__
        funcname = docstring.split('\n')[0] if funcname is None else funcname
        raise RuntimeError('Error documenting %s:\n%s'
                           % (funcname, str(exp)))
    return f
| StarcoderdataPython |
143644 | '''ESTRUTURA DE REPETIÇÃO WHILE, LAÇOS DE REPETIÇÃO (PARTE 2)'''
# Tutorial script: three small `while` loop demos.  User-facing strings are
# printed in Portuguese on purpose and must stay as-is.
print('*' * 40, '\nEquanto c < 10 faça:')
c = 0
while c < 10:  # while c < 10, do:
    print(c, end=' ')
    c += 1
print('END')
print('*' * 40, '\nEnquanto o valor digitado NÃO for 0 faça:')
n = 1
while n != 0:  # stop condition: the user types 0
    n = int(input('Digite um valor: '))
print('END')
print('*' * 40, '\n:Enquanto não for digitado ZERO(0), conte quantos números são pares e ímpares, e informe no final.')
n = 1
par = impar = 0  # par = even count, impar = odd count
while n != 0:
    n = int(input('Digite um valor: '))
    if n != 0:  # the terminating 0 itself is not counted
        if n % 2 == 0:
            par += 1
        else:
            impar += 1
print('Foram digitados {} números pares, e {} números ímpares.'.format(par, impar))
| StarcoderdataPython |
1609943 | <reponame>rudecs/jumpscale_core7
from JumpScale import j
def cb():
    # Lazy factory: the client module is only imported when the
    # 'openvstorage' client is first requested.
    from .openvstorage import OpenvStorageFactory
    return OpenvStorageFactory()

j.base.loader.makeAvailable(j, 'clients')
j.clients._register('openvstorage', cb)
| StarcoderdataPython |
1764837 | from queue import Queue
# Demo data.  NOTE(review): this relies on a project-local ``queue.Queue``
# exposing a ``.data`` list -- the stdlib Queue has no such attribute;
# confirm which ``queue`` module is on the import path.
myq = Queue([1,2,3,6])
print(myq.data)
myq2 = Queue([4,5,6])
def weave_queues(q_one, q_two):
    """Interleave two queues' items as (q_one, q_two) pairs.

    The shorter queue's ``data`` list is padded in place with 0s so both
    queues contribute a value to every pair (the in-place padding matches
    the original behaviour).

    BUGFIX: the original used an ``if``/``elif`` whose two conditions are
    both true whenever the lengths differ, so when ``q_one`` was the longer
    queue the ``elif`` never ran, ``q_two`` was never padded, and ``zip``
    silently truncated the result.  Two independent padding loops fix both
    directions.
    """
    while len(q_one.data) < len(q_two.data):
        q_one.data.append(0)
    while len(q_two.data) < len(q_one.data):
        q_two.data.append(0)
    return list(zip(q_one.data, q_two.data))
print(weave_queues(myq, myq2)) | StarcoderdataPython |
3245134 | """Checkpoint averaging script."""
import argparse
import tensorflow as tf
from opennmt.utils.checkpoint import average_checkpoints
def main():
tf.logging.set_verbosity(tf.logging.INFO)
parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument("--model_dir", required=True,
help="The model directory containing the checkpoints.")
parser.add_argument("--output_dir", required=True,
help="The output directory where the averaged checkpoint will be saved.")
parser.add_argument("--max_count", type=int, default=8,
help="The maximal number of checkpoints to average.")
args = parser.parse_args()
average_checkpoints(args.model_dir, args.output_dir, max_count=args.max_count)
if __name__ == "__main__":
main()
| StarcoderdataPython |
# repo: adolli/pywinauto
"""
Uninstall script for 7zip 9.20 (64-bit)
Requirements:
- Win7 or Win8.1 x64, 64-bit Python
- pywinauto 0.5.2+
- UAC is fully disabled
"""
from __future__ import print_function
import pywinauto
pywinauto.Application().Start(r'explorer.exe')
explorer = pywinauto.Application().Connect(path='explorer.exe')
# Go to "Control Panel -> Programs and Features"
NewWindow = explorer.window(top_level_only=True, active_only=True, class_name='CabinetWClass')
try:
NewWindow.AddressBandRoot.click_input()
NewWindow.type_keys(r'Control Panel\Programs\Programs and Features{ENTER}',
with_spaces=True, set_foreground=False)
ProgramsAndFeatures = explorer.window(top_level_only=True, active_only=True,
title='Programs and Features', class_name='CabinetWClass')
# wait while the list of programs is loading
explorer.wait_cpu_usage_lower(threshold=5)
item_7z = ProgramsAndFeatures.FolderView.get_item('7-Zip 9.20 (x64 edition)')
item_7z.ensure_visible()
item_7z.click_input(button='right', where='icon')
explorer.PopupMenu.menu_item('Uninstall').click()
Confirmation = explorer.window(title='Programs and Features', class_name='#32770', active_only=True)
if Confirmation.Exists():
Confirmation.Yes.click_input()
Confirmation.wait_not('visible')
WindowsInstaller = explorer.window(title='Windows Installer', class_name='#32770', active_only=True)
if WindowsInstaller.Exists():
WindowsInstaller.wait_not('visible', timeout=20)
SevenZipInstaller = explorer.window(title='7-Zip 9.20 (x64 edition)', class_name='#32770', active_only=True)
if SevenZipInstaller.Exists():
SevenZipInstaller.wait_not('visible', timeout=20)
if '7-Zip 9.20 (x64 edition)' not in ProgramsAndFeatures.FolderView.texts():
print('OK')
finally:
    NewWindow.close()
# Copyright (c) 2019-2020, NVIDIA CORPORATION.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import gc
import pytest
import cugraph
from cugraph.tests import utils
def test_bfs_paths():
with pytest.raises(ValueError) as ErrorMsg:
gc.collect()
graph_file = '../datasets/karate.csv'
cu_M = utils.read_csv_file(graph_file)
G = cugraph.Graph()
G.from_cudf_edgelist(cu_M, source='0', destination='1', edge_attr='2')
# run BFS starting at vertex 17
df = cugraph.bfs(G, 16)
# Get the path to vertex 1
p_df = cugraph.utils.get_traversed_path(df, 0)
assert len(p_df) == 3
# Get path to vertex 0 - which is not in graph
p_df = cugraph.utils.get_traversed_path(df, 100)
assert "not in the result set" in str(ErrorMsg)
def test_bfs_paths_array():
with pytest.raises(ValueError) as ErrorMsg:
gc.collect()
graph_file = '../datasets/karate.csv'
cu_M = utils.read_csv_file(graph_file)
G = cugraph.Graph()
G.from_cudf_edgelist(cu_M, source='0', destination='1', edge_attr='2')
# run BFS starting at vertex 17
df = cugraph.bfs(G, 16)
# Get the path to vertex 1
answer = cugraph.utils.get_traversed_path_list(df, 0)
assert len(answer) == 3
# Get path to vertex 0 - which is not in graph
answer = cugraph.utils.get_traversed_path_list(df, 100)
assert "not in the result set" in str(ErrorMsg)
| StarcoderdataPython |
# gh_stars: 0
# Move: Contains all details about a Piece's move in the game.
# DO NOT MODIFY THIS FILE
# Never try to directly create an instance of this class, or modify its member variables.
# Instead, you should only be reading its variables and calling its functions.
from games.chess.game_object import GameObject
# <<-- Creer-Merge: imports -->> - Code you add between this comment and the end comment will be preserved between Creer re-runs.
# you can add additional import(s) here
# <<-- /Creer-Merge: imports -->>
class Move(GameObject):
"""The class representing the Move in the Chess game.
Contains all details about a Piece's move in the game.
"""
def __init__(self):
"""Initializes a Move with basic logic as provided by the Creer code generator."""
GameObject.__init__(self)
# private attributes to hold the properties so they appear read only
self._captured = None
self._from_file = ""
self._from_rank = 0
self._piece = None
self._promotion = ""
self._san = ""
self._to_file = ""
self._to_rank = 0
@property
def captured(self):
"""The Piece captured by this Move, None if no capture.
:rtype: games.chess.piece.Piece
"""
return self._captured
@property
def from_file(self):
"""The file the Piece moved from.
:rtype: str
"""
return self._from_file
@property
def from_rank(self):
"""The rank the Piece moved from.
:rtype: int
"""
return self._from_rank
@property
def piece(self):
"""The Piece that was moved.
:rtype: games.chess.piece.Piece
"""
return self._piece
@property
def promotion(self):
"""The Piece type this Move's Piece was promoted to from a Pawn, empty string if no promotion occurred.
:rtype: str
"""
return self._promotion
@property
def san(self):
"""The standard algebraic notation (SAN) representation of the move.
:rtype: str
"""
return self._san
@property
def to_file(self):
"""The file the Piece moved to.
:rtype: str
"""
return self._to_file
@property
def to_rank(self):
"""The rank the Piece moved to.
:rtype: int
"""
return self._to_rank
# <<-- Creer-Merge: functions -->> - Code you add between this comment and the end comment will be preserved between Creer re-runs.
# if you want to add any client side logic (such as state checking functions) this is where you can add them
# <<-- /Creer-Merge: functions -->>
| StarcoderdataPython |
# vim: expandtab:ts=4:sw=4
from __future__ import absolute_import
import numpy as np
from sklearn.utils.linear_assignment_ import linear_assignment
import EKF
import pdb
from mbest_ilp import new_m_best_sol
from multiprocessing import Pool
from functools import partial
#from mbest_ilp import m_best_sol as new_m_best_sol
INFTY_COST = 1e+5
def min_marg_matching(marginalizations, track_indices=None, max_distance=1):
cost_matrix = 1 - marginalizations
num_tracks, num_detections = cost_matrix.shape
if track_indices is None:
track_indices = np.arange(num_tracks)
detection_indices = np.arange(num_detections-1)
if num_tracks == 0 or num_detections == 0:
return [], track_indices, detection_indices # Nothing to match.
extra_dummy_cols = np.tile(cost_matrix[:,0,np.newaxis], (1, num_tracks-1))
expanded_cost_matrix = np.hstack((extra_dummy_cols, cost_matrix))
indices = linear_assignment(expanded_cost_matrix)
matches, unmatched_tracks, unmatched_detections = [], [], []
# gather unmatched detections (new track)
for col, detection_idx in enumerate(detection_indices):
if col+num_tracks not in indices[:, 1]:
unmatched_detections.append(detection_idx)
# gather unmatched tracks (no detection)
for row, track_idx in enumerate(track_indices):
if row not in indices[:, 0]:
unmatched_tracks.append(track_idx)
# thresholding and matches
for row, col in indices:
track_idx = track_indices[row]
detection_idx = col - num_tracks
if detection_idx < 0:
unmatched_tracks.append(track_idx)
continue
if expanded_cost_matrix[row, col] > max_distance:
# apply thresholding
unmatched_tracks.append(track_idx)
unmatched_detections.append(detection_idx)
else:
# associate matches
matches.append((track_idx, detection_idx))
return matches, unmatched_tracks, unmatched_detections
def min_cost_matching(
distance_metric, max_distance, tracks, detections, track_indices=None,
detection_indices=None, compare_2d = False, detections_3d=None):
"""Solve linear assignment problem.
Parameters
----------
distance_metric : Callable[List[Track], List[Detection], List[int], List[int]) -> ndarray
The distance metric is given a list of tracks and detections as well as
a list of N track indices and M detection indices. The metric should
return the NxM dimensional cost matrix, where element (i, j) is the
association cost between the i-th track in the given track indices and
the j-th detection in the given detection_indices.
max_distance : float
Gating threshold. Associations with cost larger than this value are
disregarded.
tracks : List[track.Track]
A list of predicted tracks at the current time step.
detections : List[detection.Detection]
A list of detections at the current time step.
track_indices : List[int]
List of track indices that maps rows in `cost_matrix` to tracks in
`tracks` (see description above).
detection_indices : List[int]
List of detection indices that maps columns in `cost_matrix` to
detections in `detections` (see description above).
Returns
-------
(List[(int, int)], List[int], List[int])
Returns a tuple with the following three entries:
* A list of matched track and detection indices.
* A list of unmatched track indices.
* A list of unmatched detection indices.
"""
if track_indices is None:
track_indices = np.arange(len(tracks))
if detection_indices is None:
detection_indices = np.arange(len(detections))
if len(detection_indices) == 0 or len(track_indices) == 0:
return [], track_indices, detection_indices # Nothing to match.
cost_matrix = distance_metric(
tracks, detections, track_indices, detection_indices, compare_2d, detections_3d)
cost_matrix[cost_matrix > max_distance] = max_distance + 1e-5
#print("\n\nCascade Cost Matrix: ", cost_matrix)
indices = linear_assignment(cost_matrix)
matches, unmatched_tracks, unmatched_detections = [], [], []
# gather unmatched detections (new track)
for col, detection_idx in enumerate(detection_indices):
if col not in indices[:, 1]:
unmatched_detections.append(detection_idx)
# gather unmatched trackes (no detection)
for row, track_idx in enumerate(track_indices):
if row not in indices[:, 0]:
unmatched_tracks.append(track_idx)
# thresholding and matches
for row, col in indices:
track_idx = track_indices[row]
detection_idx = detection_indices[col]
if cost_matrix[row, col] > max_distance:
# apply thresholding
unmatched_tracks.append(track_idx)
unmatched_detections.append(detection_idx)
else:
# associate matches
matches.append((track_idx, detection_idx))
return matches, unmatched_tracks, unmatched_detections
# @profile
def JPDA(
distance_metric, dummy_node_cost_app, dummy_node_cost_iou, tracks, detections, track_indices=None,
detection_indices=None, m=1, compare_2d = False, windowing = False):
"""Solve linear assignment problem.
Parameters
----------
distance_metric : Callable[List[Track], List[Detection], List[int], List[int]) -> ndarray
The distance metric is given a list of tracks and detections as well as
a list of N track indices and M detection indices. The metric should
return the NxM dimensional cost matrix, where element (i, j) is the
association cost between the i-th track in the given track indices and
the j-th detection in the given detection_indices.
max_distance : float
Gating threshold. Associations with cost larger than this value are
disregarded.
tracks : List[track.Track]
A list of predicted tracks at the current time step.
detections : List[detection.Detection]
A list of detections at the current time step.
track_indices : List[int]
List of track indices that maps rows in `cost_matrix` to tracks in
`tracks` (see description above).
detection_indices : List[int]
List of detection indices that maps columns in `cost_matrix` to
detections in `detections` (see description above).
Returns
-------
(List[(int, int)], List[int], List[int])
Returns a tuple with the following three entries:
* A list of matched track and detection indices.
* A list of unmatched track indices.
* A list of unmatched detection indices.
"""
if track_indices is None:
track_indices = np.arange(len(tracks))
if detection_indices is None:
detection_indices = np.arange(len(detections))
if len(detection_indices) == 0 or len(track_indices) == 0:
return np.zeros((0, len(detections) + 1)) # Nothing to match.
cost_matrix, gate_mask = distance_metric(
tracks, detections, track_indices, detection_indices, compare_2d)
num_tracks, num_detections = cost_matrix.shape[0], cost_matrix.shape[1]
cost_matrix[gate_mask] = INFTY_COST
# print("\nIOU Cost Matrix:", cost_matrix[:,:,0])
# print("App:", cost_matrix[:,:,1])
clusters = find_clusters(cost_matrix[:,:,0], INFTY_COST - 0.0001)
# print('\n', clusters)
jpda_output = []
for cluster in clusters:
jpda_output.append(get_JPDA_output(cluster, cost_matrix, dummy_node_cost_app, dummy_node_cost_iou, INFTY_COST - 0.0001, m))
if not jpda_output:
mc = np.zeros((num_tracks, num_detections + 1))
mc[:, 0] = 1
return mc
assignments, assignment_cost = zip(*jpda_output)
assignments = np.vstack([item for sublist in assignments for item in sublist])
assignment_cost = np.array([item for sublist in assignment_cost for item in sublist])
marginalised_cost = np.sum(assignments*np.exp(-np.expand_dims(assignment_cost, 1)), axis = 0)
marginalised_cost = np.reshape(marginalised_cost, (num_tracks, num_detections+1))
# print('\n', marginalised_cost)
return marginalised_cost
def calculate_entropy(matrix, idx, idy):
mask = np.ones(matrix.shape)
mask[idx, idy] = 0
entropy = matrix/np.sum(mask*matrix, axis=1, keepdims=True)
entropy = (-entropy*np.log(entropy)) * mask
entropy = np.mean(np.sum(entropy, axis=1))
return entropy
def get_JPDA_output(cluster, cost_matrix, dummy_node_cost_app, dummy_node_cost_iou, cutoff, m):
if len(cluster[1]) == 0:
assignment = np.zeros((cost_matrix.shape[0], cost_matrix.shape[1]+1))
assignment[cluster[0], 0] = 1
assignment = assignment.reshape(1,-1)
return [assignment], np.array([0])
new_cost_matrix_appearance = np.reshape(cost_matrix[np.repeat(cluster[0], len(cluster[1])),
np.tile(cluster[1] - 1, len(cluster[0])),
[0]*(len(cluster[1])*len(cluster[0]))],
(len(cluster[0]), len(cluster[1])))
new_cost_matrix_iou = np.reshape(cost_matrix[np.repeat(cluster[0], len(cluster[1])), np.tile(cluster[1] - 1, len(cluster[0])), 1],
(len(cluster[0]), len(cluster[1])))
idx_x, idx_y = np.where(new_cost_matrix_appearance > cutoff)
appearance_entropy = calculate_entropy(new_cost_matrix_appearance, idx_x, idx_y)
iou_entropy = calculate_entropy(new_cost_matrix_iou, idx_x, idx_y)
if appearance_entropy < iou_entropy:
new_cost_matrix = new_cost_matrix_appearance
new_cost_matrix = 2*np.ones(new_cost_matrix.shape)/(new_cost_matrix+1) - 1
dummy_node_cost = -np.log(2/(dummy_node_cost_app+1) - 1)
else:
new_cost_matrix = new_cost_matrix_iou
new_cost_matrix[new_cost_matrix==1] -= 1e-3
new_cost_matrix = 1 - new_cost_matrix
dummy_node_cost = -np.log(1-dummy_node_cost_iou)
new_cost_matrix = -np.log(new_cost_matrix)
new_cost_matrix[idx_x, idx_y] = cutoff
if len(cluster[0]) == 1:
new_cost_matrix = np.concatenate([np.ones((new_cost_matrix.shape[0], 1))*dummy_node_cost, new_cost_matrix], axis = 1)
total_cost = np.sum(np.exp(-new_cost_matrix))
new_assignment = np.zeros((cost_matrix.shape[0], cost_matrix.shape[1]+1))
new_assignment[np.repeat(cluster[0], len(cluster[1])+1), np.tile(
np.concatenate([np.zeros(1, dtype = np.int32), cluster[1]]), len(cluster[0]))] = np.exp(-new_cost_matrix)/total_cost
new_assignment = new_assignment.reshape(1, -1)
return [new_assignment], np.array([0])
if new_cost_matrix.ndim <= 1:
new_cost_matrix = np.expand_dims(new_cost_matrix, 1)
# print(new_cost_matrix)
assignments, assignment_cost = new_m_best_sol(new_cost_matrix, m, dummy_node_cost)
offset = np.amin(assignment_cost)
assignment_cost -= offset
new_assignments = []
total_cost = np.sum(np.exp(-assignment_cost))
for assignment in assignments:
new_assignment = np.zeros((cost_matrix.shape[0], cost_matrix.shape[1]+1))
new_assignment[np.repeat(cluster[0], len(cluster[1])+1), np.tile(
np.concatenate([np.zeros(1, dtype = np.int32), cluster[1]]), len(cluster[0]))] = \
assignment/total_cost
new_assignments.append(new_assignment.reshape(1, -1))
return new_assignments, assignment_cost
def matching_cascade(
distance_metric, max_distance, cascade_depth, tracks, detections,
track_indices=None, detection_indices=None, compare_2d = False, detections_3d=None):
"""Run matching cascade.
Parameters
----------
distance_metric : Callable[List[Track], List[Detection], List[int], List[int]) -> ndarray
The distance metric is given a list of tracks and detections as well as
a list of N track indices and M detection indices. The metric should
return the NxM dimensional cost matrix, where element (i, j) is the
association cost between the i-th track in the given track indices and
the j-th detection in the given detection indices.
max_distance : float
Gating threshold. Associations with cost larger than this value are
disregarded.
cascade_depth: int
The cascade depth, should be se to the maximum track age.
tracks : List[track.Track]
A list of predicted tracks at the current time step.
detections : List[detection.Detection]
A list of detections at the current time step.
track_indices : Optional[List[int]]
List of track indices that maps rows in `cost_matrix` to tracks in
`tracks` (see description above). Defaults to all tracks.
detection_indices : Optional[List[int]]
List of detection indices that maps columns in `cost_matrix` to
detections in `detections` (see description above). Defaults to all
detections.
Returns
-------
(List[(int, int)], List[int], List[int])
Returns a tuple with the following three entries:
* A list of matched track and detection indices.
* A list of unmatched track indices.
* A list of unmatched detection indices.
"""
if track_indices is None:
track_indices = list(range(len(tracks)))
if detection_indices is None:
detection_indices = list(range(len(detections)))
unmatched_detections = detection_indices
matches = []
for level in range(cascade_depth):
if len(unmatched_detections) == 0: # No detections left
break
track_indices_l = [
k for k in track_indices
if tracks[k].time_since_update == 1 + level
]
if len(track_indices_l) == 0: # Nothing to match at this level
continue
matches_l, _, unmatched_detections = \
min_cost_matching(
distance_metric, max_distance, tracks, detections,
track_indices_l, unmatched_detections, compare_2d, detections_3d=detections_3d)
matches += matches_l
unmatched_tracks = list(set(track_indices) - set(k for k, _ in matches))
return matches, unmatched_tracks, unmatched_detections
# @profile
def gate_cost_matrix(
kf, tracks, detections, track_indices, detection_indices,
gated_cost=INFTY_COST, only_position=False, use3d=False, windowing = False):
"""Invalidate infeasible entries in cost matrix based on the state
distributions obtained by Kalman filtering.
Parameters
----------
kf : The Kalman filter.
cost_matrix : ndarray
The NxM dimensional cost matrix, where N is the number of track indices
and M is the number of detection indices, such that entry (i, j) is the
association cost between `tracks[track_indices[i]]` and
`detections[detection_indices[j]]`.
tracks : List[track.Track]
A list of predicted tracks at the current time step.
detections : List[detection.Detection]
A list of detections at the current time step.
track_indices : List[int]
List of track indices that maps rows in `cost_matrix` to tracks in
`tracks` (see description above).
detection_indices : List[int]
List of detection indices that maps columns in `cost_matrix` to
detections in `detections` (see description above).
gated_cost : Optional[float]
Entries in the cost matrix corresponding to infeasible associations are
set this value. Defaults to a very large value.
only_position : Optional[bool]
If True, only the x, y position of the state distribution is considered
during gating. Defaults to False.
Returns
-------
ndarray
Returns the modified cost matrix.
"""
# assert (len(track_indices) == cost_matrix.shape[0]), "Cost matrix shape does not match track indices"
# assert (len(detection_indices) == cost_matrix.shape[1]), "Cost matrix shape does match detection indices"
if len(track_indices) == 0 or len(detection_indices) == 0:
return None
if use3d:
measurements = np.array([det.box_3d for i, det in enumerate(detections) if i in detection_indices])
else:
measurements = np.asarray(
[detections[i].to_xywh() for i in detection_indices])
if use3d and only_position:
gating_dim = 3
elif use3d:
gating_dim = measurements.shape[1]
elif only_position:
gating_dim = 2
else:
gating_dim = measurements.shape[1]
gating_threshold = EKF.chi2inv95[gating_dim]
gate_mask = []
for track_idx in track_indices:
track = tracks[track_idx]
gating_distance = kf.gating_distance(
track.mean, track.covariance, measurements, only_position, use3d)
gated_set = gating_distance > gating_threshold
if np.all(gated_set):
gated_set = gating_distance > gating_threshold * 3
# print(track.track_id, gating_threshold, gating_distance)
gate_mask.append(gated_set)
# print(gated_set)
return np.vstack(gate_mask)
def find_clusters(cost_matrix, cutoff):
num_tracks, _ = cost_matrix.shape
clusters = []
total_tracks = 0
total_detections = 0
all_tracks = set(range(num_tracks))
all_visited_tracks = set()
while total_tracks < num_tracks:
visited_detections = set()
visited_tracks = set()
potential_track = next(iter(all_tracks - all_visited_tracks))
potential_tracks = set()
potential_tracks.add(potential_track)
while potential_tracks:
current_track = potential_tracks.pop()
visited_detections.update((np.where(cost_matrix[current_track] < cutoff)[0])+1)
visited_tracks.add(current_track)
for detection in visited_detections:
connected_tracks = np.where(cost_matrix[:, detection - 1] < cutoff)[0]
for track in connected_tracks:
if track in visited_tracks or track in potential_tracks:
continue
potential_tracks.add(track)
total_tracks += len(visited_tracks)
total_detections += len(visited_detections)
all_visited_tracks.update(visited_tracks)
clusters.append((np.array(list(visited_tracks), dtype = np.int32), np.array(list(visited_detections), dtype = np.int32)))
return clusters
| StarcoderdataPython |
# filename: bin/bump_version.py
"""
bump __version__ = in (".py", ".md") files
and clean line endings.
"""
import re
import os
import sys
from contextlib import contextmanager
@contextmanager
def open_new(file_path, exists_ok=True):
try:
fd = os.open(file_path, os.O_WRONLY | os.O_CREAT | os.O_EXCL)
f = os.fdopen(fd, "wb")
except OSError as e:
if not ((e.errno == errno.EEXIST) and exists_ok):
raise # skipped only if exits and exists_ok
else:
try:
yield f
finally:
f.close()
if __name__ == "__main__":
this_dir = os.path.dirname(sys.argv[0])
project_dir = os.path.abspath(os.path.join(this_dir, ".."))
version_re = re.compile(r"(?P<head>^.*__version__\s*=\s*[\"']?)(?P<version>\d+\.\d+\.\d+)(?P<tail>[\"']?.*$)")
for parent, folders, files in os.walk(project_dir):
# prune hidden
folders[:] = (f for f in folders if not f.startswith("."))
files[:] = (f for f in files if not f.startswith("."))
for file_name in files:
if file_name.endswith((".py", ".md")):
file_path = os.path.join(parent, file_name)
content = None
with open(file_path, "r") as f:
content = f.read()
new_content = []
for line_number, line in enumerate(content.splitlines(), start=1):
found = version_re.search(line)
if found:
version_str = found.group("version")
major, minor, batch = (int(t) for t in version_str.split("."))
next_major, next_minor, next_batch = major, minor, batch
print('{file_path} [line: {line_number}] __version__ = "{major}.{minor}.{batch}"'.format(**locals()))
bump_version = input("do you wish to bump? (a=add to version) (y/n/a)").strip().lower()
if bump_version in {"y", "yes"}:
# yes
next_batch += 1
elif bump_version in {"a", "add"}:
# add to version
next_major += max(0, int(input("major +") or "0"))
next_minor += max(0, int(input("minor +") or "0"))
next_batch += max(0, int(input("batch +") or "0"))
else:
# default is no change
pass
new_line = version_re.sub(r"\g<head>{next_major}.{next_minor}.{next_batch}\g<tail>".format(**locals()), line)
print(new_line)
new_content.append(new_line)
else:
new_content.append(line)
new_file_path = file_path +"_new"
with open_new(new_file_path, exists_ok=False) as f:
for line in new_content:
f.write((line + "\n").encode("utf-8"))
# remove old file
os.remove(file_path)
# os.rename(file_path, file_path + "_bu")
# rename new file
os.rename(new_file_path, file_path)
| StarcoderdataPython |
'''
Assign stellar mass/magnitude to subhalos via abundance matching.
Masses in log {M_sun}, luminosities in log {L_sun / h^2}, distances in {Mpc comoving}.
'''
# system -----
#from __future__ import division
import numpy as np
from numpy import log10, Inf
from scipy import integrate, interpolate, ndimage
# local -----
#from visualize import plot_sm
try:
from utilities import utility as ut
except ImportError:
pass
def assign(sub, m_kind='m.star', scat=0, dis_mf=0.007, source='', sham_prop='m.max', zis=None):
'''
Assign Mag_r or M_star via abundance matching.
Import catalog of subhalo [at snapshot], mass kind (mag.r, m.star),
1-sigma mass scatter at fixed sham prop [dex], disruption mass fraction (for both cens & sats),
mass source, property to abundance match against, [snapshot index[s]].
'''
if isinstance(sub, list):
if zis is None:
raise ValueError('subhalo catalog is a tree list, but no input snapshot index[s]')
elif isinstance(sub, dict):
if zis is not None:
raise ValueError('input snapshot index[s], but input catalog of subhalo at snapshot')
sub = [sub]
zis = [0]
subz = sub[zis[0]]
vol = subz.info['box.length'] ** 3
print('Box Length', subz.info['box.length'])
print('Box Hubble', subz.Cosmo['hubble'])
zis = ut.array.arrayize(zis)
if m_kind == 'm.star':
if not source:
source = 'li-drory-march'
redshift = subz.snap['z']
if redshift < 0.1:
redshift = 0.1
MF = SMFClass(source, redshift, scat, subz.Cosmo['hubble'])
elif m_kind == 'mag.r':
if source == 'cool_ages':
redshift = subz.snap['z']
if redshift < 0.1:
redshift = 0.1
MF = LFClass(source, scat, subz.Cosmo['hubble'], redshift)
else:
if not source:
source = 'blanton'
MF = LFClass(source, scat, subz.Cosmo['hubble'])
else:
raise ValueError('not recognize m_kind = %s' % m_kind)
for zi in zis:
subz = sub[zi]
subz[m_kind] = np.zeros(subz[sham_prop].size, np.float32)
if m_kind == 'm.star':
z = subz.snap['z']
if z < 0.1:
z = 0.1
MF.initialize_redshift(z)
elif m_kind == 'mag.r':
if source == 'cool_ages':
z = subz.snap['z']
if z < 0.1:
z = 0.1
MF.initialize_redshift(z)
# maximum number of objects in volume to assign given SMF/LF threshold
num_max = int(round(MF.numden(MF.mmin) * vol))
sis = ut.array.elements(subz[sham_prop], [0.001, Inf])
if dis_mf:
sis = ut.array.elements(subz['m.frac.min'], [dis_mf, Inf], sis)
siis_sort = np.argsort(subz[sham_prop][sis]).astype(sis.dtype)[::-1][:num_max]
num_sums = ut.array.arange_length(num_max) + 1
if scat:
if m_kind == 'm.star':
scats = np.random.normal(np.zeros(num_max), MF.scat).astype(np.float32)
elif m_kind == 'mag.r':
scats = np.random.normal(np.zeros(num_max), 2.5 * MF.scat).astype(np.float32)
#print MF.m_scat(num_sums / vol) + scats
subz[m_kind][sis[siis_sort]] = MF.m_scat(num_sums / vol) + scats
else:
subz[m_kind][sis[siis_sort]] = MF.m(num_sums / vol)
class SMFClass:
'''
Relate number density [dnumden / dlog(M_star/M_sun)] <-> stellar mass [log10(M_star/M_sun)]
using fits to observed stellar mass functions.
All SMFs assume input Hubble constant.
'''
def __init__(self, source='li-march', redshift=0.1, scat=0, hubble=0.7):
'''
Import SMF source, redshift, log scatter in M_star at fixed Msub.
'''
self.source = source
self.scat = scat
self.hubble = hubble
if source == 'li':
'''
Li & White 2009. z = 0.1 from SDSS. Chabrier IMF. Complete to 1e8 M_sun/h^2.
'''
self.redshifts = np.array([0.1])
self.mchars = np.array([10.525]) - 2 * log10(hubble) # {M_sun}
self.amplitudes = np.array([0.0083]) * hubble ** 3 # {Mpc ^ -3 / log(M/M_sun)}
self.slopes = np.array([-1.155])
self.initialize_redshift(redshift)
elif source == 'baldry':
'''
Baldry et al 2008. z = 0.1 from SDSS. diet Salpeter IMF = 0.7 Salpeter.
Complete to 1e8 M_sun.
'''
h_them = 0.7 # their assumed hubble constant
self.redshifts = np.array([0.1])
# covert to Chabrier
self.mchars = (np.array([10.525]) + 2 * log10(h_them / hubble) + log10(1 / 1.6 / 0.7))
self.amplitudes = np.array([0.00426]) * (hubble / h_them) ** 3
self.amplitudes2 = np.array([0.00058]) * (hubble / h_them) ** 3
self.slopes = np.array([-0.46])
self.slopes2 = np.array([-1.58])
self.initialize_redshift(redshift)
elif source == 'cole-march':
'''
Marchesini et al 2009. 1.3 < z < 4.0. Kroupa IMF.
z = 0.1 from Cole et al 2001 (2dF), converting their Salpeter to Kroupa.
*** In order to use out to z ~ 4, made evolution flat from z = 3.5 to 4.
'''
self.redshifts = np.array([0.1, 1.6, 2.5, 3.56, 4.03])
self.mchars = np.array([10.65, 10.60, 10.65, 11.07, 11.07]) - 2 * log10(hubble)
# converted to {Mpc ^ -3 dex ^ -1}
self.amplitudes = np.array([90.00, 29.65, 11.52, 1.55, 1.55]) * 1e-4 * hubble ** 3
self.slopes = np.array([-1.18, -1.00, -1.01, -1.39, -1.39])
self.make_splines()
self.initialize_redshift(redshift)
elif source == 'li-march':
'''
Marchesini et al 2009, using Li & White at z = 0.1.
'''
self.redshifts = np.array([0.1, 1.6, 2.5, 3.56, 4.03])
self.mchars = np.array([10.525, 10.60, 10.65, 11.07, 11.07]) - 2 * log10(hubble)
self.amplitudes = (np.array([0.0083, 0.002965, 0.00115, 0.000155, 0.000155]) *
hubble ** 3)
self.slopes = np.array([-1.155, -1.00, -1.01, -1.39, -1.39])
self.make_splines()
self.initialize_redshift(redshift)
elif source == 'li-march-extreme':
'''
More extreme version of Marchesini et al 2009, using Li & White at z = 0.1.
'''
self.redshifts = np.array([0.1, 1.6, 2.5, 3.56, 4.03])
self.mchars = np.array([10.525, 10.60, 10.65, 11.07, 11.07]) - 2 * log10(hubble)
self.amplitudes = (np.array([0.0083, 0.00001, 0.00001, 0.00001, 0.000001]) *
hubble ** 3)
self.slopes = np.array([-1.155, -1.00, -1.01, -1.39, -1.39])
self.make_splines()
self.initialize_redshift(redshift)
elif source == 'constant-li':
'''
Li & White at all redshifts
'''
self.redshifts = np.arange(0.1, 4.03, 0.1)
self.mchars = np.repeat(10.525, len(self.redshifts)) - 2 * log10(hubble)
self.amplitudes = (np.repeat(0.0083, len(self.redshifts))* hubble ** 3)
self.slopes = np.repeat(-1.155, len(self.redshifts))
self.make_splines()
self.initialize_redshift(redshift)
elif source == 'fontana':
'''
Fontana et al 2006. 0.4 < z < 4 from GOODS-MUSIC. Salpeter IMF.
z = 0.1 from Cole et al 2001.
'''
h_them = 0.7 # their assumed hubble constant
self.redshifts = np.array([0.1, 4.0]) # store redshift range of validity
self.amplitude0 = 0.0035 * (hubble / h_them) ** 3 # to {Mpc ^ -3 / log10(M/M_sun)}
self.amplitude1 = -2.2
self.slope0 = -1.18
self.slope1 = -0.082
self.mchar0 = 11.16 # log10(M/M_sun)
self.mchar1 = 0.17 # log10(M/M_sun)
self.mchar2 = -0.07 # log10(M/M_sun)
# convert to my hubble & Chabrier IMF
self.mchar0 += 2 * log10(h_them / hubble) - log10(1.6)
self.initialize_redshift(redshift)
elif source == 'li-drory-march':
'''
Drory et al 2009. 0.3 < z < 1.0 from COSMOS.
Chabrier IMF limited to 0.1 - 100 M_sun.
Complete to (8.0, 8.6, 8.9, 9.1) M_sun/h^2 at z = (0.3, 0.5, 0.7, 0.9).
Anchor to Li & White at z = 0.1, Marchesini et al at higher redshift.
See Ilbert et al 2010 for alternate COSMOS version.
'''
h_them = 0.72 # their assumed hubble constant
self.redshifts = np.array([0.3, 0.5, 0.7, 0.9])
self.mchars = np.array([10.90, 10.91, 10.95, 10.92]) + 2 * log10(h_them / hubble)
# convert to [Mpc ^ -3 dex^-1]
self.amplitudes = (np.array([0.00289, 0.00174, 0.00216, 0.00294]) *
(hubble / h_them) ** 3)
self.slopes = np.array([-1.06, -1.05, -0.93, -0.91])
self.mchars2 = np.array([9.63, 9.70, 9.75, 9.85]) + 2 * log10(h_them / hubble)
self.amplitudes2 = (np.array([0.00180, 0.00143, 0.00289, 0.00212]) *
(hubble / h_them) ** 3)
self.slopes2 = np.array([-1.73, -1.76, -1.65, -1.65])
# add li & white
self.redshifts = np.append(0.1, self.redshifts)
self.mchars = np.append(10.525 - 2 * log10(hubble), self.mchars)
self.amplitudes = np.append(0.0083 * hubble ** 3, self.amplitudes)
self.slopes = np.append(-1.155, self.slopes)
self.mchars2 = np.append(self.mchars2[0], self.mchars2)
self.amplitudes2 = np.append(0, self.amplitudes2)
self.slopes2 = np.append(self.slopes2[0], self.slopes2)
# add marchesini et al
h_them = 0.7 # their assumed hubble constant
self.redshifts = np.append(self.redshifts, [1.6, 2.5, 3.56, 4.03])
self.mchars = np.append(self.mchars,
np.array([10.60, 10.65, 11.07, 11.07]) - 2 * log10(hubble))
self.amplitudes = np.append(self.amplitudes,
np.array([0.002965, 0.00115, 0.000155, 0.000155]) *
hubble ** 3)
self.slopes = np.append(self.slopes, [-1.00, -1.01, -1.39, -1.39])
self.mchars2 = np.append(self.mchars2, np.zeros(4) + self.mchars2[0])
self.amplitudes2 = np.append(self.amplitudes2, np.zeros(4))
self.slopes2 = np.append(self.slopes2, np.zeros(4) + self.slopes2[0])
self.make_splines()
self.initialize_redshift(redshift)
elif source == 'li-drory-march_sameslope':
'''
Apply low-mass slope from Drory et al 2009 to Li & White, Marchesini et al.
'''
self.redshifts = np.array([0.1, 0.3, 0.5, 0.7, 0.9, 1.6, 2.5, 3.56, 4.03])
self.mchars = np.array([10.525, 10.61, 10.62, 10.66, 10.63, 10.60, 10.65, 11.07,
11.07] - 2 * log10(hubble))
self.amplitudes = np.array([0.0083, 0.00774, 0.00466, 0.00579, 0.00787, 0.00297,
0.00115, 0.000155, 0.000155]) * hubble ** 3
self.slopes = np.array([-1.155, -1.06, -1.05, -0.93, -0.91, -1.00, -1.01, -1.39, -1.39])
self.mchars2 = (np.array([9.35, 9.34, 9.41, 9.46, 9.56, 9.41, 9.46, 9.83, 9.83]) -
2 * log10(hubble))
self.amplitudes2 = np.array([0.00269, 0.00482, 0.00383, 0.00774, 0.00568, 0.000962,
0.000375, 0.0000503, 0.0000503]) * hubble ** 3
self.slopes2 = np.array([-1.70, -1.73, -1.76, -1.65, -1.65, -1.72, -1.74, -2.39, -2.39])
self.make_splines()
self.initialize_redshift(redshift)
elif source == 'perez':
'''
Perez-Gonzalez et al 2008. 0.1 < z < 4.0 from Spitzer, Hubble, Chandra.
Salpeter IMF.
Complete to (8, 9.5, 10, 11) M_star at z = (0, 1, 2, 3).
'''
h_them = 0.7 # their assumed hubble constant
self.redshifts = np.array([0.1, 0.3, 0.5, 0.7, 0.9, 1.15, 1.45, 1.8, 2.25, 2.75, 3.25,
3.75])
self.mchars = np.array([11.16, 11.20, 11.26, 11.25, 11.27, 11.31, 11.34, 11.40, 11.46,
11.34, 11.33, 11.36]) + 2 * log10(h_them / hubble)
# convert to Chabrier IMF
self.mchars -= log10(1.6)
# convert to [Mpc ^ -3 dex ^ -1]
self.amplitudes = (10 ** np.array([-2.47, -2.65, -2.76, -2.82, -2.91, -3.06, -3.27,
- 3.49, -3.69, -3.64, -3.74, -3.94]) *
(hubble / h_them) ** 3)
self.slopes = np.array([-1.18, -1.19, -1.22, -1.26, -1.23, -1.26, -1.29, -1.27, -1.26,
- 1.20, -1.14, -1.23])
self.make_splines()
self.initialize_redshift(redshift)
else:
raise ValueError('not recognize source = %s' % source)
def make_splines(self):
'''
Make spline fits to SMF fit parameters v redshift.
Use 1st order spline (k) to avoid ringing.
'''
self.mchar_z_spl = interpolate.splrep(self.redshifts, self.mchars, k=1)
self.slope_z_spl = interpolate.splrep(self.redshifts, self.slopes, k=1)
self.amplitude_z_spl = interpolate.splrep(self.redshifts, self.amplitudes, k=1)
if self.source in ('li-drory-march', 'li-drory-march_sameslope'):
self.mchar2_z_spl = interpolate.splrep(self.redshifts, self.mchars2, k=1)
self.slope2_z_spl = interpolate.splrep(self.redshifts, self.slopes2, k=1)
self.amplitude2_z_spl = interpolate.splrep(self.redshifts, self.amplitudes2, k=1)
def initialize_redshift(self, redshift=0.1):
'''
Make spline to get mass from number density.
Import redshift.
Find SMF fit parameters at redshift, correcting amplitude by * log(10) & slope
by + 1 to make dndm call faster.
'''
if redshift < self.redshifts.min() - 1e-5 or redshift > self.redshifts.max() + 1e-5:
raise ValueError('z = %.2f out of range for %s' % (redshift, self.source))
self.redshift = redshift
if self.source in ('li'):
self.m_char = self.mchars[0]
self.amplitude = self.amplitudes[0] * np.log(10)
self.slope = self.slopes[0] + 1
elif self.source in ('baldry'):
self.m_char = self.mchars[0]
self.mchar2 = self.mchars[0]
self.amplitude = self.amplitudes[0] * np.log(10)
self.amplitude2 = self.amplitudes2[0] * np.log(10)
self.slope = self.slopes[0] + 1
self.slope2 = self.slopes2[0] + 1
elif self.source in ('cole-march', 'li-march', 'perez', 'constant-li', 'li-march-extreme'):
self.m_char = interpolate.splev(redshift, self.mchar_z_spl)
self.amplitude = interpolate.splev(redshift, self.amplitude_z_spl) * np.log(10)
self.slope = interpolate.splev(redshift, self.slope_z_spl) + 1
elif self.source == 'fontana':
self.m_char = self.mchar0 + self.mchar1 * redshift + self.mchar2 * redshift ** 2
self.amplitude = (self.amplitude0 * (1 + redshift) ** self.amplitude1) * np.log(10)
self.slope = (self.slope0 + self.slope1 * redshift) + 1
elif self.source in ('li-drory-march', 'li-drory-march_sameslope'):
self.m_char = interpolate.splev(redshift, self.mchar_z_spl)
self.amplitude = interpolate.splev(redshift, self.amplitude_z_spl) * np.log(10)
self.slope = interpolate.splev(redshift, self.slope_z_spl) + 1
self.mchar2 = interpolate.splev(redshift, self.mchar2_z_spl)
self.amplitude2 = interpolate.splev(redshift, self.amplitude2_z_spl) * np.log(10)
self.slope2 = interpolate.splev(redshift, self.slope2_z_spl) + 1
self.make_numden_m_spline(self.redshift, self.scat)
def dndm(self, m_star):
'''
Compute d(num-den) / d(log m) = ln(10) * amplitude * (10^(m_star - m_char)) ** (1 + slope) *
exp(-10^(m_star - m_char)).
Import stellar mass.
'''
m_rats = 10 ** (m_star - self.m_char)
if 'drory' in self.source or self.source == 'baldry':
dm2s = 10 ** (m_star - self.mchar2)
return (self.amplitude * m_rats ** self.slope * np.exp(-m_rats) +
self.amplitude2 * dm2s ** self.slope2 * np.exp(-dm2s))
else:
return self.amplitude * m_rats ** self.slope * np.exp(-m_rats)
def numden(self, m_min, m_max=14):
'''
Compute number density within range.
Import stellar mass range.
'''
return integrate.quad(self.dndm, m_min, m_max)[0]
def make_numden_m_spline(self, redshift=0.1, scat=0):
'''
Make splines to relate d(num-den) / d[log]m & num-den(> m) to m.
Import redshift (if want to change), mass scatter [dex].
'''
iter_num = 30
if redshift != self.redshift:
self.initialize_redshift(redshift)
if scat != self.scat:
self.scat = scat
dm = 0.01
dm_scat_lo = 3 * scat # extend fit for deconvolute b.c.'s
dm_scat_hi = 0.5 * scat # extend fit for deconvolute b.c.'s
self.mmin = 7.3
self.mmax = 12.3
m_stars = np.arange(self.mmin - dm_scat_lo, self.mmax + dm_scat_hi, dm, np.float32)
numdens = np.zeros(m_stars.size)
dndms = np.zeros(m_stars.size)
for mi in xrange(m_stars.size):
# make sure numdens are monotonically decreasing even if = -infinity
numdens[mi] = self.numden(m_stars[mi]) + 1e-9 * (1 - mi * 0.001)
dndms[mi] = self.dndm(m_stars[mi]) + 1e-9 * (1 - mi * 0.001)
# make no scatter splines
self.log_numden_m_spl = interpolate.splrep(m_stars, log10(numdens))
self.m_log_numden_spl = interpolate.splrep(log10(numdens)[::-1], m_stars[::-1])
# at high z, smf not monotonically decreasing, so spline not work on below
# self.m_log_dndm_spl = interpolate.splrep(log10(dndms)[::-1], m_stars[::-1])
# make scatter splines
if scat:
# deconvolve osbserved smf assuming scatter to find unscattered one
dndms_scat = ut.math.deconvolute(dndms, scat, dm, iter_num)
# chop off lower boundaries, unreliable
m_stars = m_stars[int(dm_scat_lo / dm):]
dndms_scat = dndms_scat[int(dm_scat_lo / dm):]
# find spline to integrate over
self.dndm_m_scat_spl = interpolate.splrep(m_stars, dndms_scat)
numdens_scat = np.zeros(m_stars.size)
for mi in xrange(m_stars.size):
numdens_scat[mi] = interpolate.splint(m_stars[mi], m_stars.max(),
self.dndm_m_scat_spl)
numdens_scat[mi] += 1e-9 * (1 - mi * 0.001)
self.log_numden_m_scat_spl = interpolate.splrep(m_stars, log10(numdens_scat))
self.m_log_numden_scat_spl = interpolate.splrep(log10(numdens_scat)[::-1],
m_stars[::-1])
def m(self, num_den):
'''
Get mass at threshold.
Import threshold number density.
'''
return interpolate.splev(log10(num_den), self.m_log_numden_spl).astype(np.float32)
def m_scat(self, num_den):
'''
Get mass at threshold, using de-scattered source.
Import threshold number density.
'''
return interpolate.splev(log10(num_den), self.m_log_numden_scat_spl).astype(np.float32)
def m_dndm(self, dn_dm):
'''
Get mass at d(num-den)/d[log]m.
Import d(num-den) / d[log]m.
'''
return interpolate.splev(log10(dn_dm), self.m_log_dndm_spl)
def dndm_scat(self, m):
'''
Get d(num-den) / d[log]m at m, using de-scattered source.
Import mass.
'''
return interpolate.splev(m, self.dndm_m_scat_spl)
def numden_scat(self, m):
'''
Get num-den(>[log]m) at m, using de-scattered source.
Import mass.
'''
return 10 ** (interpolate.splev(m, self.log_numden_m_scat_spl))
class LFClass(SMFClass):
    '''
    Relate number density [Mpc ^ -3] <-> magnitude/luminosity using spline fit to luminosity
    functions.
    Import spline querying functions from SMFClass.
    '''
    def __init__(self, source='blanton', scat=0, hubble=0.7, redshift=0.1):
        '''
        Import source, log-normal scatter, hubble constant, redshift.
        '''
        self.source = source
        self.scat = scat
        self.hubble = hubble
        if source == 'norberg':
            # Norberg et al 2002: 2dF r-band at z ~ 0.1.
            self.m_char = -19.66
            self.amplitude = 1.61e-2 * hubble ** 3    # Mpc ^ -3
            self.slope = -1.21
        elif source == 'blanton':
            # Blanton et al 03: SDSS r-band z ~ 0.1.
            self.m_char = -20.44
            self.amplitude = 1.49e-2 * hubble ** 3    # Mpc ^ -3
            self.slope = -1.05
        elif source == 'sheldon':
            # Sheldon et al 07: SDSS i-band z = 0.25. Valid for Mag < -19.08 (0.19L*).
            self.m_char = -20.9    # Hansen et al 09 catalog has -20.8
            self.amplitude = 1.02e-2 * hubble ** 3    # Mpc ^ -3
            self.slope = -1.21
        elif source == 'cool_ages':
            # Cool et al 2012: AGES. Tabulated v redshift, so spline-interpolated.
            self.redshifts = np.array([0.1, 0.2, 0.3, 0.4, 0.5, 0.65])
            self.mchars = np.array([-20.58, -20.81, -20.81, -20.99, -21.29, -21.38])
            self.amplitudes = (np.array([1.59e-2, 1.52e-2, 1.24e-2, 1.44e-2, 1.08e-2,
                                         1.05e-2]) * hubble ** 3)    # Mpc ^ -3
            self.slopes = np.repeat(-1.05, len(self.redshifts))
            self.make_splines()
            self.initialize_redshift(redshift)
        else:
            raise ValueError('not recognize source = %s in LFClass' % source)
        if source != 'cool_ages':
            # single-redshift sources: fit parameters are already set, just make splines
            self.make_numden_m_spline(scat, redshift=None)

    def dndm(self, mag):
        '''
        Get d(num-den) / d(mag) of the Schechter luminosity function.

        Import (positive) magnitude.
        '''
        # bug fix: was `mag *= -1.`, which mutates a caller's numpy array in place
        # (test_sham passes its m_bins array here) - negate into a new object instead
        mag = -1. * mag
        return (np.log(10) / 2.5 * self.amplitude *
                10 ** ((self.slope + 1) / 2.5 * (self.m_char - mag)) *
                np.exp(-10 ** ((self.m_char - mag) / 2.5)))

    def numden(self, m_min, m_max=25):
        '''
        Get number density within range.

        Import (positive) magnitude range.
        '''
        return integrate.quad(self.dndm, m_min, m_max)[0]

    def initialize_redshift(self, redshift=0.1):
        '''
        Find LF fit parameters at redshift & remake magnitude <-> number density spline.

        Import redshift.
        '''
        # upper bound deliberately unchecked: ext=0 below extrapolates beyond the table
        if redshift < self.redshifts.min() - 1e-5:
            raise ValueError('z = %.2f out of range for %s' % (redshift, self.source))
        self.redshift = redshift
        self.m_char = interpolate.splev(redshift, self.mchar_z_spl, ext=0)
        self.amplitude = interpolate.splev(redshift, self.amplitude_z_spl, ext=0)
        self.slope = interpolate.splev(redshift, self.slope_z_spl, ext=0)
        self.make_numden_m_spline(scat=self.scat, redshift=self.redshift)

    def make_numden_m_spline(self, scat=0, redshift=0.1):
        '''
        Make splines to relate d(num-den)/d(mag) & num-den(> mag) to mag.

        Import scatter [dex], redshift.
        '''
        try:
            if redshift != self.redshift:
                self.initialize_redshift(redshift)
        except AttributeError:
            # self.redshift not set yet (single-redshift sources) - nothing to update
            pass
        if scat != self.scat:
            self.scat = scat
        # convert scatter in log(lum) to scatter in magnitude
        mag_scat = 2.5 * self.scat
        deconvol_iter_num = 20
        dmag = 0.01
        # extend fit range so deconvolution boundary conditions do not pollute [mmin, mmax]
        dmag_scat_lo = 2 * mag_scat
        dmag_scat_hi = 1 * mag_scat
        self.mmin = 17.0
        self.mmax = 23.3
        mags = np.arange(self.mmin - dmag_scat_lo, self.mmax + dmag_scat_hi, dmag, np.float32)
        numdens = np.zeros(mags.size)
        dndms = np.zeros(mags.size)
        # bug fix: xrange is Python-2-only - use range
        for mi in range(len(mags)):
            numdens[mi] = np.abs(self.numden(mags[mi]))
            dndms[mi] = self.dndm(mags[mi])
        # make no scatter splines
        self.log_numden_m_spl = interpolate.splrep(mags, log10(numdens))
        self.dndm_m_spl = interpolate.splrep(mags, dndms)
        self.m_log_numden_spl = interpolate.splrep(log10(numdens)[::-1], mags[::-1])
        # make scatter splines
        if self.scat:
            # deconvolve observed lf assuming scatter to find unscattered one
            dndms_scat = ut.math.deconvolute(dndms, mag_scat, dmag, deconvol_iter_num)
            # chop off boundaries, unreliable
            # bug fix: slice indices were floats (dmag_scat_lo / dmag), which raises
            # TypeError - convert to int as in SMFClass.make_numden_m_spline
            i_lo = int(round(dmag_scat_lo / dmag))
            i_hi = int(round(dmag_scat_hi / dmag))
            mags = mags[i_lo:-i_hi]
            dndms_scat = dndms_scat[i_lo:-i_hi]
            # find spline to integrate over
            self.dndm_m_scat_spl = interpolate.splrep(mags, dndms_scat)
            numdens_scat = np.zeros(mags.size)
            for mi in range(mags.size):
                numdens_scat[mi] = np.abs(interpolate.splint(mags[mi], mags.max(),
                                                             self.dndm_m_scat_spl))
                numdens_scat[mi] += 1e-9 * (1 - mi * 0.001)
            self.log_numden_m_scat_spl = interpolate.splrep(mags, log10(numdens_scat))
            self.m_log_numden_scat_spl = interpolate.splrep(log10(numdens_scat)[::-1],
                                                            mags[::-1])
#===================================================================================================
# test/plot
#===================================================================================================
def test_sham(sub, zi, m_kind, m_min, m_max, scat=0.2, mfracmin=0, m_wid=0.1, source='',
              sham_kind='m.max'):
    '''
    Plot mass functions: analytic v subhalo-assigned, with & without scatter.

    Import subhalo catalog, snapshot index,
    mass kind (m.star, mag.r) & range & scatter at fixed m_max,
    disruption mass fraction, bin size, GMF source, subhalo property to assign against.
    '''
    m_wid_scat = 3 * scat    # extra range needed for convolution boundary effects
    m_bins = np.arange(m_min - m_wid_scat, m_max + m_wid_scat, m_wid, np.float32) + 0.5 * m_wid
    if m_kind == 'm.star':
        if not source:
            source = 'li-march'
        Sf = SMFClass(source, sub.snap['z'][zi], scat, sub.Cosmo['hubble'])
    elif m_kind == 'mag.r':
        if not source:
            source = 'blanton'
        Sf = LFClass(source, scat, sub.Cosmo['hubble'])
    # analytic gmf, no scatter
    dndm_anal = Sf.dndm(m_bins)
    if scat:
        # convolve above gmf with scatter, then deconvolve, to see if can recover
        dndm_anal_conv = ndimage.filters.gaussian_filter1d(dndm_anal, Sf.scat / m_wid)
        dndm_anal_decon = ut.math.deconvolute(dndm_anal_conv, Sf.scat, m_wid, 30)
        # mean (underlying) relation
        dndm_anal_pre = Sf.dndm_scat(m_bins)
        # observed gmf after convolution (no random noise)
        dndm_anal_recov = ndimage.filters.gaussian_filter1d(dndm_anal_pre, Sf.scat / m_wid)
        # cut out extremes, unreliable
        cutoff = int(round(m_wid_scat / m_wid))
        if cutoff > 0:
            m_bins = m_bins[cutoff:-cutoff]
            dndm_anal = dndm_anal[cutoff:-cutoff]
            dndm_anal_conv = dndm_anal_conv[cutoff:-cutoff]
            dndm_anal_pre = dndm_anal_pre[cutoff:-cutoff]
            dndm_anal_decon = dndm_anal_decon[cutoff:-cutoff]
            dndm_anal_recov = dndm_anal_recov[cutoff:-cutoff]
    m_bins -= 0.5 * m_wid
    # assign mass to subhalo, with or without scatter (random noise at high mass end)
    assign(sub, zi, m_kind, scat, mfracmin, source, sham_kind)
    ims = ut.bin.idigitize(sub[zi][m_kind], m_bins)
    gal_nums = np.zeros(m_bins.size)
    # bug fix: xrange is Python-2-only - use range
    for mi in range(m_bins.size):
        gal_nums[mi] = ims[ims == mi].size
    print('bin count min %d' % np.min(gal_nums))
    dndm_sham = gal_nums / sub.info['box.length'] ** 3 / m_wid
    print('assign ratio ave %.3f' % np.mean(abs(dndm_sham / dndm_anal)))
    if scat:
        print('recov ratio ave %.3f' % np.mean(abs(dndm_anal_recov / dndm_anal)))
    # plot ----------
    Plot = plot_sm.PlotClass()
    Plot.set_axis('lin', 'lin', [m_min, m_max], log10(dndm_anal))
    Plot.make_window()
    Plot.draw('c', m_bins, log10(dndm_anal))
    Plot.draw('c', m_bins, log10(dndm_sham), ct='red')
    if scat:
        Plot.draw('c', m_bins, log10(dndm_anal_pre), ct='green')
        Plot.draw('c', m_bins, log10(dndm_anal_recov), ct='blue')
def plot_source_compare(sources=['li-march', 'perez'], redshifts=0.1, m_lim=[8.0, 11.7], m_wid=0.1,
                        plot_kind='value'):
    '''
    Plot each source at each redshift.

    Import mass functions, redshifts, plotting mass range & bin width, plot kind (value, ratio).
    '''
    sources = ut.array.arrayize(sources)
    redshifts = ut.array.arrayize(redshifts)
    Mbin = ut.bin.BinClass(m_lim, m_wid)
    log_dn_dlogms = []
    # bug fix: xrange is Python-2-only - use range
    for src_i in range(sources.size):
        log_dn_dlogms_so = []
        for zi in range(redshifts.size):
            Smf = SMFClass(sources[src_i], redshifts[zi], scat=0, hubble=0.7)
            log_dn_dlogms_so.append(log10(Smf.dndm(Mbin.mids)))
        log_dn_dlogms.append(log_dn_dlogms_so)
    # plot ----------
    Plot = plot_sm.PlotClass()
    if plot_kind == 'ratio':
        # convert the nested list to an array explicitly, rather than relying on
        # numpy's implicit coercion of `list - ndarray`
        ys = 10 ** (np.asarray(log_dn_dlogms) - log_dn_dlogms[0][0])
        Plot.axis.space_y = 'lin'
    elif plot_kind == 'value':
        ys = log_dn_dlogms
        Plot.axis.space_y = 'log'
    Plot.set_axis('log', '', Mbin.mids, ys, tick_lab_kind='log')
    Plot.set_axis_label('m.star', 'dn/dlog(M_{star}) [h^{3}Mpc^{-3}]')
    Plot.make_window()
    Plot.set_label(pos_y=0.4)
    for src_i in range(sources.size):
        for zi in range(redshifts.size):
            Plot.draw('c', Mbin.mids, log_dn_dlogms[src_i][zi], ct=src_i, lt=zi)
            Plot.make_label(sources[src_i] + ' z=%.1f' % redshifts[zi])
    # add in cosmos at z = 0.35
    '''
    cosmos = [[8.8, 0.015216 , 1.250341e-03],
              [9, 0.01257, 1.210321e-03],
              [9.2, 0.01009, 1.047921e-03],
              [9.4, 0.007941, 8.908445e-04],
              [9.6, 0.006871, 7.681928e-04],
              [9.8, 0.005688, 6.825634e-04],
              [10, 0.005491, 6.136567e-04],
              [10.2, 0.004989, 6.004422e-04],
              [10.4, 0.00478, 5.917784e-04],
              [10.6, 0.00423, 5.851342e-04],
              [10.8, 0.003651, 4.919025e-04],
              [11, 0.002253, 3.562664e-04],
              [11.2, 0.001117, 2.006811e-04],
              [11.4, 0.0004182, 8.486049e-05],
              [11.6, 8.365e-05, 2.802892e-05],
              [11.8, 1.195e-05, 8.770029e-06]]
    '''
    # cosmos at z = 0.87
    cosmos = [[9.8, 0.005377, 3.735001e-04],
              [10, 0.004206, 3.443666e-04],
              [10.2, 0.003292, 3.235465e-04],
              [10.4, 0.003253, 3.318173e-04],
              [10.6, 0.002985, 3.198681e-04],
              [10.8, 0.002994, 2.735925e-04],
              [11, 0.002218, 1.922526e-04],
              [11.2, 0.001202, 1.067172e-04],
              [11.4, 0.0005681, 3.983348e-05],
              [11.6, 0.0001837, 1.195015e-05],
              [11.8, 4.214e-05, 3.200856e-06],
              [12, 1.686e-06, 7.463160e-07]]
    cosmos = np.array(cosmos)
    cosmos = cosmos.transpose()
    cosmos[1] = log10(cosmos[1] * 0.72 ** -3)
    # Plot.draw('pp', cosmos[0], cosmos[1], pt=123)
| StarcoderdataPython |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.