hexsha string | size int64 | ext string | lang string | max_stars_repo_path string | max_stars_repo_name string | max_stars_repo_head_hexsha string | max_stars_repo_licenses list | max_stars_count int64 | max_stars_repo_stars_event_min_datetime string | max_stars_repo_stars_event_max_datetime string | max_issues_repo_path string | max_issues_repo_name string | max_issues_repo_head_hexsha string | max_issues_repo_licenses list | max_issues_count int64 | max_issues_repo_issues_event_min_datetime string | max_issues_repo_issues_event_max_datetime string | max_forks_repo_path string | max_forks_repo_name string | max_forks_repo_head_hexsha string | max_forks_repo_licenses list | max_forks_count int64 | max_forks_repo_forks_event_min_datetime string | max_forks_repo_forks_event_max_datetime string | content string | avg_line_length float64 | max_line_length int64 | alphanum_fraction float64 | qsc_code_num_words_quality_signal int64 | qsc_code_num_chars_quality_signal float64 | qsc_code_mean_word_length_quality_signal float64 | qsc_code_frac_words_unique_quality_signal float64 | qsc_code_frac_chars_top_2grams_quality_signal float64 | qsc_code_frac_chars_top_3grams_quality_signal float64 | qsc_code_frac_chars_top_4grams_quality_signal float64 | qsc_code_frac_chars_dupe_5grams_quality_signal float64 | qsc_code_frac_chars_dupe_6grams_quality_signal float64 | qsc_code_frac_chars_dupe_7grams_quality_signal float64 | qsc_code_frac_chars_dupe_8grams_quality_signal float64 | qsc_code_frac_chars_dupe_9grams_quality_signal float64 | qsc_code_frac_chars_dupe_10grams_quality_signal float64 | qsc_code_frac_chars_replacement_symbols_quality_signal float64 | qsc_code_frac_chars_digital_quality_signal float64 | qsc_code_frac_chars_whitespace_quality_signal float64 | qsc_code_size_file_byte_quality_signal float64 | qsc_code_num_lines_quality_signal float64 | qsc_code_num_chars_line_max_quality_signal float64 | qsc_code_num_chars_line_mean_quality_signal float64 | qsc_code_frac_chars_alphabet_quality_signal float64 | qsc_code_frac_chars_comments_quality_signal float64 | qsc_code_cate_xml_start_quality_signal float64 | qsc_code_frac_lines_dupe_lines_quality_signal float64 | qsc_code_cate_autogen_quality_signal float64 | qsc_code_frac_lines_long_string_quality_signal float64 | qsc_code_frac_chars_string_length_quality_signal float64 | qsc_code_frac_chars_long_word_length_quality_signal float64 | qsc_code_frac_lines_string_concat_quality_signal float64 | qsc_code_cate_encoded_data_quality_signal float64 | qsc_code_frac_chars_hex_words_quality_signal float64 | qsc_code_frac_lines_prompt_comments_quality_signal float64 | qsc_code_frac_lines_assert_quality_signal float64 | qsc_codepython_cate_ast_quality_signal float64 | qsc_codepython_frac_lines_func_ratio_quality_signal float64 | qsc_codepython_cate_var_zero_quality_signal bool | qsc_codepython_frac_lines_pass_quality_signal float64 | qsc_codepython_frac_lines_import_quality_signal float64 | qsc_codepython_frac_lines_simplefunc_quality_signal float64 | qsc_codepython_score_lines_no_logic_quality_signal float64 | qsc_codepython_frac_lines_print_quality_signal float64 | qsc_code_num_words int64 | qsc_code_num_chars int64 | qsc_code_mean_word_length int64 | qsc_code_frac_words_unique null | qsc_code_frac_chars_top_2grams int64 | qsc_code_frac_chars_top_3grams int64 | qsc_code_frac_chars_top_4grams int64 | qsc_code_frac_chars_dupe_5grams int64 | qsc_code_frac_chars_dupe_6grams int64 | qsc_code_frac_chars_dupe_7grams int64 | qsc_code_frac_chars_dupe_8grams int64 | 
qsc_code_frac_chars_dupe_9grams int64 | qsc_code_frac_chars_dupe_10grams int64 | qsc_code_frac_chars_replacement_symbols int64 | qsc_code_frac_chars_digital int64 | qsc_code_frac_chars_whitespace int64 | qsc_code_size_file_byte int64 | qsc_code_num_lines int64 | qsc_code_num_chars_line_max int64 | qsc_code_num_chars_line_mean int64 | qsc_code_frac_chars_alphabet int64 | qsc_code_frac_chars_comments int64 | qsc_code_cate_xml_start int64 | qsc_code_frac_lines_dupe_lines int64 | qsc_code_cate_autogen int64 | qsc_code_frac_lines_long_string int64 | qsc_code_frac_chars_string_length int64 | qsc_code_frac_chars_long_word_length int64 | qsc_code_frac_lines_string_concat null | qsc_code_cate_encoded_data int64 | qsc_code_frac_chars_hex_words int64 | qsc_code_frac_lines_prompt_comments int64 | qsc_code_frac_lines_assert int64 | qsc_codepython_cate_ast int64 | qsc_codepython_frac_lines_func_ratio int64 | qsc_codepython_cate_var_zero int64 | qsc_codepython_frac_lines_pass int64 | qsc_codepython_frac_lines_import int64 | qsc_codepython_frac_lines_simplefunc int64 | qsc_codepython_score_lines_no_logic int64 | qsc_codepython_frac_lines_print int64 | effective string | hits int64 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
0f814ba26527117e6b84e9f229772b778a6cda9e | 537 | py | Python | host/Productivity.py | chehansivaruban/Cyber---SDGP | 6676c284c34c4c15279bf3e5cfb73fd4e5b7391a | [
"CC0-1.0"
] | 1 | 2021-05-18T10:55:32.000Z | 2021-05-18T10:55:32.000Z | host/Productivity.py | chehansivaruban/Cyber---SDGP | 6676c284c34c4c15279bf3e5cfb73fd4e5b7391a | [
"CC0-1.0"
] | null | null | null | host/Productivity.py | chehansivaruban/Cyber---SDGP | 6676c284c34c4c15279bf3e5cfb73fd4e5b7391a | [
"CC0-1.0"
] | 2 | 2021-03-29T19:00:55.000Z | 2021-04-02T13:18:07.000Z |
class Productivity:
def __init__(self, irradiance, hours,capacity):
self.irradiance = irradiance
self.hours = hours
self.capacity = capacity
def getUnits(self):
print(self.irradiance)
totalpower = 0
print(totalpower)
for i in self.irradiance:
power = int(self.capacity) * int(i) /1000
totalpower = totalpower+power
# units= (self.irradiance*self.area*self.hours)/1000
print(totalpower)
return totalpower
| 28.263158 | 61 | 0.590317 | 54 | 537 | 5.796296 | 0.388889 | 0.223642 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.024793 | 0.324022 | 537 | 18 | 62 | 29.833333 | 0.837466 | 0.09311 | 0 | 0.142857 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.142857 | false | 0 | 0 | 0 | 0.285714 | 0.214286 | 0 | 0 | 0 | null | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
0f81775989722fac86c0501dba1ff10529d51ca2 | 1,313 | py | Python | sample/all_methods/setNoteApplicationDataEntry.py | matthewayne/evernote-sdk-python | 53b0c1263d250b88a1810987b51bc9def586db02 | [
"Apache-2.0",
"BSD-3-Clause"
] | 3 | 2016-02-14T13:39:46.000Z | 2021-09-03T16:02:18.000Z | sample/all_methods/setNoteApplicationDataEntry.py | matthewayne/evernote-sdk-python | 53b0c1263d250b88a1810987b51bc9def586db02 | [
"Apache-2.0",
"BSD-3-Clause"
] | null | null | null | sample/all_methods/setNoteApplicationDataEntry.py | matthewayne/evernote-sdk-python | 53b0c1263d250b88a1810987b51bc9def586db02 | [
"Apache-2.0",
"BSD-3-Clause"
] | 1 | 2018-03-28T01:08:25.000Z | 2018-03-28T01:08:25.000Z | # Import the Evernote client
from evernote.api.client import EvernoteClient
# Define access token either:
# Developer Tokens (https://dev.evernote.com/doc/articles/dev_tokens.php)
# or OAuth (https://dev.evernote.com/doc/articles/authentication.php)
access_token = "insert dev or oauth token here"
# Setup the client
client = EvernoteClient(token = access_token, sandbox = True)
# Get note store object
note_store = client.get_note_store()
# GUID of the note to attach the application data to
note_guid = "insert note GUID to attach key-value storage to here"
# Value of the key for the storage
# 3rd party apps are only allowed 1
key = "your-consumer-key"
# the value of the application data entry
# containing a string of arbitrary length
value = "this is the value of the application data"
# Each note is given this 4kb map of arbitrary data, shared by all third-party applications.
# Adding new data may cause the field's value to exceed the 4kb limit.
# In this case, an instance of EDAMUserException is thrown with the BAD_DATA_FORMAT error code.
# Setting this value will overwrite any existing data
usn = note_store.setNoteApplicationDataEntry(note_guid, key, value)
print "Application data set for note with GUID, '%s' with the key '%s' and value '%s' (USN %s)" % (note_guid, key, value, usn)
| 37.514286 | 126 | 0.763899 | 210 | 1,313 | 4.719048 | 0.461905 | 0.036327 | 0.05449 | 0.038345 | 0.117053 | 0.117053 | 0 | 0 | 0 | 0 | 0 | 0.00363 | 0.160701 | 1,313 | 34 | 127 | 38.617647 | 0.895644 | 0.562072 | 0 | 0 | 0 | 0.111111 | 0.40681 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | null | 0 | 0.111111 | null | null | 0.111111 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
0f89c68a648bbf2920a3ee4f9f43df0ba5e7f56b | 800 | py | Python | sasquatch/error/exec.py | tmacro/s4 | 58729b61f56ff928c8fe928fb02816e5025ad588 | [
"BSD-3-Clause"
] | 6 | 2018-12-04T01:13:10.000Z | 2019-06-18T23:25:59.000Z | sasquatch/error/exec.py | tmacro/s4 | 58729b61f56ff928c8fe928fb02816e5025ad588 | [
"BSD-3-Clause"
] | 15 | 2018-12-01T00:13:41.000Z | 2021-06-01T23:05:08.000Z | sasquatch/error/exec.py | tmacro/sasquatch | 58729b61f56ff928c8fe928fb02816e5025ad588 | [
"BSD-3-Clause"
] | null | null | null | from .base import SQError, BaseErrorHelper
from .context import ContextAwareError
class ExecutionError(SQError):
'''raised when an error is encountered during script execution'''
class ExecErrorHelper(BaseErrorHelper):
_default = ExecutionError
@staticmethod
def throw(cls = ExecutionError, **kwargs):
if 'ctx' in kwargs and kwargs['ctx'] is not None:
ctx = kwargs.get('ctx')
return BaseErrorHelper.throw(cls, **ctx._asdict(), **kwargs)
class MissingKeywordError(ExecutionError):
'''Raised when a required keyword argument can not be collected'''
_msg = 'Unable to collect keyword {keyword}'
class InvalidFilePathError(ContextAwareError, ExecutionError):
'''Raised when a file path passed as input does not exist or is invalid'''
_msg = 'Path {filepath} is not a valid location'
| 34.782609 | 75 | 0.76125 | 98 | 800 | 6.173469 | 0.602041 | 0.049587 | 0.079339 | 0.082645 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.14125 | 800 | 22 | 76 | 36.363636 | 0.88064 | 0.23625 | 0 | 0 | 0 | 0 | 0.139496 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.071429 | false | 0 | 0.142857 | 0 | 0.785714 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 1 |
0f8bf192c00ccdfe2c080948f76d42451737fa89 | 9,816 | py | Python | src/tools/cluster/cluster.py | uct-cbio/galaxy-tools | b9422088dc41099fdde1edaf9c014825c8ee1cbf | [
"MIT"
] | null | null | null | src/tools/cluster/cluster.py | uct-cbio/galaxy-tools | b9422088dc41099fdde1edaf9c014825c8ee1cbf | [
"MIT"
] | null | null | null | src/tools/cluster/cluster.py | uct-cbio/galaxy-tools | b9422088dc41099fdde1edaf9c014825c8ee1cbf | [
"MIT"
] | null | null | null | #!/usr/bin/python
# EST clustering
# Currently clustering is done using the wcd clustering algorithm. Other algorithms will be supported later.
# The wcd program does not use qual scores for clustering. If seq qual scores are specified, the
# wcd clustered information will be used to create cluster qual scores. These quality scores can then be used as
# input for further processing (e.g. assembly).
import sys, re, string, os, subprocess, commands
from optparse import OptionParser
from Bio import SeqIO
from Bio.SeqRecord import SeqRecord
from Bio.Seq import UnknownSeq
def main():
usage = "usage: %prog -s SEQ_FILE -S QUAL_FILE -c CLUSTER_SEQ_FILE -C CLUSTER_QUAL_FILE -i CLUSTER_ID -r CLUSTER_SUMMARY_FILE -p CLUSTER_ALGORITHM -w PROG_WORK_DIR -d"
parser = OptionParser(usage=usage)
parser.add_option("-s", "--seq", dest="seq_file", help="File with sequences to be clustered.")
parser.add_option("-S", "--qual", dest="qual_file", help="Quality scores for the input sequences (optional).")
parser.add_option("-c", "--cluster_seq", dest="cluster_seq_file", help="Zipped FASTA sequences file. Archive contains cluster file with corresponding clustered FASTA sequence.")
parser.add_option("-C", "--cluster_qual", dest="cluster_qual_file", help="Zipped QUAL scores file. Archive contains cluster files with corresponding clustered QUAL scores (optional).")
parser.add_option("-i", "--cluster_id", dest="cluster_id", help="Cluster id")
parser.add_option("-r", "--cluster_summary", dest="cluster_summary_file", help="Cluster summary report.")
parser.add_option("-p", "--cluster_algorithm", dest="cluster_algorithm", help="The cluster algorithm (currently wcd only).")
parser.add_option("-w", "--prog_work", dest="prog_work_dir", help="Program working directory, contains all processed files.")
parser.add_option("-d", "--delete_program_work", action="store_true", dest="delete_program_work", default=False, help="Delete program working directory after program has completed.")
(options, args) = parser.parse_args()
if not options.seq_file:
print "Please specify the FASTA sequence file (-s SEQ_FILE)"
return - 1
if not options.cluster_seq_file:
print "Please specify the zipped cluster FASTA sequence file (-c CLUSTER_SEQ_FILE)"
return - 3
if not options.cluster_id:
print "Please specify the cluster id (-i CLUSTER_ID)"
return - 5
if not options.cluster_summary_file:
print "Please specify the cluster summary report file (-r CLUSTER_SUMMARY_FILE)"
return - 6
if not options.cluster_algorithm:
print "Please specify the cluster algorithm (-p CLUSTER_ALGORITHM)"
return - 7
if not options.prog_work_dir:
print "Please specify the program working directory (-w PROG_WORK_DIR)"
return - 8
if (len(args) > 0):
print "Too many input arguments"
return - 9
# Do some initialization
print "Initialize..."
root_dir = os.getcwd()
cluster_id = options.cluster_id
# Get full file paths
seq_file = os.path.abspath(options.seq_file)
if options.qual_file:
qual_file = os.path.abspath(options.qual_file)
cluster_seq_file = os.path.abspath(options.cluster_seq_file)
if options.cluster_qual_file:
cluster_qual_file = os.path.abspath(options.cluster_qual_file)
cluster_summary_file = os.path.abspath(options.cluster_summary_file)
prog_work_dir = os.path.abspath(options.prog_work_dir)
timestamp = commands.getoutput("date +%Y-%m-%d_%H_%M_%S_%N")
base_dir = prog_work_dir + "/cluster_" + timestamp
if not os.path.isdir(prog_work_dir):
os.system("mkdir " + prog_work_dir)
if os.path.isdir(prog_work_dir):
if os.path.isdir(base_dir):
os.system("rm -rf " + base_dir)
os.system("mkdir " + base_dir)
else:
os.system("mkdir " + base_dir);
else:
print "Program working directory does not exist."
return - 10
# Create working directory
if os.path.isdir(base_dir):
os.system("rm -rf " + base_dir)
os.system("mkdir " + base_dir)
else:
os.system("mkdir " + base_dir)
# Run wcd clustering
if (options.cluster_algorithm == "wcd"):
print "Run wcd..."
cluster_seq_dir = run_wcd(base_dir, seq_file, cluster_id)
else:
print "Cluster algorithm not supported (currently wcd only)"
return - 10
# Prepare cluster qual files
if(options.qual_file and options.cluster_qual_file):
print "Prepare cluster quality scores..."
cluster_qual_dir = prepare_cluster_qual_files(base_dir, qual_file, cluster_seq_dir)
elif (not options.qual_file and options.cluster_qual_file):
print "No cluster quality score will be prepared"
print "Please specify the QUAL score file (-S QUAL_FILE)"
elif (options.qual_file and not options.cluster_qual_file):
print "No cluster quality score will be prepared"
print "Please specify the zipped clustered QUAL scores file (-C CLUSTER_QUAL_FILE)"
else:
print "No cluster quality score will be prepared"
print "Please specify the QUAL score file (-S QUAL_FILE)"
print "Please specify the zipped clustered QUAL scores file(-C CLUSTER_QUAL_FILE)"
# Write summary report
print "Write summary report..."
write_cluster_summary(cluster_seq_dir, cluster_summary_file)
# Prepare output
print "Prepare output..."
tmp_zip = base_dir + "/tmp.zip" # Galaxy work around need to create a temporary zip archive and move to the output data set
if(options.qual_file and options.cluster_qual_file):
# os.system ("zip -j " + cluster_qual_file + " " + cluster_qual_dir + "/*")
os.system ("zip -jqq " + tmp_zip + " " + cluster_qual_dir + "/*")
os.system("mv " + tmp_zip + " " + cluster_qual_file)
# os.system("zip -j " + cluster_seq_file + " " + cluster_seq_dir + "/*")
os.system("zip -jqq " + tmp_zip + " " + cluster_seq_dir + "/*")
os.system("mv " + tmp_zip + " " + cluster_seq_file)
# Delete program working directory if indicated
if(options.delete_program_work):
print "Delete working directory"
os.system("rm -rf " + base_dir)
# Done
print "Done."
return 0
def run_wcd(work_dir, seq_file, cluster_id):
cluster_seq_dir = work_dir + "/cluster_fasta"
os.mkdir(cluster_seq_dir)
os.chdir(work_dir)
os.system("wcd --show_clusters -o cluster.cls " + seq_file) # get clusters
os.chdir(cluster_seq_dir)
os.system("wcd --init_cluster ../cluster.cls --split " + cluster_id + " " + seq_file) # prepare cluster FASTA files
os.chdir(work_dir)
return cluster_seq_dir
def prepare_cluster_qual_files(work_dir, qual_file, cluster_seq_dir):
cluster_qual_dir = work_dir + "/cluster_qual"
os.mkdir(cluster_qual_dir)
# get a list of all quality scores
fd_qual = open(qual_file, "rU");
quals = SeqIO.to_dict(SeqIO.parse(fd_qual, "qual"));
# get quality scores for the clusters
for cluster_seq_file in os.listdir(cluster_seq_dir):
if os.path.isfile(cluster_seq_dir + "/" + cluster_seq_file): # check if file, can do some more checking here e.g. is fasta file
fd_cluster_seq = open(cluster_seq_dir + "/" + cluster_seq_file, "rU")
cluster_seqs = SeqIO.parse(fd_cluster_seq, "fasta")
cluster_quals = []
for seq in cluster_seqs:
qual = quals[seq.name]
cluster_qual = SeqRecord(seq=UnknownSeq(len(qual.letter_annotations["phred_quality"])), id="", description=qual.description)
cluster_qual.letter_annotations["phred_quality"] = qual.letter_annotations["phred_quality"]
cluster_quals.append(cluster_qual)
cluster_qual_file = cluster_qual_dir + "/" + cluster_seq_file.split(".")[0] + ".qual"
fd_cluster_qual = open(cluster_qual_file, "w")
SeqIO.write(cluster_quals, fd_cluster_qual, "qual")
fd_cluster_qual.close()
os.system("sed -i \"s/> />/g\" " + cluster_qual_file) # need to replace the space after the > in header
fd_cluster_seq.close()
fd_qual.close()
return cluster_qual_dir
def write_cluster_summary(cluster_seq_dir, summary_file):
fd_summary = open(summary_file, 'w')
summary = ""
for cluster_seq_file in sorted(os.listdir(cluster_seq_dir)):
if os.path.isfile(cluster_seq_dir + "/" + cluster_seq_file): # check if file, can do some more checking here e.g. is fasta file
fd_cluster_seq = open(cluster_seq_dir + "/" + cluster_seq_file, "rU")
cluster_seqs = SeqIO.parse(fd_cluster_seq, "fasta")
summary = summary + cluster_seq_file.split(".")[0]
for seq in cluster_seqs:
summary = summary + "\t" + seq.name
summary = summary + "\n"
fd_cluster_seq.close()
process = subprocess.Popen("grep -v \"^[[:digit:]]*.$\" cluster.cls | wc -l", stdout=subprocess.PIPE, shell=True)
nr_clusters = process.communicate()[0]
process = subprocess.Popen("grep \"^[[:digit:]]*.$\" cluster.cls | wc -l", stdout=subprocess.PIPE, shell=True)
nr_singletons = process.communicate()[0]
header = "# nr Clusters: " + nr_clusters
header = header + "# nr Singletons: " + nr_singletons
header = header + "# Column 1: Cluster_id/Singleton_id" + "\n"
header = header + "# Columns 2 to n: Member Sequences" + "\n"
summary = header + summary
fd_summary.write(summary)
fd_summary.close()
if __name__ == "__main__":
sys.exit(main())
| 47.650485 | 188 | 0.667787 | 1,338 | 9,816 | 4.667414 | 0.171151 | 0.064051 | 0.035388 | 0.033627 | 0.365252 | 0.281505 | 0.213931 | 0.201121 | 0.183026 | 0.165893 | 0 | 0.002488 | 0.222086 | 9,816 | 205 | 189 | 47.882927 | 0.815348 | 0.114507 | 0 | 0.227848 | 0 | 0.006329 | 0.276053 | 0.009925 | 0 | 0 | 0 | 0 | 0 | 0 | null | null | 0 | 0.031646 | null | null | 0.14557 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
7e18bcbc29a2c01e32c9c2370ce6613e24de3611 | 10,660 | py | Python | stellarisdashboard/game_info.py | Akuukis/stellaris-dashboard | baf0de5abfaf1ade719f06c5d0ce76e7ee9d17df | [
"MIT",
"Unlicense"
] | 97 | 2018-03-24T14:18:21.000Z | 2022-01-16T16:25:55.000Z | stellarisdashboard/game_info.py | Akuukis/stellaris-dashboard | baf0de5abfaf1ade719f06c5d0ce76e7ee9d17df | [
"MIT",
"Unlicense"
] | 30 | 2018-03-25T01:06:44.000Z | 2022-01-28T10:24:19.000Z | stellarisdashboard/game_info.py | Akuukis/stellaris-dashboard | baf0de5abfaf1ade719f06c5d0ce76e7ee9d17df | [
"MIT",
"Unlicense"
] | 17 | 2018-03-25T15:45:30.000Z | 2022-01-09T13:58:32.000Z | PHYSICS_TECHS = {
"tech_databank_uplinks",
"tech_basic_science_lab_1",
"tech_curator_lab",
"tech_archeology_lab",
"tech_physics_lab_1",
"tech_physics_lab_2",
"tech_physics_lab_3",
"tech_global_research_initiative",
"tech_administrative_ai",
"tech_cryostasis_1",
"tech_cryostasis_2",
"tech_self_aware_logic",
"tech_automated_exploration",
"tech_sapient_ai",
"tech_positronic_implants",
"tech_combat_computers_1",
"tech_combat_computers_2",
"tech_combat_computers_3",
"tech_combat_computers_autonomous",
"tech_auxiliary_fire_control",
"tech_synchronized_defences",
"tech_fission_power",
"tech_fusion_power",
"tech_cold_fusion_power",
"tech_antimatter_power",
"tech_zero_point_power",
"tech_reactor_boosters_1",
"tech_reactor_boosters_2",
"tech_reactor_boosters_3",
"tech_shields_1",
"tech_shields_2",
"tech_shields_3",
"tech_shields_4",
"tech_shields_5",
"tech_shield_rechargers_1",
"tech_planetary_shield_generator",
"tech_sensors_2",
"tech_sensors_3",
"tech_sensors_4",
"tech_power_plant_1",
"tech_power_plant_2",
"tech_power_plant_3",
"tech_power_plant_4",
"tech_power_hub_1",
"tech_power_hub_2",
"tech_hyper_drive_1",
"tech_hyper_drive_2",
"tech_hyper_drive_3",
"tech_wormhole_stabilization",
"tech_gateway_activation",
"tech_gateway_construction",
"tech_jump_drive_1",
"tech_ftl_inhibitor",
"tech_matter_generator",
}
SOCIETY_TECHS = {
"tech_planetary_defenses",
"tech_eco_simulation",
"tech_hydroponics",
"tech_gene_crops",
"tech_nano_vitality_crops",
"tech_nutrient_replication",
"tech_biolab_1",
"tech_biolab_2",
"tech_biolab_3",
"tech_alien_life_studies",
"tech_colonization_1",
"tech_colonization_2",
"tech_colonization_3",
"tech_colonization_4",
"tech_colonization_5",
"tech_tomb_world_adaption",
"tech_space_trading",
"tech_frontier_health",
"tech_frontier_hospital",
"tech_tb_mountain_range",
"tech_tb_volcano",
"tech_tb_dangerous_wildlife",
"tech_tb_dense_jungle",
"tech_tb_quicksand_basin",
"tech_tb_noxious_swamp",
"tech_tb_massive_glacier",
"tech_tb_toxic_kelp",
"tech_tb_deep_sinkhole",
"tech_terrestrial_sculpting",
"tech_ecological_adaptation",
"tech_climate_restoration",
"tech_genome_mapping",
"tech_vitality_boosters",
"tech_epigenetic_triggers",
"tech_cloning",
"tech_gene_banks",
"tech_gene_seed_purification",
"tech_morphogenetic_field_mastery",
"tech_gene_tailoring",
"tech_glandular_acclimation",
"tech_genetic_resequencing",
"tech_gene_expressions",
"tech_selected_lineages",
"tech_capacity_boosters",
"tech_regenerative_hull_tissue",
"tech_doctrine_fleet_size_1",
"tech_doctrine_fleet_size_2",
"tech_doctrine_fleet_size_3",
"tech_doctrine_fleet_size_4",
"tech_doctrine_fleet_size_5",
"tech_interstellar_fleet_traditions",
"tech_refit_standards",
"tech_command_matrix",
"tech_doctrine_navy_size_1",
"tech_doctrine_navy_size_2",
"tech_doctrine_navy_size_3",
"tech_doctrine_navy_size_4",
"tech_centralized_command",
"tech_combat_training",
"tech_ground_defense_planning",
"tech_global_defense_grid",
"tech_psionic_theory",
"tech_telepathy",
"tech_precognition_interface",
"tech_psi_jump_drive_1",
"tech_galactic_ambitions",
"tech_manifest_destiny",
"tech_interstellar_campaigns",
"tech_galactic_campaigns",
"tech_planetary_government",
"tech_planetary_unification",
"tech_colonial_centralization",
"tech_galactic_administration",
"tech_galactic_markets",
"tech_subdermal_stimulation",
"tech_galactic_benevolence",
"tech_adaptive_bureaucracy",
"tech_colonial_bureaucracy",
"tech_galactic_bureaucracy",
"tech_living_state",
"tech_collective_self",
"tech_autonomous_agents",
"tech_embodied_dynamism",
"tech_neural_implants",
"tech_artificial_moral_codes",
"tech_synthetic_thought_patterns",
"tech_collective_production_methods",
"tech_resource_processing_algorithms",
"tech_cultural_heritage",
"tech_heritage_site",
"tech_hypercomms_forum",
"tech_autocurating_vault",
"tech_holographic_rituals",
"tech_consecration_fields",
"tech_transcendent_faith",
"tech_ascension_theory",
"tech_ascension_theory_apoc",
"tech_psionic_shield",
}
ENGINEERING_TECHS = {
"tech_space_exploration",
"tech_corvettes",
"tech_destroyers",
"tech_cruisers",
"tech_battleships",
"tech_titans",
"tech_corvette_build_speed",
"tech_corvette_hull_1",
"tech_corvette_hull_2",
"tech_destroyer_build_speed",
"tech_destroyer_hull_1",
"tech_destroyer_hull_2",
"tech_cruiser_build_speed",
"tech_cruiser_hull_1",
"tech_cruiser_hull_2",
"tech_battleship_build_speed",
"tech_battleship_hull_1",
"tech_battleship_hull_2",
"tech_titan_hull_1",
"tech_titan_hull_2",
"tech_starbase_1",
"tech_starbase_2",
"tech_starbase_3",
"tech_starbase_4",
"tech_starbase_5",
"tech_modular_engineering",
"tech_space_defense_station_improvement",
"tech_strike_craft_1",
"tech_strike_craft_2",
"tech_strike_craft_3",
"tech_assault_armies",
"tech_ship_armor_1",
"tech_ship_armor_2",
"tech_ship_armor_3",
"tech_ship_armor_4",
"tech_ship_armor_5",
"tech_crystal_armor_1",
"tech_crystal_armor_2",
"tech_thrusters_1",
"tech_thrusters_2",
"tech_thrusters_3",
"tech_thrusters_4",
"tech_space_defense_station_1",
"tech_defense_platform_hull_1",
"tech_basic_industry",
"tech_powered_exoskeletons",
"tech_mining_network_2",
"tech_mining_network_3",
"tech_mining_network_4",
"tech_mineral_processing_1",
"tech_mineral_processing_2",
"tech_engineering_lab_1",
"tech_engineering_lab_2",
"tech_engineering_lab_3",
"tech_robotic_workers",
"tech_droid_workers",
"tech_synthetic_workers",
"tech_synthetic_leaders",
"tech_space_construction",
"tech_afterburners_1",
"tech_afterburners_2",
"tech_assembly_pattern",
"tech_construction_templates",
"tech_mega_engineering",
}
ALL_KNOWN_TECHS = set.union(PHYSICS_TECHS, ENGINEERING_TECHS, SOCIETY_TECHS)
ASCENSION_PERKS = {
"ap_enigmatic_engineering", #: "Enigmatic Engineering",
"ap_nihilistic_acquisition", #: "Nihilistic Acquisition",
"ap_colossus", #: "Colossus",
"ap_engineered_evolution", #: "Engineered Evolution",
"ap_evolutionary_mastery", #: "Evolutionary Mastery",
"ap_the_flesh_is_weak", #: "The Flesh is Weak",
"ap_synthetic_evolution", #: "Synthetic Evolution",
"ap_mind_over_matter", #: "Mind over Matter",
"ap_transcendence", #: "Transcendence",
"ap_world_shaper", #: "World Shaper",
"ap_galactic_force_projection", #: "Galactic Force Projection",
"ap_defender_of_the_galaxy", #: "Defender of the Galaxy",
"ap_interstellar_dominion", #: "Interstellar Dominion",
"ap_grasp_the_void", #: "Grasp the Void",
"ap_eternal_vigilance", #: "Eternal Vigilance",
"ap_galactic_contender", #: "Galactic Contender",
"ap_technological_ascendancy", #: "Technological Ascendancy",
"ap_one_vision", #: "One Vision",
"ap_consecrated_worlds", #: "Consecrate Worlds",
"ap_mastery_of_nature", #: "Mastery of Nature",
"ap_imperial_prerogative", #: "Imperial Prerogative",
"ap_executive_vigor", #: "Executive Vigor",
"ap_transcendent_learning", #: "Transcendent Learning",
"ap_shared_destiny", #: "Shared Destiny",
"ap_voidborn", #: "Voidborn",
"ap_master_builders", #: "Master Builders",
"ap_galactic_wonders", #: "Galactic Wonders",
"ap_synthetic_age", #: "Synthetic Age",
"ap_machine_worlds", #: "Machine Worlds",
}
COLONIZABLE_PLANET_CLASSES_PLANETS = {
"pc_desert",
"pc_arid",
"pc_savannah",
"pc_tropical",
"pc_continental",
"pc_ocean",
"pc_tundra",
"pc_arctic",
"pc_alpine",
"pc_gaia",
"pc_nuked",
"pc_machine",
}
COLONIZABLE_PLANET_CLASSES_MEGA_STRUCTURES = {
"pc_ringworld_habitable",
"pc_habitat",
}
# Planet classes for the planetary diversity mod
# (see https://steamcommunity.com/workshop/filedetails/discussion/1466534202/3397295779078104093/)
COLONIZABLE_PLANET_CLASSES_PD_PLANETS = {
"pc_antarctic",
"pc_deadcity",
"pc_retinal",
"pc_irradiated_terrestrial",
"pc_lush",
"pc_geocrystalline",
"pc_marginal",
"pc_irradiated_marginal",
"pc_marginal_cold",
"pc_crystal",
"pc_floating",
"pc_graveyard",
"pc_mushroom",
"pc_city",
"pc_archive",
"pc_biolumen",
"pc_technoorganic",
"pc_tidallylocked",
"pc_glacial",
"pc_frozen_desert",
"pc_steppe",
"pc_hadesert",
"pc_boreal",
"pc_sandsea",
"pc_subarctic",
"pc_geothermal",
"pc_cascadian",
"pc_swamp",
"pc_mangrove",
"pc_desertislands",
"pc_mesa",
"pc_oasis",
"pc_hajungle",
"pc_methane",
"pc_ammonia",
}
COLONIZABLE_PLANET_CLASSES = (
COLONIZABLE_PLANET_CLASSES_PLANETS
| COLONIZABLE_PLANET_CLASSES_MEGA_STRUCTURES
| COLONIZABLE_PLANET_CLASSES_PD_PLANETS
)
DESTROYED_BY_WEAPONS_PLANET_CLASSES = {
"pc_shattered",
"pc_shielded",
"pc_ringworld_shielded",
"pc_habitat_shielded",
"pc_ringworld_habitable_damaged",
}
DESTROYED_BY_EVENTS_AND_CRISES_PLANET_CLASSES = {
"pc_egg_cracked",
"pc_shrouded",
"pc_ai",
"pc_infested",
"pc_gray_goo",
}
DESTROYED_PLANET_CLASSES = (
DESTROYED_BY_WEAPONS_PLANET_CLASSES | DESTROYED_BY_EVENTS_AND_CRISES_PLANET_CLASSES
)
def is_destroyed_planet(planet_class):
return planet_class in DESTROYED_PLANET_CLASSES
def is_colonizable_planet(planet_class):
return planet_class in COLONIZABLE_PLANET_CLASSES
def is_colonizable_megastructure(planet_class):
return planet_class in COLONIZABLE_PLANET_CLASSES_MEGA_STRUCTURES
LOWERCASE_WORDS = {"the", "in", "of", "for", "is", "over", "under"}
WORD_REPLACEMENT = {
"Ai": "AI",
"Ftl": "FTL",
"Tb": "Tile Blocker",
}
def convert_id_to_name(object_id: str, remove_prefix="") -> str:
words = [word for word in object_id.split("_") if word != remove_prefix]
words = [
word.capitalize() if word not in LOWERCASE_WORDS else word for word in words
]
words = [WORD_REPLACEMENT.get(word, word) for word in words]
return " ".join(words)
| 28.810811 | 98 | 0.711069 | 1,236 | 10,660 | 5.523463 | 0.328479 | 0.022704 | 0.031639 | 0.01538 | 0.077194 | 0.033397 | 0.033397 | 0.01582 | 0.01582 | 0 | 0 | 0.013547 | 0.175985 | 10,660 | 369 | 99 | 28.888889 | 0.763661 | 0.072983 | 0 | 0 | 0 | 0 | 0.600954 | 0.345041 | 0 | 0 | 0 | 0 | 0 | 1 | 0.011429 | false | 0 | 0 | 0.008571 | 0.022857 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
7e1a810629ff4e92bba844d970fa5906ec4f3338 | 3,689 | py | Python | starfish/pipeline/filter/gaussian_high_pass.py | Xiaojieqiu/starfish | 426480fcfeda4b8b1eb9371a818ff20275ac898d | [
"MIT"
] | 1 | 2018-10-07T03:53:43.000Z | 2018-10-07T03:53:43.000Z | starfish/pipeline/filter/gaussian_high_pass.py | Xiaojieqiu/starfish | 426480fcfeda4b8b1eb9371a818ff20275ac898d | [
"MIT"
] | null | null | null | starfish/pipeline/filter/gaussian_high_pass.py | Xiaojieqiu/starfish | 426480fcfeda4b8b1eb9371a818ff20275ac898d | [
"MIT"
] | null | null | null | import argparse
from functools import partial
from numbers import Number
from typing import Callable, Union, Tuple, Optional
import numpy as np
from skimage import img_as_uint
from starfish.errors import DataFormatWarning
from starfish.image import ImageStack
from starfish.pipeline.filter.gaussian_low_pass import GaussianLowPass
from ._base import FilterAlgorithmBase
class GaussianHighPass(FilterAlgorithmBase):
def __init__(
self, sigma: Union[Number, Tuple[Number]], is_volume: bool=False, verbose: bool=False, **kwargs
) -> None:
"""Gaussian high pass filter
Parameters
----------
sigma : Union[Number, Tuple[Number]]
standard deviation of gaussian kernel
is_volume : bool
If True, 3d (z, y, x) volumes will be filtered, otherwise, filter 2d tiles independently.
verbose : bool
if True, report on filtering progress (default = False)
"""
if isinstance(sigma, tuple):
message = ("if passing an anisotropic kernel, the dimensionality must match the data shape ({shape}), not "
"{passed_shape}")
if is_volume and len(sigma) != 3:
raise ValueError(message.format(shape=3, passed_shape=len(sigma)))
if not is_volume and len(sigma) != 2:
raise ValueError(message.format(shape=2, passed_shape=len(sigma)))
self.sigma = sigma
self.is_volume = is_volume
self.verbose = verbose
@classmethod
def add_arguments(cls, group_parser: argparse.ArgumentParser) -> None:
group_parser.add_argument(
"--sigma", type=float, help="standard deviation of gaussian kernel")
group_parser.add_argument(
"--is-volume", action="store_true", help="indicates that the image stack should be filtered in 3d")
@staticmethod
def high_pass(image: np.ndarray, sigma: Union[Number, Tuple[Number]]) -> np.ndarray:
"""
Applies a gaussian high pass filter to an image
Parameters
----------
image : numpy.ndarray[np.uint32]
2-d or 3-d image data
sigma : Union[Number, Tuple[Number]]
Standard deviation of gaussian kernel
Returns
-------
np.ndarray :
Standard deviation of the Gaussian kernel that will be applied. If a float, an isotropic kernel will be
assumed, otherwise the dimensions of the kernel give (z, y, x)
"""
if image.dtype != np.uint16:
DataFormatWarning('gaussian filters currently only support uint16 images. Image data will be converted.')
image = img_as_uint(image)
blurred: np.ndarray = GaussianLowPass.low_pass(image, sigma)
over_flow_ind: np.ndarray[bool] = image < blurred
filtered: np.ndarray = image - blurred
filtered[over_flow_ind] = 0
return filtered
def filter(self, stack: ImageStack, in_place: bool=True) -> Optional[ImageStack]:
"""Perform filtering of an image stack
Parameters
----------
stack : ImageStack
Stack to be filtered.
in_place : bool
if True, process ImageStack in-place, otherwise return a new stack
Returns
-------
Optional[ImageStack] :
if in-place is False, return the results of filter as a new stack
"""
high_pass: Callable = partial(self.high_pass, sigma=self.sigma)
result = stack.apply(high_pass, is_volume=self.is_volume, verbose=self.verbose, in_place=in_place)
if not in_place:
return result
return None
| 35.815534 | 119 | 0.632421 | 442 | 3,689 | 5.180995 | 0.321267 | 0.031441 | 0.027948 | 0.036681 | 0.135808 | 0.052402 | 0.052402 | 0.052402 | 0.052402 | 0.052402 | 0 | 0.006022 | 0.279751 | 3,689 | 102 | 120 | 36.166667 | 0.855852 | 0.278666 | 0 | 0.043478 | 0 | 0 | 0.131424 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.086957 | false | 0.217391 | 0.217391 | 0 | 0.391304 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 1 |
7e1d6767408fd3f3df4bbb5588442b63585f4595 | 5,937 | py | Python | factom_core/blockchains/base.py | sourcery-ai-bot/factom-core | 186dca550d98d758e9f8dab878e6382153efeaf3 | [
"MIT"
] | null | null | null | factom_core/blockchains/base.py | sourcery-ai-bot/factom-core | 186dca550d98d758e9f8dab878e6382153efeaf3 | [
"MIT"
] | null | null | null | factom_core/blockchains/base.py | sourcery-ai-bot/factom-core | 186dca550d98d758e9f8dab878e6382153efeaf3 | [
"MIT"
] | null | null | null | from typing import Any, List
import factom_core.blocks as blocks
from factom_core.db import FactomdLevelDB
from .pending_block import PendingBlock
class BaseBlockchain:
"""The base class for all Blockchain objects"""
network_id: bytes = None
vms: List[Any] = None
data_path: str = None
db: FactomdLevelDB = None
current_block: PendingBlock = None
def __init__(self, data_path: str = None) -> None:
if not isinstance(self.network_id, bytes) or len(self.network_id) != 4:
raise ValueError("The Blockchain class must be instantiated with a `network_id` bytes object of length 4")
# if not isinstance(self.vms, list) or len(self.vms) == 0:
# raise ValueError(
# "The Blockchain class must be instantiated with a `vms` list of length > 1"
# )
self.data_path = data_path
self.db = FactomdLevelDB(path=data_path, create_if_missing=True)
def load_genesis_block(self) -> blocks.DirectoryBlock:
raise NotImplementedError("Blockchain classes must implement this method")
def vm_for_hash(self, h: bytes) -> int:
raise NotImplementedError("Blockchain classes must implement this method")
def seal_minute(self) -> None:
raise NotImplementedError("Blockchain classes must implement this method")
def rotate_vms(self) -> None:
raise NotImplementedError("Blockchain classes must implement this method")
def seal_block(self) -> None:
raise NotImplementedError("Blockchain classes must implement this method")
class Blockchain(BaseBlockchain):
"""
A Blockchain is a combination of VM classes. Each VM is associated
with a range of chains. The Blockchain class acts as a wrapper around these other
VM classes, delegating operations to the appropriate VM depending on the
current block / minute number.
"""
def __init__(self, data_path: str = None):
super().__init__(data_path)
def load_genesis_block(self) -> blocks.DirectoryBlock:
pass
def vm_for_hash(self, h: bytes) -> int:
"""
Compute the VM index responsible for hash h
Taken from: factomd/state/processList.go/VMindexFor(hash []byte)
"""
if len(self.vms) == 0:
return 0
v = sum(h)
return v % len(self.vms)
def seal_minute(self) -> None:
"""Finalize the current block minute"""
self.rotate_vms()
if self.current_block.current_minute == 10:
self.seal_block()
else:
self.current_block.current_minute += 1
def rotate_vms(self) -> None:
"""Rotate the responsibilities of the VM set (if necessary)"""
# TODO: see processList.go/MakgeMap for formula per block height
if len(self.vms) == 1:
return
self.vms = self.vms[1:] + self.vms[:1]
def seal_block(self):
"""
Bundles all added transactions, entries, and other elements into a set of finalized
blocks with headers.
"""
block = self.current_block
entry_blocks: List[blocks.EntryBlock] = []
for chain_id, block_body in block.entry_blocks.items():
prev = self.db.get_entry_block_head(chain_id)
header = block_body.construct_header(
chain_id=chain_id,
prev_keymr=prev.keymr if prev is not None else bytes(32),
prev_full_hash=prev.full_hash if prev is not None else bytes(32),
sequence=prev.header.sequence + 1 if prev is not None else 0,
height=block.height,
)
entry_blocks.append(blocks.EntryBlock(header, block_body))
prev = self.db.get_entry_credit_block(height=block.height - 1)
header = block.entry_credit_block.construct_header(
prev_header_hash=prev.header_hash, prev_full_hash=prev.full_hash, height=block.height,
)
entry_credit_block = blocks.EntryCreditBlock(header, block.entry_credit_block)
prev = self.db.get_factoid_block(height=block.height - 1)
header = block.factoid_block.construct_header(
prev_keymr=block.previous.body.factoid_block_keymr,
prev_ledger_keymr=prev.ledger_keymr,
ec_exchange_rate=1000, # TODO
height=block.height,
)
factoid_block = blocks.FactoidBlock(header, block.factoid_block)
prev = self.db.get_admin_block(height=block.height - 1)
header = block.admin_block.construct_header(
prev_back_reference_hash=prev.back_reference_hash, height=block.height
)
admin_block = blocks.AdminBlock(header, block.admin_block)
# Compile all the above blocks and the previous directory block, into a new one
directory_block_body = blocks.DirectoryBlockBody(
admin_block_lookup_hash=admin_block.lookup_hash,
entry_credit_block_header_hash=entry_credit_block.header_hash,
factoid_block_keymr=factoid_block.keymr,
entry_blocks=[
{"chain_id": entry_block.header.chain_id, "keymr": entry_block.keymr} for entry_block in entry_blocks
],
)
header = directory_block_body.construct_header(
network_id=self.network_id,
prev_keymr=block.previous.keymr,
prev_full_hash=block.previous.full_hash,
timestamp=block.timestamp,
height=block.height,
)
directory_block = blocks.DirectoryBlock(header, directory_block_body)
# Persist the blocks as new chain heads
self.db.put_directory_block_head(directory_block)
self.db.put_admin_block_head(admin_block)
self.db.put_entry_credit_block_head(entry_credit_block)
self.db.put_factoid_block_head(factoid_block)
for entry_block in entry_blocks:
self.db.put_entry_block_head(entry_block)
| 39.58 | 118 | 0.662961 | 755 | 5,937 | 4.989404 | 0.218543 | 0.035041 | 0.036103 | 0.05442 | 0.322272 | 0.262809 | 0.216353 | 0.152641 | 0.126361 | 0.108574 | 0 | 0.005663 | 0.256358 | 5,937 | 149 | 119 | 39.845638 | 0.847565 | 0.160687 | 0 | 0.163265 | 0 | 0 | 0.066516 | 0 | 0 | 0 | 0 | 0.013423 | 0 | 1 | 0.122449 | false | 0.010204 | 0.040816 | 0 | 0.265306 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
7e1f08195236c36e7e4adbc510902d90c5d6a72a | 492 | py | Python | bot/plugins/bongo.py | Preocts/twitch-chat-bot | 50341c30d8eada4b50634c8f25a9eb0eed681735 | [
"MIT"
] | 62 | 2019-11-16T22:07:42.000Z | 2022-03-08T20:50:01.000Z | bot/plugins/bongo.py | Preocts/twitch-chat-bot | 50341c30d8eada4b50634c8f25a9eb0eed681735 | [
"MIT"
] | 30 | 2019-03-19T15:05:55.000Z | 2022-03-24T05:00:53.000Z | bot/plugins/bongo.py | Preocts/twitch-chat-bot | 50341c30d8eada4b50634c8f25a9eb0eed681735 | [
"MIT"
] | 56 | 2019-06-08T20:34:31.000Z | 2022-02-21T20:10:38.000Z | from __future__ import annotations
from typing import Match
from bot.config import Config
from bot.data import command
from bot.data import esc
from bot.data import format_msg
@command('!bongo')
async def cmd_bongo(config: Config, match: Match[str]) -> str:
_, _, rest = match['msg'].partition(' ')
rest = rest.strip()
if rest:
rest = f'{rest} '
return format_msg(
match,
f'awcBongo awcBongo awcBongo {esc(rest)}awcBongo awcBongo awcBongo',
)
| 22.363636 | 76 | 0.676829 | 66 | 492 | 4.909091 | 0.378788 | 0.08642 | 0.101852 | 0.157407 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.21748 | 492 | 21 | 77 | 23.428571 | 0.841558 | 0 | 0 | 0 | 0 | 0 | 0.164634 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.375 | 0 | 0.4375 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 1 |
7e1f4ae6822590bfe77246c78771d04e3114e141 | 532 | py | Python | sensorsproject/settings/dev_edwin.py | edwinsteele/sensorsproject | 21164b9bb437a3b1bbcc05f05c58cd5621bd4501 | [
"CC0-1.0"
] | null | null | null | sensorsproject/settings/dev_edwin.py | edwinsteele/sensorsproject | 21164b9bb437a3b1bbcc05f05c58cd5621bd4501 | [
"CC0-1.0"
] | 2 | 2020-02-12T01:18:25.000Z | 2020-06-05T18:40:51.000Z | sensorsproject/settings/dev_edwin.py | edwinsteele/sensorsproject | 21164b9bb437a3b1bbcc05f05c58cd5621bd4501 | [
"CC0-1.0"
] | null | null | null | __author__ = 'esteele'
# Common settings
from .base import *
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.postgresql_psycopg2',
'NAME': 'sensors',
'USER': '',
'PASSWORD': '',
'HOST': 'localhost',
'PORT': '',
}
}
DEBUG = True
TEMPLATE_DEBUG = DEBUG
# Is it a problem that this is at the end rather than before sensors?
INSTALLED_APPS += ('debug_toolbar',)
MIDDLEWARE_CLASSES += ('debug_toolbar.middleware.DebugToolbarMiddleware',)
INTERNAL_IPS = ('127.0.0.1',)
| 22.166667 | 74 | 0.62218 | 57 | 532 | 5.614035 | 0.842105 | 0.075 | 0.1375 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.016949 | 0.223684 | 532 | 23 | 75 | 23.130435 | 0.757869 | 0.156015 | 0 | 0 | 0 | 0 | 0.374439 | 0.190583 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0.058824 | 0.058824 | 0 | 0.058824 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 1 |
7e249d769adff61f3ed7920cc7d27c1e95686e3c | 887 | py | Python | class/cls_006.py | rpoliselit/python-for-dummies | d6f45a966a5238058953f93d8660832fa692b3d4 | [
"MIT"
] | null | null | null | class/cls_006.py | rpoliselit/python-for-dummies | d6f45a966a5238058953f93d8660832fa692b3d4 | [
"MIT"
] | null | null | null | class/cls_006.py | rpoliselit/python-for-dummies | d6f45a966a5238058953f93d8660832fa692b3d4 | [
"MIT"
] | null | null | null | # Custom errors in classes.
class TooManyPagesReadError(ValueError):
pass
class Book:
def __init__(self, title, page_count):
self.title = title
self.page_count = page_count
self.pages_read = 0
def __repr__(self):
return (
f"<Book {self.title}, read {self.pages_read} pages out of {self.page_count}>"
)
def read(self, pages):
if self.pages_read + pages > self.page_count:
msg = f"You tried to read {self.pages_read + pages} but this book only has {self.page_count} pages."
raise TooManyPagesReadError(msg)
self.pages_read += pages
print(f"You have now read {self.pages_read} pages out of {self.page_count}")
book_1 = Book("Fluent Python", 800)
try:
book_1.read(450)
book_1.read(800)
except TooManyPagesReadError as e:
print(e)
finally:
print(book_1)
| 26.878788 | 112 | 0.641488 | 124 | 887 | 4.387097 | 0.370968 | 0.115809 | 0.143382 | 0.165441 | 0.1875 | 0.147059 | 0.147059 | 0.147059 | 0.147059 | 0.147059 | 0 | 0.021244 | 0.257046 | 887 | 32 | 113 | 27.71875 | 0.804249 | 0.028185 | 0 | 0 | 0 | 0.08 | 0.283721 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.12 | false | 0.04 | 0 | 0.04 | 0.24 | 0.12 | 0 | 0 | 0 | null | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
7e25064cbef2ae141cd04ce9086cb43f954ff0bf | 661 | py | Python | data/external/repositories/113677/KaggleBillionWordImputation-master/scripts/test_stanford_nltk.py | Keesiu/meta-kaggle | 87de739aba2399fd31072ee81b391f9b7a63f540 | [
"MIT"
] | null | null | null | data/external/repositories/113677/KaggleBillionWordImputation-master/scripts/test_stanford_nltk.py | Keesiu/meta-kaggle | 87de739aba2399fd31072ee81b391f9b7a63f540 | [
"MIT"
] | null | null | null | data/external/repositories/113677/KaggleBillionWordImputation-master/scripts/test_stanford_nltk.py | Keesiu/meta-kaggle | 87de739aba2399fd31072ee81b391f9b7a63f540 | [
"MIT"
] | 1 | 2019-12-04T08:23:33.000Z | 2019-12-04T08:23:33.000Z | #!/usr/bin/env python
import sys, bz2
sys.path.insert(0, '/Users/timpalpant/Documents/Workspace/corenlp-python')
import nltk
from nltk.tree import Tree
from corenlp import StanfordCoreNLP
from remove_random_word import remove_random_word
print "Booting StanfordCoreNLP"
nlp = StanfordCoreNLP()
print "Initializing train file"
train = bz2.BZ2File('../data/train_v2.txt.bz2')
for line in train:
rline = remove_random_word(line)
lparse = nlp.raw_parse(line)
ltree = Tree.fromstring(lparse['sentences'][0]['parsetree'])
rparse = nlp.raw_parse(rline)
rtree = Tree.fromstring(rparse['sentences'][0]['parsetree'])
print ltree
print rtree | 30.045455 | 74 | 0.748865 | 90 | 661 | 5.4 | 0.5 | 0.074074 | 0.098765 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.013937 | 0.131619 | 661 | 22 | 75 | 30.045455 | 0.832753 | 0.030257 | 0 | 0 | 0 | 0 | 0.24649 | 0.118565 | 0 | 0 | 0 | 0 | 0 | 0 | null | null | 0 | 0.277778 | null | null | 0.222222 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
7e264599a8cb2bf8b1fd89fbf7da642cc5d1526e | 7,549 | py | Python | parsifal/library/models.py | reeta1234/parsifal | dc2ea19916e7430634e2efd3e1bacc89ebbde4f1 | [
"MIT"
] | null | null | null | parsifal/library/models.py | reeta1234/parsifal | dc2ea19916e7430634e2efd3e1bacc89ebbde4f1 | [
"MIT"
] | 6 | 2020-06-06T00:35:23.000Z | 2022-03-12T00:15:47.000Z | parsifal/library/models.py | reeta1234/parsifal | dc2ea19916e7430634e2efd3e1bacc89ebbde4f1 | [
"MIT"
] | null | null | null | # coding: utf-8
from django.db import models
from django.contrib.auth.models import User
from django.utils.text import slugify
class SharedFolder(models.Model):
name = models.CharField(max_length=50)
slug = models.SlugField(max_length=255, null=True, blank=True)
users = models.ManyToManyField(User, through='Collaborator', related_name='shared_folders')
class Meta:
verbose_name = 'Shared Folder'
verbose_name_plural = 'Shared Folders'
ordering = ('name',)
def __unicode__(self):
return self.name
def save(self, *args, **kwargs):
if not self.pk:
super(SharedFolder, self).save(*args, **kwargs)
base_slug = slugify(self.name)
if len(base_slug) > 0:
base_slug = slugify(u'{0} {1}'.format(self.name, self.pk))
else:
base_slug = self.pk
i = 0
unique_slug = base_slug
while SharedFolder.objects.filter(slug=unique_slug).exists():
i += 1
unique_slug = u'{0}-{1}'.format(base_slug, i)
self.slug = unique_slug
super(SharedFolder, self).save(*args, **kwargs)
class Collaborator(models.Model):
READ = 'R'
WRITE = 'W'
ADMIN = 'A'
ACCESS_TYPES = (
(READ, 'Read'),
(WRITE, 'Write'),
(ADMIN, 'Admin'),
)
user = models.ForeignKey(User,on_delete=models.CASCADE)
shared_folder = models.ForeignKey(SharedFolder,on_delete=models.CASCADE)
joined_at = models.DateTimeField(auto_now_add=True)
is_owner = models.BooleanField(default=False)
access = models.CharField(max_length=1, choices=ACCESS_TYPES, default=READ)
class Meta:
verbose_name = 'Collaborator'
verbose_name_plural = 'Collaborators'
def save(self, *args, **kwargs):
if self.is_owner:
self.access = Collaborator.ADMIN
super(Collaborator, self).save(*args, **kwargs)
class Document(models.Model):
ARTICLE = 'article'
BOOK = 'book'
BOOKLET = 'booklet'
CONFERENCE = 'conference'
INBOOK = 'inbook'
INCOLLECTION = 'incollection'
INPROCEEDINGS = 'inproceedings'
MANUAL = 'manual'
MASTERSTHESIS = 'mastersthesis'
MISC = 'misc'
PHDTHESIS = 'phdthesis'
PROCEEDINGS = 'proceedings'
TECHREPORT = 'techreport'
UNPUBLISHED = 'unpublished'
ENTRY_TYPES = (
(ARTICLE, 'Article'),
(BOOK, 'Book'),
(BOOKLET, 'Booklet'),
(CONFERENCE, 'Conference'),
(INBOOK, 'Inbook'),
(INCOLLECTION, 'Incollection'),
(INPROCEEDINGS, 'Inproceedings'),
(MANUAL, 'Manual'),
(MASTERSTHESIS, 'Master\'s Thesis'),
(MISC, 'Misc'),
(PHDTHESIS, 'Ph.D. Thesis'),
(PROCEEDINGS, 'Proceedings'),
(TECHREPORT, 'Tech Report'),
(UNPUBLISHED, 'Unpublished'),
)
# Bibtex required fields
bibtexkey = models.CharField('Bibtex key', max_length=255, null=True, blank=True)
entry_type = models.CharField('Document type', max_length=13, choices=ENTRY_TYPES, null=True, blank=True)
# Bibtex base fields
address = models.CharField(max_length=2000, null=True, blank=True)
author = models.TextField(max_length=1000, null=True, blank=True)
booktitle = models.CharField(max_length=1000, null=True, blank=True)
chapter = models.CharField(max_length=1000, null=True, blank=True)
crossref = models.CharField('Cross-referenced', max_length=1000, null=True, blank=True)
edition = models.CharField(max_length=1000, null=True, blank=True)
editor = models.CharField(max_length=1000, null=True, blank=True)
howpublished = models.CharField('How it was published', max_length=1000, null=True, blank=True)
institution = models.CharField(max_length=1000, null=True, blank=True)
journal = models.CharField(max_length=1000, null=True, blank=True)
month = models.CharField(max_length=50, null=True, blank=True)
note = models.CharField(max_length=2000, null=True, blank=True)
number = models.CharField(max_length=1000, null=True, blank=True)
organization = models.CharField(max_length=1000, null=True, blank=True)
pages = models.CharField(max_length=255, null=True, blank=True)
publisher = models.CharField(max_length=1000, null=True, blank=True)
school = models.CharField(max_length=1000, null=True, blank=True)
series = models.CharField(max_length=500, null=True, blank=True)
title = models.CharField(max_length=1000, null=True, blank=True)
publication_type = models.CharField(max_length=1000, null=True, blank=True) # Type
volume = models.CharField(max_length=1000, null=True, blank=True)
year = models.CharField(max_length=50, null=True, blank=True)
# Extra fields
abstract = models.TextField(max_length=4000, null=True, blank=True)
coden = models.CharField(max_length=1000, null=True, blank=True)
doi = models.CharField('DOI', max_length=255, null=True, blank=True)
isbn = models.CharField('ISBN', max_length=255, null=True, blank=True)
issn = models.CharField('ISSN', max_length=255, null=True, blank=True)
keywords = models.CharField(max_length=2000, null=True, blank=True)
language = models.CharField(max_length=1000, null=True, blank=True)
url = models.CharField('URL', max_length=1000, null=True, blank=True)
# Parsifal management field
user = models.ForeignKey(User, null=True, related_name='documents',on_delete=models.CASCADE)
review = models.ForeignKey('reviews.Review', null=True, related_name='documents',on_delete=models.CASCADE)
shared_folder = models.ForeignKey(SharedFolder, null=True, related_name='documents',on_delete=models.CASCADE)
created_at = models.DateTimeField(auto_now_add=True)
updated_at = models.DateTimeField(auto_now=True)
class Meta:
verbose_name = 'Document'
verbose_name_plural = 'Documents'
def __unicode__(self):
return self.title
def document_file_upload_to(instance, filename):
return u'library/{0}/'.format(instance.document.user.pk)
class DocumentFile(models.Model):
document = models.ForeignKey(Document, related_name='files',on_delete=models.CASCADE)
document_file = models.FileField(upload_to='library/')
filename = models.CharField(max_length=255)
size = models.IntegerField(default=0)
created_at = models.DateTimeField(auto_now_add=True)
updated_at = models.DateTimeField(auto_now=True)
class Meta:
verbose_name = 'Document File'
verbose_name_plural = 'Document Files'
def __unicode__(self):
return self.filename
class Folder(models.Model):
name = models.CharField(max_length=50)
slug = models.SlugField(max_length=255, null=True, blank=True)
user = models.ForeignKey(User, related_name='library_folders',on_delete=models.CASCADE)
documents = models.ManyToManyField(Document)
class Meta:
verbose_name = 'Folder'
verbose_name_plural = 'Folders'
ordering = ('name',)
unique_together = (('name', 'user'),)
def __unicode__(self):
return self.name
def save(self, *args, **kwargs):
base_slug = slugify(self.name)
if len(base_slug) > 0:
unique_slug = base_slug
else:
base_slug = unique_slug = 'untitled-folder'
i = 0
while Folder.objects.filter(slug=unique_slug).exists():
i += 1
unique_slug = u'{0}-{1}'.format(base_slug, i)
self.slug = unique_slug
super(Folder, self).save(*args, **kwargs)
| 38.126263 | 113 | 0.672672 | 918 | 7,549 | 5.38671 | 0.198257 | 0.069161 | 0.089383 | 0.116886 | 0.549039 | 0.517695 | 0.501517 | 0.440849 | 0.440849 | 0.209505 | 0 | 0.02407 | 0.202014 | 7,549 | 197 | 114 | 38.319797 | 0.796813 | 0.013114 | 0 | 0.2625 | 0 | 0 | 0.080881 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.05 | false | 0 | 0.01875 | 0.03125 | 0.625 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 1 |
7e28c7fed222b2d6fc00b0669d8343daf9331941 | 286 | py | Python | demo/onlyuserrole/demo/urls.py | tangdyy/onlyuserclient | d93b4e1077afda6b58bba002729f6bc89b988c7a | [
"MIT"
] | 2 | 2021-03-12T00:42:13.000Z | 2021-05-24T06:31:13.000Z | demo/onlyuserrole/demo/urls.py | tangdyy/onlyuserclient | d93b4e1077afda6b58bba002729f6bc89b988c7a | [
"MIT"
] | null | null | null | demo/onlyuserrole/demo/urls.py | tangdyy/onlyuserclient | d93b4e1077afda6b58bba002729f6bc89b988c7a | [
"MIT"
] | null | null | null | from django.conf.urls import url
from django.urls import path,include
from rest_framework import routers
from .views import RoleViewSet
router = routers.DefaultRouter()
router.register(r'roles', RoleViewSet, basename='role')
urlpatterns = [
url(r'^', include(router.urls)),
]
| 22 | 55 | 0.755245 | 37 | 286 | 5.810811 | 0.567568 | 0.093023 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.132867 | 286 | 12 | 56 | 23.833333 | 0.866935 | 0 | 0 | 0 | 0 | 0 | 0.034965 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.444444 | 0 | 0.444444 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 1 |
7e2c4a41a01548c788b1f56baad24c0343c82cf7 | 753 | py | Python | slender/tests/dictionary/test_contain.py | torokmark/slender | 3bf815e22f7802ba48706f31ba608cf609e23e68 | [
"Apache-2.0"
] | 1 | 2020-01-10T21:51:46.000Z | 2020-01-10T21:51:46.000Z | slender/tests/dictionary/test_contain.py | torokmark/slender | 3bf815e22f7802ba48706f31ba608cf609e23e68 | [
"Apache-2.0"
] | null | null | null | slender/tests/dictionary/test_contain.py | torokmark/slender | 3bf815e22f7802ba48706f31ba608cf609e23e68 | [
"Apache-2.0"
] | null | null | null | from unittest import TestCase, skip
from expects import *
from slender import Dictionary
class TestContain(TestCase):
def setUp(self):
self.key = 'a'
def test_contain_if_dictionary_is_empty(self):
d1 = Dictionary[str, int]({})
expect(self.key in d1).to(be_false)
def test_contain_if_dictionary_not_contains_key(self):
d1 = Dictionary[str, int]({'b' : 2, 'c' : 3})
expect(self.key in d1).to(be_false)
def test_contain_if_dictionary_contains_key(self):
d1 = Dictionary[str, int]({'a': 1, 'b' : 20, 'c' : 3})
expect(self.key in d1).to(be_true)
def test_contain_if_negate(self):
d1 = Dictionary[str, int]({'b' : 2})
expect(self.key not in d1).to(be_true)
| 26.892857 | 62 | 0.636122 | 113 | 753 | 4.035398 | 0.336283 | 0.076754 | 0.122807 | 0.140351 | 0.625 | 0.484649 | 0.484649 | 0.282895 | 0.282895 | 0.22807 | 0 | 0.025862 | 0.229748 | 753 | 27 | 63 | 27.888889 | 0.760345 | 0 | 0 | 0.111111 | 0 | 0 | 0.009321 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.277778 | false | 0 | 0.166667 | 0 | 0.5 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
7e2cb45d24767d8a20fc314235185561540eae21 | 490 | py | Python | Python/0263_ugly_number.py | codingyen/CodeAlone | b16653957258ac09f74bb1a380f29dd93e055a44 | [
"MIT"
] | 2 | 2018-08-20T04:38:06.000Z | 2019-02-03T07:48:28.000Z | Python/0263_ugly_number.py | codingyen/CodeAlone | b16653957258ac09f74bb1a380f29dd93e055a44 | [
"MIT"
] | null | null | null | Python/0263_ugly_number.py | codingyen/CodeAlone | b16653957258ac09f74bb1a380f29dd93e055a44 | [
"MIT"
] | null | null | null | # Time: O(logn) = O(1)
# Space: O(1)
class Solution:
def isUgly(self, num):
if not num:
return False
while num % 2 == 0:
num = num / 2
while num % 3 == 0:
num = num / 3
while num % 5 == 0:
num = num / 5
        return num == 1
#if num == 1:
# return True
#else:
# return False
if __name__ == "__main__":
num = 14
s = Solution()
print(s.isUgly(num)) | 17.5 | 27 | 0.418367 | 62 | 490 | 3.177419 | 0.435484 | 0.121827 | 0.106599 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.056604 | 0.459184 | 490 | 28 | 28 | 17.5 | 0.686792 | 0.163265 | 0 | 0 | 0 | 0 | 0.019802 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | null | 0 | 0 | null | null | 0.066667 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
7e2fade8c98465a167e99e3c11e9a3c85426d6ee | 2,913 | py | Python | wiki-preparation/dump_topn.py | bertrandlalo/piaf-code | 2c879590fca92d85bb6a51e13c9603dd2f7c573f | [
"MIT"
] | 8 | 2020-02-07T15:28:15.000Z | 2021-03-25T22:58:21.000Z | wiki-preparation/dump_topn.py | etalab-ia/piaf-code | 86948a73b26498948e70ad92e4b9b0231eac81f7 | [
"MIT"
] | null | null | null | wiki-preparation/dump_topn.py | etalab-ia/piaf-code | 86948a73b26498948e70ad92e4b9b0231eac81f7 | [
"MIT"
] | null | null | null | import struct
import pickle
import sys
class DataInputStream:
"""
Reading from Java DataInputStream format.
"""
def __init__(self, stream):
self.stream = stream
def read_boolean(self):
return struct.unpack('?', self.stream.read(1))[0]
def read_byte(self):
return struct.unpack('b', self.stream.read(1))[0]
def read_unsigned_byte(self):
return struct.unpack('B', self.stream.read(1))[0]
def read_char(self):
return chr(struct.unpack('>H', self.stream.read(2))[0])
def read_double(self):
return struct.unpack('>d', self.stream.read(8))[0]
def read_float(self):
return struct.unpack('>f', self.stream.read(4))[0]
def read_short(self):
return struct.unpack('>h', self.stream.read(2))[0]
def read_unsigned_short(self):
return struct.unpack('>H', self.stream.read(2))[0]
def read_long(self):
return struct.unpack('>q', self.stream.read(8))[0]
def read_utf(self):
utf_length = struct.unpack('>H', self.stream.read(2))[0]
return self.stream.read(utf_length)
def read_int(self):
return struct.unpack('>i', self.stream.read(4))[0]
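# Illustrative usage (not part of the original file): the reader accepts any binary
# file-like object, e.g. an in-memory stream holding one big-endian double:
#   import io
#   dis = DataInputStream(io.BytesIO(struct.pack('>d', 0.25)))
#   assert dis.read_double() == 0.25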
def main(top_n, path_wiki_pageranks_raw, path_wiki_pagerank_id_title_raw, path_wiki_pagerank_title):
pageranks = []
with open(path_wiki_pageranks_raw, 'rb') as f:
stream = DataInputStream(f)
while True:
try:
val = stream.read_double()
pageranks.append(val)
except struct.error:
print("I am dead")
break
id_title = {}
with open(path_wiki_pagerank_id_title_raw) as f:
for title in f:
page_id = int(f.readline())
id_title[page_id] = title.rstrip()
with open(path_wiki_pagerank_title, 'w') as f:
for page_id, pagerank in enumerate(pageranks):
if pagerank > 0.0 and page_id in id_title:
title = id_title.get(page_id)
f.write('{} \t {} \n '.format(pagerank, title))
with open(path_wiki_pagerank_title) as f:
tuples = [(i.split('\t')[0], i.split('\t')[1])
for i in f.readlines() if i.strip()]
        sorted_ = sorted(tuples, key=lambda tup: float(tup[0]), reverse=True)  # numeric sort, largest pagerank first
pickle.dump(sorted_[:top_n], open(f"top_{top_n}.pkl", 'wb'))
if __name__ == "__main__":
    if len(sys.argv) < 5:
print(
"Usage: \n dump_topn.py top_N input_path_wikipedia-pageranks.raw"
" input_path_wikipedia-pagerank-id-title.raw"
" output_path_wikipedia-pagerank-title.txt")
exit(1)
top_n = int(sys.argv[1])
path_wiki_pageranks_raw = sys.argv[2]
path_wiki_pagerank_id_title_raw = sys.argv[3]
path_wiki_pagerank_title = sys.argv[4]
main(top_n, path_wiki_pageranks_raw,
path_wiki_pagerank_id_title_raw,
path_wiki_pagerank_title)
| 29.13 | 100 | 0.61174 | 414 | 2,913 | 4.062802 | 0.241546 | 0.083234 | 0.099881 | 0.117717 | 0.387634 | 0.387634 | 0.288942 | 0.247919 | 0.230678 | 0.230678 | 0 | 0.015193 | 0.254377 | 2,913 | 99 | 101 | 29.424242 | 0.759208 | 0.014075 | 0 | 0 | 0 | 0 | 0.076681 | 0.040616 | 0 | 0 | 0 | 0 | 0 | 1 | 0.188406 | false | 0 | 0.043478 | 0.144928 | 0.405797 | 0.028986 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 1 |
7e357df698ecec1e1881a2b4bd0ec295d7103a9f | 1,102 | py | Python | users/forms.py | shyam999/Django-blog | 8f59987f362be0edca4e1542f820eb0d82c7bf2c | [
"MIT"
] | 13 | 2020-04-16T14:34:07.000Z | 2021-06-22T04:27:02.000Z | users/forms.py | alexhan-sys/Django-blog | 563d1975d85b2c288d6d58b5af8d54f9ab16541e | [
"MIT"
] | 4 | 2021-03-30T13:00:21.000Z | 2022-01-13T02:30:26.000Z | users/forms.py | alexhan-sys/Django-blog | 563d1975d85b2c288d6d58b5af8d54f9ab16541e | [
"MIT"
] | 5 | 2020-04-09T18:01:45.000Z | 2021-09-06T12:47:17.000Z | from django import forms
from django.contrib.auth.forms import UserCreationForm
from django.contrib.auth.models import User
from .models import UserProfile
class UserRegistrationForm(UserCreationForm):
def __init__(self, *args, **kwargs):
super(UserRegistrationForm, self).__init__(*args, **kwargs)
self.fields['username'].help_text = ''
self.fields['password1'].help_text = ''
self.fields['password2'].help_text = ''
class Meta:
model = User
fields = (
'username',
'email',
'password1',
'password2'
)
def save(self):
user = User.objects.create_user(username=self.cleaned_data['username'], password=self.cleaned_data['password1'])
user.email = self.cleaned_data['email']
user.save()
return user
class UserProfileForm(forms.ModelForm):
class Meta:
model = UserProfile
fields = ('profile_pic', 'bio')
class ProfileEditForm(forms.ModelForm):
class Meta:
model = UserProfile
fields = ['profile_pic', 'bio'] | 28.25641 | 120 | 0.627949 | 112 | 1,102 | 6.026786 | 0.357143 | 0.044444 | 0.062222 | 0.062222 | 0.171852 | 0.171852 | 0.171852 | 0.171852 | 0.171852 | 0.171852 | 0 | 0.006083 | 0.254083 | 1,102 | 39 | 121 | 28.25641 | 0.815085 | 0 | 0 | 0.16129 | 0 | 0 | 0.097008 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.064516 | false | 0.16129 | 0.129032 | 0 | 0.419355 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 1 |
7e383cf10dafc058011f30ba8f5f288676b64fb4 | 777 | py | Python | app/routes/student/subjects.py | roshnet/Electron-Server | f21ced024d69d7d96e89a2aa90c80f3d6757b3e1 | [
"MIT"
] | null | null | null | app/routes/student/subjects.py | roshnet/Electron-Server | f21ced024d69d7d96e89a2aa90c80f3d6757b3e1 | [
"MIT"
] | 5 | 2021-10-30T06:04:47.000Z | 2021-11-14T21:09:00.000Z | app/routes/student/subjects.py | roshnet/Electron-Server | f21ced024d69d7d96e89a2aa90c80f3d6757b3e1 | [
"MIT"
] | 1 | 2022-02-28T15:24:58.000Z | 2022-02-28T15:24:58.000Z | from app import app
from app.database import db
from app.database.models.student_subject_map import StudentSubjectMap
from app.database.models.subjects import Subjects
from fastapi import Depends, Response, status
from fastapi_jwt_auth import AuthJWT
@app.get("/students/{student_id}/subjects")
async def student_subjects(student_id, response: Response, Auth: AuthJWT = Depends()):
Auth.jwt_required()
subjects = (
db.query(Subjects)
.join(StudentSubjectMap, StudentSubjectMap.student_id == student_id)
.filter(Subjects.id == StudentSubjectMap.subject_id)
.all()
)
if not subjects:
response.status_code = status.HTTP_404_NOT_FOUND
return {"result": "fail", "reason": "No subjects found"}
return subjects
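# Illustrative request (not part of the original file); assumes the app listens on
# localhost:8000 and fastapi_jwt_auth reads the access token from the Authorization header:
#   curl -H "Authorization: Bearer <access_token>" http://localhost:8000/students/42/subjects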
| 31.08 | 86 | 0.728443 | 95 | 777 | 5.8 | 0.410526 | 0.050817 | 0.08167 | 0.076225 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.004695 | 0.177606 | 777 | 24 | 87 | 32.375 | 0.85759 | 0 | 0 | 0 | 0 | 0 | 0.082368 | 0.039897 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.315789 | 0 | 0.421053 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 1 |
7e38b53b5f4c4aae5647971334f2234e632edaa5 | 1,069 | py | Python | chapter100/mongodb_04.py | thiagola92/learning-databases-with-python | cf23c34d7fd1ecd36dd3e7b30dc5916eb23eaf1e | [
"MIT"
] | null | null | null | chapter100/mongodb_04.py | thiagola92/learning-databases-with-python | cf23c34d7fd1ecd36dd3e7b30dc5916eb23eaf1e | [
"MIT"
] | null | null | null | chapter100/mongodb_04.py | thiagola92/learning-databases-with-python | cf23c34d7fd1ecd36dd3e7b30dc5916eb23eaf1e | [
"MIT"
] | null | null | null | import time
from pymongo import MongoClient
from datetime import datetime
from threading import Thread, Lock
start = datetime.now()
client = MongoClient("mongodb://username:password@127.0.0.1")
database = client["database_name"]
collection = database["collection_name"]
threads_count = 0
lock = Lock()
package = []
def send(p):
global threads_count
with lock:
threads_count += 1
collection.insert_many(p)
with lock:
threads_count -= 1
with open("utils/trash.csv") as file:
for line in file.readlines():
name, description = line.split(",")
package.append({"name": name, "description": description})
if len(package) >= 10000:
while threads_count >= 4:
time.sleep(0)
Thread(target=send, args=(package[:],), daemon=True).start()
package.clear()
if package:
collection.insert_many(package)
while threads_count != 0:
pass
print(collection.count_documents({}))
collection.drop()
client.drop_database("mongo")
print(datetime.now() - start)
| 20.169811 | 72 | 0.656688 | 129 | 1,069 | 5.348837 | 0.465116 | 0.104348 | 0.037681 | 0.057971 | 0.06087 | 0 | 0 | 0 | 0 | 0 | 0 | 0.020286 | 0.21609 | 1,069 | 52 | 73 | 20.557692 | 0.803103 | 0 | 0 | 0.057143 | 0 | 0 | 0.094481 | 0.034612 | 0 | 0 | 0 | 0 | 0 | 1 | 0.028571 | false | 0.057143 | 0.114286 | 0 | 0.142857 | 0.057143 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 1 |
7e3b10150f30d30b54781201a09f35f781054e7c | 2,963 | py | Python | contact_test.py | Njihia413/contact-list | bf555aa3657db1f90274dfe768b649007d725285 | [
"Unlicense"
] | null | null | null | contact_test.py | Njihia413/contact-list | bf555aa3657db1f90274dfe768b649007d725285 | [
"Unlicense"
] | null | null | null | contact_test.py | Njihia413/contact-list | bf555aa3657db1f90274dfe768b649007d725285 | [
"Unlicense"
] | null | null | null | import unittest #Importing the unittest module
from contact import Contact #Importing the contact class
#import pyperclip #Pyperclip will allow us to copy and paste items to our clipboard
class TestContact(unittest.TestCase):
def setUp(self):
self.new_contact = Contact("Lyn","Muthoni","0796654066","sonnie2154@gmail.com")
def tearDown(self):
Contact.contact_list = []
#First test to check if our contact objects are being instantiated correctly
def test_instance(self):
self.assertEqual(self.new_contact.first_name,"Lyn")
self.assertEqual(self.new_contact.last_name,"Muthoni")
self.assertEqual(self.new_contact.phone_number,"0796654066")
self.assertEqual(self.new_contact.email,"sonnie2154@gmail.com")
#Second test to check if we can save contacts into the contact list
def test_save_contact(self):
self.new_contact.save_contact() #saving the new contact
self.assertEqual(len(Contact.contact_list),1)
#Third test to test if we can save multiple contacts
def test_save_multiple_contact(self):
self.new_contact.save_contact()
test_contact = Contact("Test","user","0712345678","test@user.com") #new contact
test_contact.save_contact()
self.assertEqual(len(Contact.contact_list),2)
#Fourth test to test if we can remove a contact from our contact list
def test_delete_contact(self):
self.new_contact.save_contact()
test_contact = Contact("Test","user","0712345678","test@user.com") #new contact
test_contact.save_contact()
self.new_contact.delete_contact() #Deleting a contact object
self.assertEqual(len(Contact.contact_list),1)
#Fifth test to check if we can find a contact by phone number and display information
def test_find_contact_by_number(self):
self.new_contact.save_contact()
test_contact = Contact("Test","user","0711223344","test@user.com") #new contact
test_contact.save_contact()
found_contact = Contact.find_by_number("0711223344")
self.assertEqual(found_contact.email,test_contact.email)
#Sixth test to check if a contact object exists
def test_contact_exists(self):
self.new_contact.save_contact()
test_contact = Contact("Test","user","0711223344","test@user.com") #new contact
test_contact.save_contact()
contact_exists = Contact.contact_exist("0711223344")
self.assertTrue(contact_exists)
#Seventh test to display all contacts
def test_display_all_contacts(self):
self.assertEqual(Contact.display_contacts(),Contact.contact_list)
#Eighth test to allow us to copy items to the clipboard
'''
def test_copy_email(self):
self.new_contact.save_contact()
Contact.copy_email("0712345678")
self.assertEqual(self.new_contact.email,pyperclip.paste())
'''
if __name__ == '__main__':
unittest.main()
| 42.942029 | 89 | 0.710429 | 392 | 2,963 | 5.173469 | 0.214286 | 0.088757 | 0.089744 | 0.06213 | 0.422584 | 0.379684 | 0.297337 | 0.223866 | 0.223866 | 0.223866 | 0 | 0.042189 | 0.192035 | 2,963 | 68 | 90 | 43.573529 | 0.804929 | 0.238947 | 0 | 0.357143 | 0 | 0 | 0.113447 | 0 | 0 | 0 | 0 | 0 | 0.238095 | 1 | 0.214286 | false | 0 | 0.047619 | 0 | 0.285714 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
7e3d308df34767b7c5b7bbfbef6d03d373ce1d9f | 319 | py | Python | back_end/celery_tasks/main.py | 22014471/malonghui_Django | c9c2a68882450f9327e141333f30fdd73e530c28 | [
"MIT"
] | 1 | 2021-01-31T16:57:35.000Z | 2021-01-31T16:57:35.000Z | back_end/celery_tasks/main.py | 22014471/malonghui_Django | c9c2a68882450f9327e141333f30fdd73e530c28 | [
"MIT"
] | null | null | null | back_end/celery_tasks/main.py | 22014471/malonghui_Django | c9c2a68882450f9327e141333f30fdd73e530c28 | [
"MIT"
] | null | null | null | from celery import Celery
import os
# Set the default Django settings module for Celery
if not os.getenv('DJANGO_SETTINGS_MODULE'):
os.environ['DJANGO_SETTINGS_MODULE'] = 'mlh.settings.dev'
# Create the Celery application (named 'mlh') and specify the broker
celery_app = Celery('mlh',broker='redis://127.0.0.1:6379/15')
# Automatically discover and register tasks
celery_app.autodiscover_tasks(['celery_tasks.sms',]) | 22.785714 | 61 | 0.758621 | 45 | 319 | 5.2 | 0.644444 | 0.102564 | 0.17094 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.041522 | 0.094044 | 319 | 14 | 62 | 22.785714 | 0.768166 | 0.159875 | 0 | 0 | 0 | 0 | 0.392453 | 0.260377 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.333333 | 0 | 0.333333 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 1 |
7e3f3d1673e07847a0ccc4fa36154b7d2891f18d | 676 | py | Python | tests/unittest/parser/test_basic_parser.py | alessandrome/pywiktionary | b9378ca1e2dfe704eaa8a044bd82519b12f81226 | [
"MIT"
] | 4 | 2019-08-08T21:15:01.000Z | 2021-01-14T01:32:18.000Z | tests/unittest/parser/test_basic_parser.py | alessandrome/pywiktionary | b9378ca1e2dfe704eaa8a044bd82519b12f81226 | [
"MIT"
] | 1 | 2021-09-02T17:24:12.000Z | 2021-09-02T17:24:12.000Z | tests/unittest/parser/test_basic_parser.py | alessandrome/pywiktionary | b9378ca1e2dfe704eaa8a044bd82519b12f81226 | [
"MIT"
] | 1 | 2020-03-19T12:57:45.000Z | 2020-03-19T12:57:45.000Z | import unittest
from pywiktionary.parsers import basic_parser
def get_pizza_html_extract():
with open('tests/file/html-responses/pizza-it.html', 'r', encoding='utf-8') as pizza_html_file:
pizza_html = pizza_html_file.read()
return pizza_html
class BasicParseTestCase(unittest.TestCase):
def test_init(self):
parser = basic_parser.BasicParser(get_pizza_html_extract())
self.assertEqual(get_pizza_html_extract(), parser.html)
def test_parse_method(self):
parser = basic_parser.BasicParser(get_pizza_html_extract())
self.assertRaises(NotImplementedError, parser.parse)
if __name__ == '__main__':
unittest.main()
| 29.391304 | 99 | 0.739645 | 86 | 676 | 5.44186 | 0.465116 | 0.153846 | 0.102564 | 0.162393 | 0.235043 | 0.235043 | 0.235043 | 0.235043 | 0.235043 | 0.235043 | 0 | 0.001757 | 0.158284 | 676 | 22 | 100 | 30.727273 | 0.820738 | 0 | 0 | 0.133333 | 0 | 0 | 0.078402 | 0.057692 | 0 | 0 | 0 | 0 | 0.133333 | 1 | 0.2 | false | 0 | 0.133333 | 0 | 0.466667 | 0 | 0 | 0 | 0 | null | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
7e46b7c1bdfe713325687e0706d7633667a0c0ae | 5,818 | py | Python | iktomi/forms/widgets.py | boltnev/iktomi | bc92006c026f9b42e56f1af5ced2fe577673a486 | [
"MIT"
] | 14 | 2015-02-15T05:24:22.000Z | 2020-03-19T10:07:28.000Z | iktomi/forms/widgets.py | boltnev/iktomi | bc92006c026f9b42e56f1af5ced2fe577673a486 | [
"MIT"
] | 10 | 2015-04-04T10:10:41.000Z | 2016-06-01T13:17:58.000Z | iktomi/forms/widgets.py | boltnev/iktomi | bc92006c026f9b42e56f1af5ced2fe577673a486 | [
"MIT"
] | 5 | 2015-02-20T11:18:58.000Z | 2016-10-18T15:30:13.000Z | # -*- coding: utf-8 -*-
from ..utils import weakproxy
from . import convs
class Widget(object):
# obsolete parameters from previous versions
_obsolete = frozenset(['multiple'])
#: Template to render widget
template = None
#: Value of HTML element's *class* attribute
classname = ''
#: describes how the widget is rendered.
#: the following values are supported by default:
#: 'default': label is rendered in usual place
#: 'checkbox': label and widget are rendered close to each other
#: 'full-width': for table-like templates, otherwise should be rendered as default
#: 'hidden': label is not rendered
render_type = 'default'
#: True if widget renders hint itself.
#: Otherwise parent field should render the hint
renders_hint = False
def __init__(self, field=None, **kwargs):
if self._obsolete & set(kwargs):
raise TypeError(
'Obsolete parameters are used: {}'.format(
list(self._obsolete & set(kwargs))))
self.field = weakproxy(field)
self._init_kwargs = kwargs
self.__dict__.update(kwargs)
@property
def multiple(self):
return self.field.multiple
@property
def input_name(self):
return self.field.input_name
@property
def id(self):
return self.field.id
@property
def env(self):
return self.field.env
def prepare_data(self):
'''
Method returning data passed to template.
Subclasses can override it.
'''
value = self.get_raw_value()
return dict(widget=self,
field=self.field,
value=value,
readonly=not self.field.writable)
def get_raw_value(self):
return self.field.raw_value
def render(self):
'''
Renders widget to template
'''
data = self.prepare_data()
if self.field.readable:
return self.env.template.render(self.template, **data)
return ''
def __call__(self, **kwargs):
'''
Creates current object's copy with extra constructor arguments passed.
'''
kwargs = dict(self._init_kwargs, **kwargs)
kwargs.setdefault('field', self.field)
return self.__class__(**kwargs)
class TextInput(Widget):
template = 'widgets/textinput'
classname = 'textinput'
class Textarea(Widget):
template = 'widgets/textarea'
class HiddenInput(Widget):
render_type = 'hidden'
template = 'widgets/hiddeninput'
class PasswordInput(Widget):
template = 'widgets/passwordinput'
classname = 'textinput'
class Select(Widget):
'''
Takes options from :class:`EnumChoice<EnumChoice>` converter,
looks up if converter allows null and passed this value as template
:obj:`required` variable.
'''
template = 'widgets/select'
classname = None
#: HTML select element's select attribute value.
size = None
#: Label assigned to None value if field is not required
null_label = '--------'
def get_options(self, value):
options = []
# XXX ugly
choice_conv = self.field.conv
if isinstance(choice_conv, convs.ListOf):
choice_conv = choice_conv.conv
assert isinstance(choice_conv, convs.EnumChoice)
has_null_value = False
values = value if self.multiple else [value]
for choice, label in choice_conv.options():
has_null_value = has_null_value or choice == ''
options.append(dict(value=choice,
title=label,
selected=(choice in values)))
if not self.multiple and not has_null_value and \
(value == '' or not self.field.conv.required) and \
self.null_label is not None:
options.insert(0, {'value': '',
'title': self.null_label,
'selected': value in (None, '')})
return options
def prepare_data(self):
data = Widget.prepare_data(self)
return dict(data,
options=self.get_options(data['value']),
required=('true' if self.field.conv.required else 'false'))
class CheckBoxSelect(Select):
classname = 'select-checkbox'
template = 'widgets/select-checkbox'
class CheckBox(Widget):
render_type = 'checkbox'
template = 'widgets/checkbox'
class CharDisplay(Widget):
template = 'widgets/span'
classname = 'chardisplay'
#: If is True, value is escaped while rendering.
#: Passed to template as :obj:`should_escape` variable.
escape = True
#: Function converting the value to string.
getter = staticmethod(lambda v: v)
def prepare_data(self):
data = Widget.prepare_data(self)
return dict(data,
value=self.getter(data['value']),
should_escape=self.escape)
class AggregateWidget(Widget):
def get_raw_value(self):
return None
class FieldListWidget(AggregateWidget):
allow_create = True
allow_delete = True
template = 'widgets/fieldlist'
def render_template_field(self):
# used in iktomi.cms: templates/widgets/fieldlist.html
field = self.field.field(name='%'+self.field.input_name+'-index%')
# XXX looks like a HACK
field.set_raw_value(self.field.form.raw_data,
field.from_python(field.get_initial()))
return field.widget.render()
class FieldSetWidget(AggregateWidget):
template = 'widgets/fieldset'
class FieldBlockWidget(FieldSetWidget):
render_type = 'full-width'
class FileInput(Widget):
template = 'widgets/file'
| 26.688073 | 86 | 0.611894 | 645 | 5,818 | 5.404651 | 0.275969 | 0.046472 | 0.02008 | 0.027252 | 0.046472 | 0.046472 | 0.032702 | 0.032702 | 0.032702 | 0.032702 | 0 | 0.000483 | 0.288931 | 5,818 | 217 | 87 | 26.81106 | 0.842156 | 0.202991 | 0 | 0.125 | 0 | 0 | 0.078831 | 0.009743 | 0 | 0 | 0 | 0 | 0.008333 | 1 | 0.116667 | false | 0.016667 | 0.016667 | 0.05 | 0.616667 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 1 |
7e48637038313d81e9c4efaa9dd4ea2b3a9e9145 | 391 | py | Python | yadil/image/face_model.py | neolaw84/yadil | 9f56a1beec5359e0233ae342f01263b29f752bc7 | [
"Apache-2.0"
] | null | null | null | yadil/image/face_model.py | neolaw84/yadil | 9f56a1beec5359e0233ae342f01263b29f752bc7 | [
"Apache-2.0"
] | null | null | null | yadil/image/face_model.py | neolaw84/yadil | 9f56a1beec5359e0233ae342f01263b29f752bc7 | [
"Apache-2.0"
] | null | null | null | # downloaded from https://raw.githubusercontent.com/TadasBaltrusaitis/OpenFace/master/lib/local/LandmarkDetector/model/pdms/In-the-wild_aligned_PDM_68.txt
import pickle
import pathlib
THIS_FILE_PATH = pathlib.Path(__file__)
MODEL_FILE_PATH = pathlib.Path.joinpath(THIS_FILE_PATH.parent, "face_model.bin")
with open(MODEL_FILE_PATH, "rb") as f:
model_points = pickle.load(f)
| 35.545455 | 155 | 0.790281 | 57 | 391 | 5.122807 | 0.666667 | 0.109589 | 0.082192 | 0.130137 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.005698 | 0.102302 | 391 | 10 | 156 | 39.1 | 0.826211 | 0.388747 | 0 | 0 | 0 | 0 | 0.070485 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.333333 | 0 | 0.333333 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 1 |
7e550a8084b1674ea1aeeea565a6759f3741fb88 | 1,557 | py | Python | ask/qa/forms.py | kzn-bulat-s/abracadabra | 25d582a3d6fc6587772776479ef2f1d284fbe0e9 | [
"Unlicense"
] | null | null | null | ask/qa/forms.py | kzn-bulat-s/abracadabra | 25d582a3d6fc6587772776479ef2f1d284fbe0e9 | [
"Unlicense"
] | null | null | null | ask/qa/forms.py | kzn-bulat-s/abracadabra | 25d582a3d6fc6587772776479ef2f1d284fbe0e9 | [
"Unlicense"
] | null | null | null | from django import forms
from django.contrib.auth.models import User
from models import Question, Answer
class AskForm(forms.ModelForm):
class Meta:
model = Question
fields = ['title', 'text']
def save(self, commit=True):
question = super(AskForm, self).save(commit=False)
question.author = self.user
if commit:
question.save()
return question
class AnswerForm(forms.Form):
text = forms.CharField()
question = forms.IntegerField()
def clean(self):
data = self.cleaned_data
try:
question = Question.objects.get(pk=data['question'])
except (Question.DoesNotExist, KeyError):
raise forms.ValidationError("Question doesn't exists.")
else:
data['question'] = question
return data
def save(self):
data = self.cleaned_data
answer = Answer.objects.create(text=data['text'],
question=data['question'],
author=self.user)
return answer
class UserCreationFormWithEmail(forms.ModelForm):
class Meta:
model = User
fields = ('username', 'password', 'email',)
widgets = {
'password': forms.PasswordInput(),
}
def save(self):
username = self.cleaned_data.get('username')
password = self.cleaned_data.get('password')
email = self.cleaned_data.get('email')
user = User.objects.create_user(username, email, password)
return user | 27.803571 | 67 | 0.594091 | 160 | 1,557 | 5.74375 | 0.34375 | 0.059848 | 0.08161 | 0.05876 | 0.11099 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.298651 | 1,557 | 56 | 68 | 27.803571 | 0.841575 | 0 | 0 | 0.136364 | 0 | 0 | 0.071245 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.090909 | false | 0.090909 | 0.068182 | 0 | 0.409091 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 1 |
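# Illustrative view-side usage (not part of the original file): both AskForm and
# AnswerForm expect a `user` attribute to be set before calling save():
#   form = AnswerForm(request.POST)
#   form.user = request.user
#   if form.is_valid():
#       answer = form.save()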
7e56afeffeb1143dfa4e1a4ffb0c6566c1c6fcad | 1,762 | py | Python | python/se3last-w3c/conftest.py | saucelabs-training/platform-config-tests | 11dfab8f9be2fe118ed0b0fa4adebb75a5f1a64c | [
"MIT"
] | 1 | 2021-11-17T22:29:42.000Z | 2021-11-17T22:29:42.000Z | python/se3last-w3c/conftest.py | saucelabs-training/platform-config-tests | 11dfab8f9be2fe118ed0b0fa4adebb75a5f1a64c | [
"MIT"
] | null | null | null | python/se3last-w3c/conftest.py | saucelabs-training/platform-config-tests | 11dfab8f9be2fe118ed0b0fa4adebb75a5f1a64c | [
"MIT"
] | 1 | 2021-11-17T22:29:35.000Z | 2021-11-17T22:29:35.000Z | import os
import sys
import time
import pytest
from appium import webdriver as appiumdriver
from selenium import webdriver
class Helpers:
@staticmethod
def validate_google(driver):
driver.get("http://google.com")
time.sleep(1)
result = 'passed' if driver.title == 'Google' else 'failed'
driver.execute_script("sauce:job-result={}".format(result))
driver.quit()
@staticmethod
def validate_app(driver):
elements = driver.find_elements_by_accessibility_id('test-Username')
result = 'passed' if len(elements) == 1 else 'failed'
driver.execute_script("sauce:job-result={}".format(result))
@staticmethod
def start_driver(caps):
sauce_username = os.environ["SAUCE_USERNAME"]
sauce_access_key = os.environ["SAUCE_ACCESS_KEY"]
remote_url = "http://{}:{}@ondemand.saucelabs.com/wd/hub".format(sauce_username, sauce_access_key)
caps['sauce:options']['name'] = sys._getframe(1).f_code.co_name
caps['sauce:options']['build'] = 'Python Se3Last W3C - {}'.format(os.environ.get("BUILD_TIME"))
return webdriver.Remote(remote_url, desired_capabilities=caps)
@staticmethod
def start_appium_driver(caps):
sauce_username = os.environ["SAUCE_USERNAME"]
sauce_access_key = os.environ["SAUCE_ACCESS_KEY"]
remote_url = "http://{}:{}@ondemand.saucelabs.com/wd/hub".format(sauce_username, sauce_access_key)
caps['sauce:options']['name'] = sys._getframe(1).f_code.co_name
caps['sauce:options']['build'] = 'Python Se3Last W3C - {}'.format(os.environ.get("BUILD_TIME"))
return appiumdriver.Remote(remote_url, desired_capabilities=caps)
@pytest.fixture
def helpers():
return Helpers
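# Illustrative test using the fixture (not part of the original file); the capability
# values are placeholders, start_driver() only requires a 'sauce:options' dict:
#   def test_google_chrome(helpers):
#       caps = {'browserName': 'chrome', 'platformName': 'Windows 10',
#               'browserVersion': 'latest', 'sauce:options': {}}
#       helpers.validate_google(helpers.start_driver(caps))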
| 33.245283 | 106 | 0.682179 | 218 | 1,762 | 5.316514 | 0.316514 | 0.046592 | 0.072476 | 0.08283 | 0.6195 | 0.6195 | 0.553926 | 0.553926 | 0.553926 | 0.553926 | 0 | 0.005544 | 0.181044 | 1,762 | 52 | 107 | 33.884615 | 0.797644 | 0 | 0 | 0.421053 | 0 | 0 | 0.214529 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.131579 | false | 0.052632 | 0.157895 | 0.026316 | 0.394737 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 1 |
7e61331f1d48012031893d41bd0fba4b33926051 | 804 | py | Python | sponsors/migrations/0038_auto_20210827_1223.py | Manny27nyc/pythondotorg | 257c96d3a94755451a5a5cdcd2abad1e27ea299b | [
"Apache-2.0"
] | 911 | 2015-01-03T22:16:06.000Z | 2022-03-31T23:56:22.000Z | sponsors/migrations/0038_auto_20210827_1223.py | Manny27nyc/pythondotorg | 257c96d3a94755451a5a5cdcd2abad1e27ea299b | [
"Apache-2.0"
] | 1,342 | 2015-01-02T16:14:45.000Z | 2022-03-28T08:01:20.000Z | sponsors/migrations/0038_auto_20210827_1223.py | Manny27nyc/pythondotorg | 257c96d3a94755451a5a5cdcd2abad1e27ea299b | [
"Apache-2.0"
] | 551 | 2015-01-04T02:17:31.000Z | 2022-03-23T11:59:25.000Z | # Generated by Django 2.0.13 on 2021-08-27 12:23
from django.db import migrations
def populate_sponsorship_package_fk(apps, schema_editor):
Sponsorship = apps.get_model('sponsors.Sponsorship')
SponsorshipPackage = apps.get_model('sponsors.SponsorshipPackage')
for sponsorship in Sponsorship.objects.all().iterator():
try:
package = SponsorshipPackage.objects.get(name=sponsorship.level_name)
sponsorship.package = package
sponsorship.save()
except SponsorshipPackage.DoesNotExist:
continue
class Migration(migrations.Migration):
dependencies = [
('sponsors', '0037_sponsorship_package'),
]
operations = [
migrations.RunPython(populate_sponsorship_package_fk, migrations.RunPython.noop)
]
| 28.714286 | 88 | 0.70398 | 81 | 804 | 6.839506 | 0.580247 | 0.129964 | 0.093863 | 0.101083 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.031348 | 0.206468 | 804 | 27 | 89 | 29.777778 | 0.836991 | 0.057214 | 0 | 0 | 1 | 0 | 0.104497 | 0.06746 | 0 | 0 | 0 | 0 | 0 | 1 | 0.055556 | false | 0 | 0.055556 | 0 | 0.277778 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
7e67680b7ff285c7a73a411e68537d03ddc32e2e | 1,190 | py | Python | home/board/migrations/0004_add_closed_at.py | he0119/smart-home | bdd3a59a8c46c0fdc07ac3049bf589c7f95a2683 | [
"MIT"
] | null | null | null | home/board/migrations/0004_add_closed_at.py | he0119/smart-home | bdd3a59a8c46c0fdc07ac3049bf589c7f95a2683 | [
"MIT"
] | 223 | 2020-02-21T06:16:56.000Z | 2022-03-01T22:24:19.000Z | home/board/migrations/0004_add_closed_at.py | he0119/smart-home | bdd3a59a8c46c0fdc07ac3049bf589c7f95a2683 | [
"MIT"
] | null | null | null | # Generated by Django 3.1.4 on 2020-12-25 02:53
from django.db import migrations, models
def set_edited_at(apps, schema_editor):
"""修改话题的时间
将关闭话题的修改时间设置成创建时间
将其关闭时间设置成其修改时间
将置顶话题的修改时间设置成创建时间
"""
Topic = apps.get_model("board", "Topic")
for topic in Topic.objects.all():
if not topic.is_open:
topic.edited_at = topic.created_at
topic.closed_at = topic.edited_at
topic.save()
if topic.is_pin:
topic.edited_at = topic.created_at
topic.save()
def reverse_set_edited_at(apps, schema_editor):
"""不做任何事情"""
pass
class Migration(migrations.Migration):
dependencies = [
("board", "0003_rename_field"),
]
operations = [
migrations.AddField(
model_name="topic",
name="closed_at",
field=models.DateTimeField(blank=True, null=True, verbose_name="关闭时间"),
),
migrations.AlterField(
model_name="topic",
name="edited_at",
field=models.DateTimeField(verbose_name="修改时间"),
),
migrations.RunPython(set_edited_at, reverse_code=reverse_set_edited_at),
]
| 24.791667 | 83 | 0.610924 | 135 | 1,190 | 5.162963 | 0.488889 | 0.091822 | 0.063128 | 0.077475 | 0.169297 | 0.169297 | 0.091822 | 0 | 0 | 0 | 0 | 0.022196 | 0.280672 | 1,190 | 47 | 84 | 25.319149 | 0.792056 | 0.094958 | 0 | 0.266667 | 1 | 0 | 0.064824 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.066667 | false | 0.033333 | 0.033333 | 0 | 0.2 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
7e6ca5f5a32c06e55254d834d4ec7b22d2307f05 | 8,468 | py | Python | src/lck/django/common/forms.py | ar4s/kitdjango | 1da443296ac780ae002b66452df2251642374a13 | [
"MIT"
] | 10 | 2015-06-17T08:22:10.000Z | 2020-12-10T13:48:37.000Z | src/lck/django/common/forms.py | ar4s/kitdjango | 1da443296ac780ae002b66452df2251642374a13 | [
"MIT"
] | null | null | null | src/lck/django/common/forms.py | ar4s/kitdjango | 1da443296ac780ae002b66452df2251642374a13 | [
"MIT"
] | 8 | 2015-03-23T10:59:22.000Z | 2022-01-17T09:49:26.000Z | """
Albeit useful, this module is still somewhat a mess in a really early state of development. Beware, there be dragons.
"""
import datetime
import os
import re
from subprocess import check_call, CalledProcessError
from tempfile import NamedTemporaryFile
try:
from cStringIO import StringIO
except ImportError:
from StringIO import StringIO
from django import forms
from django.forms.extras.widgets import RE_DATE, SelectDateWidget
from django.forms.widgets import Select, RadioFieldRenderer, HiddenInput
from django.forms.util import flatatt
from django.utils.dates import MONTHS
from django.utils.encoding import StrAndUnicode, force_unicode
from django.utils.html import escape, conditional_escape
from django.utils.safestring import mark_safe
from django.utils.translation import ugettext_lazy as _
class JQueryUIRadioInput(StrAndUnicode):
"""
An object used by RadioFieldRenderer that represents a single
<input type='radio'>.
"""
def __init__(self, name, value, attrs, choice, index):
self.name, self.value = name, value
self.attrs = attrs
self.choice_value = force_unicode(choice[0])
self.choice_label = force_unicode(choice[1])
self.index = index
def __unicode__(self):
if 'id' in self.attrs:
label_for = ' for="%s_%s"' % (self.attrs['id'], self.index)
else:
label_for = ''
choice_label = conditional_escape(force_unicode(self.choice_label))
return mark_safe(u'%s<label%s>%s</label>' % (self.tag(), label_for,
choice_label))
def is_checked(self):
return self.value == self.choice_value
def tag(self):
if 'id' in self.attrs:
self.attrs['id'] = '%s_%s' % (self.attrs['id'], self.index)
final_attrs = dict(self.attrs, type='radio', name=self.name,
value=self.choice_value)
if self.is_checked():
final_attrs['checked'] = 'checked'
return mark_safe(u'<input%s />' % flatatt(final_attrs))
class JQueryUIRenderer(StrAndUnicode):
"""
A customized renderer for radio fields.
"""
def __init__(self, name, value, attrs, choices):
self.name, self.value, self.attrs = name, value, attrs
self.choices = choices
def __iter__(self):
for i, choice in enumerate(self.choices):
if not choice[0]:
continue
yield JQueryUIRadioInput(self.name,
self.value,
self.attrs.copy(),
choice,
i)
def __getitem__(self, idx):
        choice = self.choices[idx] # Let the IndexError propagate
return JQueryUIRadioInput(self.name,
self.value,
self.attrs.copy(),
choice,
idx)
def __unicode__(self):
return self.render()
def render(self):
return mark_safe('<div class="radio">\n%s\n</div>' \
% '\n'.join([force_unicode(w) for w in self]))
class JQueryMobileVerticalRadioGroupRenderer(JQueryUIRenderer):
data_type = ""
def render(self):
return mark_safe('<div data-role="fieldcontain"><fieldset '
'data-role="controlgroup" %s>\n%s\n</fieldset></div>' %
(self.data_type, '\n'.join([force_unicode(w) for w in self])))
class JQueryMobileHorizontalRadioGroupRenderer(JQueryMobileVerticalRadioGroupRenderer):
data_type = 'data-type="horizontal"'
class JQueryUIRadioSelect(forms.RadioSelect):
renderer = JQueryUIRenderer
@classmethod
def id_for_label(cls, id_):
return id_
class JQueryMobileVerticalRadioGroup(JQueryUIRadioSelect):
renderer = JQueryMobileVerticalRadioGroupRenderer
class JQueryMobileHorizontalRadioGroup(JQueryUIRadioSelect):
renderer = JQueryMobileHorizontalRadioGroupRenderer
class PolishSelectDateWidget(SelectDateWidget):
def __init__(self, attrs=None, years=None, reverse_years=False):
self.reverse_years = reverse_years
super(PolishSelectDateWidget, self).__init__(attrs, years)
def render(self, name, value, attrs=None):
try:
year_val, month_val, day_val = value.year, value.month, value.day
except AttributeError:
year_val = month_val = day_val = None
if isinstance(value, basestring):
match = RE_DATE.match(value)
if match:
year_val, month_val, day_val = [int(v) for v in match.groups()]
output = []
if 'id' in self.attrs:
id_ = self.attrs['id']
else:
id_ = 'id_%s' % name
day_choices = [(i, i) for i in range(1, 32)]
local_attrs = self.build_attrs(id=self.day_field % id_)
select_html = Select(choices=day_choices).render(self.day_field % name, day_val, local_attrs)
output.append(select_html)
month_choices = MONTHS.items()
month_choices.sort()
local_attrs['id'] = self.month_field % id_
select_html = Select(choices=month_choices).render(self.month_field % name, month_val, local_attrs)
output.append(select_html)
year_choices = [(i, i) for i in self.years]
local_attrs['id'] = self.year_field % id_
if self.reverse_years:
year_choices.reverse()
select_html = Select(choices=year_choices).render(self.year_field % name, year_val, local_attrs)
output.append(select_html)
return mark_safe(u'\n'.join(output))
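# Illustrative usage (not part of the original file): the widget drops into a standard
# Django DateField, here with an assumed year range:
#   class EventForm(forms.Form):
#       date = forms.DateField(
#           widget=PolishSelectDateWidget(years=range(2000, 2021), reverse_years=True))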
class WebpImageField(forms.ImageField):
"""Extends the default django ImageField with WEBP support through
`dwebp <http://code.google.com/intl/pl/speed/webp/docs/dwebp.html>`_.
Converts image data to PNG on the fly so that PIL (as of 1.1.7) is able
to use it.
"""
def __init__(self, *args, **kwargs):
print args
print kwargs
super(WebpImageField, self).__init__(*args, **kwargs)
def to_python(self, data):
try:
# try PIL-supported images first
return super(WebpImageField, self).to_python(data)
except forms.ValidationError, e:
pass
if hasattr(data, 'temporary_file_path'):
# file already on disk
file = data.temporary_file_path()
abs_path = os.path.splitext(file)[0] + '.png'
devnull = os.open(os.devnull, os.O_RDWR)
try:
check_call(['dwebp', file, '-o', abs_path],
stdout=devnull, stderr=devnull)
data.temporary_file_path = abs_path
data.name = os.path.basename(abs_path)
                data.size = os.path.getsize(abs_path)
os.unlink(file)
except CalledProcessError:
                raise forms.ValidationError(self.error_messages['invalid_image'])
finally:
os.close(devnull)
else:
with NamedTemporaryFile() as file:
abs_path = os.path.splitext(file.name)[0] + '.png'
if hasattr(data, 'read'):
# InMemoryUploadFile
data.seek(0)
file.write(data.read())
output = data.file
else:
file.write(data['content'])
output = data['content']
file.flush()
devnull = os.open(os.devnull, os.O_RDWR)
try:
check_call(['dwebp', file.name, '-o', abs_path],
stdout=devnull, stderr=devnull)
# Monkey-patch the UploadFile object.
data.name = os.path.basename(abs_path)
data.size = os.path.getsize(abs_path)
with open(abs_path, 'rb') as image:
# InMemoryUploadFile
if hasattr(data, 'read'):
data.file = StringIO(image.read())
else:
data['content'] = StringIO(image.read())
except CalledProcessError:
                    raise forms.ValidationError(self.error_messages['invalid_image'])
finally:
os.close(devnull)
# Return the monkey-patched UploadFile object.
return data
| 35.430962 | 117 | 0.590694 | 936 | 8,468 | 5.174145 | 0.25 | 0.024159 | 0.013628 | 0.014041 | 0.246541 | 0.243444 | 0.167871 | 0.110675 | 0.110675 | 0.073095 | 0 | 0.002053 | 0.309636 | 8,468 | 238 | 118 | 35.579832 | 0.826377 | 0.023618 | 0 | 0.245614 | 0 | 0 | 0.043866 | 0.019798 | 0 | 0 | 0 | 0 | 0 | 0 | null | null | 0.005848 | 0.099415 | null | null | 0.011696 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
7e706c63ed781d4a98c0e2432ffb583ee2d4d55c | 1,795 | py | Python | typarser/_base_optarg.py | ermishechkin/typarser | 00d965f87718c837a2c246f0b5c61401e8a6e3c8 | [
"MIT"
] | null | null | null | typarser/_base_optarg.py | ermishechkin/typarser | 00d965f87718c837a2c246f0b5c61401e8a6e3c8 | [
"MIT"
] | null | null | null | typarser/_base_optarg.py | ermishechkin/typarser | 00d965f87718c837a2c246f0b5c61401e8a6e3c8 | [
"MIT"
] | null | null | null | from __future__ import annotations
import typing
from typing import Callable, Generic, Iterable, Literal, Optional, Tuple, Union
from ._base import RESULT, TYPE, BaseComponent
from ._internal_namespace import set_value
if typing.TYPE_CHECKING:
from ._base import Namespace
from .action import Action
NARGS = Union[int, Literal['*'], Literal['+'], Literal['?']]
class BaseOptArg(BaseComponent[TYPE, RESULT], Generic[TYPE, RESULT]):
def __init__(
self,
*,
type: Callable[[str], TYPE], # pylint: disable=redefined-builtin
nargs: Optional[NARGS],
choices: Optional[Iterable[TYPE]],
default: Optional[Union[RESULT, str]],
metavar: Optional[Union[str, Tuple[str, ...]]],
action: Optional[Action] = None,
help: Optional[str], # pylint: disable=redefined-builtin
) -> None:
super().__init__(help=help)
self._type = type
self._nargs = nargs
self._choices = tuple(choices) if choices else None
self._default = default
self._metavar = metavar
self._action = action
@property
def type(self) -> Callable[[str], TYPE]:
return self._type
@property
def nargs(self) -> Optional[NARGS]:
return self._nargs
@property
def choices(self) -> Optional[Tuple[TYPE, ...]]:
return self._choices
@property
def default(self) -> Optional[Union[RESULT, str]]:
return self._default
@property
def metavar(self) -> Optional[Union[str, Tuple[str, ...]]]:
return self._metavar
@property
def action(self) -> Optional[Action]:
return self._action
def __set__(self, owner: Namespace, value: TYPE):
set_value(owner, self, value)
| 29.42623 | 79 | 0.622284 | 195 | 1,795 | 5.548718 | 0.230769 | 0.060998 | 0.025878 | 0.053604 | 0.044362 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.259053 | 1,795 | 60 | 80 | 29.916667 | 0.813534 | 0.037326 | 0 | 0.125 | 0 | 0 | 0.001739 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.166667 | false | 0 | 0.145833 | 0.125 | 0.458333 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 1 |
7e72e12b97f96d60c1b664e0376ea773f0bdcd6f | 2,918 | py | Python | backend/accounts/serializers.py | hvitis/geodjango-rest-vue-boilerplate | bd825fdfc81430ecfac3fe7d3c06a6e1145de1cd | [
"MIT"
] | 5 | 2020-11-23T11:31:40.000Z | 2022-03-11T01:24:46.000Z | backend/accounts/serializers.py | hvitis/geodjango-rest-vue-boilerplate | bd825fdfc81430ecfac3fe7d3c06a6e1145de1cd | [
"MIT"
] | null | null | null | backend/accounts/serializers.py | hvitis/geodjango-rest-vue-boilerplate | bd825fdfc81430ecfac3fe7d3c06a6e1145de1cd | [
"MIT"
] | null | null | null | from rest_framework_gis.serializers import GeoFeatureModelSerializer, GeometrySerializerMethodField
from django.contrib.gis.geos import Point
from accounts.models import UserProfile
import datetime
from rest_framework_gis.pagination import GeoJsonPagination
from django.contrib.auth import get_user_model
from django.utils import timezone
from rest_framework import serializers
from rest_framework_simplejwt.settings import api_settings
from rest_framework.reverse import reverse as api_reverse
from rest_framework_gis import serializers as geo_serializers
User = get_user_model()
class UserPublicSerializer(serializers.ModelSerializer):
uri = serializers.SerializerMethodField(read_only=True)
class Meta:
model = User
fields = [
'id',
'username',
'uri'
]
def get_uri(self, obj):
request = self.context.get('request')
return api_reverse("api-user:detail", kwargs={"username": obj.username}, request=request)
class UserRegisterSerializer(serializers.ModelSerializer):
password2 = serializers.CharField(
style={'input_type': 'password'}, write_only=True)
token = serializers.SerializerMethodField(read_only=True)
expires = serializers.SerializerMethodField(read_only=True)
message = serializers.SerializerMethodField(read_only=True)
class Meta:
model = User
fields = [
'username',
'email',
'password',
'password2',
'token',
'expires',
'message',
]
extra_kwargs = {'password': {'write_only': True}}
    def get_token(self, obj):
        # The 'token' SerializerMethodField above had no getter in the original file;
        # issuing a simplejwt access token here is an assumption.
        from rest_framework_simplejwt.tokens import RefreshToken
        return str(RefreshToken.for_user(obj).access_token)
    def get_message(self, obj):
        return "Thank you for registering. Please verify your email before continuing."
    def get_expires(self, obj):
        # 'expire_delta' was undefined in the original; assume the simplejwt
        # access-token lifetime exposed by the imported api_settings.
        return timezone.now() + api_settings.ACCESS_TOKEN_LIFETIME - datetime.timedelta(seconds=200)
def validate_email(self, value):
qs = User.objects.filter(email__iexact=value)
if qs.exists():
raise serializers.ValidationError(
"User with this email already exists")
return value
def validate_username(self, value):
qs = User.objects.filter(username__iexact=value)
if qs.exists():
raise serializers.ValidationError(
"User with this username already exists")
return value
def validate(self, data):
pw = data.get('password')
pw2 = data.pop('password2')
if pw != pw2:
raise serializers.ValidationError("Passwords must match")
return data
def create(self, validated_data):
#print(validated_data)
user_obj = User(
username=validated_data.get('username'),
email=validated_data.get('email'))
user_obj.set_password(validated_data.get('password'))
user_obj.is_active = False
user_obj.save()
return user_obj
| 30.715789 | 99 | 0.664839 | 312 | 2,918 | 6.064103 | 0.339744 | 0.02537 | 0.053911 | 0.084567 | 0.252643 | 0.206131 | 0.139535 | 0.139535 | 0.139535 | 0.139535 | 0 | 0.003648 | 0.248458 | 2,918 | 94 | 100 | 31.042553 | 0.859097 | 0.007197 | 0 | 0.194444 | 0 | 0 | 0.113644 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.097222 | false | 0.125 | 0.152778 | 0.027778 | 0.472222 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 1 |
7e7440fb1d1c7e95290bfc94cc2ad2a4debb8384 | 734 | py | Python | main.py | henryfox/encrypted-chat | c69ba786892dd870cb3fddd7da6ea77eadeb4a7e | [
"MIT"
] | null | null | null | main.py | henryfox/encrypted-chat | c69ba786892dd870cb3fddd7da6ea77eadeb4a7e | [
"MIT"
] | null | null | null | main.py | henryfox/encrypted-chat | c69ba786892dd870cb3fddd7da6ea77eadeb4a7e | [
"MIT"
] | null | null | null | import webapp2
import os
import jinja2
from google.appengine.ext import db
template_dir = os.path.join(os.path.dirname(__file__), "templates")
jinja_env = jinja2.Environment(loader = jinja2.FileSystemLoader(template_dir), autoescape = True)
class Handler(webapp2.RequestHandler):
def write(self, *a, **kw):
self.response.out.write(*a, **kw)
def render_str(self, template, **params):
t = jinja_env.get_template(template)
return t.render(params)
def render(self, template, **kw):
self.write(self.render_str(template, **kw))
class MainHandler(Handler):
def get(self):
self.response.write('Hello world!')
app = webapp2.WSGIApplication([
('/', MainHandler)
], debug=True)
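# Illustrative handler (not part of the original file); assumes a 'chat.html' template
# exists in the templates/ directory configured above:
#   class ChatHandler(Handler):
#       def get(self):
#           self.render('chat.html', title='Encrypted chat')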
| 27.185185 | 98 | 0.690736 | 93 | 734 | 5.333333 | 0.483871 | 0.044355 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.009852 | 0.1703 | 734 | 26 | 99 | 28.230769 | 0.804598 | 0 | 0 | 0 | 0 | 0 | 0.029973 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.2 | false | 0 | 0.2 | 0 | 0.55 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 1 |
7e74c6f460e27189aecb37d569cafb98fe35f2d1 | 5,782 | py | Python | ipyannotator/datasets/download.py | EnriqueMoran/ipyannotator | 5517f8ded24e9e1347d0d72c73d620778f7b3069 | [
"Apache-2.0"
] | 19 | 2020-10-12T19:52:10.000Z | 2022-02-07T18:23:26.000Z | ipyannotator/datasets/download.py | EnriqueMoran/ipyannotator | 5517f8ded24e9e1347d0d72c73d620778f7b3069 | [
"Apache-2.0"
] | 6 | 2021-08-02T09:36:32.000Z | 2022-01-05T15:29:30.000Z | ipyannotator/datasets/download.py | EnriqueMoran/ipyannotator | 5517f8ded24e9e1347d0d72c73d620778f7b3069 | [
"Apache-2.0"
] | 1 | 2020-12-01T22:42:01.000Z | 2020-12-01T22:42:01.000Z | # AUTOGENERATED! DO NOT EDIT! File to edit: nbs/01a_datasets_download.ipynb (unless otherwise specified).
__all__ = ['get_cifar10', 'get_oxford_102_flowers', 'get_cub_200_2011']
# Internal Cell
import glob
import json
from pathlib import Path
import os
import subprocess
import tarfile
import urllib.request, urllib.error  # explicit submodule imports for urlretrieve / URLError
import zlib
# Internal Cell
def _download_url(url, root, filename=None):
"""Download a file from a url and place it in root.
Args:
url (str): URL to download file from
root (str): Directory to place downloaded file in
filename (str, optional): Name to save the file under. If None, use the basename of the URL
"""
root = os.path.expanduser(root)
if not filename:
filename = os.path.basename(url)
fpath = os.path.join(root, filename)
os.makedirs(root, exist_ok=True)
if not os.path.isfile(fpath):
try:
print('Downloading ' + url + ' to ' + fpath)
urllib.request.urlretrieve(url, fpath)
except (urllib.error.URLError, IOError) as e:
if url[:5] == 'https':
url = url.replace('https:', 'http:')
print('Failed download. Trying https -> http instead.'
' Downloading ' + url + ' to ' + fpath)
urllib.request.urlretrieve(url, fpath)
else:
print(f'File {filename} already exists, skip download.')
# Internal Cell
def _extract_tar(tar_path, output_dir):
try:
print('Extracting...')
with tarfile.open(tar_path) as tar:
tar.extractall(output_dir)
except (tarfile.TarError, IOError, zlib.error) as e:
print('Failed to extract!', e)
# Cell
def get_cifar10(output_dir):
output_dir = Path(output_dir)
dataset_dir = output_dir / 'cifar10'
_download_url(url='https://s3.amazonaws.com/fast-ai-imageclas/cifar10.tgz', root=output_dir)
if not dataset_dir.is_dir():
_extract_tar(output_dir / 'cifar10.tgz', output_dir)
else:
print(f'Directory {dataset_dir} already exists, skip extraction.')
print('Generating train/test data..')
imdir_train = dataset_dir / 'train'
imdir_test = dataset_dir / 'test'
# split train/test
train = [Path(p) for p in glob.glob(f'{imdir_train}/*/*')]
test = [Path(p) for p in glob.glob(f'{imdir_test}/*/*')]
# generate data for annotations.json
# {'image-file.jpg': ['label1.jpg']}
annotations_train = dict((str(p), [f'{p.parts[-2]}.jpg']) for p in train)
annotations_test = dict((str(p), [f'{p.parts[-2]}.jpg']) for p in test)
train_path = dataset_dir / 'annotations_train.json'
test_path = dataset_dir / 'annotations_test.json'
with open(train_path, 'w') as f:
json.dump(annotations_train, f)
with open(test_path, 'w') as f:
json.dump(annotations_test, f)
print("Done")
return train_path, test_path
# Cell
def get_oxford_102_flowers(output_dir):
output_dir = Path(output_dir)
dataset_dir = output_dir / 'oxford-102-flowers'
_download_url(url='https://s3.amazonaws.com/fast-ai-imageclas/oxford-102-flowers.tgz', root=output_dir)
if not dataset_dir.is_dir():
_extract_tar(output_dir / 'oxford-102-flowers.tgz', output_dir)
else:
print(f'Directory {dataset_dir} already exists, skip extraction.')
print('Generating train/test data..')
with open(dataset_dir / 'train.txt', 'r') as f:
annotations_train = dict(tuple(line.split()) for line in f)
annotations_train = {str(dataset_dir / k): [v+'.jpg'] for k, v in annotations_train.items()}
with open(dataset_dir / 'test.txt', 'r') as f:
annotations_test = dict(tuple(line.split()) for line in f)
annotations_test = {str(dataset_dir / k): [v+'.jpg'] for k, v in annotations_test.items()}
train_path = dataset_dir / 'annotations_train.json'
test_path = dataset_dir / 'annotations_test.json'
with open(train_path, 'w') as f:
json.dump(annotations_train, f)
with open(test_path, 'w') as f:
json.dump(annotations_test, f)
print("Done")
return train_path, test_path
# Cell
def get_cub_200_2011(output_dir):
output_dir = Path(output_dir)
dataset_dir = output_dir / 'CUB_200_2011'
_download_url(url='https://s3.amazonaws.com/fast-ai-imageclas/CUB_200_2011.tgz', root=output_dir)
if not dataset_dir.is_dir():
_extract_tar(output_dir / 'CUB_200_2011.tgz', output_dir)
else:
print(f'Directory {dataset_dir} already exists, skip extraction.')
print('Generating train/test data..')
with open(dataset_dir / 'images.txt','r') as f:
image_id_map = dict(tuple(line.split()) for line in f)
with open(dataset_dir / 'classes.txt','r') as f:
class_id_map = dict(tuple(line.split()) for line in f)
with open(dataset_dir / 'train_test_split.txt','r') as f:
splitter = dict(tuple(line.split()) for line in f)
# image ids for test/train
train_k = [k for k, v in splitter.items() if v == '0']
test_k = [k for k, v in splitter.items() if v == '1']
with open(dataset_dir / 'image_class_labels.txt','r') as f:
anno_ = dict(tuple(line.split()) for line in f)
annotations_train = {str(dataset_dir / 'images' / image_id_map[k]): [class_id_map[v]+'.jpg'] for k, v in anno_.items() if k in train_k}
annotations_test = {str(dataset_dir / 'images' / image_id_map[k]): [class_id_map[v]+'.jpg'] for k, v in anno_.items() if k in test_k}
train_path = dataset_dir / 'annotations_train.json'
test_path = dataset_dir / 'annotations_test.json'
with open(train_path, 'w') as f:
json.dump(annotations_train, f)
with open(test_path, 'w') as f:
json.dump(annotations_test, f)
print("Done")
return train_path, test_path | 35.042424 | 139 | 0.654272 | 851 | 5,782 | 4.251469 | 0.172738 | 0.074627 | 0.019901 | 0.041459 | 0.616086 | 0.583748 | 0.583748 | 0.583748 | 0.576009 | 0.522112 | 0 | 0.015622 | 0.21394 | 5,782 | 165 | 140 | 35.042424 | 0.780418 | 0.089761 | 0 | 0.40367 | 1 | 0.009174 | 0.205854 | 0.037306 | 0 | 0 | 0 | 0 | 0 | 1 | 0.045872 | false | 0 | 0.073395 | 0 | 0.146789 | 0.12844 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
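# Illustrative usage (not part of the original file): each helper downloads and extracts
# its dataset, then returns the generated annotation file paths, e.g. for CIFAR-10:
#   train_json, test_json = get_cifar10('data')
#   with open(train_json) as f:
#       annotations = json.load(f) # {'<image path>': ['<label>.jpg'], ...}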
7e7c1099ca5e6f0cb535351f8035469a042e59e5 | 10,296 | py | Python | draw_classes.py | Fs-agadir/StageDetect | af2841b41f80151e1f3092adbc266eb1d2b8db37 | [
"MIT"
] | null | null | null | draw_classes.py | Fs-agadir/StageDetect | af2841b41f80151e1f3092adbc266eb1d2b8db37 | [
"MIT"
] | null | null | null | draw_classes.py | Fs-agadir/StageDetect | af2841b41f80151e1f3092adbc266eb1d2b8db37 | [
"MIT"
] | null | null | null | # Copyright (c) 2021, Fs-Agadir
# All rights reserved.
#from wx import App, ScreenDC #to get monitor resolution
import matplotlib.pyplot as plt
import matplotlib
import numpy as np
import os, cv2
class Drawing:
def __init__(self):
pass
'''----drawing results tools----'''
# '''get montior resolution in dpi'''
# def monitordpi(self):
# app = App(0)
# s = ScreenDC()
# monitordpi = s.GetPPI()[0]
# return monitordpi
'''define different colors for specific number of values'''
def color_spectrum(self, unique_vals, offset=35, color_type='spectral'):
# unique_vals: type is list
# offset to differentiate colors
# color definitions
# output is cmap color values for each data value
cmap = plt.get_cmap(color_type) #'binary'PiYG
colors = []
i = 0
c = 0
while i < len(unique_vals):
colors.append(cmap(c))
i=i+1
c=c+offset
return colors
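    # Illustrative usage (not part of the original file): one colormap entry per unique
    # class value, spaced `offset` steps apart along the chosen colormap:
    #   colors = Drawing().color_spectrum(['water', 'sand', 'rock'])  # len(colors) == 3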
'''draw points on image'''
def draw_points_onto_image(self, image, image_points, point_id, markSize=2, fontSize=8, switched=False):
# draw image points into image and label the point id
# image_points: array with 2 columns
# point_id: list of point ids in same order as corresponding image_points file; if empty no points labeled
# dpi from screen resolution
#dpi = self.monitordpi()
dpi = 600
set_markersize = markSize
fontProperties_text = {'size' : fontSize,
'family' : 'serif'}
matplotlib.rc('font', **fontProperties_text)
fig = plt.figure(frameon=False) #dpi of screen resolution
fig.set_size_inches(image.shape[1]/float(dpi), image.shape[0]/float(dpi)) #dpi screen resolution!
ax = plt.Axes(fig, [0., 0., 1., 1.])
ax.set_axis_off()
fig.add_axes(ax)
if switched:
ax.plot([p[1] for p in image_points],
[p[0] for p in image_points],
marker='o', ms=set_markersize, color='green', markeredgecolor='green', markeredgewidth=1)
else:
ax.plot([p[0] for p in image_points],
[p[1] for p in image_points],
marker='o', ms=set_markersize, color='red', markeredgecolor='black', markeredgewidth=1)
#ax.plot(image_points[:,0], image_points[:,1], "r.", markersize=set_markersize, markeredgecolor='black')
if len(point_id) > 1:
if not switched:
for label, xl, yl in zip(point_id, image_points[:,0], image_points[:,1]):
ax.annotate(str((label)), xy = (xl, yl), xytext=(xl+5, yl+1), color='blue', **fontProperties_text)
else:
for label, xl, yl in zip(point_id, image_points[:,1], image_points[:,0]):
ax.annotate(str((label)), xy = (xl, yl), xytext=(xl+5, yl+1), color='blue', **fontProperties_text) #str(int(label)
ax.imshow(image, cmap='gray', aspect='normal')
return plt
'''draw points on image'''
def plot_pts(self, img, points, switchColRow=False, plt_title='', output_save=False, edgecolor='blue'):
plt.clf()
plt.figure(frameon=False)
plt.gray()
if switchColRow:
plt.plot([p[1] for p in points],
[p[0] for p in points],
marker='o', ms=5, color='none', markeredgecolor=edgecolor, markeredgewidth=1)
else:
plt.plot([p[0] for p in points],
[p[1] for p in points],
marker='o', ms=5, color='none', markeredgecolor=edgecolor, markeredgewidth=1)
plt.title(plt_title)
plt.axis('off')
plt.imshow(img)
if not output_save:
plt.waitforbuttonpress()
plt.close()
else:
return plt
'''draw Harris points on image'''
def plot_harris_points(self, image, filtered_coords, save=False, directory_img=None):
""" Plots corners found in image. """
plt.figure()
plt.gray()
plt.imshow(image)
plt.plot([p[1] for p in filtered_coords],
[p[0] for p in filtered_coords],
marker='o', ms=2, color='none', markeredgecolor='blue', markeredgewidth=0.2)
plt.axis('off')
if save:
plt.savefig(os.path.join(directory_img, 'harris.jpg'), dpi=600, pad_inches=0)
else:
plt.show()
'''draw SIFT matches on images'''
# source code from Jan Erik Solem
def plot_matches_SIFT(self, imagename1, imagename2, locs1, locs2, matchscores, show_below=True):
'''Show a figure with lines joining the accepted matches
input: im1, im2, (images as arrays), locs1, locs2 (feature locations),
matchscores (as output from 'match()'),
show_below (if images should be shown below the matches). '''
im1 = cv2.imread(imagename1)
im2 = cv2.imread(imagename2)
im3 = self.appendimages(im1, im2)
if show_below:
#im3 = np.vstack((im3, im3))
plt.imshow(im3)
cols1 = im1.shape[1]
for i,m in enumerate(matchscores):
if m > 0:
plt.plot([locs1[i][1], locs2[m][1] + cols1], [locs1[i][0], locs2[m][0]], 'c')
plt.axis('off')
'''draw matches on images'''
# source code from Jan Erik Solem
def plot_matches(self, im1, im2, pts1, pts2, nbr_match_draw_set=0, save=False, directory_img=None):
'''draw matches
im1, im2 location and name of images
pts1, pts2 (numpy array): location of matched points in image
nbr_match_draw_set: number of matches to display (0 = draw all matches)'''
if nbr_match_draw_set == 0:
nbr_match_draw = pts1.shape[0]
else:
nbr_match_draw = nbr_match_draw_set
img2_show = plt.imread(im2)
if len(img2_show.shape) > 2:
ymax2, xmax2, _ = img2_show.shape #ymax2, xmax2, _ =
else:
ymax2, xmax2 = img2_show.shape
img1_show = plt.imread(im1)
if len(img1_show.shape) > 2:
ymax1, xmax1, _ = img1_show.shape
else:
ymax1, xmax1 = img1_show.shape
if ymax1 > ymax2:
ymax = ymax1
else:
ymax = ymax2
fig = plt.figure(figsize=((xmax1+xmax2)/1000, (ymax)/1000))
ax1 = fig.add_subplot(121)
ax2 = fig.add_subplot(122)
plt.subplots_adjust(wspace=0, hspace=0)
ax1.imshow(img2_show, aspect='auto') #clip white border
ax2.imshow(img1_show, aspect='auto', cmap='Greys_r') #clip white border
pts1_draw = np.asarray(pts1, dtype=np.float)
pts2_draw = np.asarray(pts2, dtype=np.float)
if len(pts1_draw.shape) == 3:
x1,y1 = pts1_draw[:,:,0:1].flatten(), pts1[:,:,1:2].flatten()
x2,y2 = pts2_draw[:,:,0:1].flatten(), pts2[:,:,1:2].flatten()
else:
x1,y1 = pts1_draw[:,0:1].flatten(), pts1[:,1:2].flatten()
x2,y2 = pts2_draw[:,0:1].flatten(), pts2[:,1:2].flatten()
colors = self.color_spectrum(pts1_draw.tolist(), offset=1)
print 'plotting matches'
i = 0
lines = []
while i < nbr_match_draw:#pts1_draw.shape[0]:
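# Convert the data coordinates of each axes into figure coordinates so that a single Line2D can span both subplots.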
transFigure = fig.transFigure.inverted()
coord1 = transFigure.transform(ax1.transData.transform([x1[i],y1[i]]))
coord2 = transFigure.transform(ax2.transData.transform([x2[i],y2[i]]))
line = plt.matplotlib.lines.Line2D((coord1[0],coord2[0]),(coord1[1],coord2[1]),
transform=fig.transFigure, color=colors[i]) #
plt.setp(line, color=colors[i], linewidth=0.2)
lines.append(line,)
ax1.plot(x1[i], y1[i], marker='o', ms=1, color='none', markeredgecolor=colors[i], markeredgewidth=0.2) # color=colors[i], markeredgecolor='none'
ax2.plot(x2[i], y2[i], marker='o', ms=1, color='none', markeredgecolor=colors[i], markeredgewidth=0.2)
ax1.imshow(img2_show, aspect='auto') #re-center image
ax2.imshow(img1_show, aspect='auto', cmap='Greys_r') #re-center image
i = i+1
fig.lines = lines
ax1.axis('off')
ax2.axis('off')
if save:
plt.savefig(os.path.join(directory_img, 'matches.jpg'), dpi=600)
else:
plt.show()
print 'plotting STAR matches done'
return fig
#draw image points on image
def plot_features(self, im, locs, circle=False):
'''Show image with features. input: im (image as array), locs (row, col, scale, orientation of each feature).'''
def draw_circle(c, r):
t = np.arange(0,1.01,.01)*2*np.pi
x = r*np.cos(t) + c[0]
y = r*np.sin(t) + c[1]
plt.plot(x,y,'b',linewidth=2)
plt.imshow(im)
if circle:
for p in locs:
draw_circle(p[:2],p[2])
else:
plt.plot(locs[:,0],locs[:,1],'ob')
plt.axis('off')
#help function to plot assigned SIFT features
def appendimages(self, im1, im2):
'''Return a new image that appends the two images side-by-side.'''
# select the image with the fewest rows and fill in enough empty rows
rows1 = im1.shape[0]
rows2 = im2.shape[0]
if rows1 < rows2:
im1 = np.vstack((im1, np.zeros((rows2-rows1, im1.shape[1], im1.shape[2]))))
elif rows1 > rows2:
im2 = np.vstack((im2, np.zeros((rows1-rows2, im2.shape[1], im2.shape[2]))))
# if neither case applies, the heights are already equal and no filling is needed.
return np.concatenate((im1, im2), axis=1) | 38.133333 | 163 | 0.54089 | 1,301 | 10,296 | 4.191391 | 0.239047 | 0.030259 | 0.012103 | 0.005502 | 0.265359 | 0.211443 | 0.188153 | 0.17495 | 0.17495 | 0.161379 | 0 | 0.041086 | 0.331002 | 10,296 | 270 | 164 | 38.133333 | 0.750581 | 0.122572 | 0 | 0.214286 | 0 | 0 | 0.0263 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | null | 0.005952 | 0.02381 | null | null | 0.011905 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
7e7c81aac02ad87573439958e43598f569160606 | 7,325 | py | Python | batch_gnome/batch_gnome/TAP/TAP_ext/test_TAP_ext.py | dylanrighi/GnomeTools | 08ad6810194141ac89a27c95fa9b9499376000ae | [
"Unlicense"
] | 2 | 2017-02-15T20:45:42.000Z | 2020-10-09T16:00:00.000Z | batch_gnome/batch_gnome/TAP/TAP_ext/test_TAP_ext.py | dylanrighi/GnomeTools | 08ad6810194141ac89a27c95fa9b9499376000ae | [
"Unlicense"
] | 10 | 2015-06-25T23:42:11.000Z | 2021-06-22T16:19:19.000Z | batch_gnome/batch_gnome/TAP/TAP_ext/test_TAP_ext.py | dylanrighi/GnomeTools | 08ad6810194141ac89a27c95fa9b9499376000ae | [
"Unlicense"
] | 15 | 2016-01-11T20:49:10.000Z | 2020-10-15T18:02:20.000Z | #!/usr/bin/env python2.3
"""
A simple test routine that tests at least some of the TAP_ext package
"""
import unittest
from Numeric import *
class CalcPolygonsTestCase(unittest.TestCase):
def testCalcPolygons(self):
pass
class check_receptorsTestCase(unittest.TestCase):
def test_check_receptors(self):
"""
The Python and C versions of the receptor site hit test should return the same result
"""
from RandomArray import uniform, seed
from TAP_ext import check_receptors
from time import time
area = 200
num_LEs = 100
num_times = 10
num_sites = 4
sites = [array([(20,65),(40,35),(70,25),(75,45),(55,50),(45,75),(20,65)],Float)]*num_sites
# build site bounding boxes
BBs = []
for site in sites:
max_x = site[0,0]
min_x = site[0,0]
max_y = site[0,1]
min_y = site[0,1]
max_x = max(max_x, max(site[:,0]))
min_x = min(min_x, min(site[:,0]))
max_y = max(max_y, max(site[:,1]))
min_y = min(min_y, min(site[:,1]))
BBs.append(array((max_x,min_x,max_y,min_y),Float))
LEs = uniform(0,area,(num_times,num_LEs,2))
Hit_Table1 = zeros((num_LEs, num_sites),Int)
start = time()
hit_test(LEs,sites,BBs,Hit_Table1,0)
print "Python version took %.3f seconds"%(time()-start)
Hit_Table2 = zeros((num_LEs, num_sites),Int)
start = time()
check_receptors.hit_test(LEs,sites,BBs,Hit_Table2,0)
print "c version took %.3f seconds"%(time()-start)
assert alltrue(equal(Hit_Table1,Hit_Table2)), "Python and C version gave different results"
from TAP_ext import NumericExtras as NE
class NumericExtrasTestCase(unittest.TestCase):
def testFastclip(self):
print "testing fastclip"
A = arange(0,10,1,Float)
B = clip(A, 3, 5)
NE.fastclip(A, 3, 5)
assert alltrue(A == B), "fastclip and clip gave different answers"
def testByteswap(self):
A = arange(10)
B = A.copy()
NE.byteswap(B)
B = B.byteswapped()
assert alltrue(A == B), "NE.byteswap and Numeric.array.byteswapped gave different results"
def testChangetypeA(self):
"""
changetype should fail for non-contiguous arrays
"""
A = arange(18)
A.shape = 3,6
B = A[:,3]
self.assertRaises(ValueError,NE.changetype,B,Float)
def testChangetypeB(self):
"""
changetype should fail for arrays the wrong size for the type
"""
A = arange(25)
self.assertRaises(ValueError,NE.changetype,A,Float)
def testChangetypeC(self):
"""
changetype(m,typecode) should have the same result as:
m = fromstring(m.tostring(),typecode)
"""
A = arange(26)
B = A.copy()
NE.changetype(A,Float)
assert alltrue (A == fromstring(B.tostring(),Float))
## This is the Python version of the check_receptors code, used by the test code above
def hit_test(LEs,sites,BBs,Hit_Table,Start_step):
"""
hit_test computes the receptor site hits given a set of LE positions,
LEs, and the receptor sites, and the bounding boxes of the receptor sites.
LEs is a M X N X 2 NumPy array (of Floats ?)
N is the number of LEs (Num_LEs)
M is the number of timesteps (must be at least 2)
sites is a list of N X 2 NumPy arrays (of Floats)
N is the number of points in a receptor polygon
BBs is a list of 4 X 1 NumPy arrays (of Floats) of the bounding box of the sites (max_x,min_x,max_y,min_y)
Hit_Table is a NumPy array of Int16 (short) of size (Num_LEs, Num_sites),
it hold the values of the first timestep that the site was hit by a given LE.
***Hit_Table is ALTERED by this function!!!***
the function returns None
"""
N_LEs = LEs.shape[1]
N_times = LEs.shape[0]
N_sites = len(sites)
for T_ind in range(1,N_times): # loop over timesteps
for LE_ind in range(N_LEs): # loop over LEs
LE_line = (tuple(LEs[T_ind-1,LE_ind,:]),tuple(LEs[T_ind,LE_ind,:])) # LE-movement segment
# did the LE move?
if (LE_line[0] != LE_line[1]):
# check bounding boxes
bb_LE = (max(LE_line[0][0],LE_line[1][0]),min(LE_line[0][0],LE_line[1][0]),
max(LE_line[0][1],LE_line[1][1]),min(LE_line[0][1],LE_line[1][1]))
for site_ind in range(N_sites): # loop over sites
if BB_check(BBs[site_ind],bb_LE):
# do the line cross check
for segment in map(None,sites[site_ind][:-1],sites[site_ind][1:]):
if LCross(LE_line,segment):
if not Hit_Table[LE_ind,site_ind]:
Hit_Table[LE_ind,site_ind] = Start_step + T_ind
break
return None
def BB_check(bb_1, bb_2):
"""
bb_1 and bb_2 are two bounding boxes.
Each is a 4 element tuple of :
(max_x,min_x,max_y,min_y)
BB_check(bb_1, bb_2)
returns 1 if the two boxes intersect
returns 0 if the two boxes don't intersect
"""
if ( (bb_1[0] > bb_2[1]) and (bb_1[1] < bb_2[0]) and
(bb_1[2] > bb_2[3]) and (bb_1[3] < bb_2[2]) ):
return 1
else:
return 0
def LCross(S1,S2):
"""
S1 and S2 are two element tuples of two element tuples of
x,y coordinates of the two lines:
Routine to check if two line segments intersect
returns 0 if they don't intersect, 1 if they intersect
"""
((px1,py1),(px2,py2)) = S1
((px3,py3),(px4,py4)) = S2
# First some utility functions:
def SideOfLineCheck(x1,y1,x2,y2,Px,Py):
""" Given a line segment x1,y1 to x2,y2
it checks to see if point Px,Py is to the right
or to the left of the line segment looking from
point x1,y1 to point x2,y2.
If D is positive, then the point Px,Py is to the LEFT of the
line segment. If D is negative, P is to the right of segment.
If D is zero then, P is on the segment
If D =0 then that means that the point P is on the line
defined by the two points...they may not be on the segment
The check is done by taking the
cross product of the vectors x1,y1 to x2,y2
and x1,y1 to Px,Py
"""
def CrossProduct(x1,y1,x2,y2):
# Given vectors x1,y1 and x2,y2
# this routine returns the cross product
# which is also the determinant
return x1*y2 - y1*x2
dx = x2 - x1
dy = y2 - y1
dxp = Px - x1
dyp = Py - y1
return CrossProduct(dx,dy,dxp,dyp)
# Check to see if point 3 is to the left of segment 1
D1 = SideOfLineCheck(px1,py1,px2,py2,px3,py3)
# Now check if point 4 is to the left of segment 1
D2 = SideOfLineCheck(px1,py1,px2,py2,px4,py4)
# if points 3 and 4 are on the same side of line 1
# then things don't cross
if(D1*D2 > 0):
return 0
# now we need to check the other way...
#Check to see if point 1 is to the left of segment 2
D1 = SideOfLineCheck(px3,py3,px4,py4,px1,py1)
# Now check if point 2 is to the left of segment 2
D2 = SideOfLineCheck(px3,py3,px4,py4,px2,py2)
# if points 1 and 2 are on the same side of line 2 then things don't cross
if(D1*D2 > 0):
return 0
#if we get here, the hummers cross
return 1
if __name__ == "__main__":
## suite()
unittest.main()
| 26.348921 | 107 | 0.622526 | 1,226 | 7,325 | 3.619086 | 0.220228 | 0.016227 | 0.011044 | 0.014875 | 0.218165 | 0.152355 | 0.104124 | 0.054091 | 0.014875 | 0.014875 | 0 | 0.045719 | 0.271399 | 7,325 | 277 | 108 | 26.444043 | 0.785647 | 0.112355 | 0 | 0.100917 | 0 | 0 | 0.054016 | 0.005871 | 0 | 0 | 0 | 0 | 0.055046 | 0 | null | null | 0.009174 | 0.055046 | null | null | 0.027523 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
7e7dd5c8d80c1193d34c16e490af85436be7f87e | 1,703 | py | Python | tests.py | snandasena/disaster-response-pipeline | 709af8c5fcb520dae82dc3b75c30ab2609402f53 | [
"MIT"
] | null | null | null | tests.py | snandasena/disaster-response-pipeline | 709af8c5fcb520dae82dc3b75c30ab2609402f53 | [
"MIT"
] | null | null | null | tests.py | snandasena/disaster-response-pipeline | 709af8c5fcb520dae82dc3b75c30ab2609402f53 | [
"MIT"
] | null | null | null | import sys
import numpy as np
from sklearn.externals import joblib
sys.path.append("common")
from train_classifier import *
staring_verb_extractor = StartingVerbExtractor()
verb_count_extractor = VerbCountExtractor()
starting_modal_extractor = StartingModalExtractor()
noun_count_extractor = NounCountExtractor()
def test_load_data(file_name):
return load_data(file_name)
def test_stating_verb_extract(text):
print(staring_verb_extractor.starting_verb(text))
def test_transform(X):
print(staring_verb_extractor.transform(X))
def test_tokenize(text):
print(tokenize_text(text))
def test_total_verb_counts(text):
print(verb_count_extractor.count_verbs(text))
def test_stating_modals(text):
print(starting_modal_extractor.starting_modals(text))
def test_total_noun_counts(text):
print(noun_count_extractor.count_nouns(text))
def test_evaluate_model(X, Y, col_names, model_path='./models/classifier.pkl'):
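# Loads the previously trained model from disk and scores it on a fresh hold-out split.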
X_train, X_test, Y_train, Y_test = train_test_split(X, Y, test_size=0.2)
model = joblib.load(model_path)
evaluate_model(model, X_test, Y_test, col_names)
if __name__ == "__main__":
debug_data = ['.', './data/DisasterResponse.db', './models/classifier.pkl']
X, Y, col_names = load_data(debug_data[1])
# for text in X[:100].values:
# test_tokenize(text)
# for text in texts:
# test_stating_verb_extract(text)
#
# test_transform(X)
# for text in X[:100].values:
# test_total_verb_counts(text)
#
# for text in X[:100].values:
# test_total_noun_counts(text)
#
# for text in X[:100].values:
# test_stating_modals(text)
test_evaluate_model(X, Y, col_names)
| 24.681159 | 79 | 0.72754 | 239 | 1,703 | 4.824268 | 0.276151 | 0.048569 | 0.047702 | 0.034692 | 0.244579 | 0.152645 | 0.152645 | 0.085863 | 0.057242 | 0 | 0 | 0.010519 | 0.162654 | 1,703 | 68 | 80 | 25.044118 | 0.798036 | 0.178509 | 0 | 0 | 0 | 0 | 0.062861 | 0.052023 | 0 | 0 | 0 | 0 | 0 | 1 | 0.258065 | false | 0 | 0.129032 | 0.032258 | 0.419355 | 0.193548 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
7e7e872c4d53c683d43e32b7bedc06b65ba0d11b | 3,734 | py | Python | sendim/tests/graph.py | ZuluPro/snafu | dbd79da4617db315ba7e76a0af6805cc3b1cbae2 | [
"BSD-3-Clause"
] | 1 | 2017-12-13T20:28:32.000Z | 2017-12-13T20:28:32.000Z | sendim/tests/graph.py | ZuluPro/snafu | dbd79da4617db315ba7e76a0af6805cc3b1cbae2 | [
"BSD-3-Clause"
] | null | null | null | sendim/tests/graph.py | ZuluPro/snafu | dbd79da4617db315ba7e76a0af6805cc3b1cbae2 | [
"BSD-3-Clause"
] | null | null | null | """
"""
from django.utils import unittest
from django.core import management
from referentiel.models import Supervisor
from sendim.tests.defs import create_event, create_alert, internet_is_on
class Graph_TestCase(unittest.TestCase):
"""
Test communication with metrology.
"""
def setUp(self):
management.call_command('loaddata', 'test_supervisor.json', database='default', verbosity=0)
def tearDown(self):
pass
@unittest.skipIf(not internet_is_on(), 'No internet connection available.')
def test_RRDTool(self):
"""
Test to get a list of graphs from nagios.demo.netways.de
for host 'c1-activedirectory' and service 'win-mem+virtual'.
"""
# Find the monitored hosts at http://nagios.demo.netways.de/nagios/cgi-bin/status.cgi
management.call_command('loaddata', 'test_rrdtool_host.json', database='default', verbosity=0)
GRAPH_LIST_URL = 'https://nagios.demo.netways.de/pnp4nagios/graph?host=c1-activedirectory-1&srv=win-mem+virtual&view=0'
GRAPH_URL = 'https://nagios.demo.netways.de/pnp4nagios/image?host=c1-activedirectory-1&srv=win-mem+virtual&view=0'
S = Supervisor.objects.get(name__icontains='Netways')
opener = S.getOpener()
A = create_alert(host='c1-activedirectory-1', service='win-mem virtual')
# Find graphs
## Test to get a graph list URL
graph_list_url = S.get_graph_url(alert=A, prefix='graph')
self.assertEqual(GRAPH_LIST_URL, graph_list_url)
## Test to open this URL
response = opener.open(graph_list_url)
info = response.info()
self.assertEqual(200,response.getcode())
self.assertIn('text/html', info['content-type'])
# Get graph
## Test to get a graph URL
graph_url = S.get_graph_url(alert=A)
self.assertEqual(GRAPH_URL, graph_url)
## Test to open this URL
response = opener.open(graph_url)
info = response.info()
self.assertEqual(200,response.getcode())
self.assertIn('image/png', info['content-type'])
@unittest.skipIf(not internet_is_on(), 'No internet connection available.')
def test_N2RDD(self):
"""
Test to get a list of graphs from sysnetmon.diglinks.com
for host 'core.diglinks.com' and service '02_load'.
"""
# Find the monitored hosts at http://nagios.demo.netways.de/nagios/cgi-bin/status.cgi
management.call_command('loaddata', 'test_n2rrd_host.json', database='default', verbosity=0)
GRAPH_LIST_URL = 'http://sysnetmon.diglinks.com/cgi-bin/rrd2graph.cgi?hostname=core.diglinks.com&service=02_load'
GRAPH_URL = 'http://sysnetmon.diglinks.com/cgi-bin/n2rrd_images_cache/core.diglinks.com/core.diglinks.com_load_Daily.png'
S = Supervisor.objects.get(name__icontains='SysNetmon')
opener = S.getOpener()
A = create_alert(host='core.diglinks.com', service='02_load')
# Find graphs
graph_list_url = S.get_graph_url(alert=A, prefix='graph')
self.assertEqual(GRAPH_LIST_URL, graph_list_url)
handle = opener.open(graph_list_url)
# TO FINISH
# Get graph
#graph_url = S.get_graph_url(alert=A)
#self.assertEqual(GRAPH_URL, graph_url)
#handle = opener.open(graph_url)
| 46.675 | 129 | 0.602839 | 446 | 3,734 | 4.890135 | 0.255605 | 0.055021 | 0.060523 | 0.043558 | 0.699679 | 0.635488 | 0.573132 | 0.482348 | 0.482348 | 0.414489 | 0 | 0.011291 | 0.288431 | 3,734 | 79 | 130 | 47.265823 | 0.80956 | 0.128281 | 0 | 0.3 | 0 | 0.1 | 0.250089 | 0.007849 | 0 | 0 | 0 | 0 | 0.175 | 0 | null | null | 0.025 | 0.1 | null | null | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
7e82cf4b8fa8c9cf39f0aaa01ba1006d96672fc2 | 3,760 | py | Python | engine/logic/Transform.py | wkta/Python-Bataille-Navale | b6f725519cf1cf559e60ec766aa4f059463b1493 | [
"MIT"
] | null | null | null | engine/logic/Transform.py | wkta/Python-Bataille-Navale | b6f725519cf1cf559e60ec766aa4f059463b1493 | [
"MIT"
] | null | null | null | engine/logic/Transform.py | wkta/Python-Bataille-Navale | b6f725519cf1cf559e60ec766aa4f059463b1493 | [
"MIT"
] | 1 | 2019-12-03T15:42:38.000Z | 2019-12-03T15:42:38.000Z | # Copyright © 2019 CAILLAUD Jean-Baptiste.
from copy import copy
from engine.logic.Math import Mat3x3, Vector2, IDENTITY_MATRIX, ZERO_VECTOR, UNIT_VECTOR
class Transform:
"""
Simple transform.
Used to give information about the objects in the scene.
Attributes:
parent Parent transform of this object.
position Position of the transform in parent-relative space.
rotation Rotation of the transform in parent-relative space.
scale Scale of the transform in parent-relative space.
gameobject GameObject instance attached to this transform.
children List of all the children of this transform.
"""
def __init__(self, parent, position=ZERO_VECTOR, offset=ZERO_VECTOR, rotation=0, scale=UNIT_VECTOR, gameobject=None):
"""
Class constructor.
Creates a new Transform instance.
:param parent: The parent of this transform.
:param offset: The offset of the transform.
:param position: The position of this transform.
:param rotation: The rotation of this transform.
:param scale: The scale of this transform.
:param gameobject: The game object that this transform represents.
"""
self.parent = parent
self.offset = copy(offset)
self.position = copy(position)
self.rotation = rotation
self.scale = scale
self.gameobject = gameobject
self.children = []
# If there is a parent.
if parent is not None:
# Append ourselves to the children list.
parent.children.append(self)
def apply(self, position, world=False):
"""
Applies the transform to the specified position.
:param position: The position to apply the transform to.
:param world: If True, applies world transformation instead of the local one.
:returns: The transformed position.
"""
return (self.get_world_matrix() if world else self.get_matrix()) * position
def get_matrix(self, ignore_position=False, ignore_rotation=False, ignore_scale=True):
"""
Returns the matrix generated by this transform.
"""
return Mat3x3.create_matrix(
ZERO_VECTOR if ignore_position else self.position,
0 if ignore_rotation else self.rotation,
UNIT_VECTOR if ignore_scale else self.scale)
def get_world_matrix(self):
"""
Returns the 3x3 matrix of this transform in world space.
"""
return (self.parent.get_world_matrix() if self.parent is not None else IDENTITY_MATRIX) * self.get_matrix()
def get_world_position(self):
"""
Returns the world position of this transform.
"""
# Compute the position of the center.
center = (self.parent.get_world_matrix() if self.parent is not None else IDENTITY_MATRIX) * self.position
offset = self.get_matrix(ignore_position=True) * self.offset
return center + offset
def set_world_position(self, position):
"""
Defines the world position of the object.
:param position: The position the object is expected to be in.
"""
self.position = position - (self.parent.get_world_position() if self.parent is not None else ZERO_VECTOR)
def get_world_rotation(self):
"""
Returns the world rotation of this transform.
"""
return self.rotation + (self.parent.get_world_rotation() if self.parent is not None else 0)
def get_world_scale(self):
"""
Returns the world scale of this transform.
"""
return self.scale * (self.parent.get_world_scale() if self.parent is not None else UNIT_VECTOR)
| 39.578947 | 121 | 0.655585 | 472 | 3,760 | 5.118644 | 0.197034 | 0.06457 | 0.055877 | 0.037252 | 0.150662 | 0.129967 | 0.129967 | 0.055464 | 0.055464 | 0.055464 | 0 | 0.005124 | 0.273404 | 3,760 | 94 | 122 | 40 | 0.878843 | 0.403191 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.25 | false | 0 | 0.0625 | 0 | 0.53125 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
7e852564331f423fb21ea2215be78aaea0deff4c | 507 | py | Python | my_lambdata/iqr_oop.py | karlmanalo/lambdata-karlmanalo | e6058fd1b4a5eb64d0afaebdfa14b902fedf931e | [
"MIT"
] | null | null | null | my_lambdata/iqr_oop.py | karlmanalo/lambdata-karlmanalo | e6058fd1b4a5eb64d0afaebdfa14b902fedf931e | [
"MIT"
] | null | null | null | my_lambdata/iqr_oop.py | karlmanalo/lambdata-karlmanalo | e6058fd1b4a5eb64d0afaebdfa14b902fedf931e | [
"MIT"
] | 1 | 2020-05-06T21:26:50.000Z | 2020-05-06T21:26:50.000Z | # my_lambdata\iqr_oop.py
# IQR function written in terms of OOP
# Must feed in list of numbers
import numpy
import pandas
class Numbers():
def __init__(self):
pass
def iqr(self, X):
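# Interquartile range: the spread between the 25th (Q1) and 75th (Q3) percentiles.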
q1 = numpy.percentile(X, 25, interpolation='midpoint')
q3 = numpy.percentile(X, 75, interpolation='midpoint')
print(f'The IQR for this data set is {q3 - q1}')
if __name__ == "__main__":
data = pandas.DataFrame([1,2,3,4,5])
nums = Numbers()
nums.iqr(data) | 22.043478 | 70 | 0.623274 | 73 | 507 | 4.136986 | 0.657534 | 0.099338 | 0.10596 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.034483 | 0.25641 | 507 | 23 | 71 | 22.043478 | 0.766578 | 0.17357 | 0 | 0 | 0 | 0 | 0.149038 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.153846 | false | 0.076923 | 0.153846 | 0 | 0.384615 | 0.076923 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 1 |
7e85b0b404fa718713288ae4337dab0afce1d4af | 2,760 | py | Python | app/vendors/migrations/0030_auto_20180305_2301.py | snakrani/discovery | 99690f186a194cabef6a5d1ad18fca715be1e187 | [
"CC0-1.0"
] | null | null | null | app/vendors/migrations/0030_auto_20180305_2301.py | snakrani/discovery | 99690f186a194cabef6a5d1ad18fca715be1e187 | [
"CC0-1.0"
] | null | null | null | app/vendors/migrations/0030_auto_20180305_2301.py | snakrani/discovery | 99690f186a194cabef6a5d1ad18fca715be1e187 | [
"CC0-1.0"
] | null | null | null | # Generated by Django 2.0.2 on 2018-03-05 23:01
from django.db import migrations, models
import django.db.models.deletion
def migrate_membership(apps, schema_editor):
PoolPIID = apps.get_model('vendors', 'PoolPIID')
PoolMembership = apps.get_model('vendors', 'PoolMembership')
PoolMembershipZone = apps.get_model('vendors', 'PoolMembershipZone')
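# Data migration: copy every legacy PoolPIID row into PoolMembership, preserving its zone via PoolMembershipZone.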
for membership in PoolPIID.objects.all():
new_membership, created = PoolMembership.objects.get_or_create(
vendor=membership.vendor,
pool=membership.pool,
piid=membership.piid
)
if membership.zone:
PoolMembershipZone.objects.get_or_create(
membership=new_membership,
zone=membership.zone
)
class Migration(migrations.Migration):
dependencies = [
('categories', '0003_auto_20180301_2027'),
('vendors', '0029_auto_20180301_2027'),
]
operations = [
migrations.CreateModel(
name='PoolMembership',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('piid', models.CharField(max_length=128)),
('pool', models.ForeignKey(on_delete=django.db.models.deletion.DO_NOTHING, to='categories.Pool')),
],
),
migrations.CreateModel(
name='PoolMembershipZone',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('membership', models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, related_name='zone', to='vendors.PoolMembership')),
('zone', models.ForeignKey(null=True, on_delete=django.db.models.deletion.DO_NOTHING, to='categories.Zone')),
],
),
migrations.AlterField(
model_name='vendor',
name='pools',
field=models.ManyToManyField(through='vendors.PoolMembership', to='categories.Pool'),
),
migrations.AddField(
model_name='poolmembership',
name='vendor',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='vendors.Vendor'),
),
migrations.RunPython(migrate_membership),
migrations.RemoveField(
model_name='poolpiid',
name='pool',
),
migrations.RemoveField(
model_name='poolpiid',
name='vendor',
),
migrations.RemoveField(
model_name='poolpiid',
name='zone',
),
migrations.DeleteModel(
name='PoolPIID',
)
]
| 34.936709 | 156 | 0.593841 | 257 | 2,760 | 6.233463 | 0.326848 | 0.029963 | 0.043695 | 0.068664 | 0.337079 | 0.337079 | 0.258427 | 0.249688 | 0.210986 | 0.210986 | 0 | 0.025368 | 0.28587 | 2,760 | 78 | 157 | 35.384615 | 0.787418 | 0.016304 | 0 | 0.348485 | 1 | 0 | 0.13638 | 0.033174 | 0 | 0 | 0 | 0 | 0 | 1 | 0.015152 | false | 0 | 0.030303 | 0 | 0.090909 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
7e892bbc2f9093e6ea3899715b53105db05324f3 | 402 | py | Python | community/migrations/0013_auto_20191108_0918.py | akarakoc/Communityverse | 73ecf51eae3f96cca865e0d7cc526b92c8ad6b5e | [
"MIT"
] | null | null | null | community/migrations/0013_auto_20191108_0918.py | akarakoc/Communityverse | 73ecf51eae3f96cca865e0d7cc526b92c8ad6b5e | [
"MIT"
] | 22 | 2019-11-09T23:23:11.000Z | 2019-12-23T09:38:29.000Z | community/migrations/0013_auto_20191108_0918.py | akarakoc/CommunityVerse | 73ecf51eae3f96cca865e0d7cc526b92c8ad6b5e | [
"MIT"
] | null | null | null | # Generated by Django 2.2.6 on 2019-11-08 09:18
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('community', '0012_auto_20191107_1649'),
]
operations = [
migrations.AlterField(
model_name='communities',
name='communityPrv',
field=models.BooleanField(default=False),
),
]
| 21.157895 | 53 | 0.614428 | 41 | 402 | 5.926829 | 0.829268 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.106529 | 0.276119 | 402 | 18 | 54 | 22.333333 | 0.728522 | 0.11194 | 0 | 0 | 1 | 0 | 0.15493 | 0.064789 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.083333 | 0 | 0.333333 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
7e8a45c72fe260a3444e4ee7e5fcd4dbca175324 | 935 | py | Python | scripts/count_git_changes.py | ShankarNara/shogun | 8ab196de16b8d8917e5c84770924c8d0f5a3d17c | [
"BSD-3-Clause"
] | 2,753 | 2015-01-02T11:34:13.000Z | 2022-03-25T07:04:27.000Z | scripts/count_git_changes.py | ShankarNara/shogun | 8ab196de16b8d8917e5c84770924c8d0f5a3d17c | [
"BSD-3-Clause"
] | 2,404 | 2015-01-02T19:31:41.000Z | 2022-03-09T10:58:22.000Z | scripts/count_git_changes.py | ShankarNara/shogun | 8ab196de16b8d8917e5c84770924c8d0f5a3d17c | [
"BSD-3-Clause"
] | 1,156 | 2015-01-03T01:57:21.000Z | 2022-03-26T01:06:28.000Z | import os
import sys
insertions=0
deletions=0
files=0
FROMVER=""
if len(sys.argv)>1:
FROMVER=sys.argv[1]
TOVER=""
if len(sys.argv)>2:
TOVER=sys.argv[2]
TMPNAME=os.tmpnam()
VER=""
if len(FROMVER)>0:
VER=FROMVER+'..'
if len(TOVER)>0:
if len(VER)==0:
VER='..'
VER=VER+TOVER
os.system('git log --oneline --shortstat %s >%s' % (VER,TMPNAME))
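# Each --shortstat line has the form "N files changed, N insertions(+), N deletions(-)".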
for line in file(TMPNAME).readlines():
if line.find('file') == -1:
continue
if line.find('changed') == -1:
continue
if line.find('insertion') == -1 and line.find('deletion') == -1:
continue
entries=line.split(',')
for e in entries:
if e.find('file') != -1:
files+=int(e.strip().split(' ')[0])
elif e.find('insertion') != -1:
insertions+=int(e.strip().split(' ')[0])
elif e.find('deletion') != -1:
deletions+=int(e.strip().split(' ')[0])
print "Files changed: %d" % files
print "Insertions: %d" % insertions
print "Deletions: %d" % deletions
os.unlink(TMPNAME)
| 20.777778 | 65 | 0.628877 | 147 | 935 | 4 | 0.292517 | 0.042517 | 0.05102 | 0.071429 | 0.171769 | 0.081633 | 0.081633 | 0.081633 | 0 | 0 | 0 | 0.025 | 0.144385 | 935 | 44 | 66 | 21.25 | 0.71 | 0 | 0 | 0.076923 | 0 | 0 | 0.146524 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | null | 0 | 0.051282 | null | null | 0.076923 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
7e97e798b9acfb3c7359d3f9ef7c17e46c59fef5 | 398 | py | Python | storage/models/production_data.py | aagarwal1999/194-web-app | 362efd32d964d780f213b4e5a97fd14b080329ba | [
"Apache-2.0"
] | null | null | null | storage/models/production_data.py | aagarwal1999/194-web-app | 362efd32d964d780f213b4e5a97fd14b080329ba | [
"Apache-2.0"
] | null | null | null | storage/models/production_data.py | aagarwal1999/194-web-app | 362efd32d964d780f213b4e5a97fd14b080329ba | [
"Apache-2.0"
] | null | null | null | from storage.shared import db
import datetime
import uuid
class ProductionData(db.Model):
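# Raw production data plus one-line and one-paragraph summaries, keyed by a UUID string.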
id = db.Column(db.String(100), primary_key=True, default=lambda: str(uuid.uuid4()))
data = db.Column(db.Text, unique=False, nullable=False)
time = db.Column(db.DateTime, default=datetime.datetime.now)
one_line_summary = db.Column(db.Text)
one_paragraph_summary = db.Column(db.Text)
| 24.875 | 87 | 0.731156 | 59 | 398 | 4.847458 | 0.542373 | 0.13986 | 0.174825 | 0.146853 | 0.146853 | 0 | 0 | 0 | 0 | 0 | 0 | 0.01173 | 0.143216 | 398 | 15 | 88 | 26.533333 | 0.826979 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.333333 | 0 | 1 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 1 |
7e99f08d361c9ea4cb38904ff75221d141f1e187 | 1,838 | py | Python | Line_Bot_Server/LineUsers.py | STU-Idichi-Syoya/Linebot_ | 54941ec0baee40ac21eeed77a0e1d0da02631aea | [
"MIT"
] | null | null | null | Line_Bot_Server/LineUsers.py | STU-Idichi-Syoya/Linebot_ | 54941ec0baee40ac21eeed77a0e1d0da02631aea | [
"MIT"
] | null | null | null | Line_Bot_Server/LineUsers.py | STU-Idichi-Syoya/Linebot_ | 54941ec0baee40ac21eeed77a0e1d0da02631aea | [
"MIT"
] | null | null | null | from Line_Bot_Server.LineBot import line_bot_api
##### STATE #####
class LineUser:
def __init__(self,reply=None,userId=None):
'''LINE user information class. The user ID can either be taken
from a reply event or passed in directly as userId.
The LINE API is then used to fetch the user's display name and
status message.
'''
# If neither reply nor userId is given, userId is set to None.
if (reply or userId):
## get the userId from the reply event
if reply:
self.userId = reply.source.user_id
else:
self.userId=userId
# fetch the display name and status message from the user's profile
profile = line_bot_api.get_profile(self.userId)
self.status = (profile.status_message)
self.name=profile.display_name
else:
self.userId=None
self.name=None
self.status=None
def __eq__(self, other):
if type(other)==LineUser:
return self.userId == other.userId
else:
return self.userId== other
def __str__(self):
return f"userId::{self.userId}\n" \
f"userName::{self.name}\n" \
f"userStatus::{self.status}"
class LineSender:
def __init__(self,lineins:LineBotApi):
self.line_bot_api=lineins
def sendMessage(self,text:str,user_id:LineUser):
if isinstance(user_id,LineUser):
user_id=user_id.userId
msg=TextSendMessage(text=text)  # use TextSendMessage from linebot.models instead of the undefined lineins reference
self.line_bot_api.push_message(to=user_id,messages=msg)
class LineUsers:
def __init__(self):
self.Users = {}
def getState(self, user: LineUser):
return self.Users[user.userId]
def setState(self,user:LineUser,state):
self.Users[user.userId]=state
def __add__(self, other: LineUser):
self.Users[other.userId] = other
def __len__(self, other):
return len(self.Users)
| 28.276923 | 63 | 0.605005 | 210 | 1,838 | 5.066667 | 0.328571 | 0.065789 | 0.037594 | 0.039474 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.291077 | 1,838 | 64 | 64 | 28.71875 | 0.816577 | 0.106094 | 0 | 0.069767 | 0 | 0 | 0.044626 | 0.044626 | 0 | 0 | 0 | 0 | 0 | 1 | 0.232558 | false | 0 | 0.023256 | 0.069767 | 0.44186 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
7e99f6ca30156fb9b0a9d2f84235cc68fb4723a3 | 841 | py | Python | wx/stc/mixins/DrawColumnEndMixin.py | dejbug/dejan7 | 0e3d9f4068cd7b39a3a992f4b21fa5042c88227b | [
"MIT"
] | null | null | null | wx/stc/mixins/DrawColumnEndMixin.py | dejbug/dejan7 | 0e3d9f4068cd7b39a3a992f4b21fa5042c88227b | [
"MIT"
] | null | null | null | wx/stc/mixins/DrawColumnEndMixin.py | dejbug/dejan7 | 0e3d9f4068cd7b39a3a992f4b21fa5042c88227b | [
"MIT"
] | null | null | null | import wx
import wx.stc
from dejan7.async.DelayedCall import *
from dejan7.wx.stc import GetRowWidths
class DrawColumnEndMixin(object):
def __init__(self, parent, color="GREY", delay=0.3, margins=[]):
self.sci = parent
self.pen = wx.Pen(color)
self.margins = margins
self.DelayedPaint = DelayedCall(self.Paint, delay)
PaintMethod = self.OnPaintedDelayed if delay > 0 else self.OnPainted
self.sci.Bind(wx.stc.EVT_STC_PAINTED, PaintMethod)
def OnPaintedDelayed(self, e):
e.Skip()
self.DelayedPaint()
def OnPainted(self, e):
e.Skip()
self.Paint()
def Paint(self):
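# Draw a short vertical tick at the end-of-text column of every row, using the configured pen.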
th = self.sci.TextHeight(0)
dc = wx.ClientDC(self.sci)
wx.DCPenChanger(dc, self.pen)
y = 0
for x in GetRowWidths(self.sci, margins=self.margins):
if x >= 0: dc.DrawLine(x, y, x, y + th)
y += th
| 22.72973 | 71 | 0.668252 | 121 | 841 | 4.595041 | 0.371901 | 0.06295 | 0.021583 | 0.035971 | 0.05036 | 0 | 0 | 0 | 0 | 0 | 0 | 0.01194 | 0.203329 | 841 | 36 | 72 | 23.361111 | 0.81791 | 0 | 0 | 0.076923 | 0 | 0 | 0.004969 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | null | 0 | 0.153846 | null | null | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
7e9db911f3ba28aac3dc562f5cd602f52c2d9c10 | 1,231 | py | Python | apps/common/lookups.py | gis4dis/poster_new | cee983bfcfc90f581b18712d558bc9d8a83a400a | [
"BSD-3-Clause"
] | 4 | 2017-10-17T10:52:27.000Z | 2020-08-30T10:13:46.000Z | apps/common/lookups.py | gis4dis/poster_new | cee983bfcfc90f581b18712d558bc9d8a83a400a | [
"BSD-3-Clause"
] | 138 | 2017-10-13T09:09:02.000Z | 2020-06-05T18:55:33.000Z | apps/common/lookups.py | gis4dis/poster_new | cee983bfcfc90f581b18712d558bc9d8a83a400a | [
"BSD-3-Clause"
] | 2 | 2018-01-21T19:44:51.000Z | 2018-02-15T11:27:39.000Z | from django.contrib.postgres.fields import DateTimeRangeField
from django.db.models import Lookup
from psycopg2.extras import NumericRange
@DateTimeRangeField.register_lookup
class Duration(Lookup):
lookup_name = 'duration'
def as_sql(self, compiler, connection):
lhs, lhs_params = self.process_lhs(compiler, connection)
rhs, rhs_params = self.process_rhs(compiler, connection)
params = lhs_params + rhs_params
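# A NumericRange bound means "duration between lower and upper seconds"; a plain value means an exact duration in seconds.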
if type(rhs_params[0]) is NumericRange:
params = lhs_params + [rhs_params[0].lower] + lhs_params + [rhs_params[0].upper]
return "duration(%s) >= (%s || ' second')::interval AND duration(%s) < (%s || ' second')::interval" % \
(lhs, rhs, lhs, rhs), params
return "duration(%s) = (%s || ' second')::interval" % (lhs, rhs), params
@DateTimeRangeField.register_lookup
class Matches(Lookup):
lookup_name = 'matches'
def as_sql(self, compiler, connection):
lhs, lhs_params = self.process_lhs(compiler, connection)
rhs, rhs_params = self.process_rhs(compiler, connection)
params = lhs_params + rhs_params
return "mod(cast(extract(epoch from lower(%s)) as int), %s)=0" % (lhs, rhs), params
| 38.46875 | 115 | 0.665313 | 151 | 1,231 | 5.271523 | 0.291391 | 0.113065 | 0.085427 | 0.090452 | 0.532663 | 0.477387 | 0.432161 | 0.356784 | 0.356784 | 0.356784 | 0 | 0.005133 | 0.208773 | 1,231 | 31 | 116 | 39.709677 | 0.812115 | 0 | 0 | 0.434783 | 0 | 0.043478 | 0.16247 | 0.017872 | 0 | 0 | 0 | 0 | 0 | 1 | 0.086957 | false | 0 | 0.130435 | 0 | 0.521739 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 1 |
7ea13c2421a842ac77b2ae86ea0d9775b78d7084 | 955 | py | Python | test/test_utils.py | undecidedzogvisrainbowvitalispotent-360/i2plib | 6edf51cd5d21cc745aa7e23cb98c582144884fa8 | [
"MIT"
] | 25 | 2018-09-05T16:44:05.000Z | 2022-02-16T18:32:32.000Z | test/test_utils.py | undecidedzogvisvitalispotent8stars360/i2plib | 6edf51cd5d21cc745aa7e23cb98c582144884fa8 | [
"MIT"
] | 2 | 2018-10-24T19:57:16.000Z | 2019-01-26T14:30:40.000Z | test/test_utils.py | undecidedzogvisvitalispotent8stars360/i2plib | 6edf51cd5d21cc745aa7e23cb98c582144884fa8 | [
"MIT"
] | 5 | 2018-10-24T18:01:46.000Z | 2020-12-15T18:16:14.000Z | import unittest
import os
import i2plib.sam
import i2plib.utils
class TestUtils(unittest.TestCase):
def test_session_id_generation(self):
sid = i2plib.utils.generate_session_id()
self.assertEqual(len(sid), 13)
sid = i2plib.utils.generate_session_id(8)
self.assertEqual(len(sid), 15)
def test_sam_address_getter(self):
oldenv = os.environ
if "I2P_SAM_ADDRESS" in os.environ:
del os.environ["I2P_SAM_ADDRESS"]
a = i2plib.utils.get_sam_address()
self.assertEqual(a, i2plib.sam.DEFAULT_ADDRESS)
os.environ["I2P_SAM_ADDRESS"] = "127.0.0.1:11223"
a = i2plib.utils.get_sam_address()
self.assertEqual(a, ("127.0.0.1", 11223))
os.environ = oldenv
def test_port_utils(self):
p = i2plib.utils.get_free_port()
unavail_address = ("127.0.0.1", p)
self.assertFalse(i2plib.utils.is_address_accessible(unavail_address))
| 28.939394 | 77 | 0.66178 | 132 | 955 | 4.568182 | 0.333333 | 0.127695 | 0.064677 | 0.029851 | 0.369818 | 0.238806 | 0.135987 | 0.135987 | 0.135987 | 0 | 0 | 0.060565 | 0.22199 | 955 | 32 | 78 | 29.84375 | 0.751009 | 0 | 0 | 0.083333 | 1 | 0 | 0.081761 | 0 | 0 | 0 | 0 | 0 | 0.208333 | 1 | 0.125 | false | 0 | 0.166667 | 0 | 0.333333 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
7ea185e9e7bf9f8a9049b8e9ecc9f23bee356df7 | 2,156 | py | Python | arm64/conditional.py | c01db33f/reil | 3deec3a3bb69aae51cc0d728d5f83156cfba2ab6 | [
"Apache-2.0"
] | 27 | 2015-03-16T13:28:00.000Z | 2021-08-02T02:58:23.000Z | arm64/conditional.py | c01db33f/pyreil | 3deec3a3bb69aae51cc0d728d5f83156cfba2ab6 | [
"Apache-2.0"
] | 2 | 2015-02-23T12:18:53.000Z | 2015-03-15T20:31:16.000Z | arm64/conditional.py | c01db33f/reil | 3deec3a3bb69aae51cc0d728d5f83156cfba2ab6 | [
"Apache-2.0"
] | 9 | 2016-03-22T18:59:12.000Z | 2022-02-05T08:18:28.000Z | # -*- coding: utf-8 -*-
# Copyright 2016 Mark Brand - c01db33f (at) gmail.com
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""reil.arm64.conditional - ARMv8 translators
This module generates REIL (reverse engineering intermediate language)
IL from ARMv8 machine code.
This file contains helpers for conditional instructions
"""
from reil.shorthand import *
from reil.utilities import *
A = 0
EQ = 1
NE = 2
HS = 3
LO = 4
MI = 5
PL = 6
VS = 7
VC = 8
HI = 9
LS = 10
GE = 11
LT = 12
GT = 13
LE = 14
AL = 15
NV = 16
def condition(ctx, cc):
# we implement as per the architecture reference manual
# TODO: optimise instead.
cb = (cc >> 1) & 0b111
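# The upper three bits of cc select the base condition (Z, C, N, V, C and not Z, N==V, (N==V) and not Z, always); the low bit negates it.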
if cb == 0b111:
cond = imm(1, 8)
else:
cond = ctx.tmp(8)
# evaluate base condition
if cb == 0b000:
ctx.emit( bisnz_ (r('z', 8), cond))
elif cb == 0b001:
ctx.emit( bisnz_ (r('c', 8), cond))
elif cb == 0b010:
ctx.emit( bisnz_ (r('n', 8), cond))
elif cb == 0b011:
ctx.emit( bisnz_ (r('v', 8), cond))
elif cb == 0b100:
t0 = ctx.tmp(8)
t1 = ctx.tmp(8)
ctx.emit( bisnz_ (r('c', 8), t0))
ctx.emit( bisz_ (r('z', 8), t1))
ctx.emit( and_ (t0, t1, cond))
elif cb == 0b101:
ctx.emit( equ_ (r('n', 8), r('v', 8), cond))
elif cb == 0b110:
t0 = ctx.tmp(8)
t1 = ctx.tmp(8)
ctx.emit( equ_ (r('n', 8), r('v', 8), t0))
ctx.emit( bisz_ (r('z', 8), t1))
ctx.emit( and_ (t0, t1, cond))
if cc != 0b1111 and cc & 0b1 == 1:
ctx.emit( bisz_ (cond, cond))
return cond
| 25.666667 | 77 | 0.581169 | 330 | 2,156 | 3.760606 | 0.493939 | 0.067687 | 0.048348 | 0.052377 | 0.15552 | 0.15552 | 0.119259 | 0.119259 | 0.119259 | 0.099919 | 0 | 0.075472 | 0.287106 | 2,156 | 83 | 78 | 25.975904 | 0.731945 | 0.430427 | 0 | 0.16 | 0 | 0 | 0.009129 | 0 | 0 | 0 | 0 | 0.012048 | 0 | 1 | 0.02 | false | 0 | 0.04 | 0 | 0.08 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
7ea4f26b832f9d8e48b3a6b69e37f65806d63b1b | 1,367 | py | Python | ImGen.py | ibaranov-cp/ImGen_Circuit_Maker | 50c328ec0f62a77908dc3179de8fbf123f38caec | [
"BSD-3-Clause"
] | null | null | null | ImGen.py | ibaranov-cp/ImGen_Circuit_Maker | 50c328ec0f62a77908dc3179de8fbf123f38caec | [
"BSD-3-Clause"
] | null | null | null | ImGen.py | ibaranov-cp/ImGen_Circuit_Maker | 50c328ec0f62a77908dc3179de8fbf123f38caec | [
"BSD-3-Clause"
] | null | null | null | #!/usr/bin/env python
#BSD 3-Clause License
#Copyright (c) 2017, Ilia Baranov
#############################################
# CHANGE THESE VARS AS NEEDED
size = 10 #size of squares in mils
invert = False #Color invert the image
image_name = "test.png" #name of the image, can be BMP, PNG or JPG
#############################################
from PIL import Image
import numpy as np
im = Image.open(image_name)
im.load()
im = im.convert('1')
pixels = list(im.getdata())
width, height = im.size
pixels = [pixels[i * width:(i + 1) * width] for i in xrange(height)]
print height, width
def format_csv(i,x,y):
cv.write("\""+str(i)+"\",")
cv.write("\""+str(x*size)+"\",")
cv.write("\""+str((height - y - 1) *size)+"\",")
cv.write("\"\"\n")
with open(image_name[:-3]+"csv", 'w') as cv:
cv.write("\"Index\",\"X (mil)\",\"Y (mil)\",\"Arc Angle (Neg = CW)\"\n")
cv.write("\"0\",\"0\",\"0\",\"\"\n")
i = 1
comp = 0
if (invert): comp = 255
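# For every matching pixel, emit the four corners of a size x size square (closing back on the start point) as outline vertices.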
for y in range (0,height):
#print pixels[:][y] #For Debugging
for x in range (0,width):
if (pixels[y][x] == comp):
format_csv(i,x,y)
i+=1
format_csv(i,x,y-1)
i+=1
format_csv(i,x+1,y-1)
i+=1
format_csv(i,x+1,y)
i+=1
format_csv(i,x,y)
i+=1
cv.write("\""+str(i)+"\",")
cv.write("\"0\",\"0\",\"\"\n")
i+=1
| 23.982456 | 74 | 0.504755 | 219 | 1,367 | 3.109589 | 0.360731 | 0.023495 | 0.088106 | 0.096916 | 0.193833 | 0.161527 | 0.108664 | 0.089574 | 0.048458 | 0.048458 | 0 | 0.030556 | 0.209949 | 1,367 | 57 | 75 | 23.982456 | 0.6 | 0.159473 | 0 | 0.25641 | 0 | 0 | 0.107538 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | null | 0 | 0.051282 | null | null | 0.025641 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
7ea8f23726760ea2f3958ff9164e22aa48175835 | 2,031 | py | Python | i2c/motion_sensor.py | Matrix-Robotics/MatrixControl | 0e596be195b2324154bc6ec220172863e311ccb7 | [
"MIT"
] | 1 | 2021-07-15T07:33:43.000Z | 2021-07-15T07:33:43.000Z | i2c/motion_sensor.py | Matrix-Robotics/MatrixControl | 0e596be195b2324154bc6ec220172863e311ccb7 | [
"MIT"
] | 1 | 2021-08-12T09:08:10.000Z | 2021-08-12T09:08:10.000Z | i2c/motion_sensor.py | Matrix-Robotics/MatrixControl | 0e596be195b2324154bc6ec220172863e311ccb7 | [
"MIT"
] | null | null | null | class MotionSensor:
"""Get 9Dof data by using MotionSensor.
See [MatrixMotionSensor](https://matrix-robotics.github.io/MatrixMotionSensor/) for more details.
Parameters
----------
i2c_port : int
i2c_port is corresponding with I2C1, I2C2 ... sockets on board.
_dev : class
MatrixControl.Device class
"""
def __init__(self, _dev, i2c_port):
self.i2c_port = i2c_port
self._dev = _dev
def _complement(self, _buff):
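# Interpret the raw 16-bit register value as a signed integer (two's complement).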
if len(str(_buff)) > 1:
if _buff > 32767:
_buff -= 65536
return _buff
def getAccel(self, axis):
"""Get Accel Data. (unit: mm/s^2)
Parameters
----------
axis : str
options are "X", "Y" or "Z"
"""
_buff = "I2C{}_GETACCEL_{}".format(self.i2c_port, axis.upper())
self._dev._sendbuff(self._dev.protocol[_buff])
self._dev._readbuff()
return self._complement(self._dev._rxbuff)
def getGyro(self, axis):
"""Get Gyro Data. (unit: degree per second)
Parameters
----------
axis : str
options are "X", "Y" or "Z"
"""
_buff = "I2C{}_GETGYRO_{}".format(self.i2c_port, axis.upper())
self._dev._sendbuff(self._dev.protocol[_buff])
self._dev._readbuff()
return self._complement(self._dev._rxbuff)
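# Orientation angles (roll / yaw / pitch) read from the sensor.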
def getRoll(self):
_buff = "I2C{}_GETROLL".format(self.i2c_port)
self._dev._sendbuff(self._dev.protocol[_buff])
self._dev._readbuff()
return self._complement(self._dev._rxbuff)
def getYaw(self):
_buff = "I2C{}_GETYAW".format(self.i2c_port)
self._dev._sendbuff(self._dev.protocol[_buff])
self._dev._readbuff()
return self._complement(self._dev._rxbuff)
def getPitch(self):
_buff = "I2C{}_GETPITCH".format(self.i2c_port)
self._dev._sendbuff(self._dev.protocol[_buff])
self._dev._readbuff()
return self._complement(self._dev._rxbuff)
| 29.867647 | 101 | 0.589857 | 236 | 2,031 | 4.733051 | 0.300847 | 0.137869 | 0.059087 | 0.076097 | 0.539839 | 0.539839 | 0.539839 | 0.539839 | 0.539839 | 0.539839 | 0 | 0.021666 | 0.272772 | 2,031 | 67 | 102 | 30.313433 | 0.734597 | 0.240768 | 0 | 0.441176 | 0 | 0 | 0.051064 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.205882 | false | 0 | 0 | 0 | 0.411765 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
7eab2e9233141229915ea54e3a9ad3980ec8ae2f | 1,418 | py | Python | nlt/debug/dataset.py | isabella232/neural-light-transport | 04acc39713be464e74e0393315c71416f67a3ef3 | [
"Apache-2.0"
] | 176 | 2020-08-13T02:31:07.000Z | 2022-03-24T05:50:55.000Z | nlt/debug/dataset.py | isabella232/neural-light-transport | 04acc39713be464e74e0393315c71416f67a3ef3 | [
"Apache-2.0"
] | 7 | 2020-11-16T05:07:08.000Z | 2022-02-07T04:19:44.000Z | nlt/debug/dataset.py | isabella232/neural-light-transport | 04acc39713be464e74e0393315c71416f67a3ef3 | [
"Apache-2.0"
] | 30 | 2020-08-13T07:03:34.000Z | 2022-03-23T18:55:26.000Z | # Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
from os.path import join, dirname
from absl import app
import tensorflow as tf
tf.compat.v1.enable_eager_execution()
sys.path.append('../')
import datasets
from util import io as ioutil
def main(_):
config_ini = join(dirname(__file__), '..', 'config', 'dragon_specular.ini')
config = ioutil.read_config(config_ini)
# Make training dataset
dataset_name = config.get('DEFAULT', 'dataset')
Dataset = datasets.get_dataset_class(dataset_name)
dataset = Dataset(config, 'train')
path = dataset.files[1]
ret = dataset._load_data(path)
# Iterate
no_batch = config.getboolean('DEFAULT', 'no_batch')
datapipe = dataset.build_pipeline(no_batch=no_batch)
for batch_i, batch in enumerate(datapipe):
from IPython import embed; embed()
if __name__ == '__main__':
app.run(main)
| 29.541667 | 79 | 0.727786 | 203 | 1,418 | 4.931034 | 0.576355 | 0.05994 | 0.025974 | 0.031968 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.008569 | 0.17701 | 1,418 | 47 | 80 | 30.170213 | 0.849186 | 0.407616 | 0 | 0 | 0 | 0 | 0.087379 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.045455 | false | 0 | 0.318182 | 0 | 0.363636 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 1 |
7eae90d3c01eaa4dde6516942f1304a7d7bfc497 | 1,247 | py | Python | Curso Udemy 2022/Curso_Luiz_Otavio/aula_75_ex.py | Matheusfarmaceutico/Exercicios-Python | d1821bd9d11ea0707074c5fe11dead2e85476ebd | [
"MIT"
] | null | null | null | Curso Udemy 2022/Curso_Luiz_Otavio/aula_75_ex.py | Matheusfarmaceutico/Exercicios-Python | d1821bd9d11ea0707074c5fe11dead2e85476ebd | [
"MIT"
] | null | null | null | Curso Udemy 2022/Curso_Luiz_Otavio/aula_75_ex.py | Matheusfarmaceutico/Exercicios-Python | d1821bd9d11ea0707074c5fe11dead2e85476ebd | [
"MIT"
] | null | null | null | def separador():
print("-="*30)
"""
Given two lists of integers or floats (list A and list B),
sum the values in the lists, returning a new list with the summed values.
If one list is longer than the other, the sum only considers the length of the
shorter one.
Example:
lista_a = [1, 2, 3, 4, 5, 6, 7]
lista_b = [1, 2, 3, 4]"""
separador()
# My solution (more pythonic)
lista_a = [1,2,3,4,5,6,7]
lista_b = [1,2,3,4]
temp = zip(lista_a,lista_b)
for v in temp:
print(sum(v))
separador()
# A more conventional approach, common to all languages
lista_soma = []
lista_a = [1,2,3,4,5,6,7]
lista_b = [1,2,3,4]
for i in range(len(lista_b)):
lista_soma.append(lista_a[i] + lista_b[i])
print(lista_soma)
separador()
# Another conventional approach, this time using enumerate, which exists only in Python
lista_soma = []
lista_a = [1,2,3,4,5,6,7]
lista_b = [1,2,3,4]
for i, _ in enumerate(lista_b):
lista_soma.append(lista_a[i] + lista_b[i])
print(lista_soma)
separador()
# Luiz Otávio's solution, which I judged the most correct, using an even more pythonic style than the one I developed.
lista_a = [1,2,3,4,5,6,7]
lista_b = [1,2,3,4]
lista_soma = [x + y for x,y in (zip(lista_a, lista_b))]
print(lista_soma) | 26.531915 | 122 | 0.690457 | 245 | 1,247 | 3.395918 | 0.359184 | 0.086538 | 0.036058 | 0.048077 | 0.34976 | 0.313702 | 0.313702 | 0.313702 | 0.313702 | 0.313702 | 0 | 0.055126 | 0.17081 | 1,247 | 47 | 123 | 26.531915 | 0.749516 | 0.241379 | 0 | 0.703704 | 0 | 0 | 0.003125 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.037037 | false | 0 | 0 | 0 | 0.037037 | 0.185185 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
7eb37405a08b46272d97b80f6cb1504d1a02dffc | 884 | py | Python | aoc2020/4/d4_2.py | kewbish/ka-algorithms | 7a893fdaebd99530eaf0d9633c2721763707e92f | [
"MIT"
] | null | null | null | aoc2020/4/d4_2.py | kewbish/ka-algorithms | 7a893fdaebd99530eaf0d9633c2721763707e92f | [
"MIT"
] | null | null | null | aoc2020/4/d4_2.py | kewbish/ka-algorithms | 7a893fdaebd99530eaf0d9633c2721763707e92f | [
"MIT"
] | null | null | null | from re import match
with open("input.txt") as x:
lines = x.read().strip().split("\n\n")
lines = [line.replace("\n", " ") for line in lines]
valid = 0
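# Per-field validation rules; a passport counts as valid only if every required field is present and passes its check.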
fields = {
'byr': lambda x: 1920 <= int(x) <= 2002,
'iyr': lambda x: 2010 <= int(x) <= 2020,
'eyr': lambda x: 2020 <= int(x) <= 2030,
'hgt': lambda x: (x[-2:] == 'cm' and 150 <= int(x[:-2]) <= 193) or (x[-2:] == 'in' and 59 <= int(x[:-2]) <= 76),
'hcl': lambda x: match(r"^#[a-f0-9]{6}$", x), # match only matches from beginning of string
'ecl': lambda x: x in ['amb', 'blu', 'brn', 'gry', 'grn', 'hzl', 'oth'],
'pid': lambda x: x.isnumeric() and len(x) == 9
}
for passport in lines:
pd = dict(tuple(i.split(":")) for i in passport.split())
if all((field in pd.keys() and fields[field](pd[field])) for field in fields):
valid += 1
print(valid)
| 35.36 | 120 | 0.519231 | 142 | 884 | 3.232394 | 0.521127 | 0.106754 | 0.052288 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.066265 | 0.248869 | 884 | 24 | 121 | 36.833333 | 0.625 | 0.048643 | 0 | 0 | 0 | 0 | 0.091776 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0.105263 | 0.052632 | 0 | 0.052632 | 0.052632 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 1 |
7eb6be94d7cf79345f60800447c7a1dfb1e58dc6 | 1,126 | py | Python | dev/umm-exploration-has-calculator.py | fangohr/oommf-python | 9c9f617c4efe4b488f01703186c1126070ea5d3f | [
"BSD-2-Clause"
] | 7 | 2016-01-25T09:36:46.000Z | 2021-09-03T01:42:19.000Z | dev/umm-exploration-has-calculator.py | fangohr/oommf-python | 9c9f617c4efe4b488f01703186c1126070ea5d3f | [
"BSD-2-Clause"
] | 1 | 2016-03-07T17:11:44.000Z | 2016-03-07T17:11:44.000Z | dev/umm-exploration-has-calculator.py | fangohr/oommf-python | 9c9f617c4efe4b488f01703186c1126070ea5d3f | [
"BSD-2-Clause"
] | 9 | 2015-09-30T10:53:06.000Z | 2021-05-12T20:21:52.000Z | class MicromagneticModell:
def __init__(self, name, Ms, calc):
self.name = name
self.Ms = Ms
self.field = None
self.calc = calc
def __str__(self):
return "AbstractMicromagneticModell(name={})".format(self.name)
def relax(self):
self.calc.relax(self)
def set_H(self, field):
print("AbstractMicromagneticModell: setting field = {}")
self.field = field
def hysteresis(self, fieldlist):
print("AbstractMicromagneticModell: starting hysteresis")
for field in fieldlist:
self.set_H(field)
self.relax()
class OOMMFC():
def __init__(self):
pass
def __str__(self):
return "OOMMFC()"
def relax(self, mm):
print("Calling OOMMF to run relax() with H={}".format(mm.field))
#a = AbstractMicromagneticModell('simulation-name', 10)
#print(a)
#a.hysteresis([10, 20])
ocalc = OOMMFC()
o = MicromagneticModell(name='test', Ms=42, calc=ocalc)
print(o)
o.relax()
#f = FIDIMAGC(name='fidimag-simulation', Ms=8e6)
#print(o)
#f.relax()
#o.relax()
#o.hysteresis([10, 20, 30])
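# Illustrative sketch only: a second calculator backend that follows the same
# pattern as OOMMFC above. The FIDIMAGC name is taken from the commented-out
# exploration and is an assumption, not part of the original file.
class FIDIMAGC():
    def __str__(self):
        return "FIDIMAGC()"

    def relax(self, mm):
        print("Calling Fidimag to run relax() with H={}".format(mm.field))

# fcalc = FIDIMAGC()
# f = MicromagneticModell(name='fidimag-test', Ms=8e6, calc=fcalc)
# f.hysteresis([10, 20])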
| 21.245283 | 72 | 0.617229 | 135 | 1,126 | 5.014815 | 0.333333 | 0.035451 | 0.032496 | 0.047267 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.018692 | 0.239787 | 1,126 | 52 | 73 | 21.653846 | 0.772196 | 0.162522 | 0 | 0.068966 | 0 | 0 | 0.193583 | 0.098396 | 0 | 0 | 0 | 0 | 0 | 1 | 0.275862 | false | 0.034483 | 0 | 0.068966 | 0.413793 | 0.137931 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
0e1e160e8d90a3c603ba44d2101e9bfc5828fd10 | 7,104 | py | Python | clickmodel-experiments/scripts/model/ClickModelExperiment.py | nut-hatch/LOVBench | 365ea4ad0d6258b840439506ce97acb3827c39e6 | [
"Apache-2.0"
] | null | null | null | clickmodel-experiments/scripts/model/ClickModelExperiment.py | nut-hatch/LOVBench | 365ea4ad0d6258b840439506ce97acb3827c39e6 | [
"Apache-2.0"
] | 1 | 2021-08-09T20:59:22.000Z | 2021-08-09T20:59:22.000Z | clickmodel-experiments/scripts/model/ClickModelExperiment.py | nut-hatch/LOVBench | 365ea4ad0d6258b840439506ce97acb3827c39e6 | [
"Apache-2.0"
] | null | null | null | __author__ = 'Anonymous'
import time
import csv
import os.path
import pyclick
from pyclick.utils.YandexRelPredChallengeParser import YandexRelPredChallengeParser
from pyclick.utils.Utils import Utils
from pyclick.click_models.Evaluation import LogLikelihood, Perplexity
from pyclick.click_models.UBM import UBM
from pyclick.click_models.DBN import DBN
from pyclick.click_models.SDBN import SDBN
from pyclick.click_models.DCM import DCM
from pyclick.click_models.CCM import CCM
from pyclick.click_models.CTR import DCTR, RCTR, GCTR
from pyclick.click_models.CM import CM
from pyclick.click_models.PBM import PBM
#
# Based on the original PyClick example from Ilya Markov: https://github.com/markovi/PyClick/blob/master/examples/Example.py
#
class ClickModelExperiment:
def __init__(self, output_path, search_log_file, click_model_name, search_sessions_num):
self.output_path = output_path
self.search_log_file = search_log_file
self.click_model_name = click_model_name
self.search_sessions_num = search_sessions_num
self.click_props_filename = "ClickProbability"
self.satisfaction_probs_filename = "SatisfactionProbability"
self.model_performance_filename = "PerformanceResults"
self.model_path = self.output_path + "models/"
try:
os.makedirs(self.model_path)
except OSError:
print ("folder exists")
else:
print ("folder created")
def run_experiment(self):
click_model = globals()[self.click_model_name]()
search_sessions = YandexRelPredChallengeParser().parse(self.search_log_file, self.search_sessions_num)
train_test_split = int(len(search_sessions) * 0.75)
train_sessions = search_sessions[:train_test_split]
train_queries = Utils.get_unique_queries(train_sessions)
test_sessions = Utils.filter_sessions(search_sessions[train_test_split:], train_queries)
test_queries = Utils.get_unique_queries(test_sessions)
print "-------------------------------"
print "Training on %d search sessions (%d unique queries)." % (len(train_sessions), len(train_queries))
print "-------------------------------"
start = time.time()
click_model.train(train_sessions)
end = time.time()
print "\tTrained %s click model in %i secs:\n%r" % (click_model.__class__.__name__, end - start, click_model)
self.evaluate_click_model(click_model, train_sessions, train_queries, test_sessions, test_queries)
model_file = self.model_path + click_model.__class__.__name__ + ".json"
with open(model_file, mode='w') as model_file:
model_file.write(click_model.to_json())
self.get_click_probs(click_model, search_sessions)
self.get_satisfaction_probs(click_model, search_sessions)
def evaluate_click_model(self, click_model, train_sessions, train_queries, test_sessions, test_queries):
print "-------------------------------"
print "Testing on %d search sessions (%d unique queries)." % (len(test_sessions), len(test_queries))
print "-------------------------------"
loglikelihood = LogLikelihood()
perplexity = Perplexity()
start = time.time()
ll_value_train = loglikelihood.evaluate(click_model, train_sessions)
ll_value_test = loglikelihood.evaluate(click_model, test_sessions)
end = time.time()
print "\tlog-likelihood: %f; time: %i secs" % (ll_value_test, end - start)
start = time.time()
perp_value_train = perplexity.evaluate(click_model, train_sessions)[0]
perp_value_test = perplexity.evaluate(click_model, test_sessions)[0]
end = time.time()
print "\tperplexity: %f; time: %i secs" % (perp_value_test, end - start)
model_performance_path = self.output_path + self.model_performance_filename + ".csv"
if not os.path.isfile(model_performance_path):
print "file does not exist"
with open(model_performance_path, mode='w') as model_performance_file:
performance_writer = csv.writer(model_performance_file, delimiter=',', quotechar='"', quoting=csv.QUOTE_ALL)
performance_writer.writerow(['TimeStamp', 'ClickLogFile', 'ClickModel', 'SearchSessions_Train', 'UniqueQueries_Train', 'SearchSessions_Test', 'UniqueQueries_Test', 'LogLikelihood_Train', 'LogLikelihood_Test', 'Perplexity_Train', 'Perplexity_Test'])
with open(model_performance_path, mode='a') as model_performance_file:
performance_writer = csv.writer(model_performance_file, delimiter=',', quotechar='"', quoting=csv.QUOTE_ALL)
performance_writer.writerow([time.time(), self.search_log_file, click_model.__class__.__name__, len(train_sessions), len(train_queries), len(test_sessions), len(test_queries), ll_value_train, ll_value_test, perp_value_train, perp_value_test])
def get_click_probs(self, click_model, search_sessions):
click_probabilites = []
query_cache = []
for search_session in search_sessions:
query = search_session.query
if query not in query_cache:
query_cache.append(query)
web_results = search_session.web_results
click_probs = click_model.get_full_click_probs(search_session)
for x in range(len(web_results)):
web_result = web_results[x]
click_prob = str(click_probs[x])
click_probabilites.append("\"" + query + "\",\"" + web_result.id + "\",\"" + click_prob + "\"\n")
# for rank, click_prob in enumerate(click_probs):
# print str(rank) + " " + str(click_prob)
# '../resources/output/VocabRankingClickProbabilities-v2.txt'
click_props_path = self.output_path + click_model.__class__.__name__ + "_" + self.click_props_filename + "_Raw.csv"
with open(click_props_path, 'w') as out:
out.writelines(click_probabilites)
out.close()
def get_satisfaction_probs(self, click_model, search_sessions):
satisfaction_probs = []
query_cache = []
for search_session in search_sessions:
query = search_session.query
if query not in query_cache:
query_cache.append(query)
web_results = search_session.web_results
for x in range(len(web_results)):
web_result = web_results[x]
satisfaction = click_model.predict_relevance(query, web_result.id)
# print query + " - " + web_result.id + " - " + str(relevance)
satisfaction_probs.append("\"" + query + "\",\"" + web_result.id + "\",\"" + str(satisfaction) + "\"\n")
satisfaction_probs_path = self.output_path + click_model.__class__.__name__ + "_" + self.satisfaction_probs_filename + "_Raw.csv"
with open(satisfaction_probs_path, 'w') as out:
out.writelines(satisfaction_probs)
out.close()
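# Illustrative usage sketch (not part of the original script); the output
# directory, click-log path and session count below are assumptions:
#
#   experiment = ClickModelExperiment("output/", "clicklog.txt", "UBM", 10000)
#   experiment.run_experiment()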
| 48.326531 | 265 | 0.6692 | 829 | 7,104 | 5.373945 | 0.193004 | 0.062851 | 0.032323 | 0.044444 | 0.398204 | 0.287318 | 0.236139 | 0.222222 | 0.18541 | 0.167003 | 0 | 0.001079 | 0.217342 | 7,104 | 146 | 266 | 48.657534 | 0.80018 | 0.047157 | 0 | 0.27027 | 0 | 0 | 0.114497 | 0.021746 | 0 | 0 | 0 | 0 | 0 | 0 | null | null | 0 | 0.135135 | null | null | 0.108108 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
0e1e867c79f973710511eae3645a7ccaa06989d8 | 242 | py | Python | archive/2016/week5/homework/even.py | YAtOff/python0 | b5af5004131d64dd52d42746eddb72b6c43a13c7 | [
"Apache-2.0"
] | 6 | 2017-11-08T14:04:39.000Z | 2019-03-24T22:11:04.000Z | archive/2016/week5/homework/even.py | YAtOff/python0 | b5af5004131d64dd52d42746eddb72b6c43a13c7 | [
"Apache-2.0"
] | null | null | null | archive/2016/week5/homework/even.py | YAtOff/python0 | b5af5004131d64dd52d42746eddb72b6c43a13c7 | [
"Apache-2.0"
] | 7 | 2015-10-27T09:04:58.000Z | 2019-03-03T14:18:26.000Z | """
Define a function `is_even` that takes a number and returns `True` if the
number is even and `False` otherwise.
>>> is_even(4)
True
>>> is_even(5)
False
"""
def is_even(number):
    return number % 2 == 0
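# A minimal way to run the doctests in the module docstring above
# (an illustrative addition, not part of the original homework file):
if __name__ == "__main__":
    import doctest
    doctest.testmod()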
| 18.615385 | 79 | 0.652893 | 35 | 242 | 4.4 | 0.742857 | 0.155844 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.010582 | 0.219008 | 242 | 12 | 80 | 20.166667 | 0.804233 | 0.710744 | 0 | 0 | 0 | 0 | 0.241935 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.5 | false | 0 | 0 | 0 | 0.5 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
0e1f37ae758638a4fc7ba5ee3a4f94668d053d38 | 2,165 | py | Python | piws/views/actions.py | neurospin/piws | 4ec6f60c6343623a82761c90c74642b4b372ffd1 | [
"CECILL-B"
] | null | null | null | piws/views/actions.py | neurospin/piws | 4ec6f60c6343623a82761c90c74642b4b372ffd1 | [
"CECILL-B"
] | 9 | 2015-05-18T09:04:35.000Z | 2017-03-24T10:47:23.000Z | piws/views/actions.py | neurospin/piws | 4ec6f60c6343623a82761c90c74642b4b372ffd1 | [
"CECILL-B"
] | 17 | 2015-03-16T08:27:47.000Z | 2017-08-04T16:26:29.000Z | ##########################################################################
# NSAp - Copyright (C) CEA, 2013
# Distributed under the terms of the CeCILL-B license, as published by
# the CEA-CNRS-INRIA. Refer to the LICENSE file or to
# http://www.cecill.info/licences/Licence_CeCILL-B_V1-en.html
# for details.
##########################################################################
# System import
from packaging import version
# Cubicweb import
import cubicweb
cw_version = version.parse(cubicweb.__version__)
if cw_version >= version.parse("3.21.0"):
from cubicweb import _
from cubicweb.predicates import is_instance
from cubicweb.predicates import authenticated_user
from cubicweb.web.action import Action
from cubicweb.web.views.wdoc import HelpAction, AboutAction
from cubicweb.web.views.actions import PoweredByAction
from cubicweb.web.views.actions import UserPreferencesAction
from cubicweb.web.views.actions import UserInfoAction
from logilab.common.registry import yes
###############################################################################
# ACTIONS
###############################################################################
class NeurospinAction(Action):
__regid__ = "neurospin"
__select__ = yes()
category = "footer"
order = 1
title = _("NeuroSpin")
def url(self):
return "http://i2bm.cea.fr/drf/i2bm/NeuroSpin"
class LicenseAction(Action):
__regid__ = "license"
__select__ = yes()
category = "footer"
order = 2
title = _("License")
def url(self):
return self._cw.build_url("license")
class PIWSPoweredByAction(Action):
__regid__ = "poweredby"
__select__ = yes()
category = "footer"
order = 3
title = _("Powered by NSAp")
def url(self):
return "https://github.com/neurospin/piws"
def registration_callback(vreg):
# Update the footer
vreg.register_and_replace(PIWSPoweredByAction, PoweredByAction)
vreg.register(NeurospinAction)
vreg.register(LicenseAction)
vreg.unregister(HelpAction)
vreg.unregister(AboutAction)
vreg.unregister(UserPreferencesAction)
vreg.unregister(UserInfoAction)
| 28.116883 | 79 | 0.627714 | 223 | 2,165 | 5.901345 | 0.439462 | 0.072948 | 0.056991 | 0.06079 | 0.139058 | 0.075228 | 0 | 0 | 0 | 0 | 0 | 0.007609 | 0.150115 | 2,165 | 76 | 80 | 28.486842 | 0.707609 | 0.12933 | 0 | 0.2 | 0 | 0 | 0.100064 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.088889 | false | 0 | 0.244444 | 0.066667 | 0.8 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 1 |
0e282455991d9cc519b5a1a793207bc93c3d9068 | 793 | py | Python | data_loaders/data_loader_interface.py | jennis0/pdf2vtt | 4aad37ef3dfce4d83f3a2744856879598cd4446f | [
"MIT"
] | null | null | null | data_loaders/data_loader_interface.py | jennis0/pdf2vtt | 4aad37ef3dfce4d83f3a2744856879598cd4446f | [
"MIT"
] | null | null | null | data_loaders/data_loader_interface.py | jennis0/pdf2vtt | 4aad37ef3dfce4d83f3a2744856879598cd4446f | [
"MIT"
] | null | null | null | import abc
from typing import List
from utils.datatypes import Source
class DataLoaderInterface(object):
@abc.abstractmethod
def get_name() -> str:
'''Returns an internal name for this loader'''
raise NotImplementedError("users must define a name for this loader")
@staticmethod
@abc.abstractmethod
def get_filetypes() -> List[str]:
'''Returns a list of file types supported by this data loader'''
raise NotImplementedError('users must define a list of supported filetypes.')
@abc.abstractmethod
def load_data_from_file(self, filepath: str) -> Source:
'''Reads file and extracts lines of texts. Returns one section per page'''
raise NotImplementedError("userers must define a function to load data from a file.") | 36.045455 | 93 | 0.706179 | 102 | 793 | 5.441176 | 0.5 | 0.091892 | 0.108108 | 0.082883 | 0.165766 | 0.165766 | 0.165766 | 0 | 0 | 0 | 0 | 0 | 0.214376 | 793 | 22 | 93 | 36.045455 | 0.890851 | 0.211854 | 0 | 0.214286 | 0 | 0 | 0.236066 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.214286 | false | 0 | 0.214286 | 0 | 0.5 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
0e2930d0e963e50e57e9afabe5382cdd9bed7229 | 4,176 | py | Python | dbglang/dbp.py | thautwarm/dbg-lang | 22b5230bc1658494b93a2741aab3ef63a06fdee3 | [
"MIT"
] | 1 | 2018-04-09T11:43:52.000Z | 2018-04-09T11:43:52.000Z | dbglang/dbp.py | thautwarm/dbg-lang | 22b5230bc1658494b93a2741aab3ef63a06fdee3 | [
"MIT"
] | null | null | null | dbglang/dbp.py | thautwarm/dbg-lang | 22b5230bc1658494b93a2741aab3ef63a06fdee3 | [
"MIT"
] | 1 | 2019-06-21T08:33:24.000Z | 2019-06-21T08:33:24.000Z | from Ruikowa.ObjectRegex.Node import Ref, AstParser, SeqParser, LiteralParser, CharParser, MetaInfo, DependentAstParser
try:
from .etoken import token
except:
from etoken import token
import re
namespace = globals()
recurSearcher = set()
PrimaryDefList = AstParser([Ref('FieldDef'), SeqParser([LiteralParser(',', name='\',\''), Ref('FieldDef')])],
name='PrimaryDefList', toIgnore=[{}, {','}])
FieldDefList = AstParser([SeqParser([Ref('FieldDef'), SeqParser([LiteralParser('\n', name='\'\n\'')])]),
SeqParser([LiteralParser('\n', name='\'\n\'')])], name='FieldDefList', toIgnore=[{}, {'\n'}])
TableDef = AstParser(
[Ref('Symbol'), LiteralParser('(', name='\'(\''), Ref('PrimaryDefList'), LiteralParser(')', name='\')\''),
SeqParser([LiteralParser('\n', name='\'\n\'')]), LiteralParser('{', name='\'{\''),
SeqParser([LiteralParser('\n', name='\'\n\'')]), Ref('FieldDefList'),
SeqParser([Ref('ReprDef'), SeqParser([LiteralParser('\n', name='\'\n\'')])], atmost=1),
LiteralParser('}', name='\'}\'')], name='TableDef', toIgnore=[{}, {'{', '}', '(', ')', '\n'}])
FieldDef = AstParser([Ref('Symbol'), LiteralParser(':', name='\':\''), Ref('Type')], name='FieldDef',
toIgnore=[{}, {':'}])
Type = AstParser([Ref('Symbol'), SeqParser([Ref('Option')]),
SeqParser([LiteralParser('=', name='\'=\''), Ref('Default')], atmost=1)], name='Type',
toIgnore=[{}, {'='}])
Option = AstParser([LiteralParser('?', name='\'?\'')], [LiteralParser('!', name='\'!\'')],
[LiteralParser('~', name='\'~\'')], name='Option')
Default = AstParser([SeqParser([LiteralParser('.+', name='\'.+\'', isRegex=True)], atleast=1)], name='Default')
ReprDef = AstParser([LiteralParser('repr', name='\'repr\''), DependentAstParser(
[LiteralParser('{', name='\'{\''), SeqParser([LiteralParser('\n', name='\'\n\'')]), Ref('SymbolList'),
SeqParser([LiteralParser('\n', name='\'\n\'')]), LiteralParser('}', name='\'}\'')],
[LiteralParser('=', name='\'=\''), LiteralParser('all', name='\'all\'')])], name='ReprDef',
toIgnore=[{}, {'=', '{', '}', 'all', 'repr', '\n'}])
SymbolList = AstParser([Ref('Symbol'), SeqParser([LiteralParser(',', name='\',\''), Ref('Symbol')])], name='SymbolList',
toIgnore=[{}, {','}])
Comment = AstParser([LiteralParser('#', name='\'#\''), Ref('Default')], name='Comment')
Symbol = AstParser([LiteralParser('[a-zA-Z][a-zA-Z_]*', name='\'[a-zA-Z][a-zA-Z_]*\'', isRegex=True)], name='Symbol')
WeightedSymbol = AstParser([Ref('Symbol'), SeqParser([LiteralParser('^', name='\'^\'')])], name='WeightedSymbol')
Relation = AstParser(
[Ref('WeightedSymbol'), Ref('Left'), LiteralParser('-', name='\'-\''), Ref('Right'), Ref('WeightedSymbol'),
SeqParser([LiteralParser('\n', name='\'\n\'')]), LiteralParser('{', name='\'{\''),
SeqParser([LiteralParser('\n', name='\'\n\'')]), SeqParser([Ref('FieldDefList')], atmost=1),
LiteralParser('}', name='\'}\'')], name='Relation', toIgnore=[{}, {'-', '}', '{', '\n'}])
Left = AstParser([SeqParser([LiteralParser('<', name='\'<\'')], atleast=1, atmost=2)], name='Left')
Right = AstParser([SeqParser([LiteralParser('>', name='\'>\'')], atleast=1, atmost=2)], name='Right')
Stmts = AstParser(
[SeqParser([DependentAstParser([LiteralParser('\n', name='\'\n\'')], [Ref('Relation')], [Ref('TableDef')])])],
name='Stmts', toIgnore=[{}, {'\n'}])
PrimaryDefList.compile(namespace, recurSearcher)
FieldDefList.compile(namespace, recurSearcher)
TableDef.compile(namespace, recurSearcher)
FieldDef.compile(namespace, recurSearcher)
Type.compile(namespace, recurSearcher)
Option.compile(namespace, recurSearcher)
Default.compile(namespace, recurSearcher)
ReprDef.compile(namespace, recurSearcher)
SymbolList.compile(namespace, recurSearcher)
Comment.compile(namespace, recurSearcher)
Symbol.compile(namespace, recurSearcher)
WeightedSymbol.compile(namespace, recurSearcher)
Relation.compile(namespace, recurSearcher)
Left.compile(namespace, recurSearcher)
Right.compile(namespace, recurSearcher)
Stmts.compile(namespace, recurSearcher)
| 64.246154 | 120 | 0.608477 | 369 | 4,176 | 6.880759 | 0.140921 | 0.147302 | 0.182749 | 0.074833 | 0.313115 | 0.233557 | 0.144545 | 0.120126 | 0.100039 | 0.057503 | 0 | 0.002171 | 0.117577 | 4,176 | 64 | 121 | 65.25 | 0.686839 | 0 | 0 | 0.032258 | 0 | 0 | 0.107998 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.064516 | 0 | 0.064516 | 0 | 0 | 0 | 0 | null | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
0e2b1e0932a1e36d8ff5f038e7d31ff803df6266 | 1,794 | py | Python | pythonAlgorithm/datastrcture/Kth Smallest Number in Sorted Matrix.py | Sky-zzt/lintcodePractice | d6436b296c5865d85e55c8ad4fcdbb0165d4ebb1 | [
"MIT"
] | 1 | 2020-09-15T07:58:55.000Z | 2020-09-15T07:58:55.000Z | pythonAlgorithm/datastrcture/Kth Smallest Number in Sorted Matrix.py | Sky-zzt/lintcodePractice | d6436b296c5865d85e55c8ad4fcdbb0165d4ebb1 | [
"MIT"
] | null | null | null | pythonAlgorithm/datastrcture/Kth Smallest Number in Sorted Matrix.py | Sky-zzt/lintcodePractice | d6436b296c5865d85e55c8ad4fcdbb0165d4ebb1 | [
"MIT"
] | null | null | null | import heapq
class Solution:
"""
@param matrix: a matrix of integers
@param k: An integer
@return: the kth smallest number in the matrix
    Find the kth smallest integer in a sorted matrix (from smallest to largest).
    A sorted matrix is one in which every row is increasing and every column is increasing.
    Example
    Example 1:
    Input:
    [
    [1 ,5 ,7],
    [3 ,7 ,8],
    [4 ,8 ,9],
    ]
    k = 4
    Output: 5
    Example 2:
    Input:
    [
    [1, 2],
    [3, 4]
    ]
    k = 3
    Output: 3
    Challenge
    Time complexity O(k log n), where n is the maximum of the matrix's width and height
"""
    # TODO: Java's TreeSet keeps itself sorted and still supports O(log n) removal
def kthSmallest(self, nums, k):
# write your code here
self.minheap, self.maxheap = [], []
medians = []
for i in range(len(nums)):
self.add(nums[i], i, k)
medians.append(self.median)
return medians
@property
def median(self):
if len(self.minheap) > len(self.maxheap):
return self.minheap[0]
return -self.maxheap[0]
def add(self, value, index, winsize):
if len(self.maxheap) + len(self.minheap) > winsize: # todo
self.remove(index - winsize)
if len(self.maxheap) == 0:
heapq.heappush(self.maxheap, -value)
return
if -self.maxheap[0] < value:
heapq.heappush(self.minheap, value)
else:
heapq.heappush(self.maxheap, -value)
self.modifyTwoHeapsSize()
def remove(self, idx):
if idx in self.minheap:
self.minheap.remove(idx)
else:
self.maxheap.remove(idx)
def modifyTwoHeapsSize(self):
if len(self.maxheap) + 2 == len(self.minheap):
heapq.heappush(self.maxheap, -heapq.heappop(self.minheap))
if len(self.minheap) + 2 == len(self.maxheap):
heapq.heappush(self.minheap, -heapq.heappop(self.maxheap))
| 23.92 | 70 | 0.544593 | 218 | 1,794 | 4.481651 | 0.344037 | 0.146366 | 0.046059 | 0.04913 | 0.116684 | 0.057318 | 0 | 0 | 0 | 0 | 0 | 0.020903 | 0.333333 | 1,794 | 74 | 71 | 24.243243 | 0.795987 | 0.264214 | 0 | 0.114286 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.013514 | 0 | 1 | 0.142857 | false | 0 | 0.028571 | 0 | 0.314286 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
0e2d0a35bac66187f607550adfc8a5821291ee5b | 3,493 | py | Python | test/test-funders.py | yurivict/habanero | 857897a88811153f7460472219fd78d4e68bdc12 | [
"MIT"
] | null | null | null | test/test-funders.py | yurivict/habanero | 857897a88811153f7460472219fd78d4e68bdc12 | [
"MIT"
] | 1 | 2015-10-07T02:20:24.000Z | 2015-10-07T02:20:24.000Z | test/test-funders.py | yurivict/habanero | 857897a88811153f7460472219fd78d4e68bdc12 | [
"MIT"
] | null | null | null | import pytest
import os
import requests
from habanero import exceptions, Crossref
from requests.exceptions import HTTPError
cr = Crossref()
@pytest.mark.vcr
def test_funders():
"funders - basic test"
res = cr.funders(limit=2)
assert dict == res.__class__
assert dict == res["message"].__class__
assert 2 == res["message"]["items-per-page"]
@pytest.mark.vcr
def test_funders_query():
"funders - param: query"
res = cr.funders(query="NSF", limit=2)
assert dict == res.__class__
assert dict == res["message"].__class__
assert 2 == res["message"]["items-per-page"]
@pytest.mark.vcr
def test_funders_sample_err():
with pytest.raises(exceptions.RequestError):
cr.funders(sample=2)
@pytest.mark.vcr
def test_funders_filter_fails_noidsworks():
with pytest.raises(exceptions.RequestError):
cr.funders(filter={"from_pub_date": "2014-03-03"})
@pytest.mark.vcr
def test_funders_filter_fails_noids():
with pytest.raises(exceptions.RequestError):
cr.funders(works=True, filter={"has_assertion": True})
@pytest.mark.vcr
def test_funders_filter_works():
"funders - filter works when used with id and works=True"
res = cr.funders(
ids="10.13039/100000001", works=True, filter={"has_assertion": True}
)
assert dict == res.__class__
assert 20 == res["message"]["items-per-page"]
@pytest.mark.vcr
def test_funders_fail_limit():
with pytest.raises(KeyError):
cr.funders(limit="things")
@pytest.mark.vcr
def test_funders_fail_offset():
with pytest.raises(KeyError):
cr.funders(offset="things")
@pytest.mark.vcr
def test_funders_fail_sort():
with pytest.raises(exceptions.RequestError):
cr.funders(sort="things")
@pytest.mark.vcr
def test_funders_field_queries():
"funders - param: kwargs - field queries work as expected"
res = cr.funders(
ids="10.13039/100000001",
works=True,
query_container_title="engineering",
filter={"type": "journal-article"},
limit=100,
)
titles = [x.get("title") for x in res["message"]["items"]]
assert dict == res.__class__
assert 5 == len(res["message"])
assert list == titles.__class__
assert 100 == len(titles)
@pytest.mark.vcr
def test_funders_query_filters_not_allowed_with_dois():
with pytest.raises(HTTPError):
cr.funders(ids="10.13039/100000001", query_container_title="engineering")
@pytest.mark.vcr
def test_funders_bad_id_warn():
"funders - param: warn"
with pytest.warns(UserWarning):
out = cr.funders(ids="10.13039/notarealdoi", warn=True)
assert out is None
@pytest.mark.vcr
def test_funders_mixed_ids_warn():
"funders - param: warn"
with pytest.warns(UserWarning):
out = cr.funders(ids=["10.13039/100000001", "10.13039/notarealdoi"], warn=True)
assert len(out) == 2
assert isinstance(out[0], dict)
assert out[1] is None
@pytest.mark.vcr
def test_funders_bad_id_works_warn():
"funders - param: warn"
with pytest.warns(UserWarning):
out = cr.funders(ids="10.13039/notarealdoi", works=True, warn=True)
assert out is None
@pytest.mark.vcr
def test_funders_mixed_ids_works_warn():
"funders - param: warn"
with pytest.warns(UserWarning):
out = cr.funders(
ids=["10.13039/100000001", "10.13039/notarealdoi"], works=True, warn=True
)
assert len(out) == 2
assert isinstance(out[0], dict)
assert out[1] is None
| 26.263158 | 87 | 0.681363 | 470 | 3,493 | 4.87234 | 0.206383 | 0.065502 | 0.085153 | 0.104803 | 0.735371 | 0.71441 | 0.638428 | 0.509607 | 0.408297 | 0.373362 | 0 | 0.047719 | 0.184082 | 3,493 | 132 | 88 | 26.462121 | 0.755789 | 0.069854 | 0 | 0.465347 | 0 | 0 | 0.177212 | 0 | 0 | 0 | 0 | 0 | 0.217822 | 1 | 0.148515 | false | 0 | 0.049505 | 0 | 0.19802 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
0e2d51cb9fe9bb1546fc7bbea2cb77d09472d09a | 812 | py | Python | geomagio/api/ws/algorithms.py | alejandrodelcampillo/geomag-algorithms | 43a734d63a8eb2a696f14237e0054e21d36de7c3 | [
"CC0-1.0"
] | 1 | 2021-02-22T23:45:22.000Z | 2021-02-22T23:45:22.000Z | geomagio/api/ws/algorithms.py | alejandrodelcampillo/geomag-algorithms | 43a734d63a8eb2a696f14237e0054e21d36de7c3 | [
"CC0-1.0"
] | 1 | 2021-09-08T03:42:52.000Z | 2021-09-08T03:42:52.000Z | geomagio/api/ws/algorithms.py | alejandrodelcampillo/geomag-algorithms | 43a734d63a8eb2a696f14237e0054e21d36de7c3 | [
"CC0-1.0"
] | null | null | null | from fastapi import APIRouter, Depends
from starlette.responses import Response
from ... import TimeseriesFactory
from ...algorithm import DbDtAlgorithm
from .DataApiQuery import DataApiQuery
from .data import format_timeseries, get_data_factory, get_data_query, get_timeseries
router = APIRouter()
@router.get("/algorithms/dbdt/")
def get_dbdt(
query: DataApiQuery = Depends(get_data_query),
data_factory: TimeseriesFactory = Depends(get_data_factory),
) -> Response:
dbdt = DbDtAlgorithm()
# read data
raw = get_timeseries(data_factory, query)
# run dbdt
timeseries = dbdt.process(raw)
elements = [f"{element}_DT" for element in query.elements]
# output response
return format_timeseries(
timeseries=timeseries, format=query.format, elements=elements
)
| 29 | 85 | 0.748768 | 94 | 812 | 6.297872 | 0.361702 | 0.047297 | 0.047297 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.165025 | 812 | 27 | 86 | 30.074074 | 0.873156 | 0.041872 | 0 | 0 | 0 | 0 | 0.037468 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.052632 | false | 0 | 0.315789 | 0 | 0.421053 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 1 |
0e2d851e83694bcead0aaa245c930030f6827cc1 | 1,478 | py | Python | lesson3/stage3/src/jvm/udacity/storm/resources/urltext.py | haitanle/storm-twitter | b68c90129d31eb11808922ec56ac9ac6535fdab2 | [
"MIT"
] | null | null | null | lesson3/stage3/src/jvm/udacity/storm/resources/urltext.py | haitanle/storm-twitter | b68c90129d31eb11808922ec56ac9ac6535fdab2 | [
"MIT"
] | null | null | null | lesson3/stage3/src/jvm/udacity/storm/resources/urltext.py | haitanle/storm-twitter | b68c90129d31eb11808922ec56ac9ac6535fdab2 | [
"MIT"
] | null | null | null | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# https://github.com/apache/storm/blob/master/examples/storm-starter/multilang/resources/splitsentence.py
import storm
import urllib2
from bs4 import BeautifulSoup
class URLBolt(storm.BasicBolt):
def process(self, tup):
url = tup.values[0]
# python urllib2
try:
html = urllib2.urlopen(url).read()
# using BeautifulSoup, "Making the Soup"
soup = BeautifulSoup(html)
# return title and paragraph tags
urlText = soup.findAll({'title' : True, 'p' : True})
#emit tuple if string exists
if urlText:
[storm.emit([t.string]) for t in urlText]
except:
pass
URLBolt().run()
| 35.190476 | 105 | 0.699594 | 201 | 1,478 | 5.144279 | 0.606965 | 0.058027 | 0.025145 | 0.030948 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.007819 | 0.221245 | 1,478 | 41 | 106 | 36.04878 | 0.89053 | 0.653586 | 0 | 0 | 0 | 0 | 0.01222 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.066667 | false | 0.066667 | 0.2 | 0 | 0.333333 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 1 |
0e2e642553f3d2eca725be5123dcabd2612e0fef | 598 | py | Python | ncl/property.py | MichaelBittencourt/NCL-Generator-API | 8eecf2ea4948354fae9f64b68da2f24ad0663d60 | [
"MIT"
] | 1 | 2020-06-26T09:59:27.000Z | 2020-06-26T09:59:27.000Z | ncl/property.py | MichaelBittencourt/NCL-Generator-API | 8eecf2ea4948354fae9f64b68da2f24ad0663d60 | [
"MIT"
] | null | null | null | ncl/property.py | MichaelBittencourt/NCL-Generator-API | 8eecf2ea4948354fae9f64b68da2f24ad0663d60 | [
"MIT"
] | 1 | 2020-01-07T23:16:11.000Z | 2020-01-07T23:16:11.000Z | #! /usr/bin/env python
# -*- coding: utf-8 -*-
# vim:fenc=utf-8
#
# Copyright © 2019 Michael Bittencourt <mchl.bittencourt@gmail.com>
#
# Distributed under terms of the MIT license.
"""
"""
from ncl.abstractelement import AbstractElement
class Property(AbstractElement):
def __init__(self, name, value=None, externable=None):
super().__init__("property", ["name", "value", "externable"], [])
self.set("name", name)
if value is not None:
self.set("value", value)
if externable is not None:
self.set("externable", externable)
pass
| 23.92 | 73 | 0.633779 | 72 | 598 | 5.166667 | 0.597222 | 0.056452 | 0.048387 | 0.069892 | 0.086022 | 0 | 0 | 0 | 0 | 0 | 0 | 0.012848 | 0.219064 | 598 | 24 | 74 | 24.916667 | 0.781585 | 0.280936 | 0 | 0 | 0 | 0 | 0.110843 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.1 | false | 0.1 | 0.1 | 0 | 0.3 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 1 |
0e30b1db2b544a801697916acc86f22b8a9e7d0e | 4,109 | py | Python | benchmarks/secure_data_SDK-benchmarks/bootloader/load_firmware.py | ghsecuritylab/BenchIoT | 4919427d35e578a7ff07ef5e0b4710b6455dd0b9 | [
"Apache-2.0"
] | 22 | 2019-05-03T03:39:09.000Z | 2022-02-26T17:14:15.000Z | benchmarks/secure_data_SDK-benchmarks/bootloader/load_firmware.py | ghsecuritylab/BenchIoT | 4919427d35e578a7ff07ef5e0b4710b6455dd0b9 | [
"Apache-2.0"
] | 3 | 2019-07-29T19:48:49.000Z | 2022-01-10T07:24:43.000Z | benchmarks/secure_data_SDK-benchmarks/bootloader/load_firmware.py | ghsecuritylab/BenchIoT | 4919427d35e578a7ff07ef5e0b4710b6455dd0b9 | [
"Apache-2.0"
] | 8 | 2019-05-16T08:02:33.000Z | 2021-08-03T03:41:37.000Z |
from struct import pack, unpack
import binascii
import socket
HOST = '192.168.0.10'
PORT = 1337
BUFF_SIZE = 1024
START_TOKEN = "init"
DONE_TOKEN = "done"
FAIL_TOKEN = "fail"
def create_test_application(load_addr=0x08002000, size=64*1024):
'''
Creates a test application that simply returns to the bootloader.
Creates and ISR Table that point to a infinte loop, except reset
vector that points to two instructions
' mov sp, r3'
' bx lr'
'''
SP_ADDR = 0x20050000 # Address of stack for loaded application
fw_list = []
fw_list.append(pack("<I",SP_ADDR))
fw_list.append(pack("<I",load_addr+1025))
# build rest of ISR
for isr in xrange(2,256):
fw_list.append(pack("<I",load_addr+1029)) # 4 bytes after end of ISR
# Add Code
fw_list.append('\x9d\x46\x70\x47') # mov sp,r3; bx lr
fw_list.append('\xfe\xbf\xff\xf7') # b.w
# Fill rest with garbage
i = 0
    # TODO: when the bootloader verifies a checksum, change this filler to random data
while (len(fw_list)< size / 4):
fw_list.append(pack("<I",i))
i += 1
return ''.join(fw_list)
def tx(filename):
with open(filename,'rb') as fw_file:
fw_data = fw_file.read()
tx_data(fw_data)
def tx_data(fw_data):
client = socket.socket( socket.AF_INET, socket.SOCK_STREAM)
client.settimeout(50)
client.connect(( HOST, PORT ))
client.settimeout(None)
print "Sending Start Token:", START_TOKEN
client.send(START_TOKEN)
data = client.recv(len(START_TOKEN))
if data and data == START_TOKEN:
print "Got Start Token:", data
client.send(pack("<I", len(fw_data)))
print "Sent Length: ", len(fw_data)
print "Sending FW: ", len(fw_data)
for i in xrange(0,len(fw_data), 128):
client.send(fw_data[i:i+128])
data = client.recv(len(DONE_TOKEN))
if data and data == DONE_TOKEN:
print "Sent Successfully, Token: ", data
else:
print "Transmission Failed, Token: ", data
def rx():
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.bind(('', PORT))
s.listen(1)
conn, addr = s.accept()
print 'Connection address:', addr
data = conn.recv(5)
print data
if (data and data == START_TOKEN):
conn.send(START_TOKEN)
data = conn.recv(4)
size = unpack('<I', data)[0]
print "Size: ",size
received_count = 0
with open("outfile.bin",'wb') as outfile:
while (received_count < size):
request = size - received_count
if request > BUFF_SIZE:
request = BUFF_SIZE
data = conn.recv(request)
if (data):
received_count += len(data)
print "Received %i: %s..."% (len(data),
binascii.hexlify(data[0:10]))
outfile.write(data)
else:
print ("Failed")
conn.send(FAIL_TOKEN)
conn.close()
return
conn.send(DONE_TOKEN) # echo
print "Done"
else:
conn.send(FAIL_TOKEN)
conn.close()
if __name__ == "__main__":
from argparse import ArgumentParser
arg_parser = ArgumentParser()
arg_parser.add_argument('-f','--filename',metavar="FILE",
help='Firmware file to transmit (use ' + \
'arm-none-eabi-objcopy -O binary <file.elf> <outfile>)')
arg_parser.add_argument('--start_addr', default=0x08020000, type=int,
help='Start Address for generated test firmware')
arg_parser.add_argument('--size', default=16*1024, type=int,
help='Size of generated firmware to transmit')
args = arg_parser.parse_args()
#
if args.filename:
tx(args.filename)
else:
fw_data = create_test_application(args.start_addr, args.size)
with open('gen_fw.bin', 'wb') as outfile:
outfile.write(fw_data)
tx_data(fw_data)
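# Illustrative usage (the firmware path is an assumption, not part of the script):
#   python load_firmware.py -f firmware.bin    # send an existing image
#   python load_firmware.py --size 16384       # generate and send a 16 KiB test image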
| 29.35 | 80 | 0.578973 | 538 | 4,109 | 4.275093 | 0.336431 | 0.028696 | 0.031304 | 0.027826 | 0.133478 | 0.09913 | 0.056522 | 0.034783 | 0 | 0 | 0 | 0.034603 | 0.303724 | 4,109 | 139 | 81 | 29.561151 | 0.769311 | 0.048674 | 0 | 0.1 | 0 | 0 | 0.128149 | 0.00575 | 0 | 0 | 0.008215 | 0.007194 | 0 | 0 | null | null | 0 | 0.04 | null | null | 0.12 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
0e333752c015ad9cb399b65e23737e7c8b00cd94 | 27,010 | py | Python | fauxfactory/__init__.py | sthirugn/fauxfactory | b320f46d34124d0fbc0b93bc6c56ff8231c8dbc5 | [
"Apache-2.0"
] | null | null | null | fauxfactory/__init__.py | sthirugn/fauxfactory | b320f46d34124d0fbc0b93bc6c56ff8231c8dbc5 | [
"Apache-2.0"
] | null | null | null | fauxfactory/__init__.py | sthirugn/fauxfactory | b320f46d34124d0fbc0b93bc6c56ff8231c8dbc5 | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
"""Generate random data for your tests."""
__all__ = (
'gen_alpha',
'gen_alphanumeric',
'gen_boolean',
'gen_choice',
'gen_cjk',
'gen_cyrillic',
'gen_date',
'gen_datetime',
'gen_email',
'gen_html',
'gen_integer',
'gen_ipaddr',
'gen_iplum',
'gen_latin1',
'gen_mac',
'gen_netmask',
'gen_negative_integer',
'gen_numeric_string',
'gen_positive_integer',
'gen_string',
'gen_time',
'gen_url',
'gen_utf8',
'gen_uuid',
)
import datetime
import random
import re
import string
import sys
import unicodedata
import uuid
import warnings
from collections import Iterable
from fauxfactory.constants import (
HTML_TAGS, LOREM_IPSUM_TEXT,
MAX_YEARS, MIN_YEARS,
SCHEMES, SUBDOMAINS, TLDS, VALID_NETMASKS
)
from functools import wraps
# Private Functions -----------------------------------------------------------
def _make_unicode(data):
"""Convert ``data`` to a unicode string if running Python 2.
:param str data: A string to be type cast.
:return: ``data``, but as unicode. ``data`` is never modified: if a type
cast is necessary, a copy of ``data`` is returned.
"""
if sys.version_info[0] == 2:
return unicode(data) # flake8:noqa pylint:disable=undefined-variable
return data
def _is_positive_int(length):
"""Check that ``length`` argument is an integer greater than zero.
:param int length: The desired length of the string
:raises: ``ValueError`` if ``length`` is not an ``int`` or is less than 1.
:returns: Nothing.
:rtype: None
"""
if not isinstance(length, int) or length <= 0:
raise ValueError("{0} is an invalid 'length'.".format(length))
def _unicode_letters_generator():
"""Generates unicode characters in the letters category
:return: a generator which will generates all unicode letters available
"""
if sys.version_info[0] == 2:
chr_function = unichr # pylint:disable=undefined-variable
range_function = xrange # pylint:disable=undefined-variable
else:
chr_function = chr
range_function = range
# Use sys.maxunicode instead of 0x10FFFF to avoid the exception below, in a
# narrow Python build (before Python 3.3)
# ValueError: unichr() arg not in range(0x10000) (narrow Python build)
# For more information, read PEP 261.
for i in range_function(sys.maxunicode):
char = chr_function(i)
if unicodedata.category(char).startswith('L'):
yield char
UNICODE_LETTERS = [c for c in _unicode_letters_generator()]
# Public Functions ------------------------------------------------------------
def gen_string(str_type, length=None):
"""A simple wrapper that calls other string generation methods.
:param str str_type: The type of string which should be generated.
:param int length: The length of the generated string. Must be 1 or
greater.
:raises: ``ValueError`` if an invalid ``str_type`` is specified.
:returns: A string.
:rtype: str
Valid values for ``str_type`` are as follows:
* alpha
* alphanumeric
* cjk
* cyrillic
* html
* latin1
* numeric
* utf8
"""
str_types_functions = {
u'alpha': gen_alpha,
u'alphanumeric': gen_alphanumeric,
u'cjk': gen_cjk,
u'cyrillic': gen_cyrillic,
u'html': gen_html,
u'latin1': gen_latin1,
u'numeric': gen_numeric_string,
u'utf8': gen_utf8,
}
str_type_lower = str_type.lower() # do not modify user data
if str_type_lower not in str_types_functions.keys():
raise ValueError(
'{0} is not a supported string type. Valid string types are {1}.'
''.format(str_type_lower, u','.join(str_types_functions.keys()))
)
method = str_types_functions[str_type_lower]
if length is None:
return method()
return method(length)
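# Illustrative usage (not part of the original module):
#
#   gen_string('alphanumeric', 15)  # 15 random ASCII letters and digits
#   gen_string('utf8')              # 10 random UTF-8 letters (default length)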
def gen_alpha(length=10):
"""Returns a random string made up of alpha characters.
:param int length: Length for random data.
:returns: A random string made up of alpha characters.
:rtype: str
"""
# Validate length argument
_is_positive_int(length)
output_string = u''.join(
random.choice(string.ascii_letters) for i in range(length)
)
return _make_unicode(output_string)
def gen_alphanumeric(length=10):
"""Returns a random string made up of alpha and numeric characters.
:param int length: Length for random data.
:returns: A random string made up of alpha and numeric characters.
:rtype: str
"""
# Validate length argument
_is_positive_int(length)
output_string = u''.join(
random.choice(
string.ascii_letters + string.digits
) for i in range(length))
return _make_unicode(output_string)
def gen_boolean():
"""Returns a random Boolean value.
:returns: A random Boolean value.
:rtype: bool
"""
choices = (True, False)
return gen_choice(choices)
def gen_choice(choices):
"""Returns a random choice from the available choices.
:param list choices: List of choices from which select a random value.
:raises: ``ValueError`` if ``choices`` is ``None`` or not ``Iterable`` or
a ``dict``.
:returns: A random element from ``choices``.
"""
# Validation for 'choices'
if choices is None:
raise ValueError("Choices argument cannot be None.")
# We don't want a single dictionary value.
if not isinstance(choices, Iterable) or isinstance(choices, dict):
raise ValueError("Choices argument is not iterable.")
if len(choices) == 0:
raise ValueError("Choices argument cannot be empty.")
# If only 1 item is present, return it right away
if len(choices) == 1:
return choices[0]
return random.choice(choices)
def gen_cjk(length=10):
"""Returns a random string made up of CJK characters.
(Source: Wikipedia - CJK Unified Ideographs)
:param int length: Length for random data.
:returns: A random string made up of CJK characters.
:rtype: str
"""
# Validate length argument
_is_positive_int(length)
# Generate codepoints, then convert the codepoints to a string. The
# valid range of CJK codepoints is 0x4E00 - 0x9FCC, inclusive. Python 2
# and 3 support the `unichr` and `chr` functions, respectively.
codepoints = [random.randint(0x4E00, 0x9FCC) for _ in range(length)]
if sys.version_info[0] == 2:
# pylint:disable=undefined-variable
output = u''.join(unichr(codepoint) for codepoint in codepoints)
else:
output = u''.join(chr(codepoint) for codepoint in codepoints)
return _make_unicode(output)
def gen_cyrillic(length=10):
"""Returns a random string made up of Cyrillic characters.
:param int length: Length for random data.
:returns: A random string made up of Cyrillic characters.
:rtype: str
"""
# Validate length argument
_is_positive_int(length)
# Generate codepoints, then convert the codepoints to a string. The
# valid range of Cyrillic codepoints is 0x410 - 0x4ff, inclusive. Python 2
# and 3 support the `unichr` and `chr` functions, respectively.
codepoints = [random.randint(0x0400, 0x04FF) for _ in range(length)]
try:
# (undefined-variable) pylint:disable=E0602
output = u''.join(unichr(codepoint) for codepoint in codepoints)
except NameError:
output = u''.join(chr(codepoint) for codepoint in codepoints)
return _make_unicode(output)
def gen_date(min_date=None, max_date=None):
"""Returns a random date value
:param min_date: A valid ``datetime.date`` object.
:param max_date: A valid ``datetime.date`` object.
:raises: ``ValueError`` if arguments are not valid ``datetime.date``
objects.
:returns: Random ``datetime.date`` object.
"""
_min_value = (datetime.date.today() -
datetime.timedelta(365 * MIN_YEARS))
_max_value = (datetime.date.today() +
datetime.timedelta(365 * MAX_YEARS))
if min_date is None:
min_date = _min_value
if max_date is None:
max_date = _max_value
# Validation
    if not isinstance(min_date, datetime.date):
        raise ValueError("%s is not a valid datetime.date object" % min_date)
    if not isinstance(max_date, datetime.date):
        raise ValueError("%s is not a valid datetime.date object" % max_date)
# Check that max_date is not before min_date
assert min_date < max_date
# Pick a day between min and max dates
diff = max_date - min_date
days = random.randint(0, diff.days)
date = min_date + datetime.timedelta(days=days)
return date
def gen_datetime(min_date=None, max_date=None):
"""Returns a random datetime value
:param min_date: A valid ``datetime.datetime`` object.
:param max_date: A valid ``datetime.datetime`` object.
:raises: ``ValueError`` if arguments are not valid ``datetime.datetime``
objects.
:returns: Random ``datetime.datetime`` object.
"""
_min_value = (datetime.datetime.now() -
datetime.timedelta(365 * MIN_YEARS))
_max_value = (datetime.datetime.now() +
datetime.timedelta(365 * MAX_YEARS))
if min_date is None:
min_date = _min_value
if max_date is None:
max_date = _max_value
# Validation
    if not isinstance(min_date, datetime.datetime):
        raise ValueError("%s is not a valid datetime.datetime object" % min_date)
    if not isinstance(max_date, datetime.datetime):
        raise ValueError("%s is not a valid datetime.datetime object" % max_date)
# Check that max_date is not before min_date
assert min_date < max_date
# Pick a time between min and max dates
diff = max_date - min_date
seconds = random.randint(0, diff.days * 3600 * 24 + diff.seconds)
return min_date + datetime.timedelta(seconds=seconds)
def gen_email(name=None, domain=None, tlds=None):
"""Generates a random email address.
:param str name: Email name.
:param str domain: Domain name.
:param str tlds: Top Level Domain Server
:returns: An email address.
:rtype: str
"""
# Generate a new name if needed
if name is None:
name = gen_alpha(8)
# Obtain a random domain if needed
if domain is None:
domain = gen_choice(SUBDOMAINS)
# Obtain a random top level domain if needed
if tlds is None:
tlds = gen_choice(TLDS)
email = u"{0}@{1}.{2}".format(name, domain, tlds)
return _make_unicode(email)
def gen_integer(min_value=None, max_value=None):
"""Returns a random integer value based on the current platform.
:param int min_value: The minimum allowed value.
:param int max_value: The maximum allowed value.
:raises: ``ValueError`` if arguments are not integers or if they are
less or greater than the system's allowed range for integers.
:returns: Returns a random integer value.
:rtype: int
"""
# Platform-specific value range for integers
_min_value = - sys.maxsize - 1
_max_value = sys.maxsize
if min_value is None:
min_value = _min_value
if max_value is None:
max_value = _max_value
if sys.version_info[0] < 3:
integer_types = (int, long,) # pylint:disable=undefined-variable
else:
integer_types = (int,)
# Perform some validations
if not isinstance(min_value, integer_types) or min_value < _min_value:
raise ValueError("\'%s\' is not a valid minimum." % min_value)
if not isinstance(max_value, integer_types) or max_value > _max_value:
raise ValueError("\'%s\' is not a valid maximum." % max_value)
value = random.randint(min_value, max_value)
return value
def gen_iplum(words=None, paragraphs=None):
"""Returns a lorem ipsum string. If no arguments are passed, then
return the entire default lorem ipsum string.
:param int words: The number of words to return.
:param int paragraphs: The number of paragraphs to return.
:raises: ``ValueError`` if ``words`` is not a valid positive integer.
:returns: A ``lorem ipsum`` string containing either the number of ``words``
or ``paragraphs``, extending and wrapping around the text as needed to
make sure that it has the specified length.
:rtype: str
"""
# Check parameters
if words is None or words == 0:
words = len(LOREM_IPSUM_TEXT.split())
if paragraphs is None:
paragraphs = 1
if not isinstance(words, int) or words < 0:
raise ValueError(
"Cannot generate a string with negative number of words.")
_is_positive_int(paragraphs)
# Original Lorem Ipsum string
all_words = LOREM_IPSUM_TEXT.split()
# How many words do we need?
total_words_needed = words * paragraphs
quotient = int(total_words_needed / len(all_words))
modulus = total_words_needed % len(all_words)
# Pool of words to use
all_words = all_words * (quotient + modulus)
result = u""
start_pos = 0
for _ in range(0, paragraphs):
sentence = u" ".join(
all_words[start_pos:start_pos + words])
# Remove comma from the end, if it exists
if sentence.endswith(','):
sentence = sentence.rstrip(',')
# Remove period from the end, if it exists
if sentence.endswith('.'):
sentence = sentence.rstrip('.')
# Each sentence should be properly capitalized
cap_sentence = [
frag.capitalize() + u'.' for frag in sentence.split('. ')]
# Add newline at the end
result += " ".join(cap_sentence) + u"\n"
# Increment positional counter
start_pos += words
return _make_unicode(result.rstrip())
def gen_latin1(length=10):
"""Returns a random string made up of UTF-8 characters.
    (Source: Wikipedia - Latin-1 Supplement Unicode Block)
:param int length: Length for random data.
:returns: A random string made up of ``Latin1`` characters.
:rtype: str
"""
# Validate length argument
_is_positive_int(length)
range0 = range1 = range2 = []
range0 = ['00C0', '00D6']
range1 = ['00D8', '00F6']
range2 = ['00F8', '00FF']
output_array = []
for i in range(int(range0[0], 16), int(range0[1], 16)):
output_array.append(i)
for i in range(int(range1[0], 16), int(range1[1], 16)):
output_array.append(i)
for i in range(int(range2[0], 16), int(range2[1], 16)):
output_array.append(i)
if sys.version_info[0] == 2:
output_string = u''.join(
# pylint:disable=E0602
unichr(random.choice(output_array)) for _ in range(length)
)
else:
output_string = u''.join(
chr(random.choice(output_array)) for _ in range(length)
)
return _make_unicode(output_string)
def gen_negative_integer():
"""Returns a random negative integer based on the current platform.
:returns: Returns a random negative integer value.
:rtype: int
"""
max_value = 0
return gen_integer(max_value=max_value)
def gen_ipaddr(ip3=False, ipv6=False, prefix=()):
"""Generates a random IP address.
You can also specify an IP address prefix if you are interested in
local network address generation, etc.
:param bool ip3: Whether to generate a 3 or 4 group IP.
:param bool ipv6: Whether to generate IPv6 or IPv4
:param list prefix: A prefix to be used for an IP (e.g. [10, 0, 1]). It
must be an iterable with strings or integers. Can be left unspecified or
empty.
:returns: An IP address.
:rtype: str
:raises: ``ValueError`` if ``prefix`` would lead to no random fields at all.
This means the length that triggers the ``ValueError`` is 4 for regular
IPv4, 3 for IPv4 with ip3 and 8 for IPv6. It will be raised in any case
the prefix length reaches or exceeds those values.
"""
# Set the lengths of the randomly generated sections
if ipv6:
rng = 8
elif ip3:
rng = 3
else:
rng = 4
prefix = [str(field) for field in prefix]
# Prefix reduces number of random fields generated, so subtract the length
# of it from the rng to keep the IP address have correct number of fields
rng -= len(prefix)
if rng == 0:
raise ValueError(
"Prefix {} would lead to no randomness at all".format(
repr(prefix)))
elif rng < 0:
raise ValueError(
"Prefix {} is too long for this configuration".format(
repr(prefix)))
if ipv6:
# StackOverflow.com questions: generate-random-ipv6-address
random_fields = [
'{0:x}'.format(random.randint(0, 2**16 - 1)) for _ in range(rng)]
ipaddr = u':'.join(prefix + random_fields)
else:
random_fields = [str(random.randrange(0, 255, 1)) for _ in range(rng)]
ipaddr = u".".join(prefix + random_fields)
if ip3:
ipaddr = ipaddr + u".0"
return _make_unicode(ipaddr)
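# Illustrative usage (not part of the original module; the sample outputs are
# just examples of the random formats produced):
#
#   gen_ipaddr()                        # e.g. u'23.101.7.214'
#   gen_ipaddr(ip3=True, prefix=[10])   # e.g. u'10.184.52.0'
#   gen_ipaddr(ipv6=True)               # e.g. u'3d1f:0:9ab2:7:c3:55e0:1:8f2a'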
def gen_mac(delimiter=':', multicast=None, locally=None):
"""Generates a random MAC address.
For more information about how unicast or multicast and globally unique and
locally administered MAC addresses are generated check this link
https://en.wikipedia.org/wiki/MAC_address.
    :param str delimiter: Valid MAC delimiter (e.g. ':', '-').
:param bool multicast: Indicates if the generated MAC address should be
unicast or multicast. If no value is provided a random one will be
chosen.
:param bool locally: Indicates if the generated MAC address should be
globally unique or locally administered. If no value is provided a
random one will be chosen.
:returns: A random MAC address.
:rtype: str
"""
if delimiter not in [':', '-']:
raise ValueError('Delimiter is not a valid option: %s' % delimiter)
if multicast is None:
multicast = bool(random.randint(0, 1))
if locally is None:
locally = bool(random.randint(0, 1))
first_octet = random.randint(0, 255)
if multicast:
# Ensure that the first least significant bit is 1
first_octet |= 0b00000001
else:
# Ensure that the first least significant bit is 0
first_octet &= 0b11111110
if locally:
# Ensure that the second least significant bit is 1
first_octet |= 0b00000010
else:
# Ensure that the second least significant bit is 0
first_octet &= 0b11111101
octets = [first_octet]
octets.extend([
random.randint(0, 255) for _ in range(5)
])
mac = delimiter.join(['{0:02x}'.format(octet) for octet in octets])
return _make_unicode(mac)
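# Illustrative usage (not part of the original module):
#
#   gen_mac()                                # e.g. u'a6:9e:08:57:4c:31'
#   gen_mac(delimiter='-', multicast=True)   # first octet has its least significant bit set
#   gen_mac(locally=True)                    # locally administered address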
def gen_netmask(min_cidr=1, max_cidr=31):
"""Generates a random valid netmask.
For more info: http://www.iplocation.net/tools/netmask.php
:param int min_cidr: Inferior CIDR limit
:param int max_cidr: Superior CIDR limit
:returns: The netmask is chosen from
:data:`fauxfactory.constants.VALID_NETMASKS` respecting the CIDR range
:rtype: str
:raises: ``ValueError`` if ``min_cidr`` or ``max_cidr`` have an invalid
value. For example, ``max_cidr`` cannot be 33.
"""
if min_cidr < 0:
raise ValueError(
'min_cidr must be 0 or greater, but is {0}'.format(min_cidr)
)
if max_cidr >= len(VALID_NETMASKS):
raise ValueError(
'max_cidr must be less than {0}, but is {1}'
.format(len(VALID_NETMASKS), max_cidr)
)
return VALID_NETMASKS[random.randint(min_cidr, max_cidr)]
def gen_numeric_string(length=10):
"""Returns a random string made up of numbers.
:param int length: Length for random data.
:returns: A random string made up of numbers.
:rtype: str
"""
# Validate length argument
_is_positive_int(length)
output_string = u''.join(
random.choice(string.digits) for i in range(length)
)
return _make_unicode(output_string)
def gen_positive_integer():
"""Returns a random positive integer based on the current platform.
:returns: A random positive integer value.
:rtype: int
"""
min_value = 0
return gen_integer(min_value=min_value)
def gen_time():
"""Generates a random time.
:returns: A random ``datetime.time`` object.
"""
return datetime.time(
random.randint(0, 23),
random.randint(0, 59),
random.randint(0, 59),
random.randint(0, 999999),
)
def gen_url(scheme=None, subdomain=None, tlds=None):
"""Generates a random URL address
:param str scheme: Either http, https or ftp.
    :param str subdomain: A valid subdomain.
:param str tlds: A qualified top level domain name (e.g. 'com', 'net')
:raises: ``ValueError`` if arguments are not valid.
:returns: A random URL address.
:rtype: str
"""
# Regex for subdomain names
subdomainator = re.compile(r"^[a-zA-Z0-9][-\w.~]*$")
# Regex for URL scheme
schemenator = re.compile(r"^(https?|ftp)$")
# Regex for TLDS
tldsnator = re.compile(r"^[a-zA-Z]{1,3}$")
if scheme:
if schemenator.match(scheme) is None:
raise ValueError("Protocol {0} is not valid.".format(scheme))
else:
scheme = gen_choice(SCHEMES)
if subdomain:
if subdomainator.match(subdomain) is None:
raise ValueError("Subdomain {0} is invalid.".format(subdomain))
else:
subdomain = gen_choice(SUBDOMAINS)
if tlds:
if tldsnator.match(tlds) is None:
raise ValueError("TLDS name {0} is invalid.".format(tlds))
else:
tlds = gen_choice(TLDS)
url = u"{0}://{1}.{2}".format(scheme, subdomain, tlds)
return _make_unicode(url)
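# Illustrative usage (not part of the original module):
#
#   gen_url()                                               # random scheme/subdomain/TLD
#   gen_url(scheme='ftp', subdomain='mirror', tlds='org')   # u'ftp://mirror.org'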
def gen_utf8(length=10):
"""Returns a random string made up of UTF-8 letters characters, as per
`RFC 3629`_.
:param int length: Length for random data.
:returns: A random string made up of ``UTF-8`` letters characters.
:rtype: str
.. _`RFC 3629`: http://www.rfc-editor.org/rfc/rfc3629.txt
"""
# Validate length argument
_is_positive_int(length)
return u''.join([random.choice(UNICODE_LETTERS) for _ in range(length)])
def gen_uuid():
"""Generates a UUID string (universally unique identifiers).
:returns: Returns a string representation for a UUID.
:rtype: str
"""
output_uuid = _make_unicode(str(uuid.uuid4()))
return output_uuid
def gen_html(length=10):
"""Returns a random string made up of html characters.
:param int length: Length for random data.
:returns: A random string made up of html characters.
:rtype: str
"""
# Validate length argument
_is_positive_int(length)
html_tag = random.choice(HTML_TAGS)
output_string = u'<{0}>{1}</{2}>'.format(
html_tag, gen_string("alpha", length), html_tag)
return _make_unicode(output_string)
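# Illustrative usage sketch (editor's addition, not part of the original module): gen_html wraps a
# random alpha string of the requested length in a tag chosen from HTML_TAGS, while gen_uuid
# returns a canonical UUID4 string. The helper name is hypothetical.
def _example_gen_html_and_uuid_usage():
    """Return a tiny HTML snippet and a UUID string (illustrative only)."""
    return gen_html(length=5), gen_uuid()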
# Backward Compatibility ------------------------------------------------------
# Code borrowed from http://code.activestate.com/recipes/391367-deprecated/
def deprecated(func):
"""A decorator used to mark functions as deprecated.
Emit a warning when the decorated function is called.
"""
@wraps(func)
def wrapper(*args, **kwargs):
"""Emit a warning, then call ``func``."""
old_name = func.__name__
if old_name == 'codify':
new_name = '_make_unicode'
else:
new_name = old_name.replace('generate', 'gen')
warnings.warn(
'{0} is deprecated! Please use {1} instead.'
.format(old_name, new_name),
category=Warning
)
return func(*args, **kwargs)
return wrapper
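# Illustrative sketch (editor's addition, not part of the original module): any callable wrapped
# with @deprecated keeps its behaviour but emits a warning that maps its 'generate_*' name to the
# matching 'gen_*' replacement. The alias below is hypothetical and exists only to show the pattern.
@deprecated
def generate_example_alpha(length=10):
    # pylint:disable=missing-docstring
    return gen_alpha(length)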
@deprecated
def codify(data):
# pylint:disable=missing-docstring
return _make_unicode(data)
class FauxFactory(object):
# This issue is no longer relevant, as the class has been turned into a set
# of functions.
# pylint:disable=too-many-public-methods
#
# This code is not imported when `from fauxfactory import *` is called, nor
# does this code show up in Sphinx's output. See `__all__`.
# pylint:disable=missing-docstring
@classmethod
@deprecated
def generate_string(cls, str_type, length):
return gen_string(str_type, length)
@classmethod
@deprecated
def generate_alpha(cls, length=10):
return gen_alpha(length)
@classmethod
@deprecated
def generate_alphanumeric(cls, length=10):
return gen_alphanumeric(length)
@classmethod
@deprecated
def generate_boolean(cls):
return gen_boolean()
@classmethod
@deprecated
def generate_choice(cls, choices):
return gen_choice(choices)
@classmethod
@deprecated
def generate_cjk(cls, length=10):
return gen_cjk(length)
@classmethod
@deprecated
def generate_date(cls, min_date=None, max_date=None):
return gen_date(min_date, max_date)
@classmethod
@deprecated
def generate_datetime(cls, min_date=None, max_date=None):
return gen_datetime(min_date, max_date)
@classmethod
@deprecated
def generate_email(cls, name=None, domain=None, tlds=None):
return gen_email(name, domain, tlds)
@classmethod
@deprecated
def generate_integer(cls, min_value=None, max_value=None):
return gen_integer(min_value, max_value)
@classmethod
@deprecated
def generate_iplum(cls, words=None, paragraphs=None):
return gen_iplum(words, paragraphs)
@classmethod
@deprecated
def generate_latin1(cls, length=10):
return gen_latin1(length)
@classmethod
@deprecated
def generate_negative_integer(cls):
return gen_negative_integer()
@classmethod
@deprecated
def generate_ipaddr(cls, ip3=False, ipv6=False):
return gen_ipaddr(ip3, ipv6)
@classmethod
@deprecated
def generate_mac(cls, delimiter=":"):
return gen_mac(delimiter)
@classmethod
@deprecated
def generate_numeric_string(cls, length=10):
return gen_numeric_string(length)
@classmethod
@deprecated
def generate_positive_integer(cls):
return gen_positive_integer()
@classmethod
@deprecated
def generate_time(cls):
return gen_time()
@classmethod
@deprecated
def generate_url(cls, scheme=None, subdomain=None, tlds=None):
return gen_url(scheme, subdomain, tlds)
@classmethod
@deprecated
def generate_utf8(cls, length=10):
return gen_utf8(length)
@classmethod
@deprecated
def generate_uuid(cls):
return gen_uuid()
@classmethod
@deprecated
def generate_html(cls, length=10):
return gen_html(length)
| 28.703507 | 80 | 0.646501 | 3,555 | 27,010 | 4.787342 | 0.148242 | 0.017275 | 0.025501 | 0.041366 | 0.376873 | 0.296962 | 0.269111 | 0.241201 | 0.20812 | 0.177096 | 0 | 0.017959 | 0.249611 | 27,010 | 940 | 81 | 28.734043 | 0.821739 | 0.375046 | 0 | 0.273731 | 1 | 0 | 0.079902 | 0.001316 | 0 | 0 | 0.001504 | 0 | 0.004415 | 1 | 0.11479 | false | 0 | 0.024283 | 0.050773 | 0.258278 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
0e38d2aaf004540b815fbb6471e43af110b9c1be | 562 | py | Python | AppPkg/Applications/Python/Python-2.7.2/Lib/test/test_ttk_textonly.py | CEOALT1/RefindPlusUDK | 116b957ad735f96fbb6d80a0ba582046960ba164 | [
"BSD-2-Clause"
] | 2,757 | 2018-04-28T21:41:36.000Z | 2022-03-29T06:33:36.000Z | AppPkg/Applications/Python/Python-2.7.2/Lib/test/test_ttk_textonly.py | CEOALT1/RefindPlusUDK | 116b957ad735f96fbb6d80a0ba582046960ba164 | [
"BSD-2-Clause"
] | 20 | 2019-07-23T15:29:32.000Z | 2022-01-21T12:53:04.000Z | AppPkg/Applications/Python/Python-2.7.2/Lib/test/test_ttk_textonly.py | CEOALT1/RefindPlusUDK | 116b957ad735f96fbb6d80a0ba582046960ba164 | [
"BSD-2-Clause"
] | 449 | 2018-05-09T05:54:05.000Z | 2022-03-30T14:54:18.000Z | import os
from test import test_support
# Skip this test if _tkinter does not exist.
test_support.import_module('_tkinter')
this_dir = os.path.dirname(os.path.abspath(__file__))
lib_tk_test = os.path.abspath(os.path.join(this_dir, '..', 'lib-tk', 'test'))
with test_support.DirsOnSysPath(lib_tk_test):
import runtktests
def test_main():
with test_support.DirsOnSysPath(lib_tk_test):
test_support.run_unittest(
*runtktests.get_tests(gui=False, packages=['test_ttk']))
if __name__ == '__main__':
test_main()
| 28.1 | 78 | 0.704626 | 79 | 562 | 4.594937 | 0.443038 | 0.151515 | 0.099174 | 0.15427 | 0.203857 | 0.203857 | 0.203857 | 0 | 0 | 0 | 0 | 0 | 0.172598 | 562 | 19 | 79 | 29.578947 | 0.780645 | 0.074733 | 0 | 0.153846 | 0 | 0 | 0.072144 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.076923 | false | 0 | 0.307692 | 0 | 0.384615 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 1 |
0e3c6ee65a6dc7c61f1f0de5840c630be1c19d33 | 57,419 | py | Python | ovs/extensions/db/arakoon/arakoon/ArakoonManagement.py | mflu/openvstorage_centos | 280a98d3e5d212d58297e0ffcecd325dfecef0f8 | [
"Apache-2.0"
] | 1 | 2015-08-29T16:36:40.000Z | 2015-08-29T16:36:40.000Z | ovs/extensions/db/arakoon/arakoon/ArakoonManagement.py | rootfs-analytics/openvstorage | 6184822340faea1d2927643330a7aaa781d92d36 | [
"Apache-2.0"
] | null | null | null | ovs/extensions/db/arakoon/arakoon/ArakoonManagement.py | rootfs-analytics/openvstorage | 6184822340faea1d2927643330a7aaa781d92d36 | [
"Apache-2.0"
] | null | null | null | """
Copyright (2010-2014) INCUBAID BVBA
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from Compat import X
import os
import ArakoonRemoteControl
import os.path
import itertools
import subprocess
import time
import types
import signal
import string
import logging
import Arakoon
from ArakoonExceptions import ArakoonNodeNotLocal
def which_arakoon():
path = '/'.join([X.appDir,"arakoon/bin/arakoon"])
if X.fileExists(path):
return path
else:
return "arakoon"
class ArakoonManagement:
def getCluster(self, clusterName):
"""
@type clusterName: string
@return a helper to config that cluster
"""
return ArakoonCluster(clusterName)
def listClusters(self):
"""
Returns a list with the existing clusters.
"""
fn = '/'.join ([X.cfgDir, "arakoonclusters"])
config = X.getConfig(fn)
return config.sections()
def start(self):
"""
Starts all clusters.
"""
[clus.start() for clus in [self.getCluster(cluster) for cluster in self.listClusters()]]
def stop(self):
"""
Stops all clusters.
"""
[clus.stop() for clus in [self.getCluster(cluster) for cluster in self.listClusters()]]
def restart(self):
"""
Restarts all clusters.
"""
self.stop()
self.start()
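# Illustrative usage sketch (editor's addition, not part of the original module): the manager is a
# thin factory around ArakoonCluster objects. The cluster name 'demo' is an assumption.
def _example_manage_clusters():
    """List configured clusters and restart one of them (illustrative only)."""
    mgmt = ArakoonManagement()
    names = mgmt.listClusters()
    if 'demo' in names:
        mgmt.getCluster('demo').restart()
    return names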
class ArakoonCluster:
def __init__(self, clusterName):
self.__validateName(clusterName)
"""
There's a difference between the clusterId and the cluster's name.
The name is used to construct the path to find the config file.
The id is what's inside the cfg file and
what you need to provide to a client that wants to talk to the cluster.
"""
self._clusterName = clusterName
self._binary = which_arakoon()
self._arakoonDir = '/'.join([X.cfgDir, "arakoon"])
self._clustersFNH = '/'.join([X.cfgDir, 'arakoonclusters'])
clusterConfig = X.getConfig(self._clustersFNH)
if not clusterConfig.has_section(self._clusterName):
clusterPath = '/'.join([X.cfgDir,"qconfig", "arakoon", clusterName])
clusterConfig.add_section(clusterName)
clusterConfig.set(clusterName, "path", clusterPath)
if not X.fileExists(self._arakoonDir):
X.createDir(self._arakoonDir)
if not X.fileExists(clusterPath):
X.createDir(clusterPath)
X.writeConfig(clusterConfig, self._clustersFNH)
self._clusterPath = clusterConfig.get(clusterName, "path" )
def _servernodes(self):
return '%s_local_nodes' % self._clusterName
def __repr__(self):
return "<ArakoonCluster:%s>" % self._clusterName
def _getConfigFileName(self):
p = X.getConfig(self._clustersFNH)
if not p.has_section(self._clusterName):
raise Exception("%s not present in %s" % (self._clusterName, self._clustersFNH))
cfgDir = p.get( self._clusterName, "path", False)
cfgFile = '/'.join([cfgDir, self._clusterName])
return cfgFile
def _saveConfig(self,config):
fn = self._getConfigFileName()
X.writeConfig(config,fn)
def _getConfigFile(self):
h = self._getConfigFileName()
return X.getConfig(h)
def _getClusterId(self):
clusterId = self._clusterName
try:
config = self._getConfigFile()
clusterId = config.get("global", "cluster_id")
except:
logging.info("setting cluster_id to %s", clusterId)
config.set("global","cluster_id",clusterId)
return clusterId
def addBatchedTransactionConfig(self,
name,
max_entries = None,
max_size = None):
"""
Add a batched transaction config section to the configuration of the supplied cluster
@param name the name of the batched transaction config section
@param max_entries the maximum number of entries before the batched store will persist the changes to tokyo cabinet; default is None, which results in 200.
@param max_size the maximum combined size of the entries (in bytes) before the batched store will persist the changes to tokyo cabinet; default is None, which results in 100_000.
"""
config = self._getConfigFile()
config.add_section(name)
if max_entries is not None:
config.set(name, "max_entries", max_entries)
if max_size is not None:
config.set(name, "max_size", max_size)
self._saveConfig(config)
def addLogConfig(self,
name,
client_protocol = None,
paxos = None,
tcp_messaging = None):
"""
Add a log config section to the configuration of the supplied cluster
@param name the name of the log config section
@param client_protocol the log level for the client_protocol log section
@param paxos the log level for the paxos log section
@param tcp_messaging the log level for the tcp_messaging log section
"""
config = self._getConfigFile()
config.add_section(name)
if client_protocol is not None:
config.set(name, "client_protocol", client_protocol)
if paxos is not None:
config.set(name, "paxos", paxos)
if tcp_messaging is not None:
config.set(name, "tcp_messaging", tcp_messaging)
self._saveConfig(config)
def addNode(self,
name,
ip = "127.0.0.1",
clientPort = 7080,
messagingPort = 10000,
logLevel = "info",
logDir = None,
home = None,
tlogDir = None,
wrapper = None,
isLearner = False,
targets = None,
isLocal = False,
logConfig = None,
batchedTransactionConfig = None,
tlfDir = None,
headDir = None,
isWitness = False,
collapseSlowdown = None):
"""
Add a node to the configuration of the supplied cluster
@param name : the name of the node, should be unique across the environment
@param ip : the ip(s) this node should be contacted on (string or string list)
@param clientPort : the port the clients should use to contact this node
@param messagingPort : the port the other nodes should use to contact this node
@param logLevel : the loglevel (debug info notice warning error fatal)
@param logDir : the directory used for logging
@param home : the directory used for the nodes data
@param tlogDir : the directory used for tlogs (if none, home will be used)
@param wrapper : wrapper line for the executable (for example 'softlimit -o 8192')
@param isLearner : whether this node is a learner node or not
@param targets : for a learner node the targets (string list) it learns from
@param isLocal : whether this node is a local node and should be added to the local nodes list
@param logConfig : specifies the log config to be used for this node
@param batchedTransactionConfig : specifies the batched transaction config to be used for this node
@param tlfDir : the directory used for tlfs (if none, tlogDir will be used)
@param headDir : the directory used for head.db (if none, tlfDir will be used)
@param isWitness : whether this node is a witness or not
@param collapseSlowdown : the factor with which collapsing should be slowed down
"""
self.__validateName(name)
self.__validateLogLevel(logLevel)
if isinstance(ip, basestring):
ip = [ip]
config = self._getConfigFile()
nodes = self.__getNodes(config)
if name in nodes:
raise Exception("node %s already present" % name )
if not isLearner:
nodes.append(name)
config.add_section(name)
config.set(name, "ip", ', '.join(ip))
self.__validateInt("clientPort", clientPort)
config.set(name, "client_port", clientPort)
self.__validateInt("messagingPort", messagingPort)
config.set(name, "messaging_port", messagingPort)
config.set(name, "log_level", logLevel)
if logConfig is not None:
config.set(name, "log_config", logConfig)
if batchedTransactionConfig is not None:
config.set(name, "batched_transaction_config", batchedTransactionConfig)
if wrapper is not None:
config.set(name, "wrapper", wrapper)
if logDir is None:
logDir = '/'.join([X.logDir, self._clusterName, name])
config.set(name, "log_dir", logDir)
if home is None:
home = '/'.join([X.varDir, "db", self._clusterName, name])
config.set(name, "home", home)
if tlogDir:
config.set(name,"tlog_dir", tlogDir)
if tlfDir:
config.set(name,"tlf_dir", tlfDir)
if headDir:
config.set(name,"head_dir", headDir)
if isLearner:
config.set(name, "learner", "true")
if targets is None:
targets = self.listNodes()
config.set(name, "targets", string.join(targets,","))
if isWitness:
config.set(name, "witness", "true")
if collapseSlowdown:
config.set(name, "collapse_slowdown", collapseSlowdown)
if not config.has_section("global") :
config.add_section("global")
config.set("global", "cluster_id", self._clusterName)
config.set("global","cluster", ",".join(nodes))
self._saveConfig(config)
if isLocal:
self.addLocalNode(name)
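# Illustrative usage sketch (editor's addition, not part of the original module): the typical way a
# single local node is declared and given its on-disk directories. The cluster name, node name and
# ports are assumptions.
def _example_add_single_node():
    """Configure one local node in a 'demo' cluster (illustrative only)."""
    cluster = ArakoonCluster('demo')
    cluster.addNode('demo_0',
                    ip='127.0.0.1',
                    clientPort=7080,
                    messagingPort=10000,
                    isLocal=True)
    cluster.createDirs('demo_0')
    return cluster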
def removeNode(self, name):
"""
Remove a node from the configuration of the supplied cluster
@param name the name of the node as configured in the config file
"""
self.__validateName(name)
config = self._getConfigFile()
nodes = self.__getNodes(config)
if name in nodes:
self.removeLocalNode(name)
config.remove_section(name)
nodes.remove(name)
config.set("global","cluster", ",".join(nodes))
self._saveConfig(config)
return
raise Exception("No node with name %s" % name)
def setMasterLease(self, duration=None):
"""
Set the master lease duration in the supplied cluster
@param duration The duration of the master lease in seconds
"""
section = "global"
key = "lease_period"
config = self._getConfigFile()
if not config.has_section( section ):
raise Exception("Section '%s' not found in config" % section )
if duration:
if not isinstance( duration, int ) :
raise AttributeError( "Invalid value for lease duration (expected int, got: '%s')" % duration)
config.set(section, key, duration)
else:
config.remove_option(section, key)
self._saveConfig(config)
def forceMaster(self, name=None, preferred = False):
"""
Force a master in the supplied cluster
@param name the name of the master to force. If None there is no longer a forced master
@param preferred: Set given node to be preferred master
@type preferred: `bool`
"""
config = self._getConfigFile()
g = 'global'
pm = 'preferred_master'
m = 'master'
if name:
nodes = self.__getNodes(config)
self.__validateName(name)
if not name in nodes:
raise Exception("No node with name %s configured in cluster %s" % (name,self._clusterName) )
config.set(g,m,name)
if preferred:
config.set(g,pm,'true')
else:
config.remove_option(g, m)
if config.has_option(g, pm):
config.remove_option(g, pm)
self._saveConfig(config)
def preferredMasters(self, nodes):
'''
Set a list of preferred master nodes
When the given list is empty, the configuration item is unset.
Since this option is incompatible with a fixed master, this method will
- raise an exception if 'master' is set and 'preferred_master' is false
(or not set, which defaults to false)
- unset 'master' and 'preferred_master' if both are set and
'preferred_master' is true
@param nodes: Names of preferred master nodes
@type nodes: `list` of `str`
'''
if isinstance(nodes, basestring):
raise TypeError('Expected list of strings, not string')
config = self._getConfigFile()
if not nodes:
if config.has_option('global', 'preferred_masters'):
config.remove_option('global', 'preferred_masters')
self._saveConfig(config)
return
section = 'global'
master = 'master'
preferred_master = 'preferred_master'
# Check existing master/preferred_master configuration. Bail out if
# incompatible.
if config.has_option(section, master):
preferred_master_setting = \
config.get(section, preferred_master).lower() \
if config.has_option(section, preferred_master) \
else 'false'
if preferred_master_setting != 'true':
raise Exception(
'Can\'t set both \'master\' and \'preferred_masters\'')
# If reached, 'master' was set and 'preferred_master' was true.
# We're free to remove both, since they're replaced by the
# 'preferred_masters' setting.
config.remove_option(section, master)
if config.has_option(section, preferred_master):
config.remove_option(section, preferred_master)
# Set up preferred_masters
preferred_masters = 'preferred_masters'
config.set(section, preferred_masters, ', '.join(nodes))
self._saveConfig(config)
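# Illustrative usage sketch (editor's addition, not part of the original module): forceMaster and
# preferredMasters are mutually exclusive ways of steering master election, so any fixed master is
# cleared before the preferred list is set. Node names are assumptions.
def _example_master_preferences(cluster):
    """Prefer two nodes for mastership instead of pinning one (illustrative only)."""
    cluster.forceMaster(None)
    cluster.preferredMasters(['demo_0', 'demo_1'])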
def setLogConfig(self, logConfig, nodes=None):
if nodes is None:
nodes = self.listNodes()
else:
for n in nodes :
self.__validateName( n )
config = self._getConfigFile()
for n in nodes:
config.set(n, "log_config", logConfig)
self._saveConfig(config)
def setLogLevel(self, level, nodes=None):
if nodes is None:
nodes = self.listNodes()
else:
for n in nodes :
self.__validateName( n )
self.__validateLogLevel( level )
config = self._getConfigFile()
for n in nodes:
config.set( n, "log_level", level )
self._saveConfig(config)
def setCollapseSlowdown(self, collapseSlowdown, nodes=None):
if nodes is None:
nodes = self.listNodes()
else:
for n in nodes :
self.__validateName( n )
config = self._getConfigFile()
for n in nodes:
if collapseSlowdown:
config.set(n, "collapse_slowdown", collapseSlowdown)
else:
config.remove_option(n, "collapse_slowdown")
self._saveConfig(config)
def _setTlogCompression(self,nodes, compressor):
if nodes is None:
nodes = self.listNodes()
else:
for n in nodes:
self.__validateName(n)
config = self._getConfigFile()
for n in nodes:
config.remove_option(n, "disable_tlog_compression")
config.set(n, "tlog_compression", compressor)
self._saveConfig(config)
def enableTlogCompression(self, nodes=None, compressor='bz2'):
"""
Enables tlog compression for the given nodes (this is enabled by default)
@param nodes List of node names
@param compressor one of 'bz2', 'snappy', 'none'
"""
self._setTlogCompression(nodes,compressor)
def disableTlogCompression(self, nodes=None):
"""
Disables tlog compression for the given nodes
@param nodes List of node names
"""
self._setTlogCompression(nodes,"none")
def _changeFsync(self, nodes, value):
if nodes is None:
nodes = self.listNodes()
else:
for n in nodes:
self.__validateName(n)
config = self._getConfigFile()
for node in nodes:
config.set(node, 'fsync', value)
self._saveConfig(config)
def enableFsync(self, nodes=None):
'''Enable fsync'ing of tlogs after every operation'''
self._changeFsync(nodes, 'true')
def disableFsync(self, nodes=None):
'''Disable fsync'ing of tlogs after every operation'''
self._changeFsync(nodes, 'false')
def setTLSCACertificate(self, ca_cert_path):
'''Configure path to TLS CA certificate
This corresponds to the `tls_ca_cert` entry in the `global` section.
Set to `None` to unset/disable.
The path should point to a valid file, otherwise a `ValueError` will be
raised.
:param ca_cert_path: Path to CA certificate
:type ca_cert_path: `str`
'''
global_ = 'global'
tls_ca_cert = 'tls_ca_cert'
config = self._getConfigFile()
if ca_cert_path is None:
if config.has_option(global_, tls_ca_cert):
config.remove_option(global_, tls_ca_cert)
self._saveConfig(config)
return
if not os.path.isfile(ca_cert_path):
raise ValueError(
'Invalid ca_cert_path \'%s\': no such file' % ca_cert_path)
config.set(global_, tls_ca_cert, ca_cert_path)
self._saveConfig(config)
def enableTLSService(self):
'''Enable TLS on the client service
This corresponds to the `tls_service` entry in the `global` section.
Note `tls_ca_cert` should be configured before calling this method,
otherwise an `Exception` will be raised.
'''
global_ = 'global'
tls_ca_cert = 'tls_ca_cert'
tls_service = 'tls_service'
config = self._getConfigFile()
if not config.has_option(global_, tls_ca_cert):
raise Exception('No tls_ca_cert configured')
if config.has_option(global_, tls_service):
config.remove_option(global_, tls_service)
config.set(global_, tls_service, 'true')
self._saveConfig(config)
def disableTLSService(self):
'''Disable TLS on the client service
This corresponds to the `tls_service` entry in the `global` section.
'''
global_ = 'global'
tls_service = 'tls_service'
config = self._getConfigFile()
if config.has_option(global_, tls_service):
config.remove_option(global_, tls_service)
self._saveConfig(config)
def enableTLSServiceValidatePeer(self):
'''Enable TLS peer verification on the client service
This corresponds to the `tls_service_validate_peer` entry in the
`global` section.
Note `tls_service` should be enabled before calling this method,
otherwise an `Exception` is raised.
'''
global_ = 'global'
tls_service = 'tls_service'
tls_service_validate_peer = 'tls_service_validate_peer'
config = self._getConfigFile()
if (not config.has_option(global_, tls_service)) \
or (config.get(global_, tls_service).lower() != 'true'):
raise Exception('tls_service not enabled')
if config.has_option(global_, tls_service_validate_peer):
config.remove_option(global_, tls_service_validate_peer)
config.set(global_, tls_service_validate_peer, 'true')
self._saveConfig(config)
def disableTLSServiceValidatePeer(self):
'''Disable TLS peer verification on the client service
This corresponds to the `tls_service_validate_peer` entry in the
`global` section.
'''
global_ = 'global'
tls_service_validate_peer = 'tls_service_validate_peer'
config = self._getConfigFile()
if config.has_option(global_, tls_service_validate_peer):
config.remove_option(global_, tls_service_validate_peer)
self._saveConfig(config)
def setTLSCertificate(self, node, cert_path, key_path):
'''Set the TLS certificate & key paths for a node
This corresponds to the `tls_cert` and `tls_key` entries in a node
section.
Set both `cert_path` and `key_path` to `None` to unset the setting and
disable TLS usage.
Both paths should point to valid files, otherwise a `ValueError` is
raised.
`tls_ca_cert` should be configured before calling this method,
otherwise an `Exception` is raised.
:param node: Node name
:type node: `str`
:param cert_path: Path to node certificate file
:type cert_path: `str`
:param key_path: Path to node key file
:type key_path: `str`
'''
self.__validateName(node)
if cert_path is None and key_path is not None:
raise ValueError('cert_path is None but key_path isn\'t')
if cert_path is not None and key_path is None:
raise ValueError('key_path is None but cert_path isn\'t')
global_ = 'global'
tls_ca_cert = 'tls_ca_cert'
tls_cert = 'tls_cert'
tls_key = 'tls_key'
config = self._getConfigFile()
if cert_path is None and key_path is None:
if config.has_option(node, tls_cert):
config.remove_option(node, tls_cert)
if config.has_option(node, tls_key):
config.remove_option(node, tls_key)
self._saveConfig(config)
return
if not config.has_option(global_, tls_ca_cert):
raise Exception('No tls_ca_cert configured')
if not os.path.isfile(cert_path):
raise ValueError(
'Invalid cert_path \'%s\': no such file' % cert_path)
if not os.path.isfile(key_path):
raise ValueError(
'Invalid key_path \'%s\': no such file' % key_path)
if config.has_option(node, tls_cert):
config.remove_option(node, tls_cert)
if config.has_option(node, tls_key):
config.remove_option(node, tls_key)
config.set(node, tls_cert, cert_path)
config.set(node, tls_key, key_path)
self._saveConfig(config)
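# Illustrative usage sketch (editor's addition, not part of the original module): the TLS helpers
# must be called in this order because enableTLSService() and setTLSCertificate() refuse to run
# before a CA certificate is configured. All paths and the node name are assumptions and must
# point at existing files.
def _example_enable_tls(cluster):
    """Switch the client service of one node to TLS (illustrative only)."""
    cluster.setTLSCACertificate('/etc/arakoon/cacert.pem')
    cluster.enableTLSService()
    cluster.setTLSCertificate('demo_0',
                              '/etc/arakoon/demo_0.pem',
                              '/etc/arakoon/demo_0.key')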
def setReadOnly(self, flag = True):
config = self._getConfigFile()
if flag and len(self.listNodes()) != 1:
raise Exception("only for clusters of size 1")
g = "global"
p = "readonly"
if config.has_option(g,p):
config.remove_option(g, p)
if flag :
config.set(g, p, "true")
self._saveConfig(config)
def setQuorum(self, quorum=None):
"""
Set the quorum for the supplied cluster
The quorum dictates how many nodes need to acknowledge the new value before it becomes accepted.
The default is (nodes/2)+1
@param quorum the forced quorum. If None, the default is used
"""
config = self._getConfigFile()
if quorum:
try :
if ( int(quorum) != quorum or
quorum < 0 or
quorum > len( self.listNodes())) :
raise Exception ( "Illegal value for quorum %s" % quorum )
except:
raise Exception("Illegal value for quorum %s " % quorum)
config.set("global", "quorum", int(quorum))
else:
config.remove("global", "quorum")
self._saveConfig(config)
def getClientConfig(self):
"""
Get an object that contains all node information in the supplied cluster
@return dict the dict can be used as param for the ArakoonConfig object
"""
config = self._getConfigFile()
clientconfig = dict()
nodes = self.__getNodes(config)
for name in nodes:
ips = config.get(name, "ip")
ip_list = map(lambda x: x.strip(), ips.split(","))
port = int(config.get(name, "client_port"))
clientconfig[name] = (ip_list, port)
return clientconfig
def getClient(self):
config = self.getClientConfig()
id = self._getClusterId()
client = Arakoon.ArakoonClient(Arakoon.ArakoonClientConfig(id, config))
return client
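# Illustrative usage sketch (editor's addition, not part of the original module): getClientConfig()
# returns the {node_name: ([ips], client_port)} mapping that Arakoon.ArakoonClientConfig expects,
# and getClient() wraps it into a ready-to-use client object.
def _example_client_config(cluster):
    """Show the shape of the client configuration (illustrative only)."""
    cfg = cluster.getClientConfig()  # e.g. {'demo_0': (['127.0.0.1'], 7080)}
    client = cluster.getClient()
    return cfg, client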
def listNodes(self):
"""
Get a list of all node names in the supplied cluster
@return list of strings containing the node names
"""
config = self._getConfigFile()
return self.__getNodes(config)
def getNodeConfig(self,name):
"""
Get the parameters of a node section
@param name the name of the node
@return dict keys and values of the nodes parameters
"""
self.__validateName(name)
config = self._getConfigFile()
nodes = self.__getNodes(config)
if config.has_section(name):
d = {}
for option in config.options(name):
d[option] = config.get(name,option,False)
return d
else:
raise Exception("No node with name %s configured" % name)
def createDirs(self, name):
"""
Create the Directories for a local arakoon node in the supplied cluster
@param name the name of the node as configured in the config file
"""
self.__validateName(name)
config = self._getConfigFile()
if config.has_section(name):
home = config.get(name, "home")
X.createDir(home)
if config.has_option(name, "tlog_dir"):
tlogDir = config.get(name, "tlog_dir")
X.createDir(tlogDir)
if config.has_option(name, "tlf_dir"):
tlfDir = config.get(name, "tlf_dir")
X.createDir(tlfDir)
if config.has_option(name, "head_dir"):
headDir = config.get(name, "head_dir")
X.createDir(headDir)
logDir = config.get(name, "log_dir")
X.createDir(logDir)
return
msg = "No node %s configured" % name
raise Exception(msg)
def removeDirs(self, name):
"""
Remove the Directories for a local arakoon node in the supplied cluster
@param name the name of the node as configured in the config file
"""
self.__validateName(name)
config = self._getConfigFile()
nodes = self.__getNodes(config)
if name in nodes:
home = config.get(name, "home")
X.removeDirTree(home)
if config.has_option(name, "tlog_dir"):
tlogDir = config.get(name, "tlog_dir")
X.removeDirTree(tlogDir)
logDir = config.get(name, "log_dir")
X.removeDirTree(logDir)
return
raise Exception("No node %s" % name )
def addLocalNode(self, name):
"""
Add a node to the list of nodes that have to be started locally
from the supplied cluster
@param name the name of the node as configured in the config file
"""
self.__validateName(name)
config = self._getConfigFile()
nodes = self.__getNodes(config)
config_name = self._servernodes()
if config.has_section(name):
config_name_path = '/'.join([self._clusterPath, config_name])
nodesconfig = X.getConfig(config_name_path)
if not nodesconfig.has_section("global"):
nodesconfig.add_section("global")
nodesconfig.set("global","cluster", "")
nodes = self.__getNodes(nodesconfig)
if name in nodes:
raise Exception("node %s already present" % name)
nodes.append(name)
nodesconfig.set("global","cluster", ",".join(nodes))
X.writeConfig(nodesconfig,config_name_path)
return
raise Exception("No node %s" % name)
def removeLocalNode(self, name):
"""
Remove a node from the list of nodes that have to be started locally
from the supplied cluster
@param name the name of the node as configured in the config file
"""
self.__validateName(name)
config_name = self._servernodes()
config_name_path = '/'.join([self._clusterPath, config_name])
config = X.getConfig(config_name_path)
if not config.has_section("global"):
return
node_str = config.get("global", "cluster").strip()
nodes = node_str.split(',')
if name in nodes:
nodes.remove(name)
node_str = ','.join(nodes)
config.set("global","cluster", node_str)
X.writeConfig(config, config_name_path)
def listLocalNodes(self):
"""
Get a list of the local nodes in the supplied cluster
@return list of strings containing the node names
"""
config_name = self._servernodes()
config_name_path = '/'.join([self._clusterPath, config_name])
config = X.getConfig(config_name_path)
return self.__getNodes(config)
def setUp(self, numberOfNodes, basePort = 7080):
"""
Sets up a local environment
@param numberOfNodes the number of nodes in the environment
@return the dict that can be used as a param for the ArakoonConfig object
"""
cid = self._clusterName
clientPort = basePort
messagingPort = basePort + 1
for i in range(0, numberOfNodes):
nodeName = "%s_%i" %(cid, i)
self.addNode(name = nodeName,
clientPort = clientPort,
messagingPort = messagingPort)
self.addLocalNode(nodeName)
self.createDirs(nodeName)
clientPort += 10
messagingPort += 10
if numberOfNodes > 0:
self.forceMaster("%s_0" % cid)
config = self._getConfigFile()
config.set( 'global', 'cluster_id', cid)
self._saveConfig(config)
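# Illustrative usage sketch (editor's addition, not part of the original module): setUp() is the
# quickest way to get a throwaway local cluster and tearDown() undoes it. The node count, cluster
# name and base port are assumptions.
def _example_local_sandbox():
    """Create, start, stop and remove a local three-node cluster (illustrative only)."""
    cluster = ArakoonCluster('sandbox')
    cluster.setUp(3, basePort=7080)
    cluster.start()
    cluster.stop()
    cluster.tearDown(removeDirs=True)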
def tearDown(self, removeDirs=True ):
"""
Tears down a local environment
@param removeDirs remove the log and home dir
"""
config = self._getConfigFile()
nodes = self.__getNodes(config)
for node in nodes:
if removeDirs:
self.removeDirs(node)
self.removeNode(node)
if self.__getForcedMaster(config):
self.forceMaster(None)
self.remove()
def remove(self):
clients_fn = "%s/%s" % (X.cfgDir, "arakoonclients")
clientConf = X.getConfig(clients_fn)
clientConf.remove_section(self._clusterName)
X.writeConfig(clientConf,clients_fn)
fn = self._clustersFNH
clusterConf = X.getConfig(fn)
clusterConf.remove_section(self._clusterName)
X.writeConfig(clusterConf, fn)
X.removeDirTree(self._clusterPath)
def __getForcedMaster(self, config):
if not config.has_section("global"):
return []
if config.has_option("global", "master"):
return config.get("global", "master").strip()
else:
return []
def __getNodes(self, config):
if not config.has_section("global"):
return []
nodes = []
try:
if config.has_option("global", "cluster"):
line = config.get("global", "cluster").strip()
# "".split(",") -> ['']
if line == "":
nodes = []
else:
nodes = line.split(",")
nodes = map(lambda x: x.strip(), nodes)
else:
nodes = []
except LookupError:
pass
return nodes
def __validateInt(self,name, value):
typ = type(value)
if not typ == type(1):
raise Exception("%s=%s (type = %s) but should be an int" % (name, value, typ))
def __validateName(self, name):
if name is None or name.strip() == "":
raise Exception("A name should be passed. An empty name is not an option")
if not type(name) == type(str()):
raise Exception("Name should be of type string")
for char in [' ', ',', '#']:
if char in name:
raise Exception("name should not contain %s" % char)
def __validateLogLevel(self, name):
if not name in ["info", "debug", "notice", "warning", "error", "fatal"]:
raise Exception("%s is not a valid log level" % name)
def start(self):
"""
start all nodes in the cluster
"""
rcs = {}
for name in self.listLocalNodes():
rcs[name] = self._startOne(name)
return rcs
def stop(self):
"""
stop all nodes in the cluster
"""
rcs = {}
for name in self.listLocalNodes():
rcs[name] = self._stopOne(name)
return rcs
def restart(self):
"""
Restart all nodes in the supplied cluster
"""
rcs = {}
for name in self.listLocalNodes():
rcs[name] = self._restartOne(name)
return rcs
def getStatus(self):
"""
Get the status of the cluster's nodes running on this machine
@return dict node name -> status (AppStatusType)
"""
status = {}
for name in self.listLocalNodes():
status[name] = self._getStatusOne(name)
return status
def _requireLocal(self, nodeName):
if not nodeName in self.listLocalNodes():
raise ArakoonNodeNotLocal( nodeName)
def startOne(self, nodeName):
"""
Start the node with a given name
@param nodeName The name of the node
"""
self._requireLocal(nodeName)
return self._startOne(nodeName)
def catchupOnly(self, nodeName):
"""
make the node catch up, but don't start it.
(This is handy if you want to minimize downtime before you
go from a 1 node setup to a 2 node setup)
"""
self._requireLocal(nodeName)
cmd = [self._binary,
'-config',
'%s/%s.cfg' % (self._clusterPath, self._clusterName),
'--node',
nodeName,
'-catchup-only']
return subprocess.call(cmd)
def stopOne(self, nodeName):
"""
Stop the node with a given name
@param nodeName The name of the node
"""
self._requireLocal(nodeName)
return self._stopOne(nodeName)
def remoteCollapse(self, nodeName, n):
"""
Tell the targeted node to collapse all but n tlog files
@type nodeName: string
@type n: int
"""
config = self.getNodeConfig(nodeName)
ip_mess = config['ip']
ip = self._getIp(ip_mess)
port = int(config['client_port'])
clusterId = self._getClusterId()
ArakoonRemoteControl.collapse(ip,port,clusterId, n)
def copyDbToHead(self, nodeName, n):
"""
Tell the targeted node to take a copy of its db to be used as head, removing all but n tlogs
@type nodeName: string
@type n: int
"""
config = self.getNodeConfig(nodeName)
ip_mess = config['ip']
ip = self._getIp(ip_mess)
port = int(config['client_port'])
clusterId = self._getClusterId()
ArakoonRemoteControl.copyDbToHead(ip,port,clusterId, n)
def optimizeDb(self, nodeName):
"""
Tell a node to optimize its database (only works on slaves)
@param nodeName The name of the node you want to optimize
@return void
"""
config = self.getNodeConfig(nodeName)
ip_mess = config['ip']
ip = self._getIp(ip_mess)
port = int(config['client_port'])
clusterId = self._getClusterId()
ArakoonRemoteControl.optimizeDb(ip,port, clusterId)
def injectAsHead(self, nodeName, newHead, force=False, inPlace=False):
"""
tell the node to use the file as its new head database
@param nodeName The (local) node where you want to inject the database
@param newHead a database file that can serve as head
@param force forces the database to be injected even when the current head is corrupt
@param inPlace Use in-place rename instead of copying `newHead`
@return Return code of inject-as-head call
"""
self._requireLocal(nodeName)
cmd = [self._binary,'--inject-as-head', newHead, nodeName, '-config',
'%s/%s.cfg' % (self._clusterPath, self._clusterName) ]
if force:
cmd.append('--force')
if inPlace:
cmd.append('--inplace')
p = subprocess.Popen(cmd, stdout=subprocess.PIPE)
output = p.communicate()[0]
rc = p.returncode
logging.debug("injectAsHead returned [%d] %s", rc, output)
return rc
def defragDb(self, nodeName):
"""
Tell a node to defrag its database (only works on slaves)
@param nodeName The name of the node you want to defragment
@return void
"""
config = self.getNodeConfig(nodeName)
ip_mess = config['ip']
ip = self._getIp(ip_mess)
port = int(config['client_port'])
clusterId = self._getClusterId()
ArakoonRemoteControl.defragDb(ip,port, clusterId)
def dropMaster(self, nodeName):
"""
Request a node to drop its master role
@param nodeName The name of the node you want to drop its master role
@return void
"""
config = self.getNodeConfig(nodeName)
ip_mess = config['ip']
ip = self._getIp(ip_mess)
port = int(config['client_port'])
clusterId = self._getClusterId()
ArakoonRemoteControl.dropMaster(ip,port, clusterId)
def flushStore(self, nodeName):
"""
Request a node to flush its batched store to disk
@param nodeName The name of the node you want to perform the flush of its store
@return void
"""
config = self.getNodeConfig(nodeName)
ip_mess = config['ip']
ip = self._getIp(ip_mess)
port = int(config['client_port'])
clusterId = self._getClusterId()
ArakoonRemoteControl.flushStore(ip,port, clusterId)
def restartOne(self, nodeName):
"""
Restart the node with a given name in the supplied cluster
@param nodeName The name of the node
"""
self._requireLocal( nodeName)
return self._restartOne(nodeName)
def getStatusOne(self, nodeName):
"""
Get the status of the node with a given name in the supplied cluster
@param nodeName The name of the node
"""
self._requireLocal(nodeName)
return self._getStatusOne(nodeName)
def backupDb(self, nodeName, location):
"""
Make a backup of the live database to the specified file
@param nodeName The name of the node you want to backup
@param location The path to the file where the backup should be stored
@return void
"""
config = self.getNodeConfig(nodeName)
ip_mess = config['ip']
ip = self._getIp(ip_mess)
port = int(config['client_port'])
clusterId = self._getClusterId()
ArakoonRemoteControl.downloadDb(ip,port,clusterId, location)
def _cmd(self, name):
r = [self._binary,'--node',name,'-config',
'%s/%s.cfg' % (self._clusterPath, self._clusterName),
'-start']
return r
def _cmdLine(self, name):
cmd = self._cmd(name)
cmdLine = string.join(cmd, ' ')
return cmdLine
def _startOne(self, name):
if self._getStatusOne(name) == X.AppStatusType.RUNNING:
return
config = self.getNodeConfig(name)
cmd = []
if 'wrapper' in config :
wrapperLine = config['wrapper']
cmd = wrapperLine.split(' ')
command = self._cmd(name)
cmd.extend(command)
cmd.append('-daemonize')
logging.debug('calling: %s', str(cmd))
return subprocess.call(cmd, close_fds = True)
def _getIp(self,ip_mess):
t_mess = type(ip_mess)
if t_mess == types.StringType:
parts = ip_mess.split(',')
ip = string.strip(parts[0])
return ip
elif t_mess == types.ListType:
return ip_mess[0]
else:
raise Exception("should '%s' be a string or string list")
def _stopOne(self, name):
line = self._cmdLine(name)
cmd = ['pkill', '-f', line]
logging.debug("stopping '%s' with: %s",name, string.join(cmd, ' '))
rc = subprocess.call(cmd, close_fds = True)
logging.debug("%s=>rc=%i" % (cmd,rc))
i = 0
while(self._getStatusOne(name) == X.AppStatusType.RUNNING):
rc = subprocess.call(cmd, close_fds = True)
logging.debug("%s=>rc=%i" % (cmd,rc))
time.sleep(1)
i += 1
logging.debug("'%s' is still running... waiting" % name)
if i == 10:
msg = "Requesting '%s' to dump crash log information" % name
logging.debug(msg)
X.subprocess.call(['pkill', '-%d' % signal.SIGUSR2, '-f', line], close_fds=True)
time.sleep(1)
logging.debug("stopping '%s' with kill -9" % name)
rc = X.subprocess.call(['pkill', '-9', '-f', line], close_fds = True)
if rc == 0:
rc = 9
cnt = 0
while (self._getStatusOne(name) == X.AppStatusType.RUNNING ) :
logging.debug("'%s' is STILL running... waiting" % name)
time.sleep(1)
cnt += 1
if( cnt > 10):
break
break
else:
X.subprocess.call(cmd, close_fds=True)
if rc < 9:
rc = 0 # might be we looped one time too many.
return rc
def _restartOne(self, name):
self._stopOne(name)
return self._startOne(name)
def _getPid(self, name):
if self._getStatusOne(name) == X.AppStatusType.HALTED:
return None
line = self._cmdLine(name)
cmd = ['pgrep', '-o' ,'-f' , line]
try:
stdout = X.subprocess.check_output( cmd )
return int(stdout)
except:
return None
def _getStatusOne(self,name):
line = self._cmdLine(name)
cmd = ['pgrep','-fn', line]
proc = subprocess.Popen(cmd,
close_fds = True,
stdout=subprocess.PIPE)
pids = proc.communicate()[0]
pid_list = pids.split()
lenp = len(pid_list)
result = None
if lenp == 1:
result = X.AppStatusType.RUNNING
elif lenp == 0:
result = X.AppStatusType.HALTED
else:
for pid in pid_list:
try:
f = open('/proc/%s/cmdline' % pid,'r')
startup = f.read()
f.close()
logging.debug("pid=%s; cmdline=%s", pid, startup)
except:
pass
raise Exception("multiple matches", pid_list)
return result
def getStorageUtilization(self, node = None):
"""Calculate and return the disk usage of the supplied arakoon cluster on the system
When no node name is given, the aggregate consumption of all nodes
configured in the supplied cluster on the system is returned.
Return format is a dictionary containing 3 keys: 'db', 'tlog' and
'log', whose values denote the size of database files
(*.db, *.db.wal), TLog files (*.tlc, *.tlog) and log files (*).
:param node: Name of the node to check
:type node: `str`
:return: Storage utilization of the node(s)
:rtype: `dict`
:raise ArakoonNodeNotLocal: No such local node
"""
local_nodes = self.listLocalNodes()
if node is not None and node not in local_nodes:
raise ArakoonNodeNotLocal ( node )
def helper(config):
home = config['home']
log_dir = config['log_dir']
real_tlog_dir = config.get('tlog_dir', home)
tlf_dir = config.get('tlf_dir', real_tlog_dir)
head_dir = config.get('head_dir', real_tlog_dir)
tlog_dirs = set([real_tlog_dir, tlf_dir])
# 'head_dir' might have a place in here, but head.db wasn't counted
# before (in most cases), so...
db_dirs = set([home])
log_dirs = set([log_dir])
files_in_dir = lambda dir_: itertools.ifilter(os.path.isfile,
(os.path.join(dir_, name) for name in os.listdir(dir_)))
files_in_dirs = lambda dirs: itertools.chain(*(files_in_dir(dir_)
for dir_ in dirs))
matching_files = lambda *exts: lambda files: \
(file_ for file_ in files
if any(file_.endswith(ext) for ext in exts))
tlog_files = matching_files('.tlc', '.tlog','.tlf')
db_files = matching_files('.db', '.db.wal')
log_files = matching_files('') # Every string ends with ''
sum_size = lambda files: sum(os.path.getsize(file_)
for file_ in files)
return {
'tlog': sum_size(tlog_files(files_in_dirs(tlog_dirs))),
'db': sum_size(db_files(files_in_dirs(db_dirs))),
'log': sum_size(log_files(files_in_dirs(log_dirs)))
}
nodes = (node, ) if node is not None else local_nodes
stats = (helper(self.getNodeConfig(node)) for node in nodes)
result = {}
for stat in stats:
for key, value in stat.iteritems():
result[key] = result.get(key, 0) + value
return result
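# Illustrative usage sketch (editor's addition, not part of the original module): the returned dict
# always carries the 'db', 'tlog' and 'log' keys, so per-category totals can be reported directly.
def _example_report_storage(cluster):
    """Print the aggregate disk usage of all local nodes in MiB (illustrative only)."""
    usage = cluster.getStorageUtilization()
    for kind in ('db', 'tlog', 'log'):
        print '%s: %.1f MiB' % (kind, usage[kind] / (1024.0 * 1024))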
def gatherEvidence(self,
destination,
clusterCredentials=None,
includeLogs=True,
includeDB=True,
includeTLogs=True,
includeConfig=True, test = False):
"""
@param destination : path INCLUDING FILENAME where the evidence archive is saved. Can be URI, in other words, ftp://..., smb://, /tmp, ...
@param clusterCredentials : dict of tuples e.g. {"node1" ('login', 'password'), "node2" ('login', 'password'), "node3" ('login', 'password')}
@param includeLogs : Boolean value indicating that the logs need to be included in the evidence archive, default is True
@param includeDB : Boolean value indicating that the Tokyo Cabinet db and db.wall files need to be included in the evidence archive, default is True
@param includeTLogs : Boolean value indicating that the tlogs need to be included in the evidence archive, default is True
@param includeConfig : Boolean value indicating that the arakoon configuration files should be included in the resulting archive
"""
nodes_list = self.listNodes()
diff_list = self.listNodes()
# Note: 'q' below is the pylabs/Q-Shell sandbox object this module historically assumed to be
# available in its runtime environment; it is not imported in this file.
if q.qshellconfig.interactive or test:
if not clusterCredentials:
clusterCredentials = self._getClusterCredentials(nodes_list,diff_list,test)
elif len(clusterCredentials) < len(nodes_list):
nodes_list = [x for x in nodes_list if x not in clusterCredentials]
diff_list = [x for x in nodes_list if x not in clusterCredentials]
sub_clusterCredentials = self._getClusterCredentials(nodes_list, diff_list, test)
clusterCredentials.update(sub_clusterCredentials)
else:
q.gui.dialog.message("All Nodes have Credentials.")
self._transferFiles(destination,
clusterCredentials,
includeLogs,
includeDB,
includeTLogs,
includeConfig)
else:
if not clusterCredentials or len(clusterCredentials) < len(nodes_list):
raise NameError('Error: QShell is Not interactive')
else:
q.gui.dialog.message("All Nodes have Credentials.")
self._transferFiles(destination,
clusterCredentials,
includeLogs,
includeDB,
includeTLogs,
includeConfig)
def _getClusterCredentials(self,
nodes_list,
diff_list, test):
clusterCredentials = dict()
same_credentials_nodes = list()
for nodename in nodes_list:
node_passwd = ''
if not test:
if nodename in diff_list:
node_config = self.getNodeConfig(nodename)
node_ip_mess = node_config['ip']
node_ip = self._getIp(node_ip_mess)
node_login = q.gui.dialog.askString("Please provide login name for %s @ %s default 'root'" % (nodename, node_ip))
if node_login == '':
node_login = 'root'
while node_passwd == '':
node_passwd = q.gui.dialog.askPassword('Please provide password for %s @ %s' % (nodename, node_ip))
if node_passwd == '':
q.gui.dialog.message("Error: Password is Empty.")
clusterCredentials[nodename] = (node_login, node_passwd)
if len(diff_list) > 1:
same_credentials = q.gui.dialog.askYesNo('Do you want to set the same credentials for any other node?')
diff_list.remove(nodename)
if same_credentials:
same_credentials_nodes = q.gui.dialog.askChoiceMultiple("Please choose node(s) that will take same credentials:",diff_list)
for node in same_credentials_nodes:
clusterCredentials[node] = (node_login, node_passwd)
#end for
if len(same_credentials_nodes) == len(diff_list):
break
else:
diff_list = list(set(diff_list).difference(set(same_credentials_nodes)))
if test:
clusterCredentials[nodename] = ('hudson', 'hudson')
#end for
return clusterCredentials
def _transferFiles(self,
destination,
clusterCredentials,
includeLogs=True,
includeDB=True,
includeTLogs=True,
includeConfig=True):
"""
This function copies the logs, db, tlog and config files to a Temp folder on the machine running the script, then compresses the Temp
folder and places a copy at the destination provided at the beginning
"""
nodes_list = self.listNodes()
archive_name = self._clusterName + "_cluster_details"
archive_folder = q.system.fs.joinPaths(q.dirs.tmpDir , archive_name)
cfs = q.cloud.system.fs
sfs = q.system.fs
for nodename in nodes_list:
node_folder = sfs.joinPaths( archive_folder, nodename)
sfs.createDir(node_folder)
configDict = self.getNodeConfig(nodename)
source_ip_mess = configDict['ip']
source_ip = self._getIp(source_ip_mess)
userName = clusterCredentials[nodename][0]
password = clusterCredentials[nodename][1]
source_path = 'sftp://' + userName + ':' + password + '@' + source_ip
if includeDB:
db_files = cfs.listDir( source_path + configDict['home'] )
files2copy = filter ( lambda fn : fn.startswith( nodename ), db_files )
for fn in files2copy :
full_db_file = source_path + configDict['home'] + "/" + fn
cfs.copyFile(full_db_file , 'file://' + node_folder)
if includeLogs:
for fname in cfs.listDir(source_path + configDict['log_dir']):
if fname.startswith(nodename):
fileinlog = q.system.fs.joinPaths(configDict['log_dir'] ,fname)
cfs.copyFile(source_path + fileinlog, 'file://' + node_folder)
if includeTLogs:
source_dir = None
if configDict.has_key('tlog_dir'):
source_dir = configDict['tlog_dir']
else:
source_dir = configDict['home']
full_source_dir = source_path + source_dir
for fname in q.cloud.system.fs.listDir( full_source_dir ):
if fname.endswith('.tlog') or fname.endswith('.tlc') or fname.endswith('.tlf'):
tlogfile = q.system.fs.joinPaths(source_dir ,fname)
cfs.copyFile(source_path + tlogfile, 'file://' + node_folder)
clusterId = self._clusterName + '.cfg'
clusterNodes = self._clusterName + '_local_nodes.cfg'
clusterPath = '/'.join([self._clusterPath, clusterId])
q.cloud.system.fs.copyFile(source_path + clusterPath, 'file://' + node_folder)
clusterNodesPath = q.system.fs.joinPaths(self._clusterPath, clusterNodes)
if q.cloud.system.fs.sourcePathExists('file://' + clusterNodesPath):
q.cloud.system.fs.copyFile(source_path + clusterNodesPath, 'file://' + node_folder)
archive_file = sfs.joinPaths( q.dirs.tmpDir, self._clusterName + '_cluster_evidence.tgz')
q.system.fs.targzCompress( archive_folder, archive_file)
cfs.copyFile('file://' + archive_file , destination)
q.system.fs.removeDirTree( archive_folder )
q.system.fs.unlink( archive_file )
def setNurseryKeeper(self, clusterId):
"""
Updates the cluster configuration file to the correct nursery keeper cluster.
If the keeper needs to be removed from the cluster config, specify None as clusterId
This requires a valid client configuration on the system that can be used to access the keeper cluster.
@param clusterId: The id of the cluster that will function as nursery keeper
@type clusterId: string / None
@return void
"""
config = self._getConfigFile()
if clusterId is None:
config.remove_section("nursery")
self._saveConfig(config)
return
cliCfg = q.clients.arakoon.getClientConfig(clusterId)
nurseryNodes = cliCfg.getNodes()
if len(nurseryNodes) == 0:
raise RuntimeError("A valid client configuration is required for cluster '%s'" % (clusterId) )
config.add_section("nursery")
config.set("nursery", "cluster_id", clusterId)
config.set("nursery", "cluster", ",".join( nurseryNodes.keys() ))
for (id,(ip,port)) in nurseryNodes.iteritems() :
if isinstance(ip, basestring):
ip = [ip]
config.add_section(id)
config.set(id, "ip", ', '.join(ip))
config.set(id,"client_port",port)
self._saveConfig(config)
| 34.403235 | 186 | 0.579582 | 6,453 | 57,419 | 5.030528 | 0.113281 | 0.013031 | 0.021256 | 0.010997 | 0.368893 | 0.306112 | 0.270008 | 0.244563 | 0.210954 | 0.197277 | 0 | 0.002517 | 0.328933 | 57,419 | 1,668 | 187 | 34.423861 | 0.839951 | 0.007802 | 0 | 0.348932 | 0 | 0.004069 | 0.079564 | 0.002814 | 0 | 0 | 0 | 0 | 0 | 0 | null | null | 0.012208 | 0.013225 | null | null | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
0e3e2d475e4d28c9969630febc816f8bc00f2ddf | 626 | py | Python | lbworkflow/tests/purchase/models.py | wearypossum4770/django-lb-workflow | 8db36c7a8c5cf3aa2492048cad9fbf26d895c8c7 | [
"MIT"
] | 194 | 2017-04-24T15:28:16.000Z | 2021-12-29T03:35:28.000Z | lbworkflow/tests/purchase/models.py | wearypossum4770/django-lb-workflow | 8db36c7a8c5cf3aa2492048cad9fbf26d895c8c7 | [
"MIT"
] | 17 | 2018-05-31T07:45:42.000Z | 2021-12-16T08:55:44.000Z | lbworkflow/tests/purchase/models.py | wearypossum4770/django-lb-workflow | 8db36c7a8c5cf3aa2492048cad9fbf26d895c8c7 | [
"MIT"
] | 67 | 2017-05-18T02:28:28.000Z | 2022-01-20T02:05:10.000Z | from django.db import models
from lbworkflow.models import BaseWFObj
class Purchase(BaseWFObj):
title = models.CharField("Title", max_length=255)
reason = models.CharField("Reason", max_length=255)
def __str__(self):
return self.reason
class Item(models.Model):
purchase = models.ForeignKey(
Purchase,
on_delete=models.CASCADE,
)
name = models.CharField("Name", max_length=255)
qty = models.IntegerField("Qty")
note = models.CharField("Note", max_length=255)
class Meta:
verbose_name = "Purchase Item"
def __str__(self):
return self.name
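# Illustrative usage sketch (editor's addition, not part of the original models module): how a
# purchase request and a line item could be created through the ORM. The field values are
# assumptions, and it is assumed that BaseWFObj provides defaults for its own workflow fields.
def create_example_purchase():
    """Create one Purchase with a single Item attached (illustrative only)."""
    purchase = Purchase.objects.create(title="New laptops", reason="Replace aging hardware")
    Item.objects.create(purchase=purchase, name="Laptop", qty=2, note="13 inch model")
    return purchase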
| 22.357143 | 55 | 0.670927 | 75 | 626 | 5.413333 | 0.413333 | 0.147783 | 0.118227 | 0.078818 | 0.098522 | 0 | 0 | 0 | 0 | 0 | 0 | 0.024641 | 0.222045 | 626 | 27 | 56 | 23.185185 | 0.809035 | 0 | 0 | 0.105263 | 0 | 0 | 0.055911 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.105263 | false | 0 | 0.105263 | 0.105263 | 0.789474 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 1 |
0e3e3487ba35fcedce9a553cfee80ee9e1454f0d | 900 | py | Python | interface/python/test.py | gaubert/nessDB | 69eed5ec2d4e6bf06853cebf7a506d214eaf7d62 | [
"BSD-3-Clause"
] | 1 | 2020-09-24T02:41:23.000Z | 2020-09-24T02:41:23.000Z | interface/python/test.py | gaubert/nessDB | 69eed5ec2d4e6bf06853cebf7a506d214eaf7d62 | [
"BSD-3-Clause"
] | null | null | null | interface/python/test.py | gaubert/nessDB | 69eed5ec2d4e6bf06853cebf7a506d214eaf7d62 | [
"BSD-3-Clause"
] | null | null | null | #!/usr/bin/env python
#-*- coding:utf-8 -*-
# author : KDr2
# BohuTANG @2012
#
import sys
import random
import string
import time
import nessdb
def gen_random_str(length):
return ''.join([random.choice('abcdefghijklmnoprstuvwyxzABCDEFGHIJKLMNOPRSTUVWXYZ') for i in range(length)])
def ness_open(db_name):
return nessdb.NessDB(db_name)
def ness_write(db, c):
s_time = time.time()
for i in range(0, c):
key = gen_random_str(16)
db.db_add(key, "abcd")
if (i % 10000) == 0:
sys.stdout.write("\r\x1b[K ....write finished " + i.__str__())
sys.stdout.flush()
e_time = time.time()
print ""
print "---->count:<%i>,cost time:<%i>, %i/sec\n" %(c, e_time - s_time, c / (e_time - s_time))
if __name__ == '__main__':
if (len(sys.argv) > 2):
if (sys.argv[1] == "write"):
db = ness_open("test")
ness_write(db, int(sys.argv[2]))
db.db_close()
else:
print "test.py write <count>"
| 21.428571 | 106 | 0.645556 | 145 | 900 | 3.8 | 0.441379 | 0.058076 | 0.043557 | 0.039927 | 0.039927 | 0 | 0 | 0 | 0 | 0 | 0 | 0.0253 | 0.165556 | 900 | 41 | 107 | 21.95122 | 0.708389 | 0.076667 | 0 | 0 | 0 | 0 | 0.193939 | 0.060606 | 0 | 0 | 0 | 0 | 0 | 0 | null | null | 0 | 0.178571 | null | null | 0.107143 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
0e4381d0af212201e83d5139e926caa71f20f745 | 2,472 | py | Python | cube2/server.py | bobssup/kripken | a19f0aca7b9251c08dd930fa670eaa72db0806f7 | [
"CC-BY-3.0"
] | 892 | 2015-01-06T13:51:18.000Z | 2022-03-28T08:54:27.000Z | cube2/server.py | bobssup/kripken | a19f0aca7b9251c08dd930fa670eaa72db0806f7 | [
"CC-BY-3.0"
] | 37 | 2015-01-24T19:50:38.000Z | 2022-03-11T01:51:17.000Z | cube2/server.py | bobssup/kripken | a19f0aca7b9251c08dd930fa670eaa72db0806f7 | [
"CC-BY-3.0"
] | 264 | 2015-01-31T14:31:47.000Z | 2022-03-30T16:46:54.000Z | #!/usr/bin/env python
'''
Sets up websocket server support to run the server in one HTML page and the client in another HTML page. Each connects to a websocket server, which we relay together, so the two pages think they are connected to each other (see websocket_bi tests in emscripten).
Instructions for websocket networking:
Mode 1: Two clients (one with embedded server)
1. Run this script
2. Run a webserver (e.g. python -m SimpleHTTPServer 8888)
3. Run http://localhost:8888/game.html?low,low,windowed,serve in one browser
4. Run http://localhost:8888/game.html?low,low,windowed in another browser
5. In the second browser, do /connect
'windowed' runs in non-fullscreen mode, useful to run two browsers at once - scroll
all the way down to see the canvas. 'serve' runs the embedded server in that
client.
Mode 2: Server and client
1. Run this script
2. Run a webserver (e.g. python -m SimpleHTTPServer 8888)
3. Run http://localhost:8888/server.html
4. Run http://localhost:8888/game.html?low,low
5. In the client, do /connect
Note that you likely need to run the server and client in different browsers or at least browser windows, since browsers throttle background tabs.
'''
import os, sys, multiprocessing, time
from subprocess import Popen, PIPE, STDOUT
__rootpath__ = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
def path_from_root(*pathelems):
return os.path.join(__rootpath__, *pathelems)
sys.path += [path_from_root('tools/'), path_from_root('tools/websockify')]
import websockify
def websockify_func(wsp):
wsp.start_server()
client = websockify.WebSocketProxy(verbose=True, listen_port=28785, target_host="127.0.0.1", target_port=28786, run_once=True)
client_process = multiprocessing.Process(target=websockify_func, args=(client,))
client_process.start()
print 'client on process', client_process.pid
server = websockify.WebSocketProxy(verbose=True, listen_port=28780, target_host="127.0.0.1", target_port=28781, run_once=True)
server_process = multiprocessing.Process(target=websockify_func, args=(server,))
server_process.start()
print 'server on process', server_process.pid
def relay_server(child):
child.communicate()
relay_child = Popen(['python', path_from_root('tools', 'socket_relay.py'), '28781', '28786'])
relay_process = multiprocessing.Process(target=relay_server, args=(relay_child,))
relay_process.start()
print 'relay on process', relay_process.pid
while 1:
time.sleep(1)
| 37.454545 | 262 | 0.767395 | 381 | 2,472 | 4.855643 | 0.372703 | 0.015135 | 0.034595 | 0.043243 | 0.28973 | 0.272432 | 0.223784 | 0.166486 | 0.138378 | 0.084324 | 0 | 0.037106 | 0.127832 | 2,472 | 65 | 263 | 38.030769 | 0.820965 | 0.008091 | 0 | 0 | 0 | 0 | 0.100239 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | null | 0 | 0.12 | null | null | 0.12 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
0e4cabb3fb9a2919ca8974672393ca3fb3160fbf | 16,677 | py | Python | nt_m.py | kwj1399/ryu_app | df094994e09788053b47aad1c6164033e6bd4c14 | [
"MIT"
] | null | null | null | nt_m.py | kwj1399/ryu_app | df094994e09788053b47aad1c6164033e6bd4c14 | [
"MIT"
] | null | null | null | nt_m.py | kwj1399/ryu_app | df094994e09788053b47aad1c6164033e6bd4c14 | [
"MIT"
] | null | null | null | '''
FileName:
Author:KWJ(kyson)
UpdateTime:2016/10/10
Introduction:
'''
from __future__ import division
import copy
from operator import attrgetter
from ryu.base import app_manager
from ryu.base.app_manager import lookup_service_brick
from ryu.controller.handler import CONFIG_DISPATCHER, MAIN_DISPATCHER, DEAD_DISPATCHER
from ryu.controller.handler import set_ev_cls
from ryu.ofproto import ofproto_v1_3
from ryu.lib import hub
from ryu.lib.packet import packet
from ryu.controller import ofp_event
from ryu.lib.packet import ethernet
from ryu.lib.packet import arp
from ryu.lib.packet import ether_types
import setting
import redis
class NetworkMonitor(app_manager.RyuApp):
"""
NetworkMonitor is a Ryu app for collecting traffic information.
"""
OFP_VERSIONS = [ofproto_v1_3.OFP_VERSION]
def __init__(self, *args, **kwargs):
super(NetworkMonitor, self).__init__(*args, **kwargs)
self.name = 'monitor'
self.datapaths = {}
self.port_stats = {}
self.port_speed = {}
self.flow_stats = {}
self.flow_speed = {}
self.stats = {}
self.port_features = {}
self.free_bandwidth = {}
self.mac_to_port = {}
self.ip_to_port = {}
self.ipfreebw = {}
# Start a green thread to monitor traffic and calculate
# the free bandwidth of links respectively.
self.monitor_thread = hub.spawn(self._monitor)
@set_ev_cls(ofp_event.EventOFPStateChange,
[MAIN_DISPATCHER, DEAD_DISPATCHER])
def _state_change_handler(self, ev):
"""
Record datapath's info
"""
datapath = ev.datapath
if ev.state == MAIN_DISPATCHER:
if not datapath.id in self.datapaths:
self.logger.debug('register datapath: %016x', datapath.id)
self.datapaths[datapath.id] = datapath
elif ev.state == DEAD_DISPATCHER:
if datapath.id in self.datapaths:
self.logger.debug('unregister datapath: %016x', datapath.id)
del self.datapaths[datapath.id]
def _monitor(self):
"""
Main entry method of monitoring traffic.
"""
while True:
self.stats['flow'] = {}
self.stats['port'] = {}
self._save_ipfreebw(self.free_bandwidth,self.ip_to_port,self.ipfreebw)
pool = redis.ConnectionPool(host='127.0.0.1',port=6379,db=0)
r = redis.StrictRedis(connection_pool=pool)
for key in self.ipfreebw.keys():
r.set(key,self.ipfreebw[key])
print(self.free_bandwidth)
print (self.ip_to_port)
print (self.ipfreebw)
for dp in self.datapaths.values():
self.port_features.setdefault(dp.id, {})
self._request_stats(dp)
# refresh data.
hub.sleep(setting.MONITOR_PERIOD)
if self.stats['flow'] or self.stats['port']:
self.show_stat('flow')
self.show_stat('port')
hub.sleep(1)
def _request_stats(self, datapath):
"""
Sending request msg to datapath
"""
self.logger.debug('send stats request: %016x', datapath.id)
ofproto = datapath.ofproto
parser = datapath.ofproto_parser
req = parser.OFPPortDescStatsRequest(datapath, 0)
datapath.send_msg(req)
req = parser.OFPPortStatsRequest(datapath, 0, ofproto.OFPP_ANY)
datapath.send_msg(req)
req = parser.OFPFlowStatsRequest(datapath)
datapath.send_msg(req)
def _save_freebandwidth(self, dpid, port_no, speed):
# Calculate free bandwidth of port and save it.
port_state = self.port_features.get(dpid).get(port_no)
if port_state:
capacity = port_state[2]
curr_bw = self._get_free_bw(capacity, speed)
key = (dpid,port_no)
if key not in setting.SW_PORT:
self.free_bandwidth.setdefault(key, None)
self.free_bandwidth[(dpid, port_no)] = curr_bw
else:
self.logger.info("Fail in getting port state")
def _save_stats(self, _dict, key, value, length):
if key not in _dict:
_dict[key] = []
_dict[key].append(value)
if len(_dict[key]) > length:
_dict[key].pop(0)
def _get_speed(self, now, pre, period):
if period:
return (now - pre) / (period)
else:
return 0
def _get_free_bw(self, capacity, speed):
# capacity:OFPPortDescStatsReply default is kbit/s
return max(capacity*10**3 - speed * 8, 0)
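# Worked example (illustrative numbers, not from a real switch): a port reporting
# capacity = 100000 kbit/s and carrying speed = 2,500,000 bytes/s gives
# max(100000 * 10**3 - 2500000 * 8, 0) = 80,000,000, i.e. roughly 80 Mbit/s free.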
def _get_time(self, sec, nsec):
return sec + nsec / (10 ** 9)
def _get_period(self, n_sec, n_nsec, p_sec, p_nsec):
return self._get_time(n_sec, n_nsec) - self._get_time(p_sec, p_nsec)
def _save_ipfreebw(self,freebw,ip_port,ipfreebw):
for key in ip_port.keys():
ipfreebw[ip_port[key]]=freebw[key]
@set_ev_cls(ofp_event.EventOFPSwitchFeatures, CONFIG_DISPATCHER)
def switch_features_handler(self, ev):
datapath = ev.msg.datapath
ofproto = datapath.ofproto
parser = datapath.ofproto_parser
# install table-miss flow entry
#
# We specify NO BUFFER to max_len of the output action due to
# OVS bug. At this moment, if we specify a lesser number, e.g.,
# 128, OVS will send Packet-In with invalid buffer_id and
# truncated packet data. In that case, we cannot output packets
# correctly. The bug has been fixed in OVS v2.1.0.
match = parser.OFPMatch()
actions = [parser.OFPActionOutput(ofproto.OFPP_CONTROLLER,
ofproto.OFPCML_NO_BUFFER)]
self.add_flow(datapath, 0, match, actions)
def add_flow(self, datapath, priority, match, actions, buffer_id=None):
ofproto = datapath.ofproto
parser = datapath.ofproto_parser
inst = [parser.OFPInstructionActions(ofproto.OFPIT_APPLY_ACTIONS,
actions)]
if buffer_id:
mod = parser.OFPFlowMod(datapath=datapath, buffer_id=buffer_id,
priority=priority, match=match,
instructions=inst)
else:
mod = parser.OFPFlowMod(datapath=datapath, priority=priority,
match=match, instructions=inst)
datapath.send_msg(mod)
@set_ev_cls(ofp_event.EventOFPPacketIn, MAIN_DISPATCHER)
def _packet_in_handler(self, ev):
# If you hit this you might want to increase
# the "miss_send_length" of your switch
if ev.msg.msg_len < ev.msg.total_len:
self.logger.debug("packet truncated: only %s of %s bytes",
ev.msg.msg_len, ev.msg.total_len)
msg = ev.msg
datapath = msg.datapath
ofproto = datapath.ofproto
parser = datapath.ofproto_parser
in_port = msg.match['in_port']
pkt = packet.Packet(msg.data)
eth = pkt.get_protocols(ethernet.ethernet)[0]
if eth.ethertype == ether_types.ETH_TYPE_LLDP:
# ignore lldp packet
return
dst = eth.dst
src = eth.src
dpid = datapath.id
self.mac_to_port.setdefault(dpid, {})
#self.logger.info("packet in %s %s %s %s", dpid, src, dst, in_port)
# learn a mac address to avoid FLOOD next time.
self.mac_to_port[dpid][src] = in_port
# mac_of_port
for p in pkt.get_protocols(arp.arp):
key = (dpid, in_port)
value = p.src_ip
if key not in setting.SW_PORT:
self.ip_to_port.setdefault(key, value)
if dst in self.mac_to_port[dpid]:
out_port = self.mac_to_port[dpid][dst]
else:
out_port = ofproto.OFPP_FLOOD
actions = [parser.OFPActionOutput(out_port)]
# install a flow to avoid packet_in next time
if out_port != ofproto.OFPP_FLOOD:
match = parser.OFPMatch(in_port=in_port, eth_dst=dst)
# verify if we have a valid buffer_id, if yes avoid to send both
# flow_mod & packet_out
if msg.buffer_id != ofproto.OFP_NO_BUFFER:
self.add_flow(datapath, 1, match, actions, msg.buffer_id)
return
else:
self.add_flow(datapath, 1, match, actions)
data = None
if msg.buffer_id == ofproto.OFP_NO_BUFFER:
data = msg.data
out = parser.OFPPacketOut(datapath=datapath, buffer_id=msg.buffer_id,
in_port=in_port, actions=actions, data=data)
datapath.send_msg(out)
@set_ev_cls(ofp_event.EventOFPFlowStatsReply, MAIN_DISPATCHER)
def _flow_stats_reply_handler(self, ev):
"""
Save flow stats reply info into self.flow_stats.
Calculate flow speed and Save it.
"""
body = ev.msg.body
dpid = ev.msg.datapath.id
self.stats['flow'][dpid] = body
self.flow_stats.setdefault(dpid, {})
self.flow_speed.setdefault(dpid, {})
for stat in sorted([flow for flow in body if flow.priority == 1],
key=lambda flow: (flow.match.get('in_port'),
flow.match.get('eth_dst'))):
key = (stat.match['in_port'], stat.match.get('eth_dst'),
stat.instructions[0].actions[0].port)
value = (stat.packet_count, stat.byte_count,
stat.duration_sec, stat.duration_nsec)
self._save_stats(self.flow_stats[dpid], key, value, 5)
# Get flow's speed.
pre = 0
period = setting.MONITOR_PERIOD
tmp = self.flow_stats[dpid][key]
if len(tmp) > 1:
pre = tmp[-2][1]
period = self._get_period(tmp[-1][2], tmp[-1][3],
tmp[-2][2], tmp[-2][3])
speed = self._get_speed(self.flow_stats[dpid][key][-1][1],
pre, period)
self._save_stats(self.flow_speed[dpid], key, speed, 5)
@set_ev_cls(ofp_event.EventOFPPortStatsReply, MAIN_DISPATCHER)
def _port_stats_reply_handler(self, ev):
"""
Save port's stats info
Calculate port's speed and save it.
"""
body = ev.msg.body
dpid = ev.msg.datapath.id
self.stats['port'][dpid] = body
for stat in sorted(body, key=attrgetter('port_no')):
port_no = stat.port_no
if port_no != ofproto_v1_3.OFPP_LOCAL:
key = (dpid, port_no)
value = (stat.tx_bytes, stat.rx_bytes, stat.rx_errors,
stat.duration_sec, stat.duration_nsec)
self._save_stats(self.port_stats, key, value, 5)
# Get port speed.
pre = 0
period = setting.MONITOR_PERIOD
tmp = self.port_stats[key]
if len(tmp) > 1:
pre = tmp[-2][0] + tmp[-2][1]
period = self._get_period(tmp[-1][3], tmp[-1][4],
tmp[-2][3], tmp[-2][4])
speed = self._get_speed(
self.port_stats[key][-1][0] + self.port_stats[key][-1][1],
pre, period)
self._save_stats(self.port_speed, key, speed, 5)
self._save_freebandwidth(dpid, port_no, speed)
@set_ev_cls(ofp_event.EventOFPPortDescStatsReply, MAIN_DISPATCHER)
def port_desc_stats_reply_handler(self, ev):
"""
Save port description info.
"""
msg = ev.msg
dpid = msg.datapath.id
ofproto = msg.datapath.ofproto
config_dict = {ofproto.OFPPC_PORT_DOWN: "Down",
ofproto.OFPPC_NO_RECV: "No Recv",
ofproto.OFPPC_NO_FWD: "No Forward",
ofproto.OFPPC_NO_PACKET_IN: "No Packet-in"}
state_dict = {ofproto.OFPPS_LINK_DOWN: "Down",
ofproto.OFPPS_BLOCKED: "Blocked",
ofproto.OFPPS_LIVE: "Live"}
ports = []
for p in ev.msg.body:
ports.append('port_no=%d hw_addr=%s name=%s config=0x%08x '
'state=0x%08x curr=0x%08x advertised=0x%08x '
'supported=0x%08x peer=0x%08x curr_speed=%d '
'max_speed=%d' %
(p.port_no, p.hw_addr,
p.name, p.config,
p.state, p.curr, p.advertised,
p.supported, p.peer, p.curr_speed,
p.max_speed))
if p.config in config_dict:
config = config_dict[p.config]
else:
config = "up"
if p.state in state_dict:
state = state_dict[p.state]
else:
state = "up"
port_feature = (config, state, p.curr_speed*100)
self.port_features[dpid][p.port_no] = port_feature
@set_ev_cls(ofp_event.EventOFPPortStatus, MAIN_DISPATCHER)
def _port_status_handler(self, ev):
"""
Handle the port status changed event.
"""
msg = ev.msg
reason = msg.reason
port_no = msg.desc.port_no
dpid = msg.datapath.id
ofproto = msg.datapath.ofproto
reason_dict = {ofproto.OFPPR_ADD: "added",
ofproto.OFPPR_DELETE: "deleted",
ofproto.OFPPR_MODIFY: "modified", }
if reason in reason_dict:
print "switch%d: port %s %s" % (dpid, reason_dict[reason], port_no)
else:
print "switch%d: Illegal port state %s %s" % (dpid, reason, port_no)
def show_stat(self, type):
'''
Show statistics info according to data type.
type: 'port' 'flow'
'''
if setting.TOSHOW is False:
return
bodys = self.stats[type]
if(type == 'flow'):
print('datapath '' in-port ip-dst '
'out-port packets bytes flow-speed(B/s)')
print('---------------- '' -------- ----------------- '
'-------- -------- -------- -----------')
for dpid in bodys.keys():
for stat in sorted(
[flow for flow in bodys[dpid] if flow.priority == 1],
key=lambda flow: (flow.match.get('in_port'),
flow.match.get('eth_dst'))):
print('%016x %8x %17s %8x %8d %8d %8.1f' % (
dpid,
stat.match['in_port'], stat.match['eth_dst'],
stat.instructions[0].actions[0].port,
stat.packet_count, stat.byte_count,
abs(self.flow_speed[dpid][
(stat.match.get('in_port'),
stat.match.get('eth_dst'),
stat.instructions[0].actions[0].port)][-1])))
print '\n'
if(type == 'port'):
print('datapath port ''rx-pkts rx-bytes rx-error '
'tx-pkts tx-bytes tx-error port-speed(B/s)'
' current-capacity(Kbps) '
'port-stat link-stat')
print('---------------- -------- ''-------- -------- -------- '
'-------- -------- -------- '
'---------------- ---------------- '
' ----------- -----------')
format = '%016x %8x %8d %8d %8d %8d %8d %8d %8.1f %16d %16s %16s'
for dpid in bodys.keys():
for stat in sorted(bodys[dpid], key=attrgetter('port_no')):
if stat.port_no != ofproto_v1_3.OFPP_LOCAL:
print(format % (
dpid, stat.port_no,
stat.rx_packets, stat.rx_bytes, stat.rx_errors,
stat.tx_packets, stat.tx_bytes, stat.tx_errors,
abs(self.port_speed[(dpid, stat.port_no)][-1]),
self.port_features[dpid][stat.port_no][2],
self.port_features[dpid][stat.port_no][0],
self.port_features[dpid][stat.port_no][1]))
print '\n'
| 39.518957 | 86 | 0.538166 | 1,978 | 16,677 | 4.349848 | 0.167341 | 0.016736 | 0.007438 | 0.008949 | 0.301953 | 0.233496 | 0.195258 | 0.147141 | 0.096234 | 0.051139 | 0 | 0.014614 | 0.347604 | 16,677 | 421 | 87 | 39.612827 | 0.776195 | 0.054566 | 0 | 0.176282 | 0 | 0.003205 | 0.073958 | 0.00147 | 0 | 0 | 0 | 0 | 0 | 0 | null | null | 0 | 0.051282 | null | null | 0.041667 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
0e5101dd7fe4f67284e5978fa838a8443cbd53ae | 1,397 | py | Python | export_readiness/migrations/0058_auto_20190912_1326.py | uktrade/directory-cms | 8c8d13ce29ea74ddce7a40f3dd29c8847145d549 | [
"MIT"
] | 6 | 2018-03-20T11:19:07.000Z | 2021-10-05T07:53:11.000Z | export_readiness/migrations/0058_auto_20190912_1326.py | uktrade/directory-cms | 8c8d13ce29ea74ddce7a40f3dd29c8847145d549 | [
"MIT"
] | 802 | 2018-02-05T14:16:13.000Z | 2022-02-10T10:59:21.000Z | export_readiness/migrations/0058_auto_20190912_1326.py | uktrade/directory-cms | 8c8d13ce29ea74ddce7a40f3dd29c8847145d549 | [
"MIT"
] | 6 | 2019-01-22T13:19:37.000Z | 2019-07-01T10:35:26.000Z | # Generated by Django 2.2.4 on 2019-09-12 13:26
from django.db import migrations
INDUSTRY_NAMES = (
'Advanced manufacturing',
'Aerospace',
'Agri-technology',
'Automotive',
'Biotechnology',
'Cleantech',
'Construction',
'Consumer products',
'Cyber security',
'E-commerce',
'Education',
'Energy',
'Engineering',
'Financial services',
'Fintech',
'Food and drink',
'Healthcare',
'Infrastructure',
'International organisations',
'Life sciences',
'Low carbon',
'Luxury',
'Offshore wind',
'Oil and gas',
'Pharmaceuticals',
'Professional services',
'Renewables',
'Retail',
'Safety',
'Security',
'Smart cities',
'Technology',
'Training',
)
def create_industry_tags(apps, schema_editor):
IndustryTag = apps.get_model('export_readiness', 'IndustryTag')
objs = (IndustryTag(name=name) for name in INDUSTRY_NAMES)
IndustryTag.objects.bulk_create(objs)
def delete_industry_tags(apps, schema_editor):
IndustryTag = apps.get_model('export_readiness', 'IndustryTag')
IndustryTag.objects.all().delete()
class Migration(migrations.Migration):
dependencies = [
('export_readiness', '0057_auto_20190912_1326'),
]
operations = [
migrations.RunPython(create_industry_tags, reverse_code=delete_industry_tags, elidable=True)
]
| 22.174603 | 100 | 0.65927 | 138 | 1,397 | 6.514493 | 0.681159 | 0.053393 | 0.040044 | 0.048943 | 0.171301 | 0.171301 | 0.171301 | 0.171301 | 0.171301 | 0.171301 | 0 | 0.028182 | 0.212598 | 1,397 | 62 | 101 | 22.532258 | 0.789091 | 0.032212 | 0 | 0.04 | 1 | 0 | 0.362963 | 0.017037 | 0 | 0 | 0 | 0 | 0 | 1 | 0.04 | false | 0 | 0.02 | 0 | 0.12 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
0e5337ed20b60dbcf33738cf8d37c01caaa201c9 | 1,968 | py | Python | imagetagger/imagetagger/annotations/migrations/0006_auto_20170826_1431.py | jbargu/imagetagger | 216ac5e73902abadc1880321e285e68c55bdfd3d | [
"MIT"
] | 212 | 2017-07-06T23:01:44.000Z | 2022-03-24T04:44:49.000Z | imagetagger/imagetagger/annotations/migrations/0006_auto_20170826_1431.py | jbargu/imagetagger | 216ac5e73902abadc1880321e285e68c55bdfd3d | [
"MIT"
] | 157 | 2017-07-24T10:03:41.000Z | 2022-03-12T01:03:47.000Z | imagetagger/imagetagger/annotations/migrations/0006_auto_20170826_1431.py | jbargu/imagetagger | 216ac5e73902abadc1880321e285e68c55bdfd3d | [
"MIT"
] | 54 | 2017-11-07T00:40:50.000Z | 2022-02-26T14:22:13.000Z | # -*- coding: utf-8 -*-
# Generated by Django 1.11.3 on 2017-08-26 12:31
from __future__ import unicode_literals
import json
import django.contrib.postgres.fields.jsonb
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('annotations', '0005_auto_20170826_1424'),
]
def forward_func(apps, schema_editor):
Annotation = apps.get_model("annotations", "Annotation")
db_alias = schema_editor.connection.alias
# Copy all valid annotations from raw_vector to vector
for annotation in Annotation.objects.using(db_alias).all():
try:
vector = json.loads(annotation.raw_vector)
for key, value in vector.items():
try:
# try to convert all numeric vector values to integer
vector[key] = int(value)
except ValueError:
continue
annotation.vector = vector
annotation.save()
except ValueError:
# Annotation is invalid, delete it
annotation.delete()
def backward_func(apps, schema_editor):
Annotation = apps.get_model("annotations", "Annotation")
db_alias = schema_editor.connection.alias
# Copy all annotations from vector to raw_vector
for annotation in Annotation.objects.using(db_alias).all():
annotation.raw_vector = json.dumps(annotation.vector)
annotation.save()
operations = [
migrations.RenameField(
model_name='annotation',
old_name='vector',
new_name='raw_vector',
),
migrations.AddField(
model_name='annotation',
name='vector',
field=django.contrib.postgres.fields.jsonb.JSONField(null=True),
),
migrations.RunPython(forward_func, backward_func, atomic=True),
]
| 33.355932 | 77 | 0.601626 | 204 | 1,968 | 5.651961 | 0.431373 | 0.039029 | 0.036427 | 0.046834 | 0.32784 | 0.272333 | 0.272333 | 0.272333 | 0.272333 | 0.272333 | 0 | 0.024336 | 0.310976 | 1,968 | 58 | 78 | 33.931034 | 0.825959 | 0.128557 | 0 | 0.380952 | 1 | 0 | 0.069087 | 0.013466 | 0 | 0 | 0 | 0 | 0 | 1 | 0.047619 | false | 0 | 0.095238 | 0 | 0.214286 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
0e609bb7fbbaaaa62dd52003ec698921717c72aa | 458 | py | Python | tests/test_elasticsearch.py | ankane/python-timeouts | bf939d5ef3bc8391d9ca8c2750c765fbd0bf63ae | [
"MIT"
] | 6 | 2021-10-01T06:38:22.000Z | 2022-03-23T09:22:06.000Z | tests/test_elasticsearch.py | ankane/python-timeouts | bf939d5ef3bc8391d9ca8c2750c765fbd0bf63ae | [
"MIT"
] | null | null | null | tests/test_elasticsearch.py | ankane/python-timeouts | bf939d5ef3bc8391d9ca8c2750c765fbd0bf63ae | [
"MIT"
] | null | null | null | from .conftest import TestTimeouts
from elasticsearch import Elasticsearch
from elasticsearch.exceptions import ConnectionError
class TestElasticsearch(TestTimeouts):
def test_connect(self):
with self.raises(ConnectionError):
Elasticsearch([self.connect_url()], timeout=1).cluster.health()
def test_read(self):
with self.raises(ConnectionError):
Elasticsearch([self.read_url()], timeout=1).cluster.health()
| 32.714286 | 75 | 0.733624 | 48 | 458 | 6.916667 | 0.4375 | 0.10241 | 0.072289 | 0.108434 | 0.445783 | 0.301205 | 0.301205 | 0 | 0 | 0 | 0 | 0.005249 | 0.168122 | 458 | 13 | 76 | 35.230769 | 0.866142 | 0 | 0 | 0.2 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.2 | false | 0 | 0.3 | 0 | 0.6 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 1 |
0e60e9c2253d2e72211a2da9715165e839d0f5a9 | 1,179 | py | Python | mediasync/management/commands/syncmedia.py | kennethreitz-archive/django-mediasync | 93baadda6c8edfebf065e92a63aba588b9a0311e | [
"BSD-3-Clause"
] | 2 | 2015-11-05T08:54:40.000Z | 2016-03-01T22:14:29.000Z | mediasync/management/commands/syncmedia.py | kennethreitz-archive/django-mediasync | 93baadda6c8edfebf065e92a63aba588b9a0311e | [
"BSD-3-Clause"
] | null | null | null | mediasync/management/commands/syncmedia.py | kennethreitz-archive/django-mediasync | 93baadda6c8edfebf065e92a63aba588b9a0311e | [
"BSD-3-Clause"
] | null | null | null | from django.core.management.base import BaseCommand, CommandError
from optparse import make_option
from mediasync.conf import msettings
import mediasync
import time
class Command(BaseCommand):
help = "Sync local media with remote client"
args = '[options]'
requires_model_validation = False
option_list = BaseCommand.option_list + (
make_option("-F", "--force", dest="force", help="force files to sync", action="store_true"),
make_option("-q", "--quiet", dest="verbose", help="disable output", action="store_false", default=True),
)
def handle(self, *args, **options):
msettings['SERVE_REMOTE'] = True
msettings['VERBOSE'] = options.get('verbose')
force = options.get('force') or False
try:
start_time = time.time()
mediasync.sync(force=force)
end_time = time.time()
secs = (end_time - start_time)
print 'sync finished in %0.3f seconds' % secs
except ValueError, ve:
raise CommandError('%s\nUsage is mediasync %s' % (ve.message, self.args)) | 32.75 | 112 | 0.597116 | 131 | 1,179 | 5.267176 | 0.526718 | 0.046377 | 0.034783 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.002381 | 0.287532 | 1,179 | 36 | 113 | 32.75 | 0.819048 | 0 | 0 | 0 | 0 | 0 | 0.181356 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | null | 0 | 0.2 | null | null | 0.04 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
0e6501df6c0fb85542839cee838fc4b4c8a3f879 | 868 | py | Python | lahja/tools/benchmark/typing.py | vaporyproject/lahja | fafc38106863f89ae04a52f6327e6ec5f42b2beb | [
"MIT"
] | null | null | null | lahja/tools/benchmark/typing.py | vaporyproject/lahja | fafc38106863f89ae04a52f6327e6ec5f42b2beb | [
"MIT"
] | null | null | null | lahja/tools/benchmark/typing.py | vaporyproject/lahja | fafc38106863f89ae04a52f6327e6ec5f42b2beb | [
"MIT"
] | null | null | null | from typing import (
NamedTuple,
)
from lahja import (
BaseEvent,
)
class RawMeasureEntry(NamedTuple):
sent_at: float
received_at: float
class CrunchedMeasureEntry(NamedTuple):
sent_at: float
received_at: float
duration: float
class PerfMeasureEvent(BaseEvent):
def __init__(self, payload: bytes, index: int, sent_at: float) -> None:
self.payload = payload
self.index = index
self.sent_at = sent_at
class ShutdownEvent(BaseEvent):
pass
class Total(NamedTuple):
caption: str
num_total: int
duration_fastest: float
duration_slowest: float
duration_avg: float
total_aggregated_time: float
total_duration: float
first_sent: float
last_received: float
class TotalRecordedEvent(BaseEvent):
def __init__(self, total: Total) -> None:
self.total = total
| 17.714286 | 75 | 0.691244 | 99 | 868 | 5.818182 | 0.373737 | 0.052083 | 0.057292 | 0.072917 | 0.125 | 0.125 | 0.125 | 0 | 0 | 0 | 0 | 0 | 0.235023 | 868 | 48 | 76 | 18.083333 | 0.86747 | 0 | 0 | 0.121212 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.060606 | false | 0.030303 | 0.060606 | 0 | 0.727273 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 1 |
0e6ba2de9241ced5ccf9acb46a7611acaa103fa7 | 641 | py | Python | insta/migrations/0003_auto_20190522_1122.py | eddyyonnie/instanicer | ae5c9994a5fcf0291a7785921655be0d5293a790 | [
"MIT"
] | null | null | null | insta/migrations/0003_auto_20190522_1122.py | eddyyonnie/instanicer | ae5c9994a5fcf0291a7785921655be0d5293a790 | [
"MIT"
] | 4 | 2021-03-19T00:52:54.000Z | 2021-09-08T01:00:47.000Z | insta/migrations/0003_auto_20190522_1122.py | eddyyonnie/instanicer | ae5c9994a5fcf0291a7785921655be0d5293a790 | [
"MIT"
] | null | null | null | # Generated by Django 2.2.1 on 2019-05-22 08:22
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('insta', '0002_pictures'),
]
operations = [
migrations.CreateModel(
name='Comment',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('comment', models.CharField(max_length=400)),
],
),
migrations.RenameField(
model_name='profile',
old_name='bio',
new_name='about',
),
]
| 24.653846 | 114 | 0.549142 | 63 | 641 | 5.460317 | 0.746032 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.050691 | 0.322933 | 641 | 25 | 115 | 25.64 | 0.741935 | 0.070203 | 0 | 0.105263 | 1 | 0 | 0.085859 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.052632 | 0 | 0.210526 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
0e6f3670c8eeedc919a111469dde8bf049ce3484 | 365 | py | Python | awards/migrations/0005_rename_avg_rate_rating_average.py | Maryan23/Laurels | cde7332ac9e5a4032a4d2da725bb5b70c66ce9ff | [
"MIT"
] | null | null | null | awards/migrations/0005_rename_avg_rate_rating_average.py | Maryan23/Laurels | cde7332ac9e5a4032a4d2da725bb5b70c66ce9ff | [
"MIT"
] | null | null | null | awards/migrations/0005_rename_avg_rate_rating_average.py | Maryan23/Laurels | cde7332ac9e5a4032a4d2da725bb5b70c66ce9ff | [
"MIT"
] | null | null | null | # Generated by Django 3.2.9 on 2021-12-13 21:01
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('awards', '0004_auto_20211213_1253'),
]
operations = [
migrations.RenameField(
model_name='rating',
old_name='avg_rate',
new_name='average',
),
]
| 19.210526 | 47 | 0.586301 | 40 | 365 | 5.175 | 0.85 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.121569 | 0.30137 | 365 | 18 | 48 | 20.277778 | 0.690196 | 0.123288 | 0 | 0 | 1 | 0 | 0.157233 | 0.072327 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.083333 | 0 | 0.333333 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
0e7351729964b02b462566b09dcddbc2209fa1c8 | 3,561 | py | Python | levelheap-micha-4d/levels/gamma.py | triffid/kiki | b64b8524063c149a5cc9118f48d80afec1d8a942 | [
"Unlicense"
] | 2 | 2020-01-04T23:44:10.000Z | 2020-07-12T17:10:09.000Z | levelheap-micha-4d/levels/gamma.py | triffid/kiki | b64b8524063c149a5cc9118f48d80afec1d8a942 | [
"Unlicense"
] | null | null | null | levelheap-micha-4d/levels/gamma.py | triffid/kiki | b64b8524063c149a5cc9118f48d80afec1d8a942 | [
"Unlicense"
] | 1 | 2022-03-16T05:43:33.000Z | 2022-03-16T05:43:33.000Z | # level design by Michael Abel
schemes=[test_scheme, tron_scheme,candy_scheme, default_scheme,
green_scheme, yellow_scheme, blue_scheme, red_scheme, metal_scheme, bronze_scheme]
# .................................................................................................................
def func_gamma():
s = world.getSize()
world.switch_countera = 0
world.switch_counter = 0
def aswitched ():
applyColorScheme (schemes[world.switch_countera])
if world.switch_countera==len(schemes)-1 :
world.switch_countera=0
else:
world.switch_countera+=1
def switched (switch):
world.switch_counter += switch.isActive() and 1 or -1
exit = kikiObjectToGate(world.getObjectWithName("exit"))
exit.setActive(world.switch_counter == 4)
aswitch = KikiSwitch()
bswitch = KikiSwitch()
cswitch = KikiSwitch()
dswitch = KikiSwitch()
eswitch = KikiSwitch()
aswitch.getEventWithName("switched").addAction ( continuous ( aswitched ))
bswitch.getEventWithName("switched").addAction ( continuous (lambda s= bswitch : switched(s) ))
cswitch.getEventWithName("switched").addAction ( continuous (lambda s= cswitch : switched(s) ))
dswitch.getEventWithName("switched").addAction ( continuous (lambda s= dswitch : switched(s) ))
eswitch.getEventWithName("switched").addAction ( continuous (lambda s= eswitch : switched(s) ))
world.addObjectAtPos (aswitch , KikiPos (s.x-1,0,0))
world.addObjectAtPos (bswitch , KikiPos (0,0,0))
world.addObjectAtPos (KikiMutant() , KikiPos (s.x/2,0,0))
world.addObjectLine(KikiWall, KikiPos(0,0,1), KikiPos(s.x,0,1))
world.addObjectLine(KikiWall, KikiPos(0,1,0), KikiPos(s.x,1,0))
world.addObjectLine(KikiWall, KikiPos(0,2,2), KikiPos(s.x-3,2,2))
# world.addObjectAtPos (KikiSwitch() , KikiPos (s.x-3,2,2))
world.addObjectLine(KikiWall, KikiPos(2,2,2), KikiPos(2,2,s.z-3))
# world.addObjectAtPos (KikiSwitch() , KikiPos (2,2,s.z-3))
world.addObjectLine(KikiWall, KikiPos(2,2,4), KikiPos(2,s.y-3,4))
#exit world.addObjectAtPos (KikiSwitch() , KikiPos (2,s.y-3,4))
world.addObjectLine(KikiWall, KikiPos(2,4,4), KikiPos(s.x-4,4,4))
world.addObjectAtPos (cswitch , KikiPos (s.x-3,4,4))
world.addObjectLine(KikiWall, KikiPos(4,4,4), KikiPos(4,4,s.z-4))
world.addObjectAtPos (dswitch , KikiPos (4,4,s.z-3))
world.addObjectLine(KikiWall, KikiPos(4,4,6), KikiPos(4,s.y-4,6))
world.addObjectAtPos (eswitch , KikiPos (4,s.y-3,6))
level_dict["gamma"] = {
"scheme": "tron_scheme",
"size": (10,10,10),
"intro": "gamma",
"help": (
"",
"",
""
),
"player": { "coordinates": (0,5,0),
"nostatus": 0,
},
"exits": [
{
"name": "exit",
"active": 0,
#"position": (0,0,0),
"coordinates": (2,7,4), #absolute coord
},
],
"create": func_gamma,
}
# .................................................................................................................
| 42.392857 | 115 | 0.511092 | 362 | 3,561 | 4.966851 | 0.220994 | 0.095106 | 0.040044 | 0.14683 | 0.365962 | 0.275306 | 0.073415 | 0 | 0 | 0 | 0 | 0.038966 | 0.293738 | 3,561 | 83 | 116 | 42.903614 | 0.675944 | 0.133951 | 0 | 0.065574 | 0 | 0 | 0.047154 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | null | 0 | 0 | null | null | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
0e75465f62c387ef9aa64e8ae316dc9423908eba | 2,151 | py | Python | jzl/utils/wrappers.py | elijahc/jzlsdk | fc04a60383e6e71d50d27e8d04dff96f18ee7b06 | [
"MIT"
] | null | null | null | jzl/utils/wrappers.py | elijahc/jzlsdk | fc04a60383e6e71d50d27e8d04dff96f18ee7b06 | [
"MIT"
] | 7 | 2019-12-16T20:48:16.000Z | 2022-02-09T23:31:19.000Z | jzl/utils/wrappers.py | elijahc/jzl | fc04a60383e6e71d50d27e8d04dff96f18ee7b06 | [
"MIT"
] | null | null | null | import scipy.io as sio
import numpy as np
class MatWrapper(object):
def __init__(self,mat_file):
self.mat_fp = mat_file
self.data = None
class NeuroSurgMat(MatWrapper):
def __init__(self, mat_file):
self.mat_fp = mat_file
self.data = None
self._clfp = None
self._cmacro_lfp = None
self._metadata = None
@property
def CLFP(self):
# Lazy load CLFP files
if self.data is None:
self.data = sio.loadmat(self.mat_fp)
if self._clfp is None:
clfp = np.empty((3,self.data['CLFP_01'].shape[1]))
for i in np.arange(3):
clfp[i,:] = np.squeeze(self.data['CLFP_0'+str(i+1)])
self._clfp = clfp
return self._clfp
@property
def CMacro_LFP(self):
if self.data is None:
self.data = sio.loadmat(self.mat_fp)
if self._cmacro_lfp is None:
cmacro_lfp = np.empty((3,self.data['CMacro_LFP_01'].shape[1]))
for i in np.arange(3):
cmacro_lfp[i,:] = np.squeeze(self.data['CMacro_LFP_0'+str(i+1)])
self._cmacro_lfp = cmacro_lfp
return self._cmacro_lfp
@property
def metadata(self):
if self.data is None:
self.data = sio.loadmat(self.mat_fp)
if self._metadata is None:
self._metadata = {
'lfp':{'sampFreqHz':None,'timeStart':None,'timeEnd':None},
'mer':{'sampFreqHz':None,'timeStart':None,'timeEnd':None},
'eeg':{'sampFreqHz':None,'timeStart':None,'timeEnd':None},
}
for rec in list(self._metadata.keys()):
self._metadata[rec]['sampFreqHz']=self.data[rec][0][0][0][0][0]
self._metadata[rec]['timeStart']=np.squeeze(self.data[rec][0][0][1]).item()
self._metadata[rec]['timeEnd']=np.squeeze(self.data[rec][0][0][2]).item()
return self._metadata
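# Usage sketch (hypothetical file name; assumes the .mat file holds the keys the
# properties read: CLFP_01..CLFP_03, CMacro_LFP_01..CMacro_LFP_03, lfp/mer/eeg):
#   mat = NeuroSurgMat('case01.mat')
#   clfp = mat.CLFP                          # first access lazily loads the file
#   fs_lfp = mat.metadata['lfp']['sampFreqHz']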
class NeuroSurgDataset(object):
def __init__(self, data_dir):
self.data_dir = data_dir
# TODO Check if manifest file exists, if not create empty one
| 33.609375 | 91 | 0.566713 | 288 | 2,151 | 4.045139 | 0.215278 | 0.116738 | 0.038627 | 0.058369 | 0.47382 | 0.387124 | 0.28927 | 0.251502 | 0.251502 | 0.212017 | 0 | 0.016556 | 0.298001 | 2,151 | 63 | 92 | 34.142857 | 0.754967 | 0.037192 | 0 | 0.333333 | 0 | 0 | 0.073017 | 0 | 0 | 0 | 0 | 0.015873 | 0 | 1 | 0.117647 | false | 0 | 0.039216 | 0 | 0.27451 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
0e7d4d108567f466b35fc7926c86a921b3e00477 | 1,374 | py | Python | api/views.py | rukbotto/reviews-django | 28ff12a490d160b76ca75f5cc450fc7556996962 | [
"MIT"
] | null | null | null | api/views.py | rukbotto/reviews-django | 28ff12a490d160b76ca75f5cc450fc7556996962 | [
"MIT"
] | null | null | null | api/views.py | rukbotto/reviews-django | 28ff12a490d160b76ca75f5cc450fc7556996962 | [
"MIT"
] | 1 | 2018-09-06T18:52:33.000Z | 2018-09-06T18:52:33.000Z | from django.http import Http404
from django.shortcuts import render
from rest_framework import status
from rest_framework.permissions import IsAuthenticated
from rest_framework.response import Response
from rest_framework.views import APIView
from api.models import Review
from api.serializers import ReviewSerializer
class ReviewListView(APIView):
permission_classes = (IsAuthenticated,)
def get(self, request, *args, **kwargs):
serializer = ReviewSerializer(request.user.reviews.all(), many=True)
return Response(serializer.data)
def post(self, request, *args, **kwargs):
data = request.data
data['ip_address'] = request.META.get('REMOTE_ADDR')
serializer = ReviewSerializer(data=data)
if serializer.is_valid():
serializer.save(user=request.user)
return Response(serializer.data, status.HTTP_201_CREATED)
return Response(serializer.errors, status.HTTP_400_BAD_REQUEST)
class ReviewDetailView(APIView):
def get(self, request, *args, **kwargs):
try:
review = Review.objects.get(pk=kwargs['pk'])
if request.user != review.user:
return Response({}, status.HTTP_403_FORBIDDEN)
except Review.DoesNotExist:
raise Http404
serializer = ReviewSerializer(review)
return Response(serializer.data)
| 35.230769 | 76 | 0.703057 | 154 | 1,374 | 6.175325 | 0.402597 | 0.073607 | 0.071504 | 0.066246 | 0.056782 | 0.056782 | 0 | 0 | 0 | 0 | 0 | 0.013774 | 0.207424 | 1,374 | 38 | 77 | 36.157895 | 0.859504 | 0 | 0 | 0.129032 | 0 | 0 | 0.016739 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.096774 | false | 0 | 0.258065 | 0 | 0.612903 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 1 |
0e7f4389c015b9220c6189155e2e9c20712acc78 | 209 | py | Python | Lesson_n1/logger/simple-logger.py | LemuelPuglisi/TutoratoTap | 81ce9d0f2163d18683c5aacde42b39f5a2e618a6 | [
"MIT"
] | 8 | 2022-03-22T10:12:03.000Z | 2022-03-28T17:44:08.000Z | Lesson_n1/logger/simple-logger.py | LemuelPuglisi/TutoratoTap | 81ce9d0f2163d18683c5aacde42b39f5a2e618a6 | [
"MIT"
] | null | null | null | Lesson_n1/logger/simple-logger.py | LemuelPuglisi/TutoratoTap | 81ce9d0f2163d18683c5aacde42b39f5a2e618a6 | [
"MIT"
] | null | null | null | import time
def log():
""" Python dummy logger example.
"""
t = 0
while True:
print(f'time: {t} \t log sent.')
t += 1
time.sleep(1)
if __name__ == '__main__': log() | 17.416667 | 40 | 0.492823 | 28 | 209 | 3.392857 | 0.714286 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.022059 | 0.349282 | 209 | 12 | 41 | 17.416667 | 0.676471 | 0.133971 | 0 | 0 | 0 | 0 | 0.177515 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.125 | false | 0 | 0.125 | 0 | 0.25 | 0.125 | 1 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
0e873042296e41b0fec86717d2fc6d5d5d3721f0 | 3,818 | py | Python | reviewboard/admin/views.py | vigneshsrinivasan/reviewboard | 4775130c1c1022f81edc11928e02b1b6c069f6ed | [
"MIT"
] | 1 | 2020-02-11T07:09:14.000Z | 2020-02-11T07:09:14.000Z | reviewboard/admin/views.py | vigneshsrinivasan/reviewboard | 4775130c1c1022f81edc11928e02b1b6c069f6ed | [
"MIT"
] | null | null | null | reviewboard/admin/views.py | vigneshsrinivasan/reviewboard | 4775130c1c1022f81edc11928e02b1b6c069f6ed | [
"MIT"
] | null | null | null | import logging
from django.conf import settings
from django.contrib.admin.views.decorators import staff_member_required
from django.contrib.auth.models import User
from django.core.cache import cache
from django.http import HttpResponseRedirect
from django.shortcuts import render_to_response
from django.template.context import RequestContext
from django.template.loader import render_to_string
from django.utils.translation import ugettext as _
from djblets.siteconfig.views import site_settings as djblets_site_settings
from reviewboard.admin.checks import check_updates_required
from reviewboard.admin.cache_stats import get_cache_stats, get_has_cache_stats
from reviewboard.admin.forms import SSHSettingsForm
from reviewboard.reviews.models import Group, DefaultReviewer
from reviewboard.scmtools.models import Repository
from reviewboard.scmtools import sshutils
@staff_member_required
def dashboard(request, template_name="admin/dashboard.html"):
"""
Displays the administration dashboard, containing news updates and
useful administration tasks.
"""
return render_to_response(template_name, RequestContext(request, {
'user_count': User.objects.count(),
'reviewgroup_count': Group.objects.count(),
'defaultreviewer_count': DefaultReviewer.objects.count(),
'repository_count': Repository.objects.accessible(request.user).count(),
'has_cache_stats': get_has_cache_stats(),
'title': _("Dashboard"),
'root_path': settings.SITE_ROOT + "admin/db/"
}))
@staff_member_required
def cache_stats(request, template_name="admin/cache_stats.html"):
"""
Displays statistics on the cache. This includes such pieces of
information as memory used, cache misses, and uptime.
"""
cache_stats = get_cache_stats()
return render_to_response(template_name, RequestContext(request, {
'cache_hosts': cache_stats,
'cache_backend': cache.__module__,
'title': _("Server Cache"),
'root_path': settings.SITE_ROOT + "admin/db/"
}))
@staff_member_required
def site_settings(request, form_class,
template_name="siteconfig/settings.html"):
return djblets_site_settings(request, form_class, template_name, {
'root_path': settings.SITE_ROOT + "admin/db/"
})
@staff_member_required
def ssh_settings(request, template_name='admin/ssh_settings.html'):
key = sshutils.get_user_key()
if request.method == 'POST':
form = SSHSettingsForm(request.POST, request.FILES)
if form.is_valid():
try:
form.create(request.FILES)
return HttpResponseRedirect('.')
except Exception, e:
# Fall through. It will be reported inline and in the log.
logging.error('Uploading SSH key failed: %s' % e)
else:
form = SSHSettingsForm()
public_key = ''
if key:
fingerprint = sshutils.humanize_key(key)
else:
fingerprint = None
return render_to_response(template_name, RequestContext(request, {
'title': _('SSH Settings'),
'key': key,
'fingerprint': fingerprint,
'public_key': sshutils.get_public_key(key),
'form': form,
}))
def manual_updates_required(request,
template_name="admin/manual_updates_required.html"):
"""
Checks for required manual updates and displays informational pages on
performing the necessary updates.
"""
updates = check_updates_required()
return render_to_response(template_name, RequestContext(request, {
'updates': [render_to_string(template_name,
RequestContext(request, extra_context))
for (template_name, extra_context) in updates],
}))
| 35.027523 | 80 | 0.701676 | 435 | 3,818 | 5.926437 | 0.296552 | 0.055857 | 0.03685 | 0.064003 | 0.198216 | 0.198216 | 0.178045 | 0.147013 | 0.061676 | 0.061676 | 0 | 0 | 0.2077 | 3,818 | 108 | 81 | 35.351852 | 0.852231 | 0.014667 | 0 | 0.223684 | 0 | 0 | 0.116987 | 0.036632 | 0 | 0 | 0 | 0 | 0 | 0 | null | null | 0 | 0.223684 | null | null | 0.039474 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
0e8a4f5e382a07daf6ff75e3e2955a20dbb893fe | 4,898 | py | Python | python/protobufs/services/team/actions/get_teams_pb2.py | getcircle/protobuf-registry | 20ad8463b7ac6e2cf279c08bcd3e953993fe9153 | [
"MIT"
] | null | null | null | python/protobufs/services/team/actions/get_teams_pb2.py | getcircle/protobuf-registry | 20ad8463b7ac6e2cf279c08bcd3e953993fe9153 | [
"MIT"
] | null | null | null | python/protobufs/services/team/actions/get_teams_pb2.py | getcircle/protobuf-registry | 20ad8463b7ac6e2cf279c08bcd3e953993fe9153 | [
"MIT"
] | null | null | null | # Generated by the protocol buffer compiler. DO NOT EDIT!
# source: protobufs/services/team/actions/get_teams.proto
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
from google.protobuf import descriptor_pb2
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
from protobufs.services.common import containers_pb2 as protobufs_dot_services_dot_common_dot_containers__pb2
from protobufs.services.team import containers_pb2 as protobufs_dot_services_dot_team_dot_containers__pb2
DESCRIPTOR = _descriptor.FileDescriptor(
name='protobufs/services/team/actions/get_teams.proto',
package='services.team.actions.get_teams',
syntax='proto3',
serialized_pb=b'\n/protobufs/services/team/actions/get_teams.proto\x12\x1fservices.team.actions.get_teams\x1a*protobufs/services/common/containers.proto\x1a(protobufs/services/team/containers.proto\"\x8c\x01\n\tRequestV1\x12<\n\ninflations\x18\x01 \x01(\x0b\x32(.services.common.containers.InflationsV1\x12\x34\n\x06\x66ields\x18\x02 \x01(\x0b\x32$.services.common.containers.FieldsV1\x12\x0b\n\x03ids\x18\x03 \x03(\t\"=\n\nResponseV1\x12/\n\x05teams\x18\x01 \x03(\x0b\x32 .services.team.containers.TeamV1b\x06proto3'
,
dependencies=[protobufs_dot_services_dot_common_dot_containers__pb2.DESCRIPTOR,protobufs_dot_services_dot_team_dot_containers__pb2.DESCRIPTOR,])
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
_REQUESTV1 = _descriptor.Descriptor(
name='RequestV1',
full_name='services.team.actions.get_teams.RequestV1',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='inflations', full_name='services.team.actions.get_teams.RequestV1.inflations', index=0,
number=1, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='fields', full_name='services.team.actions.get_teams.RequestV1.fields', index=1,
number=2, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='ids', full_name='services.team.actions.get_teams.RequestV1.ids', index=2,
number=3, type=9, cpp_type=9, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=171,
serialized_end=311,
)
_RESPONSEV1 = _descriptor.Descriptor(
name='ResponseV1',
full_name='services.team.actions.get_teams.ResponseV1',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='teams', full_name='services.team.actions.get_teams.ResponseV1.teams', index=0,
number=1, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=313,
serialized_end=374,
)
_REQUESTV1.fields_by_name['inflations'].message_type = protobufs_dot_services_dot_common_dot_containers__pb2._INFLATIONSV1
_REQUESTV1.fields_by_name['fields'].message_type = protobufs_dot_services_dot_common_dot_containers__pb2._FIELDSV1
_RESPONSEV1.fields_by_name['teams'].message_type = protobufs_dot_services_dot_team_dot_containers__pb2._TEAMV1
DESCRIPTOR.message_types_by_name['RequestV1'] = _REQUESTV1
DESCRIPTOR.message_types_by_name['ResponseV1'] = _RESPONSEV1
RequestV1 = _reflection.GeneratedProtocolMessageType('RequestV1', (_message.Message,), dict(
DESCRIPTOR = _REQUESTV1,
__module__ = 'protobufs.services.team.actions.get_teams_pb2'
# @@protoc_insertion_point(class_scope:services.team.actions.get_teams.RequestV1)
))
_sym_db.RegisterMessage(RequestV1)
ResponseV1 = _reflection.GeneratedProtocolMessageType('ResponseV1', (_message.Message,), dict(
DESCRIPTOR = _RESPONSEV1,
__module__ = 'protobufs.services.team.actions.get_teams_pb2'
# @@protoc_insertion_point(class_scope:services.team.actions.get_teams.ResponseV1)
))
_sym_db.RegisterMessage(ResponseV1)
# @@protoc_insertion_point(module_scope)
| 38.566929 | 519 | 0.780523 | 623 | 4,898 | 5.805778 | 0.194222 | 0.0564 | 0.058059 | 0.078795 | 0.622615 | 0.580868 | 0.554327 | 0.520321 | 0.398396 | 0.36909 | 0 | 0.034475 | 0.105757 | 4,898 | 126 | 520 | 38.873016 | 0.791324 | 0.070641 | 0 | 0.553398 | 1 | 0.009709 | 0.234103 | 0.20374 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.067961 | 0 | 0.067961 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
0e9e025b01eb969d408f82347a8fc498cac9ab24 | 766 | py | Python | tests/frontend/analysis_frontend.py | CNR-ITTIG/plasodfaxp | 923797fc00664fa9e3277781b0334d6eed5664fd | [
"Apache-2.0"
] | 1 | 2019-09-26T08:16:30.000Z | 2019-09-26T08:16:30.000Z | tests/frontend/analysis_frontend.py | CNR-ITTIG/plasodfaxp | 923797fc00664fa9e3277781b0334d6eed5664fd | [
"Apache-2.0"
] | null | null | null | tests/frontend/analysis_frontend.py | CNR-ITTIG/plasodfaxp | 923797fc00664fa9e3277781b0334d6eed5664fd | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""Tests for the analysis front-end object."""
import unittest
from plaso.frontend import analysis_frontend
from plaso.storage import zip_file as storage_zip_file
from tests.frontend import test_lib
class AnalysisFrontendTests(test_lib.FrontendTestCase):
"""Tests for the analysis front-end object."""
def testOpenStorage(self):
"""Tests the OpenStorage function."""
test_front_end = analysis_frontend.AnalysisFrontend()
storage_file_path = self._GetTestFilePath([u'psort_test.proto.plaso'])
storage_file = test_front_end.OpenStorage(storage_file_path)
self.assertIsInstance(storage_file, storage_zip_file.StorageFile)
storage_file.Close()
if __name__ == '__main__':
unittest.main()
| 25.533333 | 74 | 0.763708 | 97 | 766 | 5.721649 | 0.453608 | 0.099099 | 0.03964 | 0.068468 | 0.118919 | 0.118919 | 0.118919 | 0 | 0 | 0 | 0 | 0.001502 | 0.130548 | 766 | 29 | 75 | 26.413793 | 0.831832 | 0.198433 | 0 | 0 | 0 | 0 | 0.050251 | 0.036851 | 0 | 0 | 0 | 0 | 0.076923 | 1 | 0.076923 | false | 0 | 0.307692 | 0 | 0.461538 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 1 |
0ea2730c361b568ac507b349f4219e90dc8e1b4e | 388 | py | Python | forms/forms/constants.py | dowjcr/forms | daba956b779dc2b8054aeab841835d3748c1c50f | [
"MIT"
] | null | null | null | forms/forms/constants.py | dowjcr/forms | daba956b779dc2b8054aeab841835d3748c1c50f | [
"MIT"
] | 1 | 2021-10-03T10:23:17.000Z | 2021-10-03T10:23:17.000Z | forms/forms/constants.py | dowjcr/forms | daba956b779dc2b8054aeab841835d3748c1c50f | [
"MIT"
] | null | null | null | """Stores constants used as numbers for readability that are used across all apps"""
class AdminRoles:
""" """
JCRTREASURER = 1
SENIORTREASURER = 2
BURSARY = 3
ASSISTANTBURSAR = 4
CHOICES = (
(JCRTREASURER, 'JCR Treasurer'),
(SENIORTREASURER, 'Senior Treasurer'),
(BURSARY, 'Bursary'),
(ASSISTANTBURSAR, 'Assistant Bursar')
)
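# Usage sketch (hypothetical Django model field, not part of this module):
#   role = models.IntegerField(choices=AdminRoles.CHOICES, default=AdminRoles.JCRTREASURER)
#   the value is stored as the integer constant and displayed with its label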
| 24.25 | 84 | 0.615979 | 35 | 388 | 6.828571 | 0.8 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.014184 | 0.273196 | 388 | 15 | 85 | 25.866667 | 0.833333 | 0.201031 | 0 | 0 | 0 | 0 | 0.175084 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0 | 0 | 0.545455 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 1 |
0ea30d061c0f6731c0b56ded5ff02ccd54a258dd | 647 | py | Python | simpsonMethod.py | Existence-glitch/PythonCodes | a49121855364a5e0ac2ff227ef91f19086a52f09 | [
"MIT"
] | 1 | 2021-06-08T06:53:50.000Z | 2021-06-08T06:53:50.000Z | simpsonMethod.py | Existence-glitch/PythonCodes | a49121855364a5e0ac2ff227ef91f19086a52f09 | [
"MIT"
] | null | null | null | simpsonMethod.py | Existence-glitch/PythonCodes | a49121855364a5e0ac2ff227ef91f19086a52f09 | [
"MIT"
] | null | null | null | import numpy as np
from numpy import log
# Define the function to integrate
def f(x):
return 1 / log(x)
# Implementation of Simpson's method
# Parameters:
# f is the function to integrate
# a is the lower limit of the integral
# b is the upper limit of the integral
# n is the number of intervals (must be even)
def simpson (f, a, b ,n):
h = (b - a) / n
g = f(a) + f(b)
# Sum of the areas
for i in range (1, n // 2):
g = g + 2 * f(a + 2 * i * h)
for i in range (0, n // 2):
g = g + 4 * f(a + (2 * i + 1) * h)
return h * g / 3
def main():
li = simpson(f, 2, 3 ,16)
print("Li(3): ", li)
main()
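# Sanity check (reference value, not computed by this script): the integral of
# 1/ln(x) from 2 to 3 is the offset logarithmic integral Li(3), roughly 1.118,
# so the printed result should be close to that for n = 16.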
| 22.310345 | 46 | 0.525502 | 118 | 647 | 2.881356 | 0.415254 | 0.023529 | 0.058824 | 0.105882 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.037736 | 0.344668 | 647 | 28 | 47 | 23.107143 | 0.764151 | 0.330757 | 0 | 0 | 0 | 0 | 0.017632 | 0 | 0 | 0 | 0 | 0.035714 | 0 | 1 | 0.1875 | false | 0 | 0.125 | 0.0625 | 0.4375 | 0.0625 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
0eaded91ac94008a42cad76b8f191ad6073a2d0e | 681 | py | Python | app/utils/scrapy.py | edementyev/py-telegram-broker | 7cf6ab9a243e1309e4edd8257efb0f6a7340bfae | [
"MIT"
] | null | null | null | app/utils/scrapy.py | edementyev/py-telegram-broker | 7cf6ab9a243e1309e4edd8257efb0f6a7340bfae | [
"MIT"
] | 7 | 2020-12-07T09:11:01.000Z | 2022-03-02T18:15:01.000Z | app/utils/scrapy.py | edementyev/py-telegram-broker | 7cf6ab9a243e1309e4edd8257efb0f6a7340bfae | [
"MIT"
] | null | null | null | from loguru import logger
from scrapy.crawler import CrawlerProcess
from scrapy.utils.log import DEFAULT_LOGGING
from scrapy.utils.project import get_project_settings
from scrape_magic.spiders.gatherer_spider import GathererSpider
from scrape_magic.spiders.starcity_spider import StarcitySpider
settings = get_project_settings()
DEFAULT_LOGGING["loggers"] = dict(scrapy={"level": "INFO"}, twisted={"level": "ERROR"})
process = CrawlerProcess(settings, install_root_handler=False)
def update_items():
process.crawl(StarcitySpider)
process.crawl(GathererSpider)
try:
process.start(stop_after_crawl=False)
except RuntimeError as e:
logger.error(e)
| 32.428571 | 87 | 0.790015 | 84 | 681 | 6.22619 | 0.535714 | 0.057361 | 0.057361 | 0.08413 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.123348 | 681 | 20 | 88 | 34.05 | 0.876047 | 0 | 0 | 0 | 0 | 0 | 0.038179 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.0625 | false | 0 | 0.375 | 0 | 0.4375 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 1 |
0eae926a9f1f0cb7eec42a2b4785dbee7f9eaa01 | 1,842 | py | Python | app/services/auth.py | sloppysid/faunadb-hipflask | 554fb8d35a26d990ac1207af1d475a36720b102f | [
"MIT"
] | 1 | 2022-01-16T15:03:48.000Z | 2022-01-16T15:03:48.000Z | app/services/auth.py | sloppysid/faunadb-hipflask | 554fb8d35a26d990ac1207af1d475a36720b102f | [
"MIT"
] | null | null | null | app/services/auth.py | sloppysid/faunadb-hipflask | 554fb8d35a26d990ac1207af1d475a36720b102f | [
"MIT"
] | 1 | 2021-11-30T08:08:02.000Z | 2021-11-30T08:08:02.000Z | from flask import (
Blueprint, request, jsonify, render_template, session, redirect, url_for
)
from app.models import User
bp = Blueprint('auth', __name__, url_prefix='/auth')
@bp.route('/login', methods=['GET','POST'])
def login():
if request.method == 'GET':
return render_template("login.html")
email_address = request.form['email_address']
password = request.form['password']
if request.method == 'POST':
email_address = request.form['email_address']
password = request.form['password']
"""
ADD POST LOGIN LOGIC HERE
"""
else:
error = "Incorrect login details"
return render_template("login.html", error=error)
@bp.route('/logout', methods=['GET'])
def logout():
session.pop('user')
return redirect(url_for('auth.login'))
@bp.route('/update_password', methods=['GET', 'POST'])
def update_password():
if request.method == 'GET':
token = request.args.get('token')
user_id = request.args.get('user_id')
#Check if token is valid
# if
return render_template('error_page.html', error=error)
User.update()
elif request.method == 'POST':
"""
ADD PASSWORD RESET LOGIC HERE
"""
return render_template('login.html', message=message)
| 29.709677 | 480 | 0.445711 | 156 | 1,842 | 5.128205 | 0.339744 | 0.0875 | 0.1 | 0.09375 | 0.26375 | 0.155 | 0.155 | 0.155 | 0.155 | 0.155 | 0 | 0 | 0.448426 | 1,842 | 61 | 481 | 30.196721 | 0.787402 | 0.014115 | 0 | 0.2 | 0 | 0 | 0.119883 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.1 | false | 0.133333 | 0.066667 | 0 | 0.333333 | 0.066667 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 1 |
0eaff1b214599d29b4c8677703247eacd71b69b2 | 671 | py | Python | learning_algorithm/neural_network.py | Bermuhz/DataMiningCompetitionFirstPrize | e0202ceeb99fc6cb869906fd8e7cc79a173a315e | [
"MIT"
] | 128 | 2017-03-20T12:42:38.000Z | 2022-03-12T07:06:55.000Z | learning_algorithm/neural_network.py | Bermuhz/DataMiningCompetitionFirstPrize | e0202ceeb99fc6cb869906fd8e7cc79a173a315e | [
"MIT"
] | 4 | 2017-03-18T04:37:36.000Z | 2017-07-09T14:11:18.000Z | learning_algorithm/neural_network.py | Bermuhz/DataMiningCompetitionFirstPrize | e0202ceeb99fc6cb869906fd8e7cc79a173a315e | [
"MIT"
] | 77 | 2017-03-19T06:49:39.000Z | 2022-03-12T07:06:56.000Z | from sklearn.neural_network import MLPClassifier
from commons import variables
from commons import tools
from scipy.stats import mode
def learn(x, y, test_x):
(temp_x, temp_y) = tools.simple_negative_sample(x, y, variables.select_rate_nn)
clf = MLPClassifier(hidden_layer_sizes=(variables.unit_num_nn,), random_state=2017, max_iter=2000,
alpha=variables.alpha_nn,
learning_rate_init=variables.learning_rate_init_nn,solver="adam",activation="relu").fit(temp_x, temp_y)
prediction_list = clf.predict(test_x)
prediction_list_prob = clf.predict_proba(test_x)
return prediction_list,prediction_list_prob
| 39.470588 | 127 | 0.746647 | 95 | 671 | 4.947368 | 0.526316 | 0.119149 | 0.07234 | 0.042553 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.014363 | 0.169896 | 671 | 16 | 128 | 41.9375 | 0.829443 | 0 | 0 | 0 | 0 | 0 | 0.011923 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.083333 | false | 0 | 0.333333 | 0 | 0.5 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 1 |
7ebae62d64d2c33a33e2537a143db8237e76925d | 3,992 | py | Python | GithubP1.py | rcamposm/ChallengePython | 014d32b38d90a1a3be3d102d5123d9e34bacc2dc | [
"MIT"
] | null | null | null | GithubP1.py | rcamposm/ChallengePython | 014d32b38d90a1a3be3d102d5123d9e34bacc2dc | [
"MIT"
] | null | null | null | GithubP1.py | rcamposm/ChallengePython | 014d32b38d90a1a3be3d102d5123d9e34bacc2dc | [
"MIT"
] | null | null | null | ******************* PART I *******************************
# Install git from the VSC terminal
$sudo apt-get install git -y
# Check the version of Git we have installed
$git --version
# We can also see a summary of Git's main functionality
$git
# Create a folder
$mkdir ChallengePython
# Enter the folder
$cd ChallengePython/
******************* PART II *******************************
$git init // initialize the repository
# `$git init` creates two areas.
- An in-memory (RAM) area called `Staging`
- An area called the repository `/.git/`
# Connect to GitHub
# Configure your username as a global variable
$git config --global user.name "rcamposm" // (GitHub username; create one if you do not have it)
# Configure your email as a global variable
$git config --global user.email "raquelcamposm@gmail.com"
$git config --global color.ul true
$git config --list
******************* PART III *******************************
1. Log in to GitHub
2. Create a repository: "PythonChallenge"
- Public Mode
- Add a README File
- Choose a license: MIT License
# In the VSC console, after opening the folder where we want to store the project
$git clone https://github.com/rcamposm/ChallengePython.git
******************* PART IV *******************************
# List the contents
$ls -ltr
$git status // view changes
# Files shown in red are in the Working Directory.
# Files shown in green are in the Staging Area.
# Create the file HelloPython.py with a print("Hola Immuners!")
$git status // view changes
# git add moves the file to the staging area, where it waits for you to commit it to the repository or remove it with rm.
$git add HelloPython.py // add the file to the repository
$git add . // add all the changes in the current folder
$git commit -m "First python program" HelloPython.py // commit the changes to the repository
$git log file_name.extension // detailed change history
$git log HelloPython.py
$git log # shows the project's commit history with its authors, timestamps and descriptions.
$git show file.extension // shows all the changes
******************* PART V *******************************
# Push to the remote repository
$ git push -u origin main
Username raquelcamposm@gmail.com
pass:
# git push sends the commits to the remote GitHub repository
# Verify that the files were uploaded to the remote repository (GitHub)
$ git push // sends our work to the remote repository
$ git pull // fetch from the remote repository
******************* PART VI *******************************
# git branch + name: create a branch.
# git branch: list the branches and see which branch we are on.
# git checkout + branch: to move between branches.
# To check where we are positioned
$ git log --oneline
$ git branch
# To create a branch
$ git branch ramaParrafo
# Modify the file HelloPython.py --> print("Hola Immuners! hoy estamos 22/11")
$git add HelloPython.py // add the file to the repository
$git commit -m "Actualizamos el párrafo" HelloPython.py // commit the changes to the repository
# Then switch to the master branch so we can copy the branch's changes into master
$ git checkout main
# Add the branch's changes to master
# Merge to combine the branches
$ git merge ramaParrafo
# Push to the remote repository
$ git push
# Verify that HelloPython.py is also updated on GitHub
******************* PART VII *******************************
# Push the branch to the remote repository (GitHub)
$ git branch
$ git checkout ramaParrafo
$ git push
$ git push --set-upstream origin ramaParrafo
******************* MORE ***********************************
Git reset vs. Git rm
*********************************************************************************************
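# (Added note, hedged) The "Git reset vs. Git rm" topic above is only named, not explained.
# In short: `git reset HEAD <file>` unstages a change while keeping the file in the working tree,
# whereas `git rm <file>` deletes the tracked file and stages that deletion, and
# `git rm --cached <file>` untracks the file but leaves it on disk. For example:
# $ git reset HEAD HelloPython.py      # keep the file, remove it from the staging area
# $ git rm --cached HelloPython.py     # stop tracking the file without deleting it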
| 31.68254 | 132 | 0.667585 | 531 | 3,992 | 5.015066 | 0.39548 | 0.039054 | 0.035674 | 0.024784 | 0.19602 | 0.120916 | 0.096132 | 0.076605 | 0.039805 | 0.039805 | 0 | 0.00178 | 0.155561 | 3,992 | 125 | 133 | 31.936 | 0.788193 | 0.408567 | 0 | 0.145455 | 0 | 0 | 0.03823 | 0.00988 | 0 | 0 | 0 | 0 | 0 | 0 | null | null | 0.018182 | 0 | null | null | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
7ec2240f63eb96b7be86ac80819b426a891d9e4a | 7,341 | py | Python | test/test_cpy_compat.py | gracinet/hpy | 3f850ae562c6977ed3088d5f5bb31a1ef4155d18 | [
"MIT"
] | null | null | null | test/test_cpy_compat.py | gracinet/hpy | 3f850ae562c6977ed3088d5f5bb31a1ef4155d18 | [
"MIT"
] | null | null | null | test/test_cpy_compat.py | gracinet/hpy | 3f850ae562c6977ed3088d5f5bb31a1ef4155d18 | [
"MIT"
] | null | null | null | from .support import HPyTest
class TestCPythonCompatibility(HPyTest):
# One note about the should_check_refcount() in the tests below: on
# CPython, handles are actually implemented as INCREF/DECREF, so we can
# check e.g. after an HPy_Dup the refcnt is += 1. However, on PyPy they
# are implemented in a completely different way which is unrelated to the
# refcnt (this is the whole point of HPy, after all :)). So in many of the
    # following tests, checking the actual result of the function doesn't
    # really make sense on PyPy. We still run the functions to ensure they do
# not crash, though.
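    # Illustrative sketch (not part of the original tests; CPython-only behaviour):
    # on CPython an HPy handle maps to a strong reference, so roughly
    #   h = HPy_FromPyObject(ctx, o)   ->  o->ob_refcnt += 1
    #   HPy_Close(ctx, h)              ->  o->ob_refcnt -= 1
    # which is why the refcount deltas below are only asserted when
    # should_check_refcount() is true.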
def test_frompyobject(self):
mod = self.make_module("""
#include <Python.h>
HPy_DEF_METH_NOARGS(f)
static HPy f_impl(HPyContext ctx, HPy self)
{
PyObject *o = PyList_New(0);
Py_ssize_t initial_refcount = o->ob_refcnt;
HPy h = HPy_FromPyObject(ctx, o);
Py_ssize_t final_refcount = o->ob_refcnt;
PyList_Append(o, PyLong_FromLong(1234));
PyList_Append(o, PyLong_FromSsize_t(final_refcount -
initial_refcount));
Py_DECREF(o);
return h;
}
@EXPORT f HPy_METH_NOARGS
@INIT
""")
x = mod.f()
assert x[0] == 1234
assert len(x) == 2
if self.should_check_refcount():
assert x == [1234, +1]
def test_aspyobject(self):
mod = self.make_module("""
#include <Python.h>
HPy_DEF_METH_O(f)
static HPy f_impl(HPyContext ctx, HPy self, HPy arg)
{
PyObject *o = HPy_AsPyObject(ctx, arg);
long val = PyLong_AsLong(o);
Py_DecRef(o);
return HPyLong_FromLong(ctx, val*2);
}
@EXPORT f HPy_METH_O
@INIT
""")
assert mod.f(21) == 42
def test_aspyobject_custom_class(self):
mod = self.make_module("""
#include <Python.h>
HPy_DEF_METH_O(f)
static HPy f_impl(HPyContext ctx, HPy self, HPy arg)
{
PyObject *o = HPy_AsPyObject(ctx, arg);
PyObject *o_res = PyObject_CallMethod(o, "foo", "");
HPy h_res = HPy_FromPyObject(ctx, o_res);
Py_DecRef(o);
Py_DecRef(o_res);
return h_res;
}
@EXPORT f HPy_METH_O
@INIT
""")
class MyClass:
def foo(self):
return 1234
obj = MyClass()
assert mod.f(obj) == 1234
def test_hpy_close(self):
mod = self.make_module("""
#include <Python.h>
HPy_DEF_METH_NOARGS(f)
static HPy f_impl(HPyContext ctx, HPy self)
{
PyObject *o = PyList_New(0);
HPy h = HPy_FromPyObject(ctx, o);
Py_ssize_t initial_refcount = o->ob_refcnt;
HPy_Close(ctx, h);
Py_ssize_t final_refcount = o->ob_refcnt;
Py_DECREF(o);
return HPyLong_FromLong(ctx, (long)(final_refcount -
initial_refcount));
}
@EXPORT f HPy_METH_NOARGS
@INIT
""")
x = mod.f()
if self.should_check_refcount():
assert x == -1
def test_hpy_dup(self):
mod = self.make_module("""
#include <Python.h>
HPy_DEF_METH_NOARGS(f)
static HPy f_impl(HPyContext ctx, HPy self)
{
PyObject *o = PyList_New(0);
HPy h = HPy_FromPyObject(ctx, o);
Py_ssize_t initial_refcount = o->ob_refcnt;
HPy h2 = HPy_Dup(ctx, h);
Py_ssize_t final_refcount = o->ob_refcnt;
HPy_Close(ctx, h);
HPy_Close(ctx, h2);
Py_DECREF(o);
return HPyLong_FromLong(ctx, (long)(final_refcount -
initial_refcount));
}
@EXPORT f HPy_METH_NOARGS
@INIT
""")
x = mod.f()
if self.should_check_refcount():
assert x == +1
def test_many_handles(self):
mod = self.make_module("""
#include <Python.h>
#define NUM_HANDLES 10000
HPy_DEF_METH_NOARGS(f)
static HPy f_impl(HPyContext ctx, HPy self)
{
PyObject *o = PyList_New(0);
Py_ssize_t result = -42;
HPy handles[NUM_HANDLES];
int i;
Py_ssize_t initial_refcount = o->ob_refcnt;
for (i = 0; i < NUM_HANDLES; i++)
handles[i] = HPy_FromPyObject(ctx, o);
for (i = 0; i < NUM_HANDLES; i++)
if (HPy_IsNull(handles[i]))
goto error;
for (i = 0; i < NUM_HANDLES; i++)
HPy_Close(ctx, handles[i]);
Py_ssize_t final_refcount = o->ob_refcnt;
result = final_refcount - initial_refcount;
error:
return HPyLong_FromLong(ctx, (long)result);
}
@EXPORT f HPy_METH_NOARGS
@INIT
""")
assert mod.f() == 0
def test_meth_cpy_noargs(self):
mod = self.make_module("""
#include <Python.h>
static PyObject *f(PyObject *self, PyObject *args)
{
return PyLong_FromLong(1234);
}
@EXPORT f METH_NOARGS
@INIT
""")
assert mod.f() == 1234
def test_meth_cpy_o(self):
mod = self.make_module("""
#include <Python.h>
static PyObject *f(PyObject *self, PyObject *arg)
{
long x = PyLong_AsLong(arg);
return PyLong_FromLong(x * 2);
}
@EXPORT f METH_O
@INIT
""")
assert mod.f(45) == 90
def test_meth_cpy_varargs(self):
mod = self.make_module("""
#include <Python.h>
static PyObject *f(PyObject *self, PyObject *args)
{
long a, b, c;
if (!PyArg_ParseTuple(args, "lll", &a, &b, &c))
return NULL;
return PyLong_FromLong(100*a + 10*b + c);
}
@EXPORT f METH_VARARGS
@INIT
""")
assert mod.f(4, 5, 6) == 456
def test_meth_cpy_keywords(self):
mod = self.make_module("""
#include <Python.h>
static PyObject *f(PyObject *self, PyObject *args, PyObject *kwargs)
{
static char *kwlist[] = { "a", "b", "c", NULL };
long a, b, c;
if (!PyArg_ParseTupleAndKeywords(args, kwargs, "lll", kwlist, &a, &b, &c))
return NULL;
return PyLong_FromLong(100*a + 10*b + c);
}
@EXPORT f METH_VARARGS | METH_KEYWORDS
@INIT
""")
assert mod.f(c=6, b=5, a=4) == 456
| 33.368182 | 90 | 0.48522 | 833 | 7,341 | 4.064826 | 0.189676 | 0.020673 | 0.032487 | 0.0443 | 0.593621 | 0.585351 | 0.540165 | 0.505907 | 0.465741 | 0.440933 | 0 | 0.019429 | 0.418063 | 7,341 | 219 | 91 | 33.520548 | 0.773174 | 0.069337 | 0 | 0.55914 | 0 | 0.005376 | 0.777517 | 0.082808 | 0 | 0 | 0 | 0 | 0.064516 | 1 | 0.05914 | false | 0 | 0.005376 | 0.005376 | 0.145161 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
7ec2bfc9ccb6ee726f5d61b2d45451c02e9d41ea | 553 | py | Python | blog/migrations/0003_auto_20200321_0543.py | Sergey19940808/blog | 26beea5b218ddfe3347e251994c5c2f500975df0 | [
"MIT"
] | null | null | null | blog/migrations/0003_auto_20200321_0543.py | Sergey19940808/blog | 26beea5b218ddfe3347e251994c5c2f500975df0 | [
"MIT"
] | null | null | null | blog/migrations/0003_auto_20200321_0543.py | Sergey19940808/blog | 26beea5b218ddfe3347e251994c5c2f500975df0 | [
"MIT"
] | null | null | null | # Generated by Django 3.0.4 on 2020-03-21 05:43
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('blog', '0002_auto_20200320_1150'),
]
operations = [
migrations.RemoveField(
model_name='subscribebyblog',
name='user',
),
migrations.AlterField(
model_name='subscriberecord',
name='is_read',
field=models.BooleanField(default=False, verbose_name='Запись прочитана пользователем'),
),
]
| 24.043478 | 100 | 0.605787 | 54 | 553 | 6.074074 | 0.796296 | 0.054878 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.078481 | 0.285714 | 553 | 22 | 101 | 25.136364 | 0.751899 | 0.081374 | 0 | 0.125 | 1 | 0 | 0.193676 | 0.045455 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.0625 | 0 | 0.25 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
7ec5bb3ee6e56981cd97f19bc1f1b34b98f31935 | 631 | py | Python | migrations/versions/1f97f799a477_add_contact_details_to_house.py | havanhuy1997/pmg-cms-2 | 21571235cf3d9552013bca29ab9af288b08e00d6 | [
"Apache-2.0"
] | 2 | 2019-06-11T20:46:43.000Z | 2020-08-27T22:50:32.000Z | migrations/versions/1f97f799a477_add_contact_details_to_house.py | havanhuy1997/pmg-cms-2 | 21571235cf3d9552013bca29ab9af288b08e00d6 | [
"Apache-2.0"
] | 70 | 2017-05-26T14:04:06.000Z | 2021-06-30T10:21:58.000Z | migrations/versions/1f97f799a477_add_contact_details_to_house.py | havanhuy1997/pmg-cms-2 | 21571235cf3d9552013bca29ab9af288b08e00d6 | [
"Apache-2.0"
] | 4 | 2017-08-29T10:09:30.000Z | 2021-05-25T11:29:03.000Z | """Add contact_details to House
Revision ID: 1f97f799a477
Revises: 2df9ce70bad
Create Date: 2018-08-08 10:58:44.869939
"""
# revision identifiers, used by Alembic.
revision = '1f97f799a477'
down_revision = '2df9ce70bad'
from alembic import op
import sqlalchemy as sa
def upgrade():
### commands auto generated by Alembic - please adjust! ###
op.add_column('house', sa.Column('contact_details', sa.Text(), nullable=True))
### end Alembic commands ###
def downgrade():
### commands auto generated by Alembic - please adjust! ###
op.drop_column('house', 'contact_details')
### end Alembic commands ###
| 23.37037 | 82 | 0.70206 | 79 | 631 | 5.531646 | 0.544304 | 0.09611 | 0.09611 | 0.105263 | 0.201373 | 0.201373 | 0.201373 | 0.201373 | 0 | 0 | 0 | 0.087619 | 0.167987 | 631 | 26 | 83 | 24.269231 | 0.744762 | 0.48336 | 0 | 0 | 0 | 0 | 0.215017 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.25 | false | 0 | 0.25 | 0 | 0.5 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
7ecf3dcc1c0914b06bad4a9556b6b06a68e54449 | 2,612 | py | Python | turbinia/workers/analysis/postgresql_acct_test.py | jleaniz/turbinia | 78849bf292196e517fe149b2d4c4ab7000576b11 | [
"Apache-2.0"
] | null | null | null | turbinia/workers/analysis/postgresql_acct_test.py | jleaniz/turbinia | 78849bf292196e517fe149b2d4c4ab7000576b11 | [
"Apache-2.0"
] | null | null | null | turbinia/workers/analysis/postgresql_acct_test.py | jleaniz/turbinia | 78849bf292196e517fe149b2d4c4ab7000576b11 | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
# Copyright 2022 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for the PostgreSQL account analysis task."""
import os
import unittest
from turbinia import config
from turbinia.workers.analysis import postgresql_acct
from turbinia.workers.workers_test import TestTurbiniaTaskBase
class PostgresAcctAnalysisTaskTest(TestTurbiniaTaskBase):
"""Tests for PostgresAcctAnalysisTask Task."""
TEST_DATA_DIR = None
EXPECTED_CREDENTIALS = {'5f4dcc3b5aa765d61d8327deb882cf99': 'postgres'}
POSTGRES_REPORT = """#### **PostgreSQL analysis found 1 weak password(s)**
* **1 weak password(s) found:**
* User 'postgres' with password 'password'"""
def setUp(self):
super(PostgresAcctAnalysisTaskTest, self).setUp()
self.setResults(mock_run=False)
filedir = os.path.dirname(os.path.realpath(__file__))
self.TEST_DATA_DIR = os.path.join(filedir, '..', '..', '..', 'test_data')
self.evidence.local_path = self.TEST_DATA_DIR
def test_extract_data_dir(self):
"""Tests the _extract_data_dir method."""
config.LoadConfig()
task = postgresql_acct.PostgresAccountAnalysisTask()
# pylint: disable=protected-access
data_dirs = task._extract_data_dir(self.TEST_DATA_DIR, self.result)
self.assertEqual(len(data_dirs), 1)
self.assertEqual(data_dirs, ['test_data'])
def test_extract_creds(self):
"""Tests the _extract_creds method."""
config.LoadConfig()
task = postgresql_acct.PostgresAccountAnalysisTask()
# pylint: disable=protected-access
hashes = task._extract_creds(['/database'], self.evidence)
self.assertDictEqual(hashes, self.EXPECTED_CREDENTIALS)
def test_analyse_postgres_creds(self):
"""Tests the _analyse_postegres_creds method."""
config.LoadConfig()
task = postgresql_acct.PostgresAccountAnalysisTask()
(report, priority, summary) = task._analyse_postgres_creds(
self.EXPECTED_CREDENTIALS)
self.assertEqual(report, self.POSTGRES_REPORT)
self.assertEqual(priority, 10)
self.assertEqual(summary, 'PostgreSQL analysis found 1 weak password(s)') | 36.788732 | 77 | 0.745023 | 321 | 2,612 | 5.897196 | 0.417445 | 0.025885 | 0.023244 | 0.022187 | 0.180137 | 0.180137 | 0.180137 | 0.141046 | 0.10037 | 0.10037 | 0 | 0.014831 | 0.148162 | 2,612 | 71 | 78 | 36.788732 | 0.835955 | 0.32121 | 0 | 0.166667 | 0 | 0 | 0.143681 | 0.018465 | 0 | 0 | 0 | 0 | 0.166667 | 1 | 0.111111 | false | 0.111111 | 0.138889 | 0 | 0.361111 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 1 |
7ed5ebddc6457b559b508614ad321953960871a9 | 1,012 | py | Python | fedsimul/utils/language_utils.py | cshjin/fedsimul | 1e2b9a9d9034fbc679dfaff059c42dea5642971d | [
"MIT"
] | 11 | 2021-05-07T01:28:26.000Z | 2022-03-10T08:23:16.000Z | fedsimul/utils/language_utils.py | cshjin/fedsimul | 1e2b9a9d9034fbc679dfaff059c42dea5642971d | [
"MIT"
] | 2 | 2021-08-13T10:12:13.000Z | 2021-08-31T02:03:20.000Z | fedsimul/utils/language_utils.py | cshjin/fedsimul | 1e2b9a9d9034fbc679dfaff059c42dea5642971d | [
"MIT"
] | 1 | 2021-06-08T07:23:22.000Z | 2021-06-08T07:23:22.000Z | ###############################################################################
# Utils functions for language models.
#
# NOTE: source from https://github.com/litian96/FedProx
###############################################################################
ALL_LETTERS = "\n !\"&'(),-.0123456789:;>?ABCDEFGHIJKLMNOPQRSTUVWXYZ[]abcdefghijklmnopqrstuvwxyz}"
NUM_LETTERS = len(ALL_LETTERS)
def _one_hot(index, size):
'''returns one-hot vector with given size and value 1 at given index
'''
vec = [0 for _ in range(size)]
vec[int(index)] = 1
return vec
def letter_to_vec(letter):
'''returns one-hot representation of given letter
'''
index = ALL_LETTERS.find(letter)
return _one_hot(index, NUM_LETTERS)
def word_to_indices(word):
'''returns a list of character indices
Args:
word: string
Return:
indices: int list with length len(word)
'''
indices = []
for c in word:
indices.append(ALL_LETTERS.find(c))
return indices
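# Hedged usage example (not part of the original module):
#   letter_to_vec('a') returns a NUM_LETTERS-long one-hot list with a 1 at ALL_LETTERS.find('a')
#   word_to_indices("hi") returns [ALL_LETTERS.find('h'), ALL_LETTERS.find('i')]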
| 25.948718 | 98 | 0.559289 | 112 | 1,012 | 4.919643 | 0.482143 | 0.072595 | 0.039927 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.018248 | 0.187747 | 1,012 | 38 | 99 | 26.631579 | 0.652068 | 0.323123 | 0 | 0 | 0 | 0 | 0.010246 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.214286 | false | 0 | 0 | 0 | 0.428571 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
7edd2d6611c190c72458a52ee8280a39a0f6019d | 2,893 | py | Python | tokenize.py | jobsforher/Internship | 68d99e90c54b3ccf1f06bdc817b8a46f1f0fe97a | [
"Apache-2.0"
] | null | null | null | tokenize.py | jobsforher/Internship | 68d99e90c54b3ccf1f06bdc817b8a46f1f0fe97a | [
"Apache-2.0"
] | null | null | null | tokenize.py | jobsforher/Internship | 68d99e90c54b3ccf1f06bdc817b8a46f1f0fe97a | [
"Apache-2.0"
] | null | null | null | import re
from sys import argv
import sys
script, filename=argv
text=open(filename)
s=text.read()
y= re.split('\s+',s)
'''rite=open("new.txt", 'w')'''
temp=[]
values=[0,1,2,3,4,5,6,7,8,9,10,11,12,13]
stopword1=['with','more','than','for','a','an','the','then','to','I','as','am','and','is']
for word in y:
m=0
for val in values:
if word==stopword1[val]:
'''x=rite.write("--- ")'''
break
else:
m=m+1
if m==7:
'''x=rite.write(word)'''
'''x=rite.write(" ")'''
temp.append(word)
break
temp.append("end")
temp.append("qq")
#rite1=open("new1.txt", 'w')
values=[0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30]
val=0
sys.stdout.write("Name :: ")
sys.stdout.write(temp[0])
sys.stdout.write(" ")
sys.stdout.write(temp[1])
sys.stdout.write("\n\n")
i=2
keywords=['Personal','PERSONAL','Location','Domain','Industry','Languages','Interests','INTERESTS','SUMMARY','Summary','COMPETENCIES','Competencies','GRADUATION','Graduation','EXPERIENCE','Experience','SKILLS','Skills','QUALIFICATIONS','Qualifications','CERTIFICATION','Certifications','EDUCATION','Education','PROJECTS','Projects','EXPERTISE','Expertise','ACADEMICS','Academics','asdfg','ACHIEVEMENTS','Achievements']
while temp[i]!="qq":
for val in values:
if temp[i]==keywords[val]:
sys.stdout.write("")
sys.stdout.write(temp[i])
sys.stdout.write(" :: ")
count=0
while count==0:
m=0
i=i+1
sys.stdout.write(" ")
if temp[i]=="qq":
count=1
check=0
for m in values:
if temp[i]==keywords[m]:
sys.stdout.write("\n")
sys.stdout.write("\n")
sys.stdout.write("\n")
check=1
count=1
m=m+1
val=0
i=i-1
break
m=m+1
if check==0:
sys.stdout.write(temp[i])
else:
val=val+1
i=i+1
text.close()
'''rite.close()'''  # disabled like the other rite lines above: 'rite' is never opened, so closing it would raise NameError
| 38.573333 | 418 | 0.382993 | 298 | 2,893 | 3.718121 | 0.35906 | 0.105596 | 0.16426 | 0.064982 | 0.221119 | 0.183213 | 0.141697 | 0.083935 | 0.043321 | 0.043321 | 0 | 0.06142 | 0.45973 | 2,893 | 74 | 419 | 39.094595 | 0.647473 | 0.009333 | 0 | 0.40625 | 0 | 0 | 0.138356 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | null | 0 | 0.046875 | null | null | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
7ede2a1e09a4811ae478f01a896fc0510aa9e87d | 360 | py | Python | HPOBenchExperimentUtils/core/__init__.py | PhMueller/TrajectoryParser | 9c19d37a3ff29a593c9b6d3e7fd3857e8c2d724f | [
"Apache-2.0"
] | null | null | null | HPOBenchExperimentUtils/core/__init__.py | PhMueller/TrajectoryParser | 9c19d37a3ff29a593c9b6d3e7fd3857e8c2d724f | [
"Apache-2.0"
] | 1 | 2021-09-01T16:35:21.000Z | 2021-11-05T19:53:25.000Z | HPOBenchExperimentUtils/core/__init__.py | automl/HPOBenchExperimentUtils | 9c19d37a3ff29a593c9b6d3e7fd3857e8c2d724f | [
"Apache-2.0"
] | null | null | null | WORKER_WAIT_FOR_SCHEDULER_TO_START_IN_S = 600
WORKER_WAIT_FOR_NAMESERVER_TO_START_IN_S = 300
SCHEDULER_PING_WORKERS_INTERVAL_IN_S = 10
SCHEDULER_TIMEOUT_WORKER_DISCOVERY_IN_S = 600
# See Explanation in HPOBenchExperimentUtils/__init__.py
try:
from HPOBenchExperimentUtils.optimizer.autogluon_optimizer import _obj_fct
except ModuleNotFoundError:
pass
| 32.727273 | 78 | 0.875 | 50 | 360 | 5.68 | 0.64 | 0.042254 | 0.091549 | 0.070423 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.033742 | 0.094444 | 360 | 10 | 79 | 36 | 0.837423 | 0.15 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0.125 | 0.125 | 0 | 0.125 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 1 |
7ee0306bca45832afc48ed42c697d3140fcc48e9 | 5,061 | py | Python | cron/main.py | opendatasoft/overpass-query-box | 8ba4fe876ff7959357ad780583b5b849d0ad62f5 | [
"MIT"
] | 3 | 2016-04-28T06:25:57.000Z | 2016-09-27T23:08:53.000Z | cron/main.py | opendatasoft/overpass-query-box | 8ba4fe876ff7959357ad780583b5b849d0ad62f5 | [
"MIT"
] | null | null | null | cron/main.py | opendatasoft/overpass-query-box | 8ba4fe876ff7959357ad780583b5b849d0ad62f5 | [
"MIT"
] | 4 | 2016-07-29T11:54:43.000Z | 2019-04-16T23:27:47.000Z | #!/usr/bin/env python
import sys
import os
import urllib
import requests
import shutil
import json
import datetime
from ftplib import FTP, error_perm, error_reply
API_URL = 'http://localhost/cgi-bin'
TIMEOUT_MINUTES = 15
HOURS_BEFORE_PROCESS = 24
if len(sys.argv) < 4:
sys.stderr.write('Usage : ./main.py FTP_SERVER FTP_USER FTP_PASSWORD\n')
sys.exit(1)
results_directory = os.path.join(os.path.dirname(os.path.realpath(__file__)), 'results')
requests_directory = os.path.join(os.path.dirname(os.path.realpath(__file__)), 'requests')
cron_history_path = os.path.join(os.path.dirname(os.path.realpath(__file__)), 'cron_history')
cron_history = {}
lock_file_path = os.path.join(os.path.dirname(os.path.realpath(__file__)), '.lock')
ftp_server = sys.argv[1]
ftp_user = sys.argv[2]
ftp_password = sys.argv[3]
def date_to_ftp_timestamp(dt):
return dt.strftime('%Y%m%d%H%M%S')
def ftp_timestamp_to_date(timestamp):
return datetime.datetime.strptime(timestamp, '%Y%m%d%H%M%S')
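# Example (illustrative, not in the original script): the two helpers round-trip the
# 14-digit MDTM-style timestamp used by the FTP server, e.g.
#   date_to_ftp_timestamp(datetime.datetime(2020, 1, 2, 3, 4, 5)) == '20200102030405'
#   ftp_timestamp_to_date('20200102030405') == datetime.datetime(2020, 1, 2, 3, 4, 5)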
if os.path.exists(lock_file_path):
print 'A lock file is present, the script is already running'
sys.exit()
fd = open(lock_file_path, 'a')
fd.close()
try:
if os.path.exists(results_directory):
shutil.rmtree(results_directory)
if os.path.exists(requests_directory):
shutil.rmtree(requests_directory)
if os.path.exists(cron_history_path):
with open(cron_history_path, 'r') as fd:
cron_history = json.load(fd)
os.makedirs(requests_directory)
os.makedirs(results_directory)
print 'Downloading requests...'
# Connect to the FTP server and change directory to /requests
ftp = FTP(ftp_server, user=ftp_user, passwd=ftp_password)
if 'requests' not in ftp.nlst():
ftp.mkd('requests')
ftp.cwd('requests')
# Download all files from the /requests directory
file_list = ftp.nlst()
for filename in file_list:
with open(os.path.join(requests_directory, filename), 'w+') as fd:
ftp.retrbinary('RETR ' + filename, fd.write)
print 'Processing requests and downloading responses...'
# Determine which requests to process
files_to_process = []
for filename in file_list:
do_process = False
res = ftp.sendcmd('MDTM %s' % filename)
last_modified = ftp_timestamp_to_date(res.split(' ')[1])
if cron_history.get(filename):
history_last_modified = cron_history[filename].get('last_modified')
if history_last_modified:
history_last_modified = ftp_timestamp_to_date(history_last_modified)
if history_last_modified != last_modified:
do_process = True
if not do_process:
history_last_processed = ftp_timestamp_to_date(cron_history[filename].get('last_processed'))
if history_last_processed:
diff = datetime.datetime.now() - history_last_processed
datetime.timedelta(0, 32400)
if diff.total_seconds() / 60 / 60 >= HOURS_BEFORE_PROCESS:
do_process = True
else:
do_process = True
else:
cron_history[filename] = {}
do_process = True
if do_process:
files_to_process.append(filename)
cron_history[filename]['last_modified'] = date_to_ftp_timestamp(last_modified)
cron_history[filename]['last_processed'] = date_to_ftp_timestamp(datetime.datetime.now())
with open(cron_history_path, 'w+') as fd:
json.dump(cron_history, fd)
# We close FTP because the request can take a while
ftp.close()
for filename in files_to_process:
with open(os.path.join(requests_directory, filename), 'r') as fd:
query = fd.read()
print("Processing %s..." % filename)
url = '%s/interpreter?data=%s' % (API_URL, urllib.quote(query, safe=''))
req = requests.get(url, timeout=(TIMEOUT_MINUTES * 60), stream=True)
req.raise_for_status()
with open(os.path.join(results_directory, filename), 'w+') as fd:
for block in req.iter_content(1024):
# fd.write(req.text.encode('utf-8'))
fd.write(block)
print("%s done." % filename)
print 'Download completed, starting upload on FTP...'
file_list = os.listdir(results_directory)
if file_list:
ftp = FTP(ftp_server, user=ftp_user, passwd=ftp_password)
for filename in file_list:
try:
ftp.delete(filename)
except (error_perm, error_reply) as e:
sys.stderr.write('Cannot delete file %s: %s\n' % (filename, e.message))
fd = open(os.path.join(results_directory, filename))
ftp.storbinary('STOR %s' % filename, fd)
fd.close()
print('File %s uploaded.' % filename)
ftp.close()
print("Upload completed.")
shutil.rmtree(requests_directory)
shutil.rmtree(results_directory)
finally:
os.remove(lock_file_path)
| 36.941606 | 108 | 0.648093 | 669 | 5,061 | 4.681614 | 0.26009 | 0.038314 | 0.025543 | 0.015326 | 0.287676 | 0.183908 | 0.139847 | 0.115581 | 0.088123 | 0.088123 | 0 | 0.007009 | 0.238886 | 5,061 | 136 | 109 | 37.213235 | 0.806075 | 0.0492 | 0 | 0.189189 | 0 | 0 | 0.10718 | 0.004579 | 0 | 0 | 0 | 0 | 0 | 0 | null | null | 0.036036 | 0.072072 | null | null | 0.072072 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |