hexsha
string
size
int64
ext
string
lang
string
max_stars_repo_path
string
max_stars_repo_name
string
max_stars_repo_head_hexsha
string
max_stars_repo_licenses
list
max_stars_count
int64
max_stars_repo_stars_event_min_datetime
string
max_stars_repo_stars_event_max_datetime
string
max_issues_repo_path
string
max_issues_repo_name
string
max_issues_repo_head_hexsha
string
max_issues_repo_licenses
list
max_issues_count
int64
max_issues_repo_issues_event_min_datetime
string
max_issues_repo_issues_event_max_datetime
string
max_forks_repo_path
string
max_forks_repo_name
string
max_forks_repo_head_hexsha
string
max_forks_repo_licenses
list
max_forks_count
int64
max_forks_repo_forks_event_min_datetime
string
max_forks_repo_forks_event_max_datetime
string
content
string
avg_line_length
float64
max_line_length
int64
alphanum_fraction
float64
qsc_code_num_words_quality_signal
int64
qsc_code_num_chars_quality_signal
float64
qsc_code_mean_word_length_quality_signal
float64
qsc_code_frac_words_unique_quality_signal
float64
qsc_code_frac_chars_top_2grams_quality_signal
float64
qsc_code_frac_chars_top_3grams_quality_signal
float64
qsc_code_frac_chars_top_4grams_quality_signal
float64
qsc_code_frac_chars_dupe_5grams_quality_signal
float64
qsc_code_frac_chars_dupe_6grams_quality_signal
float64
qsc_code_frac_chars_dupe_7grams_quality_signal
float64
qsc_code_frac_chars_dupe_8grams_quality_signal
float64
qsc_code_frac_chars_dupe_9grams_quality_signal
float64
qsc_code_frac_chars_dupe_10grams_quality_signal
float64
qsc_code_frac_chars_replacement_symbols_quality_signal
float64
qsc_code_frac_chars_digital_quality_signal
float64
qsc_code_frac_chars_whitespace_quality_signal
float64
qsc_code_size_file_byte_quality_signal
float64
qsc_code_num_lines_quality_signal
float64
qsc_code_num_chars_line_max_quality_signal
float64
qsc_code_num_chars_line_mean_quality_signal
float64
qsc_code_frac_chars_alphabet_quality_signal
float64
qsc_code_frac_chars_comments_quality_signal
float64
qsc_code_cate_xml_start_quality_signal
float64
qsc_code_frac_lines_dupe_lines_quality_signal
float64
qsc_code_cate_autogen_quality_signal
float64
qsc_code_frac_lines_long_string_quality_signal
float64
qsc_code_frac_chars_string_length_quality_signal
float64
qsc_code_frac_chars_long_word_length_quality_signal
float64
qsc_code_frac_lines_string_concat_quality_signal
float64
qsc_code_cate_encoded_data_quality_signal
float64
qsc_code_frac_chars_hex_words_quality_signal
float64
qsc_code_frac_lines_prompt_comments_quality_signal
float64
qsc_code_frac_lines_assert_quality_signal
float64
qsc_codepython_cate_ast_quality_signal
float64
qsc_codepython_frac_lines_func_ratio_quality_signal
float64
qsc_codepython_cate_var_zero_quality_signal
bool
qsc_codepython_frac_lines_pass_quality_signal
float64
qsc_codepython_frac_lines_import_quality_signal
float64
qsc_codepython_frac_lines_simplefunc_quality_signal
float64
qsc_codepython_score_lines_no_logic_quality_signal
float64
qsc_codepython_frac_lines_print_quality_signal
float64
qsc_code_num_words
int64
qsc_code_num_chars
int64
qsc_code_mean_word_length
int64
qsc_code_frac_words_unique
null
qsc_code_frac_chars_top_2grams
int64
qsc_code_frac_chars_top_3grams
int64
qsc_code_frac_chars_top_4grams
int64
qsc_code_frac_chars_dupe_5grams
int64
qsc_code_frac_chars_dupe_6grams
int64
qsc_code_frac_chars_dupe_7grams
int64
qsc_code_frac_chars_dupe_8grams
int64
qsc_code_frac_chars_dupe_9grams
int64
qsc_code_frac_chars_dupe_10grams
int64
qsc_code_frac_chars_replacement_symbols
int64
qsc_code_frac_chars_digital
int64
qsc_code_frac_chars_whitespace
int64
qsc_code_size_file_byte
int64
qsc_code_num_lines
int64
qsc_code_num_chars_line_max
int64
qsc_code_num_chars_line_mean
int64
qsc_code_frac_chars_alphabet
int64
qsc_code_frac_chars_comments
int64
qsc_code_cate_xml_start
int64
qsc_code_frac_lines_dupe_lines
int64
qsc_code_cate_autogen
int64
qsc_code_frac_lines_long_string
int64
qsc_code_frac_chars_string_length
int64
qsc_code_frac_chars_long_word_length
int64
qsc_code_frac_lines_string_concat
null
qsc_code_cate_encoded_data
int64
qsc_code_frac_chars_hex_words
int64
qsc_code_frac_lines_prompt_comments
int64
qsc_code_frac_lines_assert
int64
qsc_codepython_cate_ast
int64
qsc_codepython_frac_lines_func_ratio
int64
qsc_codepython_cate_var_zero
int64
qsc_codepython_frac_lines_pass
int64
qsc_codepython_frac_lines_import
int64
qsc_codepython_frac_lines_simplefunc
int64
qsc_codepython_score_lines_no_logic
int64
qsc_codepython_frac_lines_print
int64
effective
string
hits
int64
6b8b3dd970c2381dc6fab482c6f4e706bdbe8c46
25
py
Python
core_dev/datetime_/__init__.py
alexzanderr/_core-dev
831f69dad524e450c4243b1dd88f26de80e1d444
[ "MIT" ]
null
null
null
core_dev/datetime_/__init__.py
alexzanderr/_core-dev
831f69dad524e450c4243b1dd88f26de80e1d444
[ "MIT" ]
null
null
null
core_dev/datetime_/__init__.py
alexzanderr/_core-dev
831f69dad524e450c4243b1dd88f26de80e1d444
[ "MIT" ]
null
null
null
from .datetime_ import *
12.5
24
0.76
3
25
6
1
0
0
0
0
0
0
0
0
0
0
0
0.16
25
2
24
12.5
0.857143
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
1
1
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
0
0
0
5
6b8ba5e38a11991c07975fe01f68ff669ab459c0
811
py
Python
badx12/common/click.py
agaddis02/badX12
7362a4d9629e570be8cd3b42af5210cda39e0efc
[ "MIT" ]
null
null
null
badx12/common/click.py
agaddis02/badX12
7362a4d9629e570be8cd3b42af5210cda39e0efc
[ "MIT" ]
null
null
null
badx12/common/click.py
agaddis02/badX12
7362a4d9629e570be8cd3b42af5210cda39e0efc
[ "MIT" ]
null
null
null
# -*- coding: utf-8 -*- from collections import Iterable import click def add_commands(click_group, commands): if not isinstance(click_group, click.core.Group): raise TypeError( f"add_commands() expects click.core.Group for click_group, got {type(click_group)}" ) if not isinstance(commands, Iterable): raise TypeError( f"add_commands() expects an Iterable type for commands, got {type(commands)}" ) for command in commands: if not isinstance(command, click.core.Command) and not isinstance( command, click.core.Group ): raise TypeError( f"commands must be of type click.core.Command or click.core.Group, got {type(command)}" ) click_group.add_command(command)
30.037037
103
0.633785
99
811
5.10101
0.313131
0.106931
0.110891
0.091089
0.312871
0.215842
0
0
0
0
0
0.001698
0.273736
811
26
104
31.192308
0.855688
0.025894
0
0.157895
0
0.052632
0.30203
0
0
0
0
0
0
1
0.052632
false
0
0.105263
0
0.157895
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
6b8bb153d1b8da0157b44ec53267ac54fdb21350
1,706
py
Python
snakebids/admin.py
LouFel/snakebids
6fdf64fb65471868b9cc9d55fd77654096c07686
[ "MIT" ]
null
null
null
snakebids/admin.py
LouFel/snakebids
6fdf64fb65471868b9cc9d55fd77654096c07686
[ "MIT" ]
2
2022-02-11T22:08:36.000Z
2022-02-11T22:08:47.000Z
snakebids/admin.py
kaitj/snakebids
f205ab279223e9f5bb87ba170fe29d8324bc7484
[ "MIT" ]
null
null
null
"""Script to generate a Snakebids project.""" import argparse import os from pathlib import Path from cookiecutter.main import cookiecutter import snakebids from snakebids.app import SnakeBidsApp from snakebids.cli import add_dynamic_args def create_app(_): cookiecutter(os.path.join(snakebids.__path__[0], "project_template")) def create_descriptor(args): # pylint: disable=unsubscriptable-object app = SnakeBidsApp(args.app_dir.resolve()) add_dynamic_args(app.parser, app.config["parse_args"], app.config["pybids_inputs"]) app.create_descriptor(args.out_path) print(f"Boutiques descriptor created at {args.out_path}") def gen_parser(): parser = argparse.ArgumentParser( description="Perform administrative Snakebids tasks." ) subparsers = parser.add_subparsers(required=True, dest="command") parser_create = subparsers.add_parser("create", help="Create a new Snakebids app.") parser_create.set_defaults(func=create_app) parser_boutiques = subparsers.add_parser( "boutiques", help="Create a Boutiques descriptor for an existing Snakebids app." ) parser_boutiques.add_argument( "out_path", help="Path for the output Boutiques descriptor. Should be a .json file.", type=Path, ) parser_boutiques.add_argument( "--app_dir", help="Location of the Snakebids app. Defaults to the current directory.", type=Path, default=".", ) parser_boutiques.set_defaults(func=create_descriptor) return parser def main(): """Invoke Cookiecutter on the Snakebids project template.""" parser = gen_parser() args = parser.parse_args() args.func(args)
28.433333
88
0.715709
209
1,706
5.660287
0.373206
0.063398
0.023669
0.035503
0
0
0
0
0
0
0
0.000718
0.184056
1,706
59
89
28.915254
0.849138
0.078546
0
0.097561
1
0
0.244715
0
0
0
0
0
0
1
0.097561
false
0
0.170732
0
0.292683
0.02439
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
6b8c93d8422ed7233811fd0f7290cad989fa723c
4,500
py
Python
estimateHR_unittest.py
RingoYen85/HeartRateMonitor
8bbecfd0969c1a91b8a5c3117869ac248c31cf17
[ "MIT" ]
null
null
null
estimateHR_unittest.py
RingoYen85/HeartRateMonitor
8bbecfd0969c1a91b8a5c3117869ac248c31cf17
[ "MIT" ]
null
null
null
estimateHR_unittest.py
RingoYen85/HeartRateMonitor
8bbecfd0969c1a91b8a5c3117869ac248c31cf17
[ "MIT" ]
null
null
null
# -*- coding: utf-8 -*- """ Created on Fri Sep 23 21:54:45 2016 @author: ringoyen """ import numpy as np import unittest class tester(unittest.TestCase): def test_low_pass_filter(self): """ Unit Test for a moving averager (convolution) to serve as a low pass filter to remove noise :return: """ from estimateHeartRate import low_pass_filter import numpy as np # define inputs for function ecg_data_1 = np.array([1, 2, 3, 4, 5, 6, 7]) ecg_data_2 = np.array([2, 2, 2, 2, 2, 2, 2]) # define outputs for function ecg_clean_1 = np.array([1.2, 2., 3., 4., 5., 4.4, 3.6]) ecg_clean_2 = np.array([1.2, 1.6, 2., 2., 2., 1.6, 1.2]) # Place inputs in function clean_ecg_1 = low_pass_filter(ecg_data_1) clean_ecg_2 = low_pass_filter(ecg_data_2) # Unit Tests self.assertEquals(np.all(ecg_data_1),np.all(clean_ecg_1), msg = 'low pass filter does not work!') self.assertEquals(np.all(ecg_data_2), np.all(clean_ecg_2), msg='low pass filter does not work!') self.assertEquals(len(ecg_data_1),len(clean_ecg_1), msg = 'low pass filter gives wrong output length!') def test_remove_dc_offset(self): """ Unit Test for removing DC offset from data :return: """ from estimateHeartRate import remove_dc_offset import numpy as np # define inputs for function a = np.array([5.0,5.0,5.0,5.0]) b = np.array([1.0, 2.0, 3.0, 4.0, 5.0]) c = np.array([3.45, 2.46, 4.32, 5.56]) # Define known output of function ecgKnown_1 = [0.0, 0.0, 0.0, 0.0] ecgKnown_2 = [-2.0, -1.0, 0.0, 1.0, 2.0] ecgKnown_3 = [-0.4975, -1.4875, 0.3725, 1.61] # Place values in function noOffset_1 = remove_dc_offset(a) noOffset_2 = remove_dc_offset(b) noOffset_3 = remove_dc_offset(c) self.assertEqual(np.all(ecgKnown_1), np.all(noOffset_1), msg ='ECG values incorrect') self.assertEqual(np.all(ecgKnown_2), np.all(noOffset_2), msg='ECG values incorrect') self.assertEqual(np.all(ecgKnown_3),np.all(noOffset_3), msg = 'ECG values incorrect') self.assertEqual(len(ecgKnown_1), len(noOffset_1)) self.assertEqual(len(ecgKnown_2), len(noOffset_2)) def 
test_findThreshold(self): """ This tests the ability of the function to determine the maximum value in a given data stream, and use it to determine the threshold value """ from estimateHeartRate import findThreshold # Define inputs for function a = np.linspace(1, 1000, 1000) f_s_1 = 100 f_s_2 = 200 # Define known output for function # should be 750, but changed to 800 to check git threshknown_1 = 425 threshknown_2 = 850 # Place values in function thresh_1 = findThreshold(a, f_s_1) thresh_2 = findThreshold(a, f_s_2) self.assertEqual(threshknown_1,thresh_1, msg='Threshold not correct') self.assertEqual(threshknown_2,thresh_2, msg='Threshold not correct') def test_find_instantaneous_heart_rate(self): """ This tests the ability of the function to determine the times and ECG values when there is an actual heartbeat occuring. This is supposed to happen at the peaks (max values) of the traces. There are 2 sets of test arrays I am using """ from estimateHeartRate import find_instantaneous_heart_rate #Define inputs vectors for function ecg_vals_1 = [1, 1, 1, 5, 6, 1, 1, 10, 1, 1, 11, 5] ecg_time_1 = [1., 2., 3., 4., 5., 6., 7., 8., 9., 10., 11., 12.] ecg_vals_2 =[1,4.5,1,1,1,6,1] ecg_time_2 =[1.,2.,3.,4.,5.,6.,7.] ecg_threshold = 4 # Define known outputs of function instant_hr_1 = [20.] instant_hr_2 = [15.] # Define function with inputs actual_hr_1 = find_instantaneous_heart_rate(ecg_vals_1,ecg_time_1,ecg_threshold) actual_hr_2 = find_instantaneous_heart_rate(ecg_vals_2,ecg_time_2,ecg_threshold) self.assertEqual(instant_hr_1,actual_hr_1, 'msg = instant hr is wrong!') self.assertEqual(instant_hr_2, actual_hr_2, 'msg = instant hr is wrong!') if __name__ == '__main__': unittest.main()
31.690141
112
0.607556
687
4,500
3.780204
0.234352
0.007701
0.040046
0.007701
0.284559
0.236812
0.165191
0.146323
0.109357
0.038506
0
0.077353
0.284667
4,500
142
113
31.690141
0.729419
0.223556
0
0.051724
0
0
0.079494
0
0
0
0
0
0.206897
1
0.068966
false
0.12069
0.137931
0
0.224138
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
1
0
0
0
0
0
1
6b8d70280f0cb0d3625c06ba1c69384706890a4a
2,602
py
Python
sequence/fastx_translate.py
shenwei356/bio_scripts
703cec8d21903516346e2aae4d77d23385c30905
[ "MIT" ]
94
2015-03-26T04:32:29.000Z
2022-03-22T13:44:11.000Z
sequence/fastx_translate.py
xinwang-bio/bio_scripts
64fda3a72ba14edf87952a809c3d52871f155cca
[ "MIT" ]
null
null
null
sequence/fastx_translate.py
xinwang-bio/bio_scripts
64fda3a72ba14edf87952a809c3d52871f155cca
[ "MIT" ]
70
2015-04-01T10:27:05.000Z
2021-11-08T01:46:39.000Z
#!/usr/bin/env python # -*- coding: utf-8 -*- # https://github.com/shenwei356/bio_scripts from __future__ import print_function import argparse import gzip import logging import os import re import sys from Bio import SeqIO from Bio.Seq import Seq from Bio.SeqRecord import SeqRecord def parse_args(): parser = argparse.ArgumentParser(description="Translate DNA to peptide") parser.add_argument("-v", "--verbose", help='verbosely print information', action="count", default=0) group = parser.add_mutually_exclusive_group() group.add_argument("--stdin", action="store_true", help='read from stdin, one sequence per line') group.add_argument('-i', '--infile', type=str, help='file name should like this: infile.[fasta|fa|fastq|fq][.gz]') parser.add_argument('-f', '--format', type=str, # default='fasta', help='seqence format: fasta |fastq [fasta]') parser.add_argument('-t', '--table', type=int, default=1, help='genetic code table (detail: http://www.ncbi.nlm.nih.gov/Taxonomy/Utils/wprintgc.cgi ) [1]') args = parser.parse_args() if not (args.stdin or args.infile): sys.stderr.write("option --stdin or -i should be given\n") sys.exit(1) if args.format and not args.format in ['fasta', 'fastq']: sys.stderr.write("option -f | --format should be 'fasta' or 'fastq'\n") sys.exit(1) if args.stdin and not args.format: sys.stderr.write("option -f | --format should be given when --stdin is set.\n") sys.exit(1) return args if __name__ == '__main__': args = parse_args() file, seq_format, fh = args.infile, args.format, None, if file: if not seq_format: found = re.search(r'(?i)(fasta|fa|fastq|fq)(.gz)?$', file) if not found: print("invalid file name suffix.\nfile name should like this: infile.[fasfa|fa|fastq|fq][.gz]", file=sys.stderr) sys.exit(1) seq_format, is_gz = found.groups() if seq_format == 'fa': seq_format = 'fasta' if seq_format == 'fq': seq_format = 'fastq' fh = gzip.open(file, 'rt') if file.endswith('.gz') else open(file, 'r') else: fh = sys.stdin seq_format = args.format for seq in 
SeqIO.parse(fh, seq_format): SeqIO.write([SeqRecord(seq.seq.translate(table=args.table), id=seq.id, description=seq.description)], sys.stdout, 'fasta') fh.close()
35.162162
130
0.601076
350
2,602
4.365714
0.365714
0.05301
0.020942
0.021597
0.130236
0.065445
0.045812
0.045812
0
0
0
0.005691
0.25711
2,602
73
131
35.643836
0.78479
0.038816
0
0.071429
0
0.035714
0.257509
0.036844
0
0
0
0
0
1
0.017857
false
0
0.178571
0
0.214286
0.071429
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
6b8db4952ad5c55aa4611431b1886cf54a31f84a
682
py
Python
Assignment 3. Paxos/Simulation/Network/Network.py
WailAbou/Distributed-Processing
46a36f1fd51d6f8b35cc639eb8002d81d7e09f2b
[ "MIT" ]
null
null
null
Assignment 3. Paxos/Simulation/Network/Network.py
WailAbou/Distributed-Processing
46a36f1fd51d6f8b35cc639eb8002d81d7e09f2b
[ "MIT" ]
null
null
null
Assignment 3. Paxos/Simulation/Network/Network.py
WailAbou/Distributed-Processing
46a36f1fd51d6f8b35cc639eb8002d81d7e09f2b
[ "MIT" ]
null
null
null
from Simulation.Network.Actions import send_message class Network: def __init__(self, proposers, acceptors): self.queue = [] self.proposers = proposers self.acceptors = acceptors def queue_message(self, message): self.queue.append(message) def extract_message(self): for message in self.queue: if not message.destination.failed and not message.source.failed: self.queue.remove(message) return message def deliver_messsage(self, message): print(f'{message.source} -> {message.destination} {message}', end='') send_message[message.message_type](self, message)
27.28
77
0.651026
76
682
5.710526
0.421053
0.082949
0
0
0
0
0
0
0
0
0
0
0.252199
682
24
78
28.416667
0.85098
0
0
0
0
0
0.07478
0.030792
0
0
0
0
0
1
0.25
false
0
0.0625
0
0.4375
0.0625
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
1
0
0
0
0
0
0
0
1
6b8defc3a09a8d03e2adb7a13517e975cbd888f3
697
py
Python
test/test_db_matches_ui.py
ProOksana/python_training
2174f08ebd52905bf28e7b32baf1b1401a05f79e
[ "Apache-2.0" ]
null
null
null
test/test_db_matches_ui.py
ProOksana/python_training
2174f08ebd52905bf28e7b32baf1b1401a05f79e
[ "Apache-2.0" ]
null
null
null
test/test_db_matches_ui.py
ProOksana/python_training
2174f08ebd52905bf28e7b32baf1b1401a05f79e
[ "Apache-2.0" ]
null
null
null
from model.contact import Contacts def test_contacts_list(app, db): ui_list = app.contact.get_contacts_list() def clean(contact): return (Contacts(id=id, firstname=contact.firstname, middlename=contact.middlename, lastname=contact.lastname, nickname=contact.nickname, company=contact.company, title=contact.title, address=contact.address, homephone=contact.homephone, mobilephone=contact.mobilephone, workphone=contact.workphone, email=contact.email, email2=contact.email2, email3=contact.email3)) db_list = db.get_contacts_list() assert sorted(ui_list, key=Contacts.id_or_max) == sorted(db_list, key=Contacts.id_or_max)
58.083333
186
0.734577
87
697
5.724138
0.37931
0.072289
0.060241
0.068273
0.088353
0.088353
0
0
0
0
0
0.006838
0.160689
697
12
187
58.083333
0.844444
0
0
0
0
0
0
0
0
0
0
0
0.111111
1
0.222222
false
0
0.111111
0.111111
0.444444
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
1
0
0
0
1
0
0
0
2
6b904ccd419a7dbc39df362dd10c4cbe6f6a6b80
859
py
Python
DjangoBlog/account/models.py
marasiali/Django-bog
faac81275d27b501dda0be32c82200c80941b7de
[ "MIT" ]
null
null
null
DjangoBlog/account/models.py
marasiali/Django-bog
faac81275d27b501dda0be32c82200c80941b7de
[ "MIT" ]
null
null
null
DjangoBlog/account/models.py
marasiali/Django-bog
faac81275d27b501dda0be32c82200c80941b7de
[ "MIT" ]
null
null
null
from django.db import models from django.utils import timezone from django.contrib.auth.models import AbstractUser class User(AbstractUser): is_author = models.BooleanField(default=False, verbose_name='نویسنده', help_text='نشان میدهد که آیا این کاربر میتواند مطلب ارسال کند یا خیر.') premium_date = models.DateTimeField(default=timezone.now, verbose_name='پایان اشتراک') class Meta: verbose_name = 'کاربر' verbose_name_plural = 'کاربران' def __str__(self): return self.username def premium_days_remaining(self): return (self.premium_date - timezone.now()).days premium_days_remaining.short_description = 'مدت زمان باقیمانده از اشتراک ویژه' def has_premium(self): return self.premium_date > timezone.now() has_premium.boolean = True has_premium.short_description = 'اشتراک ویژه'
35.791667
146
0.735739
112
859
5.4375
0.535714
0.07225
0.068966
0.068966
0.118227
0.118227
0.118227
0
0
0
0
0
0.180442
859
24
147
35.791667
0.865057
0
0
0
0
0
0.154651
0
0
0
0
0
0
1
0.166667
false
0
0.166667
0.166667
0.722222
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
1
1
0
0
2
6b90cf31e44e1e7e27466082bc57a29d138e03cf
7,345
py
Python
pyfilter/test/test_filter.py
zkscpqm/pyfilter
39c284681ec6f377059907b75346028d99cbdd4c
[ "MIT" ]
null
null
null
pyfilter/test/test_filter.py
zkscpqm/pyfilter
39c284681ec6f377059907b75346028d99cbdd4c
[ "MIT" ]
1
2021-04-28T18:40:13.000Z
2021-04-28T18:40:13.000Z
pyfilter/test/test_filter.py
zkscpqm/pyfilter
39c284681ec6f377059907b75346028d99cbdd4c
[ "MIT" ]
null
null
null
import unittest import os from typing import Any, Text, NoReturn, Set, Union from parameterized import parameterized from pyfilter import FilterContext from pyfilter import TextFilter class TestFilter(unittest.TestCase): def setUp(self) -> Any: self.any_inclusion_keywords: Set[Text] = {'dog', 'cat'} self.all_inclusion_keywords: Set[Text] = {'plane', 'car'} self.exclusion_keywords: Set[Text] = {'red', 'grassy'} self.regex_string: Text = '^[A-Za-z]' self.filter: TextFilter = TextFilter.new_filter( any_inclusion_keywords=self.any_inclusion_keywords, all_inclusion_keywords=self.all_inclusion_keywords, exclusion_keywords=self.exclusion_keywords, regex_string=self.regex_string ) def test_init(self) -> NoReturn: self.assertEqual(self.filter.any_inclusion_filter.keywords, list(self.any_inclusion_keywords), 'The any_inclusion_keywords are different than the expected LIST of STRINGS of input data') self.assertEqual(self.filter.all_inclusion_filter.keywords, list(self.all_inclusion_keywords), 'The all_inclusion_keywords are different than the expected LIST of STRINGS of input data') self.assertEqual(self.filter.exclusion_filter.keywords, list(self.exclusion_keywords), 'The exclusion_keywords are different than the expected LIST of STRINGS of input data') self.assertEqual(self.filter.regex_filter.regex.pattern, self.regex_string, 'The regex pattern is different than expected') expected_default_context = FilterContext(casefold=True) self.assertEqual(self.filter.default_context, expected_default_context, 'The default context is different from the expected (casefold=True)') def test_update_keywords(self) -> NoReturn: new_any_inclusion_keywords = [] new_all_inclusion_keywords = [] new_exclusion_keywords = [] self.filter.update_keywords( any_inclusion_keywords=new_any_inclusion_keywords, all_inclusion_keywords=new_all_inclusion_keywords, exclusion_keywords=new_exclusion_keywords ) self.assertEqual(self.filter.any_inclusion_filter.keywords, list(self.any_inclusion_keywords) + 
list(new_any_inclusion_keywords), 'Incorrect any_inclusion_keywords after keyword update') self.assertEqual(self.filter.all_inclusion_filter.keywords, list(self.all_inclusion_keywords) + list(new_all_inclusion_keywords), 'Incorrect all_inclusion_keywords after keyword update') self.assertEqual(self.filter.exclusion_filter.keywords, list(self.exclusion_keywords) + list(new_exclusion_keywords), 'Incorrect exclusion_keywords after keyword update') @parameterized.expand([(['new_exclusion', 'kw'],), (None,)]) def test_set_keywords(self, new_exclusion_keywords: Union[Text, None]): new_any_inclusion_keywords = ['new', 'keywords'] new_all_inclusion_keywords = [] new_regex_str = r'[A-Za-z0-9]' self.filter.set_keywords( any_inclusion_keywords=new_any_inclusion_keywords, all_inclusion_keywords=new_all_inclusion_keywords, exclusion_keywords=new_exclusion_keywords, regex_string=new_regex_str ) self.assertEqual(self.filter.any_inclusion_filter.keywords, new_any_inclusion_keywords, 'Incorrect any_inclusion_keywords after replacing keywords') self.assertEqual(self.filter.all_inclusion_filter.keywords, [], 'Incorrect all_inclusion_keywords after replacing keywords') self.assertEqual(self.filter.exclusion_filter.keywords, new_exclusion_keywords or list(self.exclusion_keywords), 'Incorrect exclusion_keywords after replacing keywords') self.assertEqual(self.filter.regex_filter.regex.pattern, new_regex_str, 'Failed to set new regex pattern') def test_delete_keywords(self) -> NoReturn: any_inclusion_keywords_to_delete = ['dog'] all_inclusion_keywords_to_delete = ['nonexistent'] self.filter.delete_keywords( any_inclusion_keywords=any_inclusion_keywords_to_delete, all_inclusion_keywords=all_inclusion_keywords_to_delete, clear_regex=True ) self.assertEqual(self.filter.any_inclusion_filter.keywords, ['cat'], 'Incorrect any_inclusion_keywords after deleting keywords') self.assertEqual(self.filter.all_inclusion_filter.keywords, list(self.all_inclusion_keywords), 'Incorrect 
all_inclusion_keywords after deleting keywords') self.assertEqual(self.filter.exclusion_filter.keywords, list(self.exclusion_keywords), 'Incorrect exclusion_keywords after deleting keywords') self.assertEqual(self.filter.regex_filter.regex, None, 'Failed to delete regex pattern') @parameterized.expand([("Planes and cars don't allow dogs", True, False), ("Dogs and cats but not the other keywords", False, False), ("Well we have a cat in the car but on on the red plane", False, False), ("The plane carries cats and cars", True, True), ("Just a car and a plane but no pets", False, False), ('123regex fail filter plane cats cars', False, False)]) def test_singular_filter(self, input_string: Text, expected_with_casefold: bool, expected_without_casefold: bool): self.assertEqual(self.filter.filter(input_string, casefold=True), expected_with_casefold) self.assertEqual(self.filter.filter(input_string, casefold=False), expected_without_casefold) def test_multi_filter(self): input_list = ['cat plane car', 'dog cat', 'cat plane car grassy', ''] result = self.filter.multi_filter(input_list) expected_result = ['cat plane car'] self.assertEqual(result, expected_result) @parameterized.expand([('passing_file.txt', True, True), ('casefold_passing_file.txt', True, False), ('failing_file_1.txt', False, False), ('failing_file_2.txt', False, False), ('failing_file_3.txt', False, False)]) def test_file_filter(self, filename: Text, expected_with_casefold: bool, expected_without_casefold: bool): fp = os.path.join('test_files', filename) for casefold in (True, False): for safe in (True, False): result = self.filter.file_filter(fp, safe=safe, casefold=casefold) expected = expected_with_casefold if casefold else expected_without_casefold self.assertEqual(result, expected)
53.224638
116
0.644384
798
7,345
5.659148
0.150376
0.150576
0.088574
0.099646
0.568202
0.499114
0.45992
0.45992
0.348981
0.242693
0
0.001503
0.275425
7,345
137
117
53.613139
0.84705
0
0
0.144068
0
0
0.188913
0.027377
0
0
0
0
0.169492
1
0.067797
false
0.016949
0.050847
0
0.127119
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
6b90ec08ade0528601d7d5711be814ac665fc15d
1,694
py
Python
song_bond.py
skullydazed/PyRoombaAdapter
88824e91b0f0cec008b16cfaa5a62a6546f1226e
[ "MIT" ]
null
null
null
song_bond.py
skullydazed/PyRoombaAdapter
88824e91b0f0cec008b16cfaa5a62a6546f1226e
[ "MIT" ]
null
null
null
song_bond.py
skullydazed/PyRoombaAdapter
88824e91b0f0cec008b16cfaa5a62a6546f1226e
[ "MIT" ]
null
null
null
#!/usr/bin/env python3 """James Bond Theme, Composed by Monty Norman Adapted for Roomba by Zach White """ from time import sleep from pyroombaadapter import PyRoombaAdapter notes0 = ( # note, duration 'B3', '1/2', 'C4', '1/2', 'C#4', '1/2', 'C4', '1/2', 'B3', '1/2', 'C4', '1/2', 'C#4', '1/2', 'C4', '1/2', ) notes1 = ( # 3 times 'E4', '1/8', 'F#4', '1/16', 'F#4', '1/16', 'F#4', '1/8', 'F#4', '1/4', 'E4', '1/8', 'E4', '1/8', 'E4', '1/8', 'E4', '1/8', 'G4', '1/16', 'G4', '1/16', 'G4', '1/8', 'G4', '1/4', 'F#4', '1/8', 'F#4', '1/8', 'F#4', '1/8', ) notes2 = ( 'D#5', '1/8', 'D5', '1/2', 'B4', '1/8', 'A4', '1/8', 'B4', '1' ) notes3 = ( 'E4', '1/8', 'G4', '1/4', 'D#5', '1/8', 'D5', 'd1/4', 'G4', '1/8', 'A#4', '1/8', 'B4', 'd1/2', 'G4', '1/4', 'A4', '1/16', 'G4', '1/16', 'F#4', 'd1/4', 'B3', '1/8', 'E4', '1/8', 'C#4', '1' ) PORT = "/dev/ttyUSB0" roomba = PyRoombaAdapter(PORT) roomba.change_mode_to_full() print('Sending song') roomba.send_song_cmd(0, notes0, 5) roomba.send_song_cmd(1, notes1, 5) roomba.send_song_cmd(2, notes2, 5) roomba.send_song_cmd(3, notes3, 5) for song_num in 0, 1, 1, 1, 2, 1, 1, 1, 2, 0, 3, 3, 0, 2: print('Playing segment', song_num) roomba.send_play_cmd(song_num) # Wait for it to start playing while roomba.readings['song_playing'] != 1: sleep(0.05) print('Segment Started') # Wait for it to stop playing while roomba.readings['song_playing'] != 0: sleep(0.005) print('Segment Ended') print('Song Ended') roomba.turn_off_power()
18.822222
57
0.485832
288
1,694
2.788194
0.270833
0.047323
0.034869
0.024907
0.32254
0.188045
0.09589
0.07472
0.059776
0.039851
0
0.13922
0.257969
1,694
89
58
19.033708
0.499602
0.106257
0
0.430556
0
0
0.222888
0
0
0
0
0
0
1
0
false
0
0.027778
0
0.027778
0.069444
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
6b91473adaa0d54ec3df16837118bb8c64722382
227
py
Python
users/views.py
digitaloxford/do-wagtail
49dd75b95109ebb38bf66aca13d3fdeb8e25d319
[ "MIT" ]
2
2021-04-11T11:59:51.000Z
2021-04-12T06:56:23.000Z
users/views.py
digitaloxford/do-wagtail
49dd75b95109ebb38bf66aca13d3fdeb8e25d319
[ "MIT" ]
8
2021-04-10T10:40:27.000Z
2022-01-25T16:32:22.000Z
users/views.py
digitaloxford/do-wagtail
49dd75b95109ebb38bf66aca13d3fdeb8e25d319
[ "MIT" ]
null
null
null
from django.contrib.auth.decorators import login_required from django.shortcuts import render from django.views import generic from . import models def profile_view(request): return render(request, "users/profile.html")
22.7
57
0.806167
31
227
5.83871
0.645161
0.165746
0
0
0
0
0
0
0
0
0
0
0.123348
227
9
58
25.222222
0.909548
0
0
0
0
0
0.079295
0
0
0
0
0
0
1
0.166667
false
0
0.666667
0.166667
1
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
1
1
1
0
0
4
6b934399f266b4f9c4b7a0180b20fcdaa634e13b
4,263
py
Python
tests/conftest.py
Dimfred/imxpy
289a67fa51ef7b33ee106a65ad69340d07c986b3
[ "MIT" ]
13
2021-12-11T11:52:32.000Z
2022-03-11T12:58:56.000Z
tests/conftest.py
Dimfred/imxpy
289a67fa51ef7b33ee106a65ad69340d07c986b3
[ "MIT" ]
1
2021-12-19T19:15:29.000Z
2021-12-26T14:09:16.000Z
tests/conftest.py
Dimfred/imxpy
289a67fa51ef7b33ee106a65ad69340d07c986b3
[ "MIT" ]
1
2022-01-10T15:01:04.000Z
2022-01-10T15:01:04.000Z
from pathlib import Path import sys import time # add parent dir of imxpy sys.path.insert(0, str(Path(__file__).parent.parent.parent)) from easydict import EasyDict as edict import pytest from imx_client import IMXClient from imx_objects import * def random_number(): import random return random.randint(0, 100000000000000000000000000000000000) @pytest.fixture def random_str(): return str(random_number()) @pytest.fixture def acc1(): acc = edict() acc.pk = "4c4b2554e43b374f4cafdd5adaeea5e9aff9b3be54d329bc939752bb747294b9" acc.addr = "0x77406103701907051070fc029e0a90d5be82f76c" return acc @pytest.fixture def acc2(): acc = edict() acc.pk = "ac5d52cc7f75e293ecf2a95f3fafef23c9f5345b4a434ed5bacffccbdbe944fd" acc.addr = "0xea047d1919b732a4b9b12337a60876536f4f2659" return acc @pytest.fixture def acc3(): acc = edict() acc.pk = "bfde975ea5aa3779c7e2f2aade7c2a594b53e32ee23a2ae395927ec5fce4aa4b" acc.addr = "0xd5f5ad7968147c2e198ddbc40868cb1c6f059c6d" return acc @pytest.fixture def one_eth(): return 1_000_000_000_000_000_000 @pytest.fixture def half_eth(one_eth): return one_eth // 2 @pytest.fixture(scope="function") def client(acc1): return IMXClient("test", pk=acc1.pk) @pytest.fixture(scope="function") def mainnet_client(): return IMXClient("main") @pytest.fixture(scope="function") def client2(acc2): return IMXClient("test", pk=acc2.pk) @pytest.fixture(scope="function") def project_id(client, acc1): params = CreateProjectParams( name="test_proj", company_name="test_company", contact_email="test@test.com" ) res = client.create_project(params) res = res.result() return res["result"]["id"] @pytest.fixture(scope="function") def random_addr(): import random allowed = "abcdef0123456789" addr = f"0x{''.join(random.choice(allowed) for _ in range(40))}" return addr @pytest.fixture def contract_addr(): return "0xb72d1aa092cf5b3b50dabb55bdab0f33dfab37b7" @pytest.fixture def unregistered_contract_addr(): return "0xb55016be31047c16c951612f3b0f7c5f92f1faf5" @pytest.fixture(scope="function") 
def token_id(client2, acc1, acc2, contract_addr): _token_id = 0 yield _token_id params = TransferParams( sender=acc2.addr, receiver=acc1.addr, token=ERC721(token_id=_token_id, contract_addr=contract_addr), ) client2.transfer(params) def mint_params(contract_addr, id_, addr): params = MintParams( contract_addr=contract_addr, targets=[ MintTarget( addr=addr, tokens=[ MintableToken( id=id_, blueprint=str(id_), ), ], ), ], ) return params @pytest.fixture(scope="function") def minted_nft_id(client, acc1, contract_addr): token_id = random_number() params = mint_params(contract_addr, token_id, acc1.addr) res = client.mint(params) res = res.result() # wait until the database has applied the state time.sleep(2) return token_id @pytest.fixture(scope="function") def valid_order_params(client, client2, acc2, contract_addr): # client1 is in control of the sc therefore he mints to acc2 token_id = random_number() params = mint_params(contract_addr, token_id, acc2.addr) res = client.mint(params) time.sleep(2) # client2 now has the nft and can create the order which client1 will buy params = CreateOrderParams( sender=acc2.addr, token_sell=ERC721(token_id=token_id, contract_addr=contract_addr), token_buy=ETH(quantity="0.000001"), ) res = client2.create_order(params) res = res.result() time.sleep(2) return (res["result"]["order_id"], token_id) @pytest.fixture def unregistered_addr(): return "0xd2Bf8229D98716abEA9D22453C5C5613078B2c46" @pytest.fixture def erc20_contract_addr(): return "0x4c04c39fb6d2b356ae8b06c47843576e32a1963e" @pytest.fixture def gods_unchained_addr(): return "0xacb3c6a43d15b907e8433077b6d38ae40936fe2c" @pytest.fixture def gods_addr(): return "0xccc8cb5229b0ac8069c51fd58367fd1e622afd97"
21.861538
84
0.697162
475
4,263
6.086316
0.277895
0.089934
0.066413
0.071947
0.198893
0.110688
0.067797
0.067797
0.067797
0.037357
0
0.132842
0.203612
4,263
194
85
21.974227
0.718704
0.046915
0
0.34375
0
0
0.192213
0.148595
0
0
0.093149
0
0
1
0.171875
false
0
0.070313
0.09375
0.40625
0.007813
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
6b93b639b40dce2081a3f1509a8777312189b2f5
73
py
Python
airbyte-integrations/connectors/source-twilio-singer/source_twilio_singer/__init__.py
rajatariya21/airbyte
11e70a7a96e2682b479afbe6f709b9a5fe9c4a8d
[ "MIT" ]
2
2021-03-02T09:17:41.000Z
2021-03-02T11:02:23.000Z
airbyte-integrations/connectors/source-twilio-singer/source_twilio_singer/__init__.py
rajatariya21/airbyte
11e70a7a96e2682b479afbe6f709b9a5fe9c4a8d
[ "MIT" ]
4
2021-09-01T00:33:50.000Z
2022-02-27T16:13:46.000Z
airbyte-integrations/connectors/source-twilio-singer/source_twilio_singer/__init__.py
rajatariya21/airbyte
11e70a7a96e2682b479afbe6f709b9a5fe9c4a8d
[ "MIT" ]
1
2021-07-02T15:08:53.000Z
2021-07-02T15:08:53.000Z
from .source import SourceTwilioSinger __all__ = ["SourceTwilioSinger"]
18.25
38
0.808219
6
73
9.166667
0.833333
0
0
0
0
0
0
0
0
0
0
0
0.109589
73
3
39
24.333333
0.846154
0
0
0
0
0
0.246575
0
0
0
0
0
0
1
0
false
0
0.5
0
0.5
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
1
0
0
0
0
3
6b93c4cb81d74f0acde8f0cc82aaa2e1626a119a
1,088
py
Python
off_policy_rl/off_policy_rl/utils/epoch.py
dti-research/ur-learning-shifting-for-grasping
2dfecf6b2dbe67b65af00fc0ae5f73be2cb8a801
[ "BSD-3-Clause" ]
1
2021-04-12T07:04:26.000Z
2021-04-12T07:04:26.000Z
off_policy_rl/off_policy_rl/utils/epoch.py
dti-research/ur-learning-shifting-for-grasping
2dfecf6b2dbe67b65af00fc0ae5f73be2cb8a801
[ "BSD-3-Clause" ]
1
2021-11-10T15:51:15.000Z
2021-11-10T15:51:15.000Z
off_policy_rl/off_policy_rl/utils/epoch.py
dti-research/ur-learning-shifting-for-grasping
2dfecf6b2dbe67b65af00fc0ae5f73be2cb8a801
[ "BSD-3-Clause" ]
null
null
null
from typing import List import numpy as np from off_policy_rl.utils.selection_method import SelectionMethod class Epoch: def __init__( self, number_episodes: int, selection_methods: List[SelectionMethod], probabilities: List[float] = None ): self.number_episodes = number_episodes if len(selection_methods) > 1 and len(selection_methods) != len(probabilities): raise AssertionError("The number of Selection Methods must match the number of probabilities") self.selection_methods = selection_methods if probabilities is not None: if sum(probabilities) != 1.0: raise AssertionError("The list of probabilities must add to 1.0" " (current sum: {})".format(sum(probabilities))) self.probabilities = probabilities def get_selection_method(self) -> SelectionMethod: if len(SelectionMethod) > 1: return np.random.choice(self.selection_methods, p=self.probabilities) return self.selection_methods[-1]
35.096774
106
0.661765
121
1,088
5.793388
0.404959
0.182596
0.085592
0
0
0
0
0
0
0
0
0.008728
0.262868
1,088
30
107
36.266667
0.865337
0
0
0
0
0
0.118566
0
0
0
0
0
0.086957
1
0.086957
false
0
0.130435
0
0.347826
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
6b940f509f4451581eb06a07caf3cae0277d933c
306
py
Python
nodestack/settings_example.py
chriscslaughter/nodestack
1905d5fd7d0e208287ceb6016852ada6d32b0984
[ "MIT" ]
null
null
null
nodestack/settings_example.py
chriscslaughter/nodestack
1905d5fd7d0e208287ceb6016852ada6d32b0984
[ "MIT" ]
3
2020-02-11T23:15:56.000Z
2021-06-10T20:52:54.000Z
nodestack/settings_example.py
chriscslaughter/nodestack
1905d5fd7d0e208287ceb6016852ada6d32b0984
[ "MIT" ]
null
null
null
DATABASES = { 'default': { 'ENGINE': 'django.db.backends.postgresql_psycopg2', 'NAME': '', 'USER': '', 'PASSWORD': '', 'HOST': '', 'PORT': 5432 } } REDIS_HOST = '18.215.184.182' REDIS_PORT = 6379 REDIS_PASSWORD = 'vwTe6yLaw5hX9dIdbfW0' REDIS_DB = 0
19.125
59
0.53268
29
306
5.448276
0.758621
0
0
0
0
0
0
0
0
0
0
0.114679
0.287582
306
15
60
20.4
0.610092
0
0
0
0
0
0.356209
0.124183
0
0
0
0
0
1
0
false
0.142857
0
0
0
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
1
0
0
0
0
0
2
6b947dc36dc1850a2e02965bc2a02ae7eeca3cad
11,761
py
Python
model/deterministic_decoder.py
illc-uva/deep-generative-lm
c65bdf9d72e7d9d4e02576b1e84bce623725a0cd
[ "MIT" ]
26
2019-04-18T13:07:34.000Z
2021-03-24T11:55:26.000Z
model/deterministic_decoder.py
illc-uva/deep-generative-lm
c65bdf9d72e7d9d4e02576b1e84bce623725a0cd
[ "MIT" ]
null
null
null
model/deterministic_decoder.py
illc-uva/deep-generative-lm
c65bdf9d72e7d9d4e02576b1e84bce623725a0cd
[ "MIT" ]
9
2019-04-18T23:00:46.000Z
2021-09-23T15:34:56.000Z
""" A deterministic decoder. """ import numpy as np import sys import os.path as osp from collections import defaultdict from warnings import warn import torch import torch.nn as nn import torch.nn.functional as F from torch.nn.utils.rnn import pack_padded_sequence, pad_packed_sequence # We include the path of the toplevel package in the system path so we can always use absolute imports within the package. toplevel_path = osp.abspath(osp.join(osp.dirname(__file__), '..')) if toplevel_path not in sys.path: sys.path.insert(1, toplevel_path) from model.base_decoder import BaseDecoder # noqa: E402 from util.error import InvalidArgumentError # noqa: E402 __author__ = "Tom Pelsmaeker" __copyright__ = "Copyright 2018" class DeterministicDecoder(BaseDecoder): """A deterministic decoder, i.e. a RNN with next-word prediction objective. Args: device(torch.device): the device (cpu/gpu) on which the model resides. seq_len(int): maximum length of sequences passed to the model. kl_step(int): step size of linear kl weight increment during training of the model. word_p(float): probability of dropping a word, i.e. mapping it to <unk>, before decoding. parameter_p(float): probability of dropping a row in the weight layers, using Gal's dropout on non-rec layers. var_mask(boolean): whether to use a different parameter dropout mask at every timestep. unk_index(int): index of the <unk> token in a one-hot representation. css(boolean): whether to use CSS softmax approximation. N(int): number of sequences in the dataset, for the regularization weight. rnn_type(str): which RNN to use. [GRU, LSTM] are supported. v_dim(int): size of the vocabulary. x_dim(int): size of input embeddings. h_dim(int): size of hidden layers of the RNN. l_dim(int): number of layers of the RNN. 
""" def __init__(self, device, seq_len, word_p, parameter_p, drop_type, unk_index, css, sparse, N, rnn_type, tie_in_out, v_dim, x_dim, h_dim, s_dim, l_dim): super(DeterministicDecoder, self).__init__(device, seq_len, word_p, parameter_p, drop_type, unk_index, css, N, rnn_type, v_dim, x_dim, h_dim, s_dim, l_dim) self.tie_in_out = tie_in_out # The model embeds words and passes them through the RNN to get a probability of next words. self.emb = nn.Embedding(v_dim, x_dim, sparse=bool(sparse)) # We currently support GRU and LSTM type RNNs if rnn_type == "GRU": if self.drop_type in ["varied", "shared"]: # Varied and shared dropout modes only drop input and output layer. Shared shares between timesteps. self.grnn = nn.GRU(x_dim, h_dim, l_dim, batch_first=True) else: self.grnn = nn.ModuleList([nn.GRUCell(x_dim, h_dim, 1)]) self.grnn.extend([nn.GRUCell(h_dim, h_dim, 1) for _ in range(l_dim - 1)]) elif rnn_type == "LSTM": if self.drop_type in ["varied", "shared"]: self.grnn = nn.LSTM(x_dim, h_dim, l_dim, batch_first=True) else: self.grnn = nn.ModuleList([nn.LSTMCell(x_dim, h_dim, 1)]) self.grnn.extend([nn.LSTMCell(h_dim, h_dim, 1) for _ in range(l_dim - 1)]) self.linear = nn.Linear(h_dim, v_dim) @property def linear(self): return self._linear @linear.setter def linear(self, val): self._linear = val if self.tie_in_out: if self.h_dim != self.x_dim: raise InvalidArgumentError("h_dim should match x_dim when tying weights.") self._linear.weight = self.emb.weight def forward(self, data, log_likelihood=False, extensive=False): """Forward pass through the decoder which returns a loss and prediction. Args: data(list of torch.Tensor): a batch of datapoints, containing at least a tensor of sequences and optionally tensors with length information and a mask as well, given variable length sequences. Returns: losses(dict of torch.FloatTensor): computed losses, averaged over the batch, summed over the sequence. 
pred(torch.LongTensor): most probable sequences given the data, as predicted by the model. """ x_in, x_len, x_mask = self._unpack_data(data, 3) losses = defaultdict(lambda: torch.tensor(0., device=self.device)) # Before decoding, we map a fraction of words to <UNK>, weakening the Decoder self.word_dropout.sample_mask(self.word_p, x_in.shape) x_dropped = x_in.clone() x_dropped[self.word_dropout._mask == 0] = self.unk_index x = self.emb(x_dropped[:, :-1]) scores = self._rnn_forward(x, x_len) # Compute loss, averaged over the batch, but summed over the sequence if self.css and self.training: loss = self._css(scores, x_in[:, 1:]) else: loss = self.reconstruction_loss(scores.contiguous().view( [-1, scores.shape[2]]), x_in[:, 1:].contiguous().view([-1])).view(scores.shape[0], scores.shape[1]) if x_len is not None: # If we had padded sequences as input, we need to mask the padding from the loss losses["NLL"] = torch.sum(torch.mean(loss * x_mask[:, 1:], 0)) else: losses["NLL"] = torch.sum(torch.mean(loss, 0)) # We also return the predictions, i.e. the most probable token per position in the sequences pred = torch.max(scores.detach(), dim=2)[1] # We use L2-regularization scaled by dropout on the network layers (Gal, 2015) losses["L2"] = self._l2_regularization() if log_likelihood: losses["NLL"] = losses["NLL"].unsqueeze(0) if extensive: return losses, pred, x.new_tensor([[1, 1]]), x.new_tensor([[1, 1]]), x.new_tensor([[1, 1]]), \ x.new_tensor([[1, 1]]), x.new_tensor([[1]]), x.new_tensor([[1]]) else: return losses, pred def _rnn_forward(self, x, x_len): """Recurrent part of the forward pass. 
Decides between fast or slow based on the dropout type.""" # Drop rows of the input shape = torch.Size(x.shape) if self.var_mask else torch.Size([x.shape[0], 1, self.x_dim]) h = self.parameter_dropout_in(x, self.parameter_p, shape=shape) # We have to run a (slow) for loop to use recurrent dropout if self.drop_type == "recurrent": # Sample fixed dropout masks for every timestep shape = torch.Size([x.shape[0], int(self.h_dim/self.l_dim)]) for i in range(self.l_dim): self.parameter_dropout_hidden[i].sample_mask(self.parameter_p, shape) self.parameter_dropout_out[i].sample_mask(self.parameter_p, shape) if self.rnn_type == "LSTM": self.parameter_dropout_context[i].sample_mask(self.parameter_p, shape) # Forward passing with application of dropout scores = [] if self.rnn_type == "GRU": h_p = list(torch.unbind(self._init_hidden(x.shape[0]))) else: h_p = list(torch.unbind(self._init_hidden(x.shape[0]))) c_p = list(torch.unbind(self._init_hidden(x.shape[0]))) for j in range(x.shape[1]): h_j = h[:, j, :] for i, grnn in enumerate(self.grnn): if self.rnn_type == "GRU": h_j = grnn(h_j, h_p[i]) h_p[i] = self.parameter_dropout_hidden[i].apply_mask(h_j) else: h_j, c_j = grnn(h_j, (h_p[i], c_p[i])) h_p[i] = self.parameter_dropout_hidden[i].apply_mask(h_j) c_p[i] = self.parameter_dropout_context[i].apply_mask(c_j) h_j = self.parameter_dropout_out[i].apply_mask(h_j) scores.append(self.linear(h_j)) scores = torch.stack(scores, 1) # For the input/output dropout we can use fast CUDA RNNs else: # To h: [batch_size, seq_len, h_dim] we apply the same mask: [batch_size, 1, h_dim] at every timestep shape = torch.Size(h.shape) if self.var_mask else torch.Size([x.shape[0], 1, self.h_dim]) if x_len is not None: h = pack_padded_sequence(h, x_len - 1, batch_first=True) h, _ = self.grnn(h) if x_len is not None: h = pad_packed_sequence(h, batch_first=True, total_length=x.shape[1])[0] # We also apply the same dropout mask to every timestep in the output hidden states h = self.parameter_dropout_out(h, 
self.parameter_p, shape=shape) scores = self.linear(h) return scores def sample_sequences(self, x_i, seq_len, eos_token, pad_token, sample_softmax=False): """'Sample' sequences from the (learned) decoder given a prefix of tokens. Args: x_i(torch.Tensor): initial tokens or sequence of tokens to start generating from. seq_len(int): length of the sampled sequences after the prefix. Defaults to preset seq_len. eos_token(int): the end of sentence indicator. pad_token(int): the token used for padding sentences shorter than seq_len. Returns: list: a list of sampled sequences of pre-defined length. """ if seq_len is not None: self.seq_len = seq_len else: warn("No sequence length provided, preset seq_len will be used.") with torch.no_grad(): if sample_softmax: h_i = None c_i = None else: h_i = self._sample_hidden(x_i.shape[0]) c_i = self._sample_hidden(x_i.shape[0]) samples = [] # Sampling pass through the sequential decoder # The prefix is automatically consumed by the first step through the RNN for i in range(x_i.shape[1]): samples.append(x_i[:, i].squeeze().tolist()) for i in range(self.seq_len): x_i = self.emb(x_i) if self.rnn_type == "GRU": h, h_i = self.grnn(x_i, h_i) else: h, h_i, c_i = self.grnn(x_i, (h_i, c_i)) # scores: [batch_size, h_dim] scores = self.linear(h[:, -1]) # x_i: [batch_size, 1] if sample_softmax: # Sample the output Bernoulli x_i = torch.multinomial(F.softmax(scores, 1), 1) else: # Argmax based on stochasticity from hidden x_i = torch.max(scores, dim=1, keepdim=True)[1] samples.append(x_i.squeeze().tolist()) # Pad samples after the first <eos> token samples = np.array(samples).T eos_spot = np.argwhere(samples == eos_token) prev_row = -1 for spot in eos_spot: if spot[0] != prev_row: try: samples[spot[0], spot[1]+1:] = pad_token except IndexError: pass else: pass prev_row = spot[0] return list(samples) def _sample_hidden(self, batch_size): """Sample the hidden state of a GRU RNN from a standard normal.""" return 
torch.normal(mean=torch.zeros((self.l_dim, batch_size, self.h_dim), device=self.device))
45.762646
122
0.596803
1,677
11,761
4.013119
0.206917
0.011887
0.026746
0.007132
0.218276
0.168053
0.150074
0.108172
0.100743
0.093314
0
0.009768
0.303631
11,761
256
123
45.941406
0.811966
0.310348
0
0.2
0
0
0.025168
0
0
0
0
0
0
1
0.045161
false
0.012903
0.070968
0.006452
0.16129
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
6b96bddabe2ed1e571fa0fb67e8fe24ad3b42daf
2,316
py
Python
trove/tests/scenario/runners/instance_force_delete_runners.py
sapcc/trove
c03ec0827687fba202f72f4d264ab70158604857
[ "Apache-2.0" ]
244
2015-01-01T12:04:44.000Z
2022-03-25T23:38:39.000Z
trove/tests/scenario/runners/instance_force_delete_runners.py
sapcc/trove
c03ec0827687fba202f72f4d264ab70158604857
[ "Apache-2.0" ]
6
2015-08-18T08:19:10.000Z
2022-03-05T02:32:36.000Z
trove/tests/scenario/runners/instance_force_delete_runners.py
sapcc/trove
c03ec0827687fba202f72f4d264ab70158604857
[ "Apache-2.0" ]
178
2015-01-02T15:16:58.000Z
2022-03-23T03:30:20.000Z
# Copyright 2016 Tesora Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from proboscis import SkipTest from trove.tests.scenario import runners from trove.tests.scenario.runners.test_runners import SkipKnownBug from trove.tests.scenario.runners.test_runners import TestRunner class InstanceForceDeleteRunner(TestRunner): def __init__(self): super(InstanceForceDeleteRunner, self).__init__(sleep_time=1) self.build_inst_id = None def run_create_build_instance(self, expected_states=['NEW', 'BUILD'], expected_http_code=200): if self.is_using_existing_instance: raise SkipTest("Using an existing instance.") name = self.instance_info.name + '_build' flavor = self.get_instance_flavor() client = self.auth_client inst = client.instances.create( name, self.get_flavor_href(flavor), self.instance_info.volume, nics=self.instance_info.nics, datastore=self.instance_info.dbaas_datastore, datastore_version=self.instance_info.dbaas_datastore_version) self.assert_client_code(client, expected_http_code) self.assert_instance_action([inst.id], expected_states) self.build_inst_id = inst.id def run_delete_build_instance(self, expected_http_code=202): if self.build_inst_id: client = self.admin_client client.instances.force_delete(self.build_inst_id) self.assert_client_code(client, expected_http_code) def run_wait_for_force_delete(self): raise SkipKnownBug(runners.BUG_FORCE_DELETE_FAILS) # if self.build_inst_id: # self.assert_all_gone([self.build_inst_id], ['SHUTDOWN'])
38.6
78
0.702936
298
2,316
5.214765
0.419463
0.030888
0.050193
0.057915
0.189833
0.138996
0.113256
0.113256
0
0
0
0.008319
0.221503
2,316
59
79
39.254237
0.853577
0.294041
0
0.0625
0
0
0.02534
0
0
0
0
0
0.09375
1
0.125
false
0
0.125
0
0.28125
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
6b976f533f13bffac48ebcfedaef5b78d985ab9b
489
py
Python
tests/providers/test_twilio.py
yaakov-github/notifiers
ae204bc08fd9efa06597e5e2cf30ad0a305c94bb
[ "MIT" ]
2
2019-10-06T01:53:42.000Z
2019-11-19T07:52:17.000Z
tests/providers/test_twilio.py
Delgan/notifiers
8dd2a8aaa81a9433034a8f347d984c8aa80be9af
[ "MIT" ]
null
null
null
tests/providers/test_twilio.py
Delgan/notifiers
8dd2a8aaa81a9433034a8f347d984c8aa80be9af
[ "MIT" ]
null
null
null
import pytest provider = 'twilio' class TestTwilio: def test_twilio_metadata(self, provider): assert provider.metadata == { 'base_url': 'https://api.twilio.com/2010-04-01/Accounts/{}/Messages.json', 'name': 'twilio', 'site_url': 'https://www.twilio.com/' } @pytest.mark.online def test_sanity(self, provider): data = { 'message': 'foo' } provider.notify(**data, raise_on_errors=True)
23.285714
86
0.570552
53
489
5.132075
0.679245
0.051471
0
0
0
0
0
0
0
0
0
0.022792
0.282209
489
20
87
24.45
0.752137
0
0
0
0
0
0.253579
0
0
0
0
0
0.066667
1
0.133333
false
0
0.066667
0
0.266667
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
6b97a216ca7bed17169dea75598a8db7b38aa938
3,225
py
Python
tests/conftest.py
jeromedockes/neuroquery_image_search
2222caf464de84694273a494ec2d00071b3d14a2
[ "BSD-3-Clause" ]
3
2021-01-26T20:27:24.000Z
2021-09-28T19:51:36.000Z
tests/conftest.py
jeromedockes/neuroquery_image_search
2222caf464de84694273a494ec2d00071b3d14a2
[ "BSD-3-Clause" ]
null
null
null
tests/conftest.py
jeromedockes/neuroquery_image_search
2222caf464de84694273a494ec2d00071b3d14a2
[ "BSD-3-Clause" ]
1
2021-01-21T22:27:16.000Z
2021-01-21T22:27:16.000Z
from pathlib import Path import tempfile from unittest.mock import MagicMock import pytest import numpy as np import pandas as pd from scipy import sparse import nibabel import nilearn from nilearn.datasets import _testing from nilearn.datasets._testing import request_mocker # noqa: F401 def make_fake_img(): rng = np.random.default_rng(0) img = rng.random(size=(4, 3, 5)) return nibabel.Nifti1Image(img, np.eye(4)) @pytest.fixture() def fake_img(): return make_fake_img() def make_fake_data(): n_voxels, n_components, n_studies, n_terms = 23, 8, 12, 9 rng = np.random.default_rng(0) difumo_maps = rng.random((n_components, n_voxels)) difumo_maps[rng.binomial(1, 0.3, size=difumo_maps.shape).astype(int)] = 0 difumo_inverse_covariance = np.linalg.pinv(difumo_maps.dot(difumo_maps.T)) difumo_maps = sparse.csr_matrix(difumo_maps) projections = rng.random((n_studies, n_components)) term_projections = rng.random((n_terms, n_components)) articles_info = pd.DataFrame({"pmid": np.arange(n_studies) + 100}) articles_info["title"] = [ f"title {pmid}" for pmid in articles_info["pmid"] ] articles_info["pubmed_url"] = [ f"url {pmid}" for pmid in articles_info["pmid"] ] mask = np.zeros(4 * 3 * 5, dtype=int) mask[:n_voxels] = 1 mask = mask.reshape((4, 3, 5)) mask_img = nibabel.Nifti1Image(mask, np.eye(4)) doc_freq = pd.DataFrame( { "term": ["term_{i}" for i in range(n_terms)], "document_frequency": np.arange(n_terms), } ) with tempfile.TemporaryDirectory() as temp_dir: temp_dir = Path(temp_dir) sparse.save_npz(temp_dir / "difumo_maps.npz", difumo_maps) np.save( temp_dir / "difumo_inverse_covariance.npy", difumo_inverse_covariance, ) np.save(temp_dir / "projections.npy", projections) np.save(temp_dir / "term_projections.npy", term_projections) articles_info.to_csv(temp_dir / "articles-info.csv", index=False) mask_img.to_filename(str(temp_dir / "mask.nii.gz")) doc_freq.to_csv( str(temp_dir / "document_frequencies.csv"), index=False ) archive = _testing.dict_to_archive( 
{"neuroquery_image_search_data": temp_dir} ) return archive @pytest.fixture(autouse=True) def temp_data_dir(tmp_path_factory, monkeypatch): home_dir = tmp_path_factory.mktemp("temp_home") monkeypatch.setenv("HOME", str(home_dir)) monkeypatch.setenv("USERPROFILE", str(home_dir)) data_dir = home_dir / "neuroquery_data" data_dir.mkdir() monkeypatch.setenv("NEUROQUERY_DATA", str(data_dir)) @pytest.fixture(autouse=True, scope="function") def map_mock_requests(request_mocker): request_mocker.url_mapping[ "https://osf.io/mx3t4/download" ] = make_fake_data() return request_mocker @pytest.fixture(autouse=True) def patch_nilearn(monkeypatch): def fake_motor_task(*args, **kwargs): return {"images": [make_fake_img()]} monkeypatch.setattr( nilearn.datasets, "fetch_neurovault_motor_task", fake_motor_task ) monkeypatch.setattr("webbrowser.open", MagicMock())
32.25
78
0.68093
438
3,225
4.753425
0.321918
0.036984
0.01585
0.018732
0.074928
0.048991
0.027858
0
0
0
0
0.013143
0.197829
3,225
99
79
32.575758
0.791651
0.003101
0
0.047619
0
0
0.117336
0.033613
0
0
0
0
0
1
0.083333
false
0
0.130952
0.02381
0.27381
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
6b9b875fc942d57331cb053f791fdadd062c380c
515
py
Python
src/handlers/os.py
rodones/daemon
5bcc680962274390ab1a7ca36eb3a621d4a2144d
[ "BSD-3-Clause" ]
null
null
null
src/handlers/os.py
rodones/daemon
5bcc680962274390ab1a7ca36eb3a621d4a2144d
[ "BSD-3-Clause" ]
null
null
null
src/handlers/os.py
rodones/daemon
5bcc680962274390ab1a7ca36eb3a621d4a2144d
[ "BSD-3-Clause" ]
null
null
null
from core.handler import handle import os from datetime import datetime import psutil @handle("os/name") def get_name(): return os.uname().nodename @handle("os/kernel") def get_kernel(): return os.uname().release @handle("os/user") def get_user(): return psutil.Process().username() @handle("os/boottime") def get_boottime(): return datetime.fromtimestamp(psutil.boot_time()).strftime("%Y-%m-%d %H:%M:%S") @handle("os/processes") def get_processes(): return psutil.Process().as_dict()
17.166667
83
0.699029
73
515
4.835616
0.438356
0.113314
0.073654
0
0
0
0
0
0
0
0
0
0.132039
515
29
84
17.758621
0.789709
0
0
0
0
0
0.12233
0
0
0
0
0
0
1
0.263158
true
0
0.210526
0.263158
0.736842
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
1
1
0
0
1
1
0
0
4
6b9bad3a40c3c22732dfc5fe66b2f8ecdcf3a1fd
1,162
py
Python
mall_spider/spiders/actions/action.py
524243642/taobao_spider
9cdaed1c7a67fc1f35ee2af2e18313cedf3b1e5e
[ "Unlicense" ]
12
2019-06-06T12:23:08.000Z
2021-06-15T17:50:07.000Z
mall_spider/spiders/actions/action.py
524243642/mall_spider
9cdaed1c7a67fc1f35ee2af2e18313cedf3b1e5e
[ "Unlicense" ]
3
2021-03-31T19:02:47.000Z
2022-02-11T03:43:15.000Z
mall_spider/spiders/actions/action.py
524243642/taobao_spider
9cdaed1c7a67fc1f35ee2af2e18313cedf3b1e5e
[ "Unlicense" ]
5
2019-09-17T03:55:56.000Z
2020-12-18T03:34:03.000Z
# coding: utf-8 import traceback from abc import ABCMeta, abstractmethod from mall_spider.spiders.actions.executable import Executable class Action(Executable): __metaclass__ = ABCMeta def execute(self, context): self.on_create(context=context) self.on_start(context=context) try: result = self.do_execute(context=context) self.on_complete(context=context) return result except Exception as e: import sys exc_info = sys.exc_info() self.on_error(context=context, exp=traceback.format_exc()) # raise exc_info[0], exc_info[1], exc_info[2] raise e finally: self.on_destroy(context=context) @abstractmethod def do_execute(self, context): pass @abstractmethod def on_create(self, context): pass @abstractmethod def on_start(self, context): pass @abstractmethod def on_error(self, context, exp): pass @abstractmethod def on_complete(self, context): pass @abstractmethod def on_destroy(self, context): pass
22.784314
70
0.623924
132
1,162
5.318182
0.348485
0.109687
0.106838
0.163818
0.193732
0.193732
0
0
0
0
0
0.004896
0.296902
1,162
50
71
23.24
0.854345
0.049053
0
0.324324
0
0
0
0
0
0
0
0
0
1
0.189189
false
0.162162
0.108108
0
0.378378
0
0
0
0
null
0
0
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
1
0
0
0
0
0
2
6b9e0d5209b7b62f3db7419587e03282debb98ce
524
py
Python
Praticas/pratica03/latitude_longitude.py
andrepinto42/Processamento-de-Linguagens
98facba0d1c9ca751743b1c83dca7f441aa182e9
[ "MIT" ]
1
2022-03-18T21:39:47.000Z
2022-03-18T21:39:47.000Z
Praticas/pratica03/latitude_longitude.py
andrepinto42/Processamento-de-Linguagens
98facba0d1c9ca751743b1c83dca7f441aa182e9
[ "MIT" ]
null
null
null
Praticas/pratica03/latitude_longitude.py
andrepinto42/Processamento-de-Linguagens
98facba0d1c9ca751743b1c83dca7f441aa182e9
[ "MIT" ]
null
null
null
import re import sys real_num = r'[+-]?\d+(?:\.\d+)?' # Falta colocar as os paratenses para identificar o grupo correto coord = rf'\(({real_num}),\s*({real_num})\)' for line in sys.stdin: line = re.sub(coord,r"<point lat='\1', lon='\2' />",line) if (line): print(line) quit() # Tambem dá para executar assim coord = rf'\((?P<lat>{real_num}),\s*(?P<lon>{real_num})\)' for line in sys.stdin: line = re.sub(coord,r"<point lat='\g<lat>', lon='\g<lon>' />",line) if (line): print(line)
22.782609
71
0.574427
85
524
3.482353
0.458824
0.118243
0.054054
0.094595
0.445946
0.317568
0.317568
0.317568
0.317568
0.317568
0
0.004717
0.19084
524
23
72
22.782609
0.693396
0.179389
0
0.428571
0
0
0.378505
0.182243
0
0
0
0
0
1
0
false
0
0.142857
0
0.142857
0.142857
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
6b9f0ffcb6c75b079a5eb98125aa38eb4f61fd76
375
py
Python
includes/prav_modules/test2.py
praveen868686/DAGAirflow-with-Python
483fffc2e7f987e523ae3653a90869a67cdad886
[ "MIT" ]
null
null
null
includes/prav_modules/test2.py
praveen868686/DAGAirflow-with-Python
483fffc2e7f987e523ae3653a90869a67cdad886
[ "MIT" ]
null
null
null
includes/prav_modules/test2.py
praveen868686/DAGAirflow-with-Python
483fffc2e7f987e523ae3653a90869a67cdad886
[ "MIT" ]
null
null
null
import pandas as pd import numpy as np def see(): m = pd.read_csv('C:/dag/Expenditure.csv') #m = pd.read_csv('C:\dag\Expendture.csv') # print(m.head()) countt= m ['Category'].value_counts(sort=True, ascending=True).to_frame() print(countt) pivottable= m.pivot_table(index=['Category'], values=['Myself'], aggfunc='sum') print(pivottable) see()
25
83
0.656
55
375
4.381818
0.618182
0.024896
0.058091
0.082988
0.116183
0.116183
0
0
0
0
0
0
0.16
375
14
84
26.785714
0.765079
0.149333
0
0
0
0
0.148265
0.069401
0
0
0
0
0
1
0.111111
false
0
0.222222
0
0.333333
0.222222
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
6ba002cacb0aea0efbea0d09c9f0563aeccf4db3
4,134
py
Python
MLP/generate_args.py
AMNoureldin/COMP551-HW4
0c855372862300cc0454f144bb40b2e72ba93861
[ "Apache-2.0" ]
15
2021-03-18T03:00:15.000Z
2022-02-28T04:42:54.000Z
MLP/generate_args.py
AMNoureldin/COMP551-HW4
0c855372862300cc0454f144bb40b2e72ba93861
[ "Apache-2.0" ]
null
null
null
MLP/generate_args.py
AMNoureldin/COMP551-HW4
0c855372862300cc0454f144bb40b2e72ba93861
[ "Apache-2.0" ]
2
2021-11-05T15:50:20.000Z
2022-01-16T11:48:27.000Z
# Copyright 2021 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import numpy as np from datetime import datetime from utils import * #===== time stamp for experiment file names timestamp = datetime.now() timestamp = timestamp.strftime("%d-%m-%Y_%H%M") script_name = 'main' # main script to be executed #================================ # args for main script # #================================ seed= 1 # setting random seed for reproducibility #===== MODEL ===== # model_type= 'MLP1' no_bias= True # don't use biases in layers make_linear= False # linear activation function (if False, then ReLU) no_BN= True # disable BatchNorm NTK_style= True # NTK-style parametrization of the network base_width= 8 all_widths= [8, 32, 128, 216, 328, 512, 635] fract_freeze_cl= 0 # allowed fraction of all cl-layer weights that may be frozen dense_only= False # consider dense models only, no weight freezing #===== TRAINING ===== # no_ES= True # disable Early Stopping train_subset_size= 2048 # train on a subset of the train set mbs= 256 # mini-batch size max_epochs= 300 # max number of training epochs #===== DATASET ===== # dataset= 'MNIST' normalize_pixelwise= True #=== for NTK-style nets, the LR value is width-dependent # loading optimized LR values for each width from file if NTK_style: bta_avg_and_lr= torch.load('optimized_LR_for_NTK_style_MLP1.pt') # NWTF (for "Num. Weights To Freeze") is a dictionary with # key = width # val = [(nwtf_cl, nwtf_fc)_1, (nwtf_cl, nwtf_fc)_2, ...] 
# i.e., a list of valid combinations of weights to freeze for the respective layer (cl and fc) if dense_only: NWTF = {base_width: [(0,0)]} else: NWTF = get_NWTF(base_width, all_widths, fract_freeze_cl) #=== tags for file names bias_tag='_no_bias' if no_bias else '' NTK_tag='_NTK_style' if NTK_style else '' act_fctn='Linear' if make_linear else 'ReLU' job_configs=[] for width, val in NWTF.items(): for nwtf_cl,nwtf_fc in val: cur_base_width=width if nwtf_cl==nwtf_fc else base_width # compose name for output dir output_dir = f'{dataset}_{model_type}_{NTK_tag}' output_dir+= f'_base_{cur_base_width}_width_{width}_{act_fctn}{bias_tag}' if train_subset_size>0: output_dir+=f'_train_on_{train_subset_size}_samples' if normalize_pixelwise: output_dir+=f'_pixelwise_normalization' if NTK_style: # get LR from file lrkey=f'{cur_base_width}_{width}' lr=bta_avg_and_lr[lrkey] else: lr= 0.1 config ={ 'base_width': int(cur_base_width), 'width': int(width), 'lr': lr, 'seed': seed, 'nwtf_cl': int(nwtf_cl), 'nwtf_fc': int(nwtf_fc), 'dataset': dataset, 'normalize_pixelwise': normalize_pixelwise, 'train_subset_size': train_subset_size, 'no_ES': no_ES, 'max_epochs': max_epochs, 'mbs': mbs, 'no_bias': no_bias, 'NTK_style': NTK_style, 'make_linear': make_linear, 'no_BN': no_BN, 'output_dir': output_dir } job_configs.append(config) for config in job_configs: my_str=f'\npython -m {script_name} ' for k, v in config.items(): if isinstance(v, bool): if v: my_str+=f'--{k} ' else: my_str+=f'--{k} {v} ' print(my_str)
32.046512
94
0.618287
577
4,134
4.211438
0.37435
0.032922
0.030864
0.024691
0
0
0
0
0
0
0
0.015815
0.265844
4,134
128
95
32.296875
0.784843
0.376149
0
0.042254
0
0
0.174566
0.082149
0
0
0
0
0
1
0
false
0
0.042254
0
0.042254
0.014085
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
6ba2a99855aee76d9f42b28ebf924f529a089bcd
854
py
Python
data/datatable.py
sisiwei/impossible-food
567e0811d5d88a83eae6dadf526bc048e7057121
[ "MIT" ]
1
2015-04-11T10:27:29.000Z
2015-04-11T10:27:29.000Z
data/datatable.py
sisiwei/impossible-food
567e0811d5d88a83eae6dadf526bc048e7057121
[ "MIT" ]
null
null
null
data/datatable.py
sisiwei/impossible-food
567e0811d5d88a83eae6dadf526bc048e7057121
[ "MIT" ]
null
null
null
import json import sys from multiprocessing import Pool DOMAIN = "TM" def read_file(year): data = [] with open("%s-%s.json" % (DOMAIN, year), 'rb') as f: j = json.loads(f.read()) for row in j: data.append({ 'domain': row[1], 'country': row[2], 'country_code': int(row[3]), 'item': row[4], 'item_code': int(row[5]), 'element': row[6], 'element_code': int(row[7]), 'year': int(row[8]), 'units': row[9], 'value': row[10], 'flag': row[11] }) print j[0] #print year, len(data) #print data[100] return data if __name__ == "__main__": pool = Pool(processes=3) #datas = pool.map(read_file, (2011, 2009, 2008)) datas = [] for year in (2011, 2009, 2008): datas.append(read_file(year)) data = reduce(lambda a, b: a+b, datas)
22.473684
54
0.539813
123
854
3.634146
0.504065
0.053691
0.067114
0.071588
0
0
0
0
0
0
0
0.067742
0.274005
854
37
55
23.081081
0.653226
0.09719
0
0
0
0
0.126302
0
0
0
0
0
0
0
null
null
0
0.1
null
null
0.033333
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
1
0
0
0
0
0
0
0
0
1
6ba35922a8da9226341017db55248451774263a5
38,712
py
Python
EasyRegression.py
pankajchejara23/EasyRegression
7f76d92c4a9d056a83bde6abc2fd6eb980602e44
[ "MIT" ]
1
2021-04-19T16:47:27.000Z
2021-04-19T16:47:27.000Z
EasyRegression.py
pankajchejara23/EasyRegression
7f76d92c4a9d056a83bde6abc2fd6eb980602e44
[ "MIT" ]
null
null
null
EasyRegression.py
pankajchejara23/EasyRegression
7f76d92c4a9d056a83bde6abc2fd6eb980602e44
[ "MIT" ]
null
null
null
import pandas as pd import matplotlib.pyplot as plt import librosa import seaborn as sns from sklearn.model_selection import train_test_split import math from sklearn.model_selection import LeaveOneGroupOut from sklearn.metrics import mean_squared_error, mean_absolute_error import traceback import statistics # Regression Model from sklearn.linear_model import LinearRegression from sklearn.linear_model import Ridge from sklearn.linear_model import Lasso from sklearn.linear_model import ElasticNet from sklearn.linear_model import Lars from sklearn.linear_model import BayesianRidge from sklearn.linear_model import SGDRegressor from sklearn.linear_model import RANSACRegressor from pyfiglet import Figlet from sklearn.model_selection import cross_val_score from joblib import dump, load from sklearn.kernel_ridge import KernelRidge from sklearn.tree import DecisionTreeRegressor from xgboost import XGBRegressor from sklearn.svm import SVR from sklearn.neighbors import KNeighborsRegressor from sklearn.ensemble import RandomForestRegressor from sklearn.ensemble import AdaBoostRegressor from sklearn.ensemble import ExtraTreesRegressor from sklearn.ensemble import GradientBoostingRegressor from sklearn.ensemble import VotingRegressor from sklearn.ensemble import StackingRegressor from sklearn.neural_network import MLPRegressor from sklearn.model_selection import cross_val_score import statistics from sklearn.model_selection import cross_validate # Dimensionality reduction from sklearn.decomposition import PCA from sklearn import manifold import numpy as np from sklearn.model_selection import GridSearchCV from scipy.special import entr import random from sklearn import metrics from sklearn.preprocessing import StandardScaler from sklearn.preprocessing import MinMaxScaler from sklearn.model_selection import train_test_split from sklearn.preprocessing import StandardScaler from sklearn.neural_network import MLPClassifier from sklearn.neighbors import KNeighborsClassifier from 
sklearn.svm import SVC from sklearn.gaussian_process import GaussianProcessClassifier from sklearn.gaussian_process.kernels import RBF from sklearn.tree import DecisionTreeClassifier from sklearn.ensemble import RandomForestClassifier, AdaBoostClassifier from sklearn.naive_bayes import GaussianNB from sklearn.discriminant_analysis import QuadraticDiscriminantAnalysis from sklearn import metrics from art import * class EasyRegression: def __init__(self): print(text2art('Easy')) print(text2art('Regression')) self.seed = 40 self.strategy = None self.parameterFound = dict() self.configured = False self.models = None # Scalers self.std = StandardScaler() self.mmax = MinMaxScaler() self.random_state = 42 self.feature_set = dict() self.label_set = dict() self.groups = None self.datasets = None self.label = None self.flagDataset = False self.flagGroup = False self.flagParameterFind = False self.train_test = None self.cross_val = None self.leave_group = None self.leave_dataset = None self.stratified = None def loadFeature(self,feature_file,feature_type,feature_name): if len(self.feature_set) == 0: print('-------------------------------') print(' STEP : Features loading') print('-------------------------------') if feature_type not in ['ind','grp']: print('===> Error: Undefined feature type') return else: try: if feature_name in self.feature_set.keys(): print('===> Feature with name ',feature_name,' already exist. 
Choose a different name') return else: tmp = pd.read_csv(feature_file) if len(self.feature_set) > 0: first_feat = self.feature_set[list(self.feature_set.keys())[0]] if tmp.shape[0] != first_feat[2].shape[0]: print('===> Error: Mismatch in feature size with previously added features ',first_feat[1] ) return self.feature_set[feature_name] = [feature_type,feature_name,tmp] print('===> Feature file:',feature_file,' is loaded successfully !') print('===> Summary:') print(' #instances:',tmp.shape[0]) print(' #attributes:',tmp.shape[1]) num_cols = tmp.select_dtypes(['int64','float64']) print(' #numeric-attributes:',num_cols.shape[1]) print('') return num_cols except: print('===> Error occurred while loading the file') traceback.print_exc() def loadLabels(self,label_file): try: print('-------------------------------') print(' STEP : Labels loading') print('-------------------------------') tmp = pd.read_csv(label_file) if len(self.feature_set) > 0: first_feat = self.feature_set[list(self.feature_set.keys())[0]] if tmp.shape[0] != first_feat[2].shape[0]: print(' Error: Mismatch in feature size with loaded feature ',first_feat[1] ) return None for label in tmp.columns: self.label_set[label] = tmp[label] print('===> Label file:',label_file,' is loaded successfully !') print('===> Summary:') print(' #labels:',len(tmp.columns.tolist())) print(' labels:', tmp.columns.tolist()) print('') return tmp except: print('===> Error occurred while loading the file:',label_file) traceback.print_exc() return None def feature_name_check(self,feature_name): if feature_name not in self.feature_set.keys(): print(' Feature name:', feature_name,' is not available.') return None def label_name_check(self,label_name): if label_name not in self.label_set.keys(): print(' Label name:',label_name,' is not available.') return None def extractFeatures(self,data,cor=.80): print('-------------------------------') print(' STEP : Feature Extraction ') print('-------------------------------') 
correlated_features = set() features = data correlation_matrix = features.corr() for i in range(len(correlation_matrix .columns)): for j in range(i): if abs(correlation_matrix.iloc[i, j]) > cor: colname = correlation_matrix.columns[i] correlated_features.add(colname) #print('Correlated Features:') #print(correlated_features) features.drop(labels=correlated_features,axis=1,inplace=True) print('===> ',len(correlated_features),' correlated features are removed.') print('===> Final features shape:',features.shape) return features def findCorrelation(self,label_name=None,sort=True): if self.dataReady == False: print('Data is not ready yet for analysis.') return if label_name is not None: if label_name in self.labels.columns: tmp_features = self.features.copy() tmp_features[label_name] = self.labels[label_name] cor_table = tmp_features.corr() print(' Correlation ') print(' -------------------------------') print(cor_table[label_name]) print(' -------------------------------') else: if self.labels.shape[1] > 1: print(' There are more than one label available.') print(self.labels.columns) print('Deafult: first column is used to computer correlation') label_name = self.labels.columns[0] tmp_features = self.features.copy() tmp_features[label_name] = self.labels[label_name] cor_table = tmp_features.corr() print(' Correlation ') print(' -------------------------------') print(cor_table[label_name]) print(' -------------------------------') def setGroupFeatureLabels(self,feat_labels): self.group_feature_labels = feat_labels """ This function performs group-level feature computation supported fusions: Dimensionality reduction, Entropy, Gini, Average """ def getGroupFeatures(self,data): group_feature_labels = ['add','del','speak','turns'] features_group = dict() # iterate for each group-level feature for grp_feature in group_feature_labels: tmp = list() # get all column names similar to grp_feature for indiv_feature in data.columns: if grp_feature in indiv_feature: 
tmp.append(indiv_feature) features_group[grp_feature] = tmp.copy() return features_group # preparing gini coefficient def getGINI(self,data): """Calculate the Gini coefficient of a numpy array.""" print('-------------------------------') print(' STEP : Feature Fusion using Gini') print('-------------------------------') group_features = self.getGroupFeatures(data) gini = dict() for key in group_features.keys(): tmp = data[group_features[key]].values tmp = tmp + 0.0000001 tmp = np.sort(tmp) index = np.arange(1,tmp.shape[1]+1) n = tmp.shape[1] key = 'grp_gini_'+key gini[key] = ((np.sum((2 * index - n - 1) * tmp,axis=1)) / (n * np.sum(tmp,axis=1))) #Gini coefficient gini_features = pd.DataFrame(gini) return gini_features # Compute entropy features for individual features def getEntropy(self,data): print('-------------------------------') print(' STEP : Feature Fusion using Entropy') print('-------------------------------') group_features = self.getGroupFeatures(data) entropy = dict() for key in group_features.keys(): tmp = data[group_features[key]].values tmp = tmp tmp_sum = tmp.sum(axis=1,keepdims=True) + .0000000000001 p = tmp/tmp_sum key = 'grp_entropy_'+key entropy[key] = entr(p).sum(axis=1)/np.log(2) entropy_features = pd.DataFrame(entropy) return entropy_features """ Apply dimentionality reduction on features PCA """ def Scaling(self,data,algo): print('-------------------------------') print(' STEP : Feature Scaling') print('-------------------------------') if algo in ['std','mmax']: if algo == 'std': res = pd.DataFrame(self.std.fit_transform(data), columns=data.columns) print('===> Successfully applied Standard Scaling') return res elif algo == 'mmax': res = pd.DataFrame(self.mmax.fit_transform(data), columns=data.columns) print('===> Successfully applied MinMax Scaling') return res else: print('===> Error: Unsupported scaling method') return None def DimRed(self,algo,data,params=None): print('-------------------------------') print(' STEP : Feature fusion 
using DimRed') print('-------------------------------') if algo not in ['pca','mds','isomap','tsne']: print('===> Erro: Unsupported dimension reduction algorithm specified') return None else: if algo!='pca' and len(params) ==0: print('===> Error: Specify n_components/n_neighbors parameters') return None else: # Dimensionality reduction X_train, X_test, y_train, y_test = train_test_split(data,self.label_set[self.label],train_size=.7,random_state=self.seed) self.pca = PCA(random_state = self.seed) self.mds = manifold.MDS(n_components=params['n_components'],max_iter=100,n_init=1,random_state = self.seed) self.isomap = manifold.Isomap(n_neighbors=params['n_neighbors'],n_components=params['n_components']) self.tsne = manifold.TSNE(n_components=params['n_components'],init='pca',random_state = self.seed) if algo == 'pca': self.pca.fit(X_train) pca_features = self.pca.transform(data) print('===> Successfully applied PCA') pca_columns = [None] * pca_features.shape[1] for k in range(pca_features.shape[1]): pca_columns[k] = 'pca_' + str(k) return pd.DataFrame(pca_features,columns=pca_columns) if algo == 'mds': self.mds.fit(X_train) mds_features = self.mds.transform(data) mds_columns = [None] * mds_features.shape[1] for k in range(mds_features.shape[1]): mds_columns[k] = 'mds_' + str(k) print('===> Successfully applied MDS') return pd.DataFrame(mds_features,columns=mds_columns) if algo== 'isomap': self.isomap.fit(X_train) isomap_features = self.isomap.transform(data) print('===> Successfully applied ISOMAP') isomap_columns = [None] * isomap_features.shape[1] for k in range(isomap_features.shape[1]): isomap_columns[k] = 'iso_' + str(k) return pd.DataFrame(isomap_features,columns=isomap_columns) if algo=='tsne': tsne_features = self.tsne.fit_transform(data) print('===> Successfully applied t-SNE') tsne_columns = [None] * tsne_features.shape[1] for k in range(tsne_features.shape[1]): tsne_columns[k] = 'tsne_' + str(k) return pd.DataFrame(tsne_features,columns=tsne_columns) ; def 
loadConfiguredModules(self,modules): print('-------------------------------') print(' STEP : Configured Regression Moduel Loaded') print('-------------------------------') self.models = modules self.configured = True def regressionModelInitialize(self): print('-------------------------------') print(' STEP : Regression Moduel Initialised') print('-------------------------------') self.models = dict() self.params=dict() self.models['knn'] = KNeighborsRegressor() self.models['rf'] = RandomForestRegressor(random_state = self.seed) self.models['ada'] = AdaBoostRegressor(random_state = self.seed) self.models['gb'] = GradientBoostingRegressor(random_state = self.seed) self.models['xg'] = XGBRegressor(random_state = self.seed) self.models['mlp'] = MLPRegressor() self.models['svm'] = SVR() self.models['vot'] = VotingRegressor([('knn',self.models['knn']),('ada',self.models['ada']),('rand',self.models['rf']),('svm',self.models['svm'])]) # Preparing parameter for finding optimal parameters self.params['knn'] ={'n_neighbors':[2,3,4,5],'algorithm':['auto', 'ball_tree', 'kd_tree', 'brute']} self.params['rf'] = {'max_depth':[2,3,4,5,6],'n_estimators':[50,100,150,200],'min_samples_split':[3,4,5]} self.params['ada'] = {'learning_rate':[.01,.001,.0001],'n_estimators':[50,100,150,200],'loss':['linear', 'square', 'exponential']} self.params['gb'] = {'learning_rate':[.01,.001,.0001],'n_estimators':[50,100,150,200],'loss':['ls', 'lad', 'huber', 'quantile'],'min_samples_split':[3,4,5]} self.params['xg']={'booster':['gbtree', 'gblinear','dart']} self.params['mlp']={'solver':['lbfgs','sgd','adam'],'activation':['identity', 'logistic', 'tanh', 'relu'],'hidden_layer_sizes':[(5,5,5),(5,4,3),(10,10,5)]} k=['rbf', 'linear','poly','sigmoid'] c= [1,10,100,.1] g=[.0001,.001,.001,.01,.1] self.params['svm']=dict(kernel=k, C=c, gamma=g) print('-------------------------------------------') print('===> K-Nearest Neighbors initialized') print('===> Random Forest initialized') print('===> AdaBoost 
initialized') print('===> Gradient Boost initialized') print('===> XGBoost initialized') print('===> Neural Network initialized') print('===> SVM initialized') print('===> Voting classifier with KNN, AdaBoost, SVM and Random Forest') def findParametersAndEvaluate(self,data,strategy,label_name,group=None,dataset=None,cv=5): self.strategy = strategy self.results = {} print('-------------------------------') print(' STEP : Finding Parameters & Evaluate Models') print('-------------------------------') self.label_name_check(label_name) #print(self.labelset.columns) # store performance data for each strategy if (strategy == 'train_test_split' or strategy == 'all'): self.train_test = dict() for model in self.models.keys(): self.train_test[model] = None print('===> Evaluation strategy: Train and Test Split ') X_train, X_test, y_train, y_test = train_test_split(data,self.label_set[label_name],train_size=.7,random_state=self.seed) print('===> Parameters find-> Start') for model in self.models.keys(): if model == 'vot': continue if not self.configured: gd = GridSearchCV(self.models[model],self.params[model],cv=cv,scoring='neg_root_mean_squared_error') gd.fit(X_train,y_train) print(' Parameters for ',model,': ',gd.best_params_) self.models[model] = gd.best_estimator_ print('===> Parameters find-> End') test_performances = dict() print('===> Test data performance[RMSE] ') for model in self.models.keys(): self.models[model].fit(X_train,y_train) test_performances[model] = mean_squared_error(y_test,self.models[model].predict(X_test),squared=False) #print(' Model[',model,']:',test_performances[model]) self.train_test[model] = test_performances[model] print(self.train_test) self.results['train_test'] = self.train_test if (strategy == 'cross_val' or strategy == 'all'): self.cross_val = dict() cross_val = dict() for model in self.models.keys(): self.cross_val[model] = None print('==============================================') print('Evaluation strategy: Cross Validation') 
print('==============================================') for model in self.models.keys(): if model != 'vot' and not self.configured: print(' ==> Finding params for ',model) gd = GridSearchCV(self.models[model],self.params[model],cv=10,scoring='neg_root_mean_squared_error') gd.fit(data,self.label_set[label_name]) print(' Parameters: ',gd.best_params_) self.models[model] = gd.best_estimator_ cross_val[model] = cross_val_score(self.models[model],data,self.label_set[label_name],scoring='neg_root_mean_squared_error',cv=cv) #print(' Score[',model,']:',cross_val_scores[model]) cross_val_mean = -1 * statistics.mean(cross_val[model]) cross_val_var = statistics.variance(cross_val[model]) self.cross_val[model] = [cross_val_mean,cross_val_var] self.results['cross_val'] = self.cross_val if (strategy == 'leave_one_group_out' or strategy == 'all'): self.leave_group = dict() for model in self.models.keys(): self.leave_group[model] = None print('==============================================') print('Evaluation strategy: Leave one group out') print('==============================================') logo = LeaveOneGroupOut() n_splits = logo.get_n_splits(groups=group) error= dict() for model in self.models.keys(): error[model] = [None]*n_splits k =0 for train_index, test_index in logo.split(data,self.label_set[label_name],group): #print(test_index) X_train, y_train = data.iloc[train_index],self.label_set[label_name][train_index] X_test, y_test = data.iloc[test_index],self.label_set[label_name][test_index] for model in self.models.keys(): if model != 'vot' and not self.configured: print(' ==> Finding params for ',model) gd = GridSearchCV(self.models[model],self.params[model],cv=10,scoring='neg_root_mean_squared_error') gd.fit(X_train,y_train) print(' Parameters: ',gd.best_params_) estimator = gd.best_estimator_ self.models[model] = estimator self.models[model].fit(X_train,y_train) error[model][k] = mean_squared_error(y_test,self.models[model].predict(X_test),squared=False) #print(' 
Model[',model,']:',error[model]) k = k+1 for model in self.models.keys(): err_mean = statistics.mean(error[model]) err_var = statistics.variance(error[model]) self.leave_group[model] = [err_mean,err_var] self.results['leave_group'] = self.leave_group if (strategy == 'leave_one_dataset_out' or strategy == 'all'): self.leave_dataset = dict() for model in self.models.keys(): self.leave_dataset[model] = None print('==============================================') print('Evaluation strategy: Leave one dataset out') print('==============================================') logo = LeaveOneGroupOut() n_splits = logo.get_n_splits(groups=dataset) error= dict() for model in self.models.keys(): error[model] = [None]*n_splits k =0 for train_index, test_index in logo.split(data,self.label_set[label_name],dataset): X_train, y_train = data.iloc[train_index],self.label_set[label_name][train_index] X_test, y_test = data.iloc[test_index],self.label_set[label_name][test_index] for model in self.models.keys(): if model != 'vot' and not self.configured: print(' ==> Finding params for ',model) gd = GridSearchCV(self.models[model],self.params[model],cv=10,scoring='neg_root_mean_squared_error') gd.fit(X_train,y_train) #print(' Parameters: ',gd.best_params_) estimator = gd.best_estimator_ self.models[model] = estimator self.models[model].fit(X_train,y_train) error[model][k] = mean_squared_error(y_test,self.models[model].predict(X_test),squared=False) #print(' Model[',model,']:',error[model]) k = k+1 for model in self.models.keys(): err_mean = statistics.mean(error[model]) err_var = statistics.variance(error[model]) self.leave_dataset[model] = [err_mean,err_var] self.results['leave_dataset'] = self.leave_dataset if (strategy=='sorted_stratified' or strategy == 'all') : self.stratified = dict() for model in self.models.keys(): self.stratified[model] = None # idea from https://scottclowe.com/2016-03-19-stratified-regression-partitions/ print('==============================================') 
print('Evaluation strategy: Sorted Stratification') print('==============================================') label_df = pd.DataFrame(self.label_set) indices = label_df.sort_values(by=[label_name]).index.tolist() splits = dict() error = dict() for model in self.models.keys(): error[model] = [None]*cv for i in range(cv): splits[i] = list() for i in range(len(indices)): if i%cv == 0: pick = random.sample(range(cv),cv) cur_pick = pick.pop() splits[cur_pick].append(indices[i]) for i in range(cv): test_index = splits[i] train_index = [] for j in range(cv): if j != i: train_index = train_index + splits[j] ########################################## # Code to training model on sorted stratified set X_train, y_train = data.iloc[train_index],self.label_set[label_name][train_index] X_test, y_test = data.iloc[test_index],self.label_set[label_name][test_index] for model in self.models.keys(): if model != 'vot' and not self.configured: print(' ==> Finding params for ',model) gd = GridSearchCV(self.models[model],self.params[model],cv=10,scoring='neg_root_mean_squared_error') gd.fit(X_train,y_train) print(' Parameters: ',gd.best_params_) estimator = gd.best_estimator_ self.models[model] = estimator self.models[model].fit(X_train,y_train) error[model][i] = mean_squared_error(y_test,self.models[model].predict(X_test),squared=False) #print(' Model[',model,']:',error[model]) for model in self.models.keys(): err_mean = statistics.mean(error[model]) err_var = statistics.variance(error[model]) self.stratified[model] = [err_mean,err_var] ########################################## self.results['stratified'] = self.stratified else: print('Unsupported evaluation strategy') return None return self.results # Preparing dataframe with results for report generation """ if strategy == 'train_test_split': df = pd.DataFrame(columns = ['model','train_test]) for model in self.models.keys(): df = df.append({'model':model,'train_test':self.train_test[model]},ignore_index=True) if strategy == 'cross_val': 
df = pd.DataFrame(columns = ['model','train_test_mean','train_test_var']) for model in self.models.keys(): df = df.append({'model':model,'train_test_mean':self.train_test[model][0],'train_test_var':self.train_test[model][1]},ignore_index=True) if strategy == 'leave_one_group_out': df = pd.DataFrame(columns = ['model','train_test_mean','train_test_var']) for model in self.models.keys(): df = df.append({'model':model,'train_test_mean':self.train_test[model][0],'train_test_var':self.train_test[model][1]},ignore_index=True) if strategy == 'leave_one_dataset_out': df = pd.DataFrame(columns = ['model','train_test_mean','train_test_var']) for model in self.models.keys(): df = df.append({'model':model,'train_test_mean':self.train_test[model][0],'train_test_var':self.train_test[model][1]},ignore_index=True) if strategy == 'sorted_stratified': df = pd.DataFrame(columns = ['model','train_test_mean','train_test_var']) for model in self.models.keys(): df = df.append({'model':model,'train_test_mean':self.train_test[model][0],'train_test_var':self.train_test[model][1]},ignore_index=True) if strategy == 'all': df = pd.DataFrame(columns = ['model','train_test','cross_val','leave_group','leave_dataset','stratified']) for model in self.models.keys(): df = df.append({'model':model,'train_test':self.train_test[model],'cross_val':self.cross_val[model],'leave_group':self.leave_group[model],'leave_dataset':self.leave_dataset[model],'stratified':self.stratified[model]},ignore_index=True) return df """ def report(self,currentOutput,report_name=''): df = pd.DataFrame(columns = ['model','train_test','cross_val_mean','cross_val_var','leave_group_mean','leave_group_var','leave_dataset_mean','leave_dataset_var','stratified_mean','stratified_var']) for model in self.models.keys(): df = 
df.append({'model':model,'train_test':self.train_test[model],'cross_val_mean':self.cross_val[model][0],'cross_val_var':self.cross_val[model][1],'leave_group_mean':self.leave_group[model][0],'leave_group_var':self.leave_group[model][1],'leave_dataset_mean':self.leave_dataset[model][0],'leave_dataset_var':self.leave_dataset[model][1],'stratified_mean':self.stratified[model][0],'stratified_var':self.stratified[model][1]},ignore_index=True) filename = report_name df.to_csv(filename,index=False) print('==============================================') print(' Report Generation') print('==============================================') print(' ===> Successfully generated ') print(' ===> Results saved in easyRegress_report.csv file') def activateGroups(self,groups): self.groups = groups self.flagGroup = True def activateDatasets(self,datasets): self.datasets = datasets self.flagDataset = True def activateLabel(self,label): self.label = label def buildPipeline(self,sequence,report_name=''): """ <feature_name> : Name of feature feature_extraction: Apply feature extraction based on correlation feature_scaling: Apply feature scaling. Options: Standard, MinMax feature_fusion: Apply feature fusion. Options: gini, entropy, pca, isomap, mds, tsne load_models: Load regression models. find_evaluate: Model evaluation. Options: train_test_split, cross_validation, leave_one_group_out, leave_one_dataset_out, sorted_stratified report_results: Report results. 
Options: table, chart """ currentOutput = None for index, step in enumerate(sequence): label = self.label groups = self.groups datasets = self.datasets if index == 0: self.feature_name_check(step) currentOutput = self.feature_set[step][2] elif step == 'feature_extraction': results = self.extractFeatures(currentOutput) currentOutput = results elif step == 'feature_scaling_std': print(currentOutput.shape) results = self.Scaling(currentOutput,'std') currentOutput = results elif step == 'feature_scaling_mmax': results = self.Scaling(currentOutput,'mmax') currentOutput = results elif step == 'feature_fusion_pca': results = self.DimRed('pca',currentOutput,{'n_components':2,'n_neighbors':3}) currentOutput = results elif step == 'feature_fusion_mds': results = self.DimRed('mds',currentOutput,{'n_components':2,'n_neighbors':3}) currentOutput = results elif step == 'feature_fusion_isomap': results = self.DimRed('isomap',currentOutput,{'n_components':2,'n_neighbors':3}) currentOutput = results elif step == 'feature_fusion_tsne': results = self.DimRed('tsne',currentOutput,{'n_components':2,'n_neighbors':3}) currentOutput = results elif step == 'feature_fusion_entropy': results = self.getEntropy(currentOutput) currentOutput = results print(results) elif step == 'feature_fusion_gini': results = self.getGINI(currentOutput) currentOutput = results print(results) elif step == 'load_modules': self.regressionModelInitialize() elif step == 'evaluate_train_test': if label == None: print(' ====> Error: labels are not loaded') results =self.findParametersAndEvaluate(currentOutput,'train_test_split',label) currentOutput = results elif step == 'evaluate_cross_val': if label == None: print(' ====> Error: labels are not loaded') results =self.findParametersAndEvaluate(currentOutput,'cross_val',label) currentOutput = results elif step == 'evaluate_leave_group_out': if label == None: print(' ====> Error: labels are not loaded') if self.flagDataset == False: print(' ====> Error: groups ids are 
not loaded') results =self.findParametersAndEvaluate(currentOutput,'leave_one_group_out',label,group=groups) currentOutput = results elif step == 'evaluate_leave_dataset_out': if label == None: print(' ====> Error: labels are not loaded') if self.flagDataset == False: print(' ====> Error: datasets ids are not loaded') results =self.findParametersAndEvaluate(currentOutput,'leave_one_dataset_out',label,dataset = datasets) currentOutput = results elif step == 'evaluate_stratified': if label == None: print(' ====> Error: labels are not loaded') results =self.findParametersAndEvaluate(currentOutput,'sorted_stratified',label) currentOutput = results elif step == 'all': if label == None: print(' ====> Error: labels are not loaded') results =self.findParametersAndEvaluate(currentOutput,'all',label,group = groups, dataset = datasets) currentOutput = results elif step == 'report_csv': self.report(currentOutput,report_name) else: print(' Unsupported module ',step,' is specified')
41.139214
457
0.517462
3,813
38,712
5.084448
0.120378
0.029917
0.012379
0.017331
0.469851
0.41043
0.359983
0.325012
0.293857
0.271264
0
0.008895
0.346585
38,712
941
458
41.139214
0.757541
0.03712
0
0.3457
0
0
0.161192
0.045734
0
0
0
0
0
1
0.035413
false
0
0.097808
0
0.177066
0.215852
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
6ba459622ba98919bfa10ab43ed05d7011713aea
469
py
Python
Moderate/Prime Numbers/main.py
AstrorEnales/CodeEval
eae0fb471d27d3a83d544ff4a4651ed1a2076930
[ "MIT" ]
null
null
null
Moderate/Prime Numbers/main.py
AstrorEnales/CodeEval
eae0fb471d27d3a83d544ff4a4651ed1a2076930
[ "MIT" ]
null
null
null
Moderate/Prime Numbers/main.py
AstrorEnales/CodeEval
eae0fb471d27d3a83d544ff4a4651ed1a2076930
[ "MIT" ]
null
null
null
import sys lines = open(sys.argv[1], 'r') for line in lines: line = line.replace('\n', '').replace('\r', '') if len(line) > 0: n = int(line) primes = set([2]) num = 3 while num < n: if all(num % i != 0 for i in primes): primes = set(list(primes) + [num]) num = num + 1 primes = sorted(list(primes)) print(','.join([str(x) for x in primes])) lines.close()
26.055556
52
0.45629
64
469
3.34375
0.484375
0.084112
0
0
0
0
0
0
0
0
0
0.020408
0.373134
469
17
53
27.588235
0.707483
0
0
0
0
0
0.013274
0
0
0
0
0
0
1
0
false
0
0.066667
0
0.066667
0.066667
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
6ba4e572e52707590a52608ce4cc12b513909627
2,117
py
Python
gemtown/users/serializers.py
doramong0926/gemtown
2c39284e3c68f0cc11994bed0ee2abaad0ea06b6
[ "MIT" ]
null
null
null
gemtown/users/serializers.py
doramong0926/gemtown
2c39284e3c68f0cc11994bed0ee2abaad0ea06b6
[ "MIT" ]
5
2020-09-04T20:13:39.000Z
2022-02-17T22:03:33.000Z
gemtown/users/serializers.py
doramong0926/gemtown
2c39284e3c68f0cc11994bed0ee2abaad0ea06b6
[ "MIT" ]
null
null
null
from rest_framework import serializers
from gemtown.modelphotos import models as modelphoto_models
from gemtown.modelers import models as modeler_models
from gemtown.musicians import models as musician_models
from . import models
import time


class TimestampField(serializers.Field):
    """Serializer field that renders a datetime as an integer Unix timestamp."""

    def to_representation(self, value):
        # NOTE(review): time.mktime interprets value.timetuple() in the
        # server's *local* timezone — confirm UTC (calendar.timegm) was not
        # intended for created_at/updated_at.
        return int(time.mktime(value.timetuple()))


class UsernameSerializer(serializers.ModelSerializer):
    """Compact user payload exposing only the username (used below for follower lists)."""

    class Meta:
        model = models.User
        fields = (
            'username',
        )


class MusicianSerializer(serializers.ModelSerializer):
    """Summary view of a Musician: id, nickname and country."""

    class Meta:
        model = musician_models.Musician
        fields = (
            'id',
            'nickname',
            'country',
        )


class ModelPhotoSerializer(serializers.ModelSerializer):
    """Photo payload exposing the file reference and its photo_type."""

    class Meta:
        model = modelphoto_models.ModelPhoto
        fields = (
            'file',
            'photo_type',
        )


class ModelerSerializer(serializers.ModelSerializer):
    """Summary view of a Modeler with its cover image nested inline."""

    # Nested serializer: renders the related ModelPhoto object in place of a pk.
    cover_image = ModelPhotoSerializer()

    class Meta:
        model = modeler_models.Modeler
        fields = (
            'id',
            'cover_image',
            'nickname',
            'country',
        )


class UserSerializer(serializers.ModelSerializer):
    """Full user payload with nested musician/modeler profiles and follow lists."""

    # Timestamps are exposed as Unix-epoch integers rather than ISO strings.
    created_at = TimestampField()
    updated_at = TimestampField()
    # Follower/following relations are reduced to bare usernames.
    followers = UsernameSerializer(many=True)
    followings = UsernameSerializer(many=True)
    musician = MusicianSerializer()
    modeler = ModelerSerializer()

    class Meta:
        model = models.User
        fields = (
            'id',
            'username',
            'email',
            'first_name',
            'last_name',
            'user_class',
            'gem_amount',
            'musician',
            'modeler',
            'gender',
            'profile_photo',
            'country',
            'mobile_number',
            'mobile_country',
            'followers',
            'followings',
            'is_superuser',
            'is_staff',
            'created_at',
            'updated_at'
        )
25.817073
60
0.561171
166
2,117
7.012048
0.39759
0.111684
0.060137
0.090206
0.142612
0.051546
0
0
0
0
0
0
0.352858
2,117
81
61
26.135802
0.849635
0
0
0.309859
0
0
0.117313
0
0
0
0
0
0
1
0.014085
false
0
0.084507
0.014085
0.366197
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
6ba6fc313f5304844d20f830ba30be7f1409ef35
8,294
py
Python
neaps-api/neaps_lib/delivery_time_test.py
HotelsDotCom/neaps
4222016d60d27a168f4fb5569d696ec1731f8698
[ "Apache-2.0" ]
6
2018-01-18T11:31:41.000Z
2019-11-11T19:56:06.000Z
neaps-api/neaps_lib/delivery_time_test.py
HotelsDotCom/neaps
4222016d60d27a168f4fb5569d696ec1731f8698
[ "Apache-2.0" ]
null
null
null
neaps-api/neaps_lib/delivery_time_test.py
HotelsDotCom/neaps
4222016d60d27a168f4fb5569d696ec1731f8698
[ "Apache-2.0" ]
null
null
null
#
# Copyright 2018 Expedia Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#   http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import unittest

from functions import delivery_time

# Each case pairs the argument list for delivery_time with its expected result.
test_cases = [
    {'data': [[3, 3, 3], 1, 3], 'result': 9},
    {'data': [[3, 3, 3], 1, 10], 'result': 30},
    {'data': [[3, 3, 3], 2, 3], 'result': 6},
    {'data': [[3, 3, 3], 3, 3], 'result': 3},
]


class DeliveryTimeTestCase(unittest.TestCase):
    """Checks delivery_time against the table of known inputs and outputs."""

    def test_result(self):
        # Compute and compare inside the test. The old version appended into a
        # *module-level* `results` list from setUp, which accumulated stale
        # entries whenever the suite ran more than once; subTest also reports
        # each failing case individually instead of stopping at the first.
        for case in test_cases:
            with self.subTest(data=case['data']):
                self.assertEqual(delivery_time(case['data']), case['result'])


if __name__ == '__main__':
    unittest.main()

# NOTE(review): a large block of commented-out legacy suites (tests_sc,
# tests_sn, tests_bt exercising result/stories_completed/sprints_needed/
# bootstrap with numpy inputs) was removed as dead code; recover it from
# version control if those helpers are ever re-tested.
22.176471
74
0.437545
895
8,294
3.910615
0.15419
0.100286
0.061714
0.044571
0.555143
0.541429
0.523429
0.452571
0.452
0.432
0
0.076622
0.384736
8,294
373
75
22.235925
0.609249
0.825778
0
0.058824
0
0
0.051327
0
0
0
0
0
0.029412
1
0.058824
false
0
0.058824
0
0.147059
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
6ba78991985070dc29bb6e09cbc030857e571e30
6,702
py
Python
alexa_skills/cis_diagnosis.py
paramraghavan/sls-py-alexa-color-picker
da4752442dd4ead19832930103adb9d81cfc163a
[ "MIT" ]
null
null
null
alexa_skills/cis_diagnosis.py
paramraghavan/sls-py-alexa-color-picker
da4752442dd4ead19832930103adb9d81cfc163a
[ "MIT" ]
null
null
null
alexa_skills/cis_diagnosis.py
paramraghavan/sls-py-alexa-color-picker
da4752442dd4ead19832930103adb9d81cfc163a
[ "MIT" ]
null
null
null
# -*- coding: utf-8 -*-
# Alexa skill "CISDiagnosis": takes a spoken medical report, runs it through
# AWS Comprehend Medical, and speaks back the detected medical conditions.

import logging

from ask_sdk_core.skill_builder import SkillBuilder
from ask_sdk_core.utils import is_request_type, is_intent_name
from ask_sdk_core.handler_input import HandlerInput
from ask_sdk_model import Response
from ask_sdk_model.ui import SimpleCard
import os
from alexa_skills import aws_utils

# Required configuration from the environment; a missing CIS_SERVICE_URL or
# access key id raises KeyError at import time (the secret key is optional).
CIS_SERVICE_URL = os.environ['CIS_SERVICE_URL']
CIS_AWS_ACCESS_KEY_ID = os.environ['CIS_AWS_ACCESS_KEY_ID']
CIS_AWS_SECRET_ACCESS_KEY = os.environ.get('CIS_AWS_SECRET_ACCESS_KEY')

skill_name = "CISDiagnosis"
help_text = ("Please tell me your medical condition. You can say "
             "I have cold headache.")

# Name of the intent slot carrying the free-form medical report.
report_slot = "report"

sb = SkillBuilder()

logger = logging.getLogger(__name__)
logger.setLevel(logging.INFO)


@sb.request_handler(can_handle_func=is_request_type("LaunchRequest"))
def launch_request_handler(handler_input):
    """Handler for Skill Launch: greet and prompt for a medical condition."""
    # type: (HandlerInput) -> Response
    speech = "Welcome, Tell me your medical condition."
    handler_input.response_builder.speak(
        speech + " " + help_text).ask(help_text)
    return handler_input.response_builder.response


@sb.request_handler(can_handle_func=is_intent_name("AMAZON.HelpIntent"))
def help_intent_handler(handler_input):
    """Handler for Help Intent."""
    # type: (HandlerInput) -> Response
    handler_input.response_builder.speak(help_text).ask(help_text)
    return handler_input.response_builder.response


@sb.request_handler(
    can_handle_func=lambda handler_input:
        is_intent_name("AMAZON.CancelIntent")(handler_input) or
        is_intent_name("AMAZON.StopIntent")(handler_input))
def cancel_and_stop_intent_handler(handler_input):
    """Single handler for Cancel and Stop Intent."""
    # type: (HandlerInput) -> Response
    speech_text = "Goodbye!"
    return handler_input.response_builder.speak(speech_text).response


@sb.request_handler(can_handle_func=is_request_type("SessionEndedRequest"))
def session_ended_request_handler(handler_input):
    """Handler for Session End: nothing to clean up, respond empty."""
    # type: (HandlerInput) -> Response
    return handler_input.response_builder.response


from io import StringIO


def getMedicalAnalysis(medical_report):
    """Run the report through Comprehend Medical and summarise conditions.

    Returns a space-separated string "<text> is MEDICAL_CONDITION ..." for
    every detected entity in the MEDICAL_CONDITION category that carries at
    least one trait.
    """
    client = aws_utils.get_boto3_client(CIS_AWS_ACCESS_KEY_ID,
                                        CIS_AWS_SECRET_ACCESS_KEY,
                                        'comprehendmedical')
    response = client.detect_entities_v2(
        Text=medical_report
    )
    mc_dict = {}
    for entity in response['Entities']:
        # Keep only medical conditions that have supporting traits
        # (e.g. DIAGNOSIS/SYMPTOM markers).
        if entity["Category"] == "MEDICAL_CONDITION" and len(entity["Traits"]) > 0:
            # print(f'| {entity["Text"]} |{entity["Category"]} |')
            mc_dict[entity["Text"]] = entity["Category"]
    # print(mc_dict)
    string_buffer = StringIO()
    for item in mc_dict:
        string_buffer.write(item + ' is ' + mc_dict[item] + ' ')
    return string_buffer.getvalue()


@sb.request_handler(can_handle_func=is_intent_name("MedicalIntent"))
def my_medical_diagnosis_handler(handler_input):
    """Analyse the spoken report in the 'report' slot and speak the findings.

    If the slot is missing, re-prompts the user instead.
    """
    # type: (HandlerInput) -> Response
    slots = handler_input.request_envelope.request.intent.slots
    if report_slot in slots:
        medical_report = slots[report_slot].value
        speakOutput = getMedicalAnalysis(medical_report)
        # build json object as per the CISApi
        # handler_input.attributes_manager.session_attributes[color_slot_key] = fav_color
        speech = "Identified diseases are " + speakOutput
        reprompt = ("That's " + speakOutput)
    else:
        speech = "I'm not sure, please try again"
        reprompt = ("I'm not sure, please try again")
    handler_input.response_builder.speak(speech).ask(reprompt)
    return handler_input.response_builder.response


@sb.request_handler(can_handle_func=is_intent_name("AMAZON.FallbackIntent"))
def fallback_handler(handler_input):
    """AMAZON.FallbackIntent is only available in en-US locale.

    This handler will not be triggered except in that locale, so it is
    safe to deploy on any locale.
    """
    # type: (HandlerInput) -> Response
    speech = (
        "The {} skill can't help you with that. " + help_text).format(
        skill_name)
    reprompt = (help_text)
    handler_input.response_builder.speak(speech).ask(reprompt)
    return handler_input.response_builder.response


def convert_speech_to_text(ssml_speech):
    """convert ssml speech to text, by removing html tags."""
    # type: (str) -> str
    s = SSMLStripper()
    s.feed(ssml_speech)
    return s.get_data()


@sb.global_response_interceptor()
def add_card(handler_input, response):
    """Add a card by translating ssml text to card content."""
    # type: (HandlerInput, Response) -> None
    response.card = SimpleCard(
        title=skill_name,
        content=convert_speech_to_text(response.output_speech.ssml))


@sb.global_response_interceptor()
def log_response(handler_input, response):
    """Log response from alexa service."""
    # type: (HandlerInput, Response) -> None
    print("Alexa Response: {}\n".format(response))


@sb.global_request_interceptor()
def log_request(handler_input):
    """Log request to alexa service."""
    # type: (HandlerInput) -> None
    print("Alexa Request: {}\n".format(handler_input.request_envelope.request))


@sb.exception_handler(can_handle_func=lambda i, e: True)
def all_exception_handler(handler_input, exception):
    """Catch all exception handler, log exception and
    respond with custom message.
    """
    # type: (HandlerInput, Exception) -> None
    print("Encountered following exception: {}".format(exception))
    speech = "Sorry, there was some problem. Please try again!!"
    handler_input.response_builder.speak(speech).ask(speech)
    return handler_input.response_builder.response


######## Convert SSML to Card text ############
# This is for automatic conversion of ssml to text content on simple card
# You can create your own simple cards for each response, if this is not
# what you want to use.

from six import PY2
try:
    from HTMLParser import HTMLParser
except ImportError:
    from html.parser import HTMLParser


class SSMLStripper(HTMLParser):
    # NOTE(review): __init__ does not call HTMLParser.__init__ / super();
    # it calls reset() directly and sets the py3 attributes by hand —
    # presumably to support both six.PY2 and py3. Verify against the
    # HTMLParser version in use before changing.
    def __init__(self):
        self.reset()
        self.full_str_list = []
        if not PY2:
            self.strict = False
            self.convert_charrefs = True

    def handle_data(self, d):
        # Collect every text node encountered between SSML tags.
        self.full_str_list.append(d)

    def get_data(self):
        return ''.join(self.full_str_list)

################################################


# Handler to be provided in lambda console.
lambda_handler = sb.lambda_handler()
32.852941
110
0.716801
873
6,702
5.255441
0.273769
0.07585
0.061029
0.070619
0.278989
0.209895
0.159765
0.15279
0.151046
0.123801
0
0.001084
0.173829
6,702
203
111
33.014778
0.827524
0.222769
0
0.090909
0
0
0.128404
0.013317
0
0
0
0
0
1
0.136364
false
0
0.118182
0.009091
0.354545
0.027273
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
6ba87f59c7f750ad17a00d25ed32add4e2688d55
9,360
py
Python
layouts/params.py
JAOP1/pyristicLab
fb3f0eb88af5b964bf91875e20fb3c42b04e257d
[ "MIT" ]
null
null
null
layouts/params.py
JAOP1/pyristicLab
fb3f0eb88af5b964bf91875e20fb3c42b04e257d
[ "MIT" ]
null
null
null
layouts/params.py
JAOP1/pyristicLab
fb3f0eb88af5b964bf91875e20fb3c42b04e257d
[ "MIT" ]
null
null
null
""" --------------------------------------------------------------------- Continuos Optimization Problems --------------------------------------------------------------------- """ """ --------------------------------------------------------------------- GA configuration --------------------------------------------------------------------- """ _standardInputsGA = [ { "id": "generaciones", "desc": "Número de generaciones:", "default": 10, "step": 1, "min": 1 }, { "id": "poblacionGeneracional", "desc": "Tamaño de la población:", "default": 30, "step":1, "min":10 } ] _operadoresGA = [ { 'id': 'parentSelection', 'desc': "Método de selección de padres:", 'items': [ { 'label': 'roulette_sampler', 'inputs':[] }, { 'label':'stochastic_universal_sampler', 'inputs':[] }, { 'label':'deterministic_sampler', 'inputs':[] }, { 'label':'tournament_sampler', 'inputs':[ { "id": "chunk", "desc": "Tamaño de grupos:", "default": 2, "step": 1, "min": 2 }, { "id": "prob", "desc": "Probabilidad de seleccionar al mejor:", "default": 0.5, "step": 0.01, "min": 0, "max": 1 } ] } ] }, { 'id':'crossOver', 'desc':'Operador de cruza:', 'items':[ { 'label': 'n_point_crossover', 'inputs':[ { "id": "nPoint", "desc": "Número de puntos de cruza:", "default": 1, "step": 1, "min": 1 } ] }, { 'label':'uniform_crossover', 'inputs':[ { "id": "uniform", "desc": "Probabilidad de cruza:", "default": 0.5, "step": 0.01, "min": 0, "max":1 } ] }, { 'label':'simulated_binary_crossover', 'inputs':[ { "id": "simulatedBinary", "desc": "nc:", "default": 1 } ] }, { 'label':'discrete_crossover', 'inputs':[] }, { 'label':'intermediate_crossover', 'inputs': [ { 'id':'alphaGA', 'desc': 'Aporte del P1:', 'default':0.5, 'step':0.1, 'min':0.01, 'max':1 } ] } ] }, { 'id':'mutation', 'desc': 'Operadores de mutación', 'items':[ { 'label':'boundary_mutator', 'inputs':[] }, { 'label':'uniform_mutator', 'inputs':[] }, { 'label':'non_uniform_mutator', 'inputs':[ { 'id':'sigmaNonUniform', 'desc':'Valor de sigma:', 'default':1, 
'min':0.01 } ] } ] }, { 'id':'survivorSelectionGA', 'desc': 'Esquemas de selección:', 'items':[ { 'label':'merge_selector', 'inputs':[] }, { 'label':'replacement_selector', 'inputs':[] } ] } ] """ --------------------------------------------------------------------- ES configuration --------------------------------------------------------------------- """ _standardInputsES = [ { "id": "generaciones", "desc": "Número de generaciones:", "default": 10, "step": 1, "min": 1 }, { "id": "poblacionGeneracional", "desc": "Tamaño de la población padre:", "default": 30, "step":1, "min":10 }, { 'id':'poblacionHijos', 'desc':'Tamaño de la población hija:', 'default':30, 'step':1, 'min':10 }, { 'id':'epsilonSigma', 'desc':'Mínimo valor aceptado sigma:', 'default':0.001, 'min':0, 'step':0.01 } ] _operadoresES = [ { 'id': 'crossoverSolES', 'desc': "Operadores de cruza para la solución:", 'items': [ { 'label':'discrete_crossover', 'inputs':[] }, { 'label':'intermediate_crossover', 'inputs': [ { 'id':'alphaEEX', 'desc': 'Aporte del P1:', 'default':0.5, 'step':0.1, 'min':0.01, 'max':1 } ] } ] }, { 'id':'mutationSolES', 'desc':'Operadores de mutación para la solución:', 'items': [ { 'label':'sigma_mutator', 'inputs':[] } ] }, { 'id':'crossoverSigmaES', 'desc':'Operadores de cruza para los sigma:', 'items':[ { 'label':'discrete_crossover', 'inputs':[] }, { 'label':'intermediate_crossover', 'inputs': [ { 'id':'alphaEESigma', 'desc': 'Aporte del P1:', 'default':0.5, 'step':0.1, 'min':0.01, 'max':1 } ] } ] }, { 'id':'mutationSigmaES', 'desc': 'Operadores de mutación para los sigma:', 'items':[ { 'label':'single_sigma_adaptive_mutator', 'inputs':[] }, { 'label':'mult_sigma_adaptive_mutator', 'inputs':[ ] } ] }, { 'id':'survivorSelectionES', 'desc':'Esquema de selección de sobrevivientes:', 'items': [ { 'label':'merge_selector', 'inputs':[] }, { 'label':'replacement_selector', 'inputs':[] } ] } ] """ --------------------------------------------------------------------- EP configuration 
--------------------------------------------------------------------- """ _standardInputsEP = [ { "id": "generaciones", "desc": "Número de generaciones:", "default": 10, "step": 1, "min": 1 }, { "id": "poblacionGeneracional", "desc": "Tamaño de la población:", "default": 30, "step":1, "min":10 } ] _operadoresEP = [ { 'id': 'operadorMutacionX', 'desc': "Operadores de mutación en la solución X:", 'items': [ { 'label': 'sigma_mutator', 'inputs':[] } ] }, { 'id': 'operadorMutacionSigma', 'desc': "Operadores de mutación en la variable de sigma: ", 'items': [ { 'label': "sigma_ep_adaptive_mutator", 'inputs':[ { "id": "alpha", "desc": "Valor alpha:", "default": 0.5, "step": 0.01, "min": 0, "max": 1 } ] } ] }, { 'id':'survivorSelectionPE', 'desc': 'Esquemas de selección:', 'items':[ { 'label':'merge_selector', 'inputs':[] }, { 'label':'replacement_selector', 'inputs':[] } ] } ] def getOperands(name): if name == 'EP': return _operadoresEP elif name== 'EE': return _operadoresES elif name == 'GA': return _operadoresGA
25.57377
72
0.290278
508
9,360
5.257874
0.238189
0.019468
0.026956
0.029203
0.48596
0.420442
0.377012
0.361288
0.361288
0.315987
0
0.022262
0.524893
9,360
366
73
25.57377
0.578367
0.020406
0
0.338279
0
0
0.288419
0.035544
0
0
0
0
0
1
0.002967
false
0
0
0
0.011869
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
6ba8948db01a555810296ad83a1297622916c86e
988
py
Python
armautils_cli/smdimerge.py
KoffeinFlummi/ArmaUtils
2f1fdc8fb561fb54077f3c328d7a788e75c78dad
[ "MIT" ]
1
2015-02-19T17:31:17.000Z
2015-02-19T17:31:17.000Z
armautils_cli/smdimerge.py
KoffeinFlummi/ArmaUtils
2f1fdc8fb561fb54077f3c328d7a788e75c78dad
[ "MIT" ]
null
null
null
armautils_cli/smdimerge.py
KoffeinFlummi/ArmaUtils
2f1fdc8fb561fb54077f3c328d7a788e75c78dad
[ "MIT" ]
null
null
null
#!/usr/bin/env python3

import numpy as np
from PIL import Image


def smdimerge(pargs, oargs):
    """Merge a specular map and a gloss map into an Arma SMDI texture.

    The output image is white with the green channel taken from the red
    channel of the specular map and the blue channel from the red channel of
    the gloss map (standard SMDI layout: R unused, A unused).

    Args:
        pargs: positional args [spec_path, gloss_path, target_path].
        oargs: option args (currently unused).

    Returns:
        0 on success, -1 on wrong argument count, 1 on any read/write failure.
    """
    if len(pargs) != 3:
        return -1
    path_spec, path_gloss, path_target = pargs

    try:
        spec = Image.open(path_spec).convert("RGBA")
        gloss = Image.open(path_gloss).convert("RGBA")
    # Was a bare `except:`, which also swallowed SystemExit and
    # KeyboardInterrupt; Exception keeps the friendly message while letting
    # process-control exceptions propagate.
    except Exception:
        print("Failed to read images. Please check your paths.")
        return 1

    if spec.size != gloss.size:
        print("Image sizes do not match, aborting.")
        return 1

    # Start from an all-white canvas, then overwrite G and B channel planes.
    smdi = Image.new("RGBA", spec.size, "white")
    data = np.array(smdi)

    r, g, b, a = data.transpose()
    g = np.array(spec).transpose()[0]   # G := spec's red channel
    b = np.array(gloss).transpose()[0]  # B := gloss's red channel
    data = np.array([r, g, b, a]).transpose()

    smdi = Image.fromarray(data)
    try:
        smdi.save(path_target)
    except Exception:  # was a bare `except:` — see note above
        print("Failed to write final image to disk. Check permissions.")
        return 1
    else:
        print("SMDI map saved at: {}".format(path_target))
        return 0
23.52381
72
0.601215
140
988
4.192857
0.471429
0.0477
0.044293
0.064736
0
0
0
0
0
0
0
0.01238
0.26417
988
41
73
24.097561
0.795048
0.021255
0
0.233333
0
0
0.181159
0
0
0
0
0
0
1
0.033333
false
0
0.066667
0
0.266667
0.133333
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
6ba92406854249e5b9f75ef7a1b017bf6c889193
932
py
Python
logstash-pruner/prune_logs.py
wuvt/containers
729469c4e878027da589afb9b37d61de104a5713
[ "0BSD" ]
1
2017-09-16T04:54:33.000Z
2017-09-16T04:54:33.000Z
logstash-pruner/prune_logs.py
wuvt/containers
729469c4e878027da589afb9b37d61de104a5713
[ "0BSD" ]
null
null
null
logstash-pruner/prune_logs.py
wuvt/containers
729469c4e878027da589afb9b37d61de104a5713
[ "0BSD" ]
null
null
null
#!/usr/bin/python3

import datetime
import os
import sys

# Python 2/3 compatibility shim: on py3, `urllib` names urllib.parse
# (so urllib.quote resolves either way).
if sys.version_info[0] >= 3:
    import urllib.parse as urllib
else:
    import urllib


def index_date(index):
    """Return the datetime encoded in an index name like 'logstash-2017.09.16'.

    Returns None when the name contains no '-'-separated part. Raises
    ValueError when the part after the first '-' is not %Y.%m.%d — same as
    the original inline strptime call.
    """
    parts = index.split('-')
    if len(parts) > 1:
        return datetime.datetime.strptime(parts[1], "%Y.%m.%d")
    return None


def main():
    """Delete Elasticsearch indices whose encoded date is older than 60 days.

    Previously all of this ran at module import time; the __main__ guard
    keeps the module importable (e.g. for tests) without side effects.
    """
    # Imported here so importing the module does not require requests;
    # the script path still fails fast when it is missing.
    import requests

    endpoint = os.environ['ELASTICSEARCH_URL']
    username = os.environ['ELASTICSEARCH_USERNAME']
    password = os.environ['ELASTICSEARCH_PASSWORD']

    prune_start = datetime.datetime.utcnow() - datetime.timedelta(days=60)

    r = requests.get('{0}/_cat/indices'.format(endpoint),
                     auth=(username, password))
    for line in r.text.splitlines():
        data = line.split(' ')
        # _cat/indices rows have many columns; column 2 is the index name.
        if len(data) > 10:
            index = data[2]
            d = index_date(index)
            if d is not None and d < prune_start:
                r2 = requests.delete(
                    '{0}/{1}'.format(endpoint, urllib.quote(index)),
                    auth=(username, password))
                print(index, r2.json())


if __name__ == '__main__':
    main()
29.125
70
0.604077
113
932
4.920354
0.504425
0.048561
0.118705
0
0
0
0
0
0
0
0
0.021521
0.252146
932
31
71
30.064516
0.776184
0.01824
0
0.076923
0
0
0.102845
0.04814
0
0
0
0
0
1
0
false
0.115385
0.230769
0
0.230769
0.038462
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
1
0
0
0
0
0
1
6baa9c56ec82f3de3e848ccc5b1bc7bfad503442
7,001
py
Python
detect_motor_test3.py
binghaohuang1/object-detective-visual-tracking
e61680a771dc13a006113d96965e59ff1bc3ce6d
[ "MIT" ]
null
null
null
detect_motor_test3.py
binghaohuang1/object-detective-visual-tracking
e61680a771dc13a006113d96965e59ff1bc3ce6d
[ "MIT" ]
null
null
null
detect_motor_test3.py
binghaohuang1/object-detective-visual-tracking
e61680a771dc13a006113d96965e59ff1bc3ce6d
[ "MIT" ]
null
null
null
#!/usr/bin/env python
#!coding=utf-8
# ROS node: YOLO-based object detection on a camera stream, steering a motor
# to keep a detected 'banana' centred. Three worker threads share state via
# module globals (cv_image, delta_x, RV2_motor1_joint).
import rospy
import numpy as np
import PIL.Image as pilimage
from sensor_msgs.msg import CompressedImage
from sensor_msgs.msg import Image
from std_msgs.msg import Float64
from cv_bridge import CvBridge, CvBridgeError
import cv2
import time
from yolo import YOLO
from sensor_msgs.msg import Joy
from std_msgs.msg import String
from geometry_msgs.msg import Twist
from tf.transformations import *
from math import pi
from geometry_msgs.msg import PoseStamped
from std_msgs.msg import Header
from sensor_msgs.msg import JointState
from threading import Thread
import threading

# NOTE(review): `global` at module level is a no-op; RV2_motor1_joint is only
# actually created once the joint-state callback first fires.
global RV2_motor1_joint

yolo = YOLO()
bridge = CvBridge()


def send():
    # Subscribe to the compressed camera stream; spin() blocks this thread.
    rospy.Subscriber('/mid_camera/color/image_raw/compressed', CompressedImage, ReceiveVideo_right)
    rospy.spin()


def ReceiveVideo_right(data):
    # Store the latest decoded BGR frame in a module global for main().
    # NOTE(review): no lock — main() may read a frame mid-update; confirm
    # the rebind is atomic enough for this use.
    global cv_image
    # print(1)
    cv_image = bridge.compressed_imgmsg_to_cv2(data, 'bgr8')


def main():
    """Detection loop: run YOLO on each frame and update delta_x for 'banana'."""
    global delta_x, cv_image
    # Give the camera subscriber time to deliver a first frame; if none
    # arrived within 4s, the cv_image read below raises NameError.
    time.sleep(4)
    fps = 0
    while not rospy.is_shutdown():
        t1 = time.time()
        # Read the current frame (BGR -> RGB for YOLO).
        frame = cv2.cvtColor(cv_image, cv2.COLOR_BGR2RGB)
        # Convert to a PIL Image.
        frame = pilimage.fromarray(np.uint8(frame))
        # Run detection.
        frame, bbox_list, label_list = yolo.detect_image(frame)
        frame = np.array(frame)
        # RGB -> BGR so OpenCV displays colours correctly.
        frame = cv2.cvtColor(frame, cv2.COLOR_RGB2BGR)
        # Exponentially smoothed frames-per-second estimate.
        fps = (fps + (1. / (time.time() - t1))) / 2
        print("fps= %.2f" % (fps))
        frame = cv2.putText(frame, "fps= %.2f" % (fps), (0, 40), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 0), 2)
        print(frame.shape)
        cv2.imshow("video", frame)
        cv2.waitKey(3)
        # c= cv2.waitKey(1) & 0xff
        # if c==27:
        #     break
        # When nothing is detected, bbox_list and label_list are an int (1).
        if type(label_list) != int:
            num_of_obj = len(label_list)
            # print('num_of_object:', num_of_obj)
            # Horizontal offset of the tracked object from the image centre.
            for i in range(num_of_obj):
                if 'banana' in label_list[i]:
                    # Centre x of the bounding box; 320 is half the frame width.
                    object_center = (bbox_list[i][1] + bbox_list[i][3]) * 0.5
                    delta_x = 320 - object_center
                    # print(delta_x)
                    # return delta_x
                    # location_pub.publish(delta_x)
                    # motor1_move()
                elif 'bed' in label_list[i]:
                    print("yyy")
                    pass
        else:
            print('yolo未识别到任何物体')
            pass


def motor1_move():
    """Control loop: rotate motor1 so delta_x returns to the centre band.

    Velocity bands: |delta_x| > 200 -> full speed 0.48; 80..200 -> speed
    proportional to the offset; < 80 -> stop. Near the joint limits (±1.5 rad)
    the command toward the limit is suppressed.
    """
    time.sleep(1)
    global command_vel_pub_m, delta_x, RV2_motor1_joint
    delta_x = 0
    now = rospy.Time.now()
    motor_vel = JointState()
    motor_vel.header = Header()
    motor_vel.header.stamp = now
    motor_vel.header.frame_id = "bulldog"
    motor_vel.name = ["motor1"]
    # rospy.Subscriber('/joint_states_motor',JointState,RV2_motorjointstate_callback)
    while not rospy.is_shutdown():
        print(delta_x)
        # Middle zone: joint well inside both limits.
        if -1.5 < RV2_motor1_joint < 1.5:
            # Left-turn conditions.
            if delta_x > 200:
                motor_vel.velocity = [0.48]
                print (motor_vel)
                command_vel_pub_m.publish(motor_vel)
                time.sleep(2)
            elif 80 < delta_x < 200:
                motor_vel.velocity = [(delta_x - 40) * 0.003]
                print (motor_vel)
                command_vel_pub_m.publish(motor_vel)
                time.sleep(2)
            # Right-turn conditions.
            elif delta_x < -200:
                motor_vel.velocity = [-0.48]
                command_vel_pub_m.publish(motor_vel)
                time.sleep(2)
            elif -200 < delta_x < -80:
                motor_vel.velocity = [(delta_x + 40) * 0.003]
                command_vel_pub_m.publish(motor_vel)
                time.sleep(2)
            # Stop condition: object already centred.
            elif -80 < delta_x < 80:
                motor_vel.velocity = [0]
                command_vel_pub_m.publish(motor_vel)
        # Left limit reached: suppress further left turns.
        if 1.5 < RV2_motor1_joint:
            # Left-turn request -> hold still at the limit.
            if delta_x > 80:
                motor_vel.velocity = [0]
                print (motor_vel)
                command_vel_pub_m.publish(motor_vel)
                time.sleep(2)
            # Right-turn conditions.
            elif delta_x < -200:
                motor_vel.velocity = [-0.48]
                command_vel_pub_m.publish(motor_vel)
                time.sleep(2)
            elif -200 < delta_x < -80:
                motor_vel.velocity = [(delta_x + 40) * 0.003]
                command_vel_pub_m.publish(motor_vel)
                time.sleep(2)
            # Stop condition.
            elif -80 < delta_x < 80:
                motor_vel.velocity = [0]
                command_vel_pub_m.publish(motor_vel)
                time.sleep(0.5)
        # Right limit reached: suppress further right turns.
        if RV2_motor1_joint < -1.5:
            # Left-turn conditions.
            if delta_x > 200:
                motor_vel.velocity = [0.48]
                print (motor_vel)
                command_vel_pub_m.publish(motor_vel)
                time.sleep(2)
            elif 80 < delta_x < 200:
                motor_vel.velocity = [(delta_x - 40) * 0.003]
                print (motor_vel)
                command_vel_pub_m.publish(motor_vel)
                time.sleep(2)
            # Right-turn request -> hold still at the limit.
            elif delta_x < -80:
                motor_vel.velocity = [0]
                command_vel_pub_m.publish(motor_vel)
                time.sleep(2)
            # Stop condition.
            elif -80 < delta_x < 80:
                motor_vel.velocity = [0]
                command_vel_pub_m.publish(motor_vel)
                time.sleep(0.5)
        else:
            motor_vel.velocity = [0]
            command_vel_pub_m.publish(motor_vel)
            time.sleep(0.5)
    # for object in vision_database_dict:
    # Re-publish the OpenCV-format data as a ROS image message.
    # try:
    #     #self.image_pub.publish(self.bridge.cv2_to_imgmsg(cv_image, "bgr8"))
    #     location_pub.publish(location_pub)
    # except CvBridgeError as e:
    #     print('e')


def RV2_motorjointstate_callback(data):
    # Store the RV2 motor joint position in a module global.
    global RV2_motor1_joint
    RV2_motor1_joint = data.position[0]
    print(RV2_motor1_joint)


if __name__ == '__main__':
    # Initialise the ROS node.
    rospy.init_node("cv_bridge_test")
    rospy.loginfo("Starting cv_bridge_test node")
    global command_vel_pub_m, delta_x
    # Create the motor velocity publisher.
    command_vel_pub_m = rospy.Publisher('/motor_control/input/velocity', JointState, queue_size = 100, latch=True)
    # Subscribe to the motor joint-state feed.
    rospy.Subscriber('/joint_states_motor', JointState, RV2_motorjointstate_callback)
    # Start the camera-subscriber and YOLO detection threads.
    t_send = threading.Thread(target = send)
    t_send.start()
    t_main = threading.Thread(target=main)
    t_main.start()
    # time.sleep(2)
    # Start the motor control thread.
    t_motor1 = threading.Thread(target = motor1_move)
    t_motor1.start()
    rospy.spin()
    # except KeyboardInterrupt:
    #     print("Shutting down cv_bridge_test node.")
    # cv2.destroyAllWindows()
32.868545
114
0.571347
862
7,001
4.37935
0.230858
0.08053
0.058543
0.063046
0.423841
0.358146
0.352318
0.331921
0.331921
0.296424
0
0.042836
0.333095
7,001
212
115
33.023585
0.765689
0.12084
0
0.446667
0
0
0.032749
0.010971
0
0
0
0
0
1
0.033333
false
0.013333
0.133333
0
0.166667
0.073333
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
6baa9dca059c9918a2a7be0aeb44d2c0167aca74
1,841
py
Python
venv/lib/python3.8/site-packages/ansible/utils/collection_loader/_collection_meta.py
saeedya/docker-ansible
6fb0cfc6bc4a5925b21380952a5a4502ec02119a
[ "Apache-2.0" ]
null
null
null
venv/lib/python3.8/site-packages/ansible/utils/collection_loader/_collection_meta.py
saeedya/docker-ansible
6fb0cfc6bc4a5925b21380952a5a4502ec02119a
[ "Apache-2.0" ]
null
null
null
venv/lib/python3.8/site-packages/ansible/utils/collection_loader/_collection_meta.py
saeedya/docker-ansible
6fb0cfc6bc4a5925b21380952a5a4502ec02119a
[ "Apache-2.0" ]
null
null
null
# (c) 2019 Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)

# CAUTION: There are two implementations of the collection loader.
#          They must be kept functionally identical, although their implementations may differ.
#
# 1) The controller implementation resides in the "lib/ansible/utils/collection_loader/" directory.
#    It must function on all Python versions supported on the controller.
# 2) The ansible-test implementation resides in the "test/lib/ansible_test/_util/target/legacy_collection_loader/" directory.
#    It must function on all Python versions supported on managed hosts which are not supported by the controller.

from __future__ import (absolute_import, division, print_function)
__metaclass__ = type

try:
    from collections.abc import Mapping   # pylint: disable=ansible-bad-import-from
except ImportError:
    from collections import Mapping  # pylint: disable=ansible-bad-import-from

from ansible.module_utils.common.yaml import yaml_load


def _meta_yml_to_dict(yaml_string_data, content_id):
    """Parse a YAML metadata document into a Python dictionary.

    Tools may monkeypatch this function with their own implementation
    (eg the import sanity test).

    :param yaml_string_data: a bytes-ish YAML dictionary
    :param content_id: a unique ID representing the content to allow other
        implementations to cache the output (unused here)
    :return: a Python dictionary representing the YAML dictionary content
    :raises ValueError: when the parsed document is truthy but not a mapping
    """
    # Any falsy parse result (empty doc, None, empty list) collapses to {};
    # {} itself is a Mapping, so the isinstance check below then passes.
    parsed = yaml_load(yaml_string_data) or {}
    if not isinstance(parsed, Mapping):
        raise ValueError('collection metadata must be an instance of Python Mapping')
    return parsed
48.447368
125
0.76969
259
1,841
5.332046
0.490347
0.039826
0.030413
0.037654
0.166546
0.166546
0.166546
0.166546
0.099928
0.099928
0
0.00654
0.169473
1,841
37
126
49.756757
0.896664
0.661054
0
0
0
0
0.098107
0
0
0
0
0
0
1
0.071429
false
0
0.357143
0
0.5
0.071429
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
1
0
0
0
0
1
6bab2761f4e78590b87af2ab8a2f23f2ce26ee32
69
py
Python
tests/test_resources/test_jobs_package/test_jobs_package/throw_an_exception_job.py
ni/hoplite
bc1b01aa08ba21daa36f46b06000b62890096787
[ "MIT" ]
16
2016-02-25T16:41:28.000Z
2020-11-07T13:27:40.000Z
tests/test_resources/test_jobs_package/test_jobs_package/throw_an_exception_job.py
ni/hoplite
bc1b01aa08ba21daa36f46b06000b62890096787
[ "MIT" ]
null
null
null
tests/test_resources/test_jobs_package/test_jobs_package/throw_an_exception_job.py
ni/hoplite
bc1b01aa08ba21daa36f46b06000b62890096787
[ "MIT" ]
8
2016-02-11T16:44:52.000Z
2020-01-23T19:45:37.000Z
def run(config, status):
    """Job entry point that unconditionally fails.

    Test fixture: executing this job always raises TypeError so callers
    can exercise their exception-handling paths. Both parameters are
    accepted for interface compatibility and never used.
    """
    raise TypeError("THE SKY IS FALLING!!")
23
43
0.681159
10
69
4.7
1
0
0
0
0
0
0
0
0
0
0
0
0.173913
69
2
44
34.5
0.824561
0
0
0
0
0
0.289855
0
0
0
0
0
0
1
0.5
false
0
0
0
0.5
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
1
0
0
0
0
0
0
0
3
6baba2b5a50d24a7e67ce4b5e3c206e3e6b416ad
7,184
py
Python
git_pull_all.py
searKing/git-pull-all
b77a1b0461cd00a5f3ccd48253a12674557302b6
[ "MIT" ]
null
null
null
git_pull_all.py
searKing/git-pull-all
b77a1b0461cd00a5f3ccd48253a12674557302b6
[ "MIT" ]
null
null
null
git_pull_all.py
searKing/git-pull-all
b77a1b0461cd00a5f3ccd48253a12674557302b6
[ "MIT" ]
null
null
null
#!/usr/bin/python
"""Recursively walk directories and run a git command (pull/push) in every
repository found, optionally stashing dirty working trees first.

Fixes applied:
  * ``print >> sys.stderr, ...`` was Python-2 syntax (a SyntaxError in this
    otherwise Python-3 file); replaced with ``print(..., file=sys.stderr)``.
  * The ``-d`` option value arrived as ``str`` from getopt but was later
    compared against an ``int`` depth (TypeError); now converted with ``int()``.
"""
import git
from git import *
import threading
import os
import sys
import getopt
from enum import Enum


class GitCommandType(Enum):
    """Git operations this script can apply to each repository."""
    pull = 1
    push = 2
    nop = 3


def yes_or_no(msg: str):
    """Prompt the user with a yes/no question.

    Returns False only for an explicit "no"/"n"; any other input
    (including empty) counts as yes.
    """
    yes_no = input(msg + " ? [Y]es or [n]o?")
    yes_no = yes_no.lower()
    if yes_no == "yes" or yes_no == "y":
        return True
    elif yes_no == "no" or yes_no == "n":
        return False
    else:
        return True


# is_git_dir returns if current directory has .git/
def is_git_dir(dir_path: str):
    """Return True when dir_path contains a .git entry (i.e. is a repo root)."""
    repo_git_dir = os.path.join(dir_path, '.git')
    if not os.path.exists(repo_git_dir):
        return False
    return True


def update_git_repo(git_cmd_type: GitCommandType, git_repo_dir: str,
                    git_stash_if_have_uncommitted_changes: bool,
                    unhandled_git_repo_dirs: list):
    """Run one git command in git_repo_dir.

    Dirty repositories are stashed first (after confirmation unless
    git_stash_if_have_uncommitted_changes is set). Repositories that could
    not be handled are appended to unhandled_git_repo_dirs.
    """
    try:
        git_repo = git.Repo(git_repo_dir)
        if git_cmd_type == GitCommandType.pull and git_repo.is_dirty():
            if not git_stash_if_have_uncommitted_changes:
                if not yes_or_no("Repo " + git_repo_dir + " have uncommitted changes, \n\tgit reset --hard"):
                    unhandled_git_repo_dirs.append(git_repo_dir)
                    return
            try:
                # NOTE(review): True is passed as a positional stash argument;
                # looks like it was meant to be a flag or message — confirm.
                git_repo.git.stash('save', True)
            except Exception as exception:
                print(
                    "git stash repo:" + git_repo_dir + " Failed:\r\n git reset --hard recommended" + str(exception))
                unhandled_git_repo_dirs.append(git_repo_dir)
                return
        remote_repo = git_repo.remote()
        print("start git %s from remote for: %s" % (git_cmd_type.name, git_repo_dir), end='')
        try:
            if git_cmd_type == GitCommandType.pull:
                remote_repo.pull()
            elif git_cmd_type == GitCommandType.push:
                remote_repo.push()
            elif git_cmd_type == GitCommandType.nop:
                pass
            else:
                print("")
                raise Exception('unrecognised git command: ' + git_cmd_type.name)
        except Exception as exception:
            print("")
            print(
                "git " + git_cmd_type.name + " repo:" + git_repo_dir + " Failed:\r\n git reset --hard recommended" + str(
                    exception))
            unhandled_git_repo_dirs.append(git_repo_dir)
            return
        print("... Done.")
    except NoSuchPathError as e:
        # Not a path we can open — silently skip, matching original behavior.
        pass
    except InvalidGitRepositoryError as e:
        # Directory is not a valid repository — skip.
        pass
    finally:
        pass


def update_git_repo_thread(git_cmd_type: GitCommandType, root_path: str,
                           git_stash_if_have_uncommitted_changes: bool,
                           dirty_git_repo_dirs: list, git_update_thread_pools: list):
    """Update one repo, in a background thread when auto-stash is enabled
    (no interactive prompt needed), otherwise synchronously."""
    if git_stash_if_have_uncommitted_changes:
        git_update_thread_ = threading.Thread(target=update_git_repo,
                                              args=(git_cmd_type, root_path, True, dirty_git_repo_dirs))
        git_update_thread_.start()
        git_update_thread_pools.append(git_update_thread_)
    else:
        update_git_repo(git_cmd_type, root_path, False, dirty_git_repo_dirs)


def walk_and_update(git_cmd_type: GitCommandType, root_path: str, continue_when_meet_git: bool,
                    depth: int, max_depth: int, git_stash_if_have_uncommitted_changes: bool,
                    dirty_git_repo_dirs: list, git_update_thread_pools: list):
    """Recursively descend from root_path (bounded by max_depth), updating
    every git repository encountered."""
    if depth >= max_depth:
        print("jump for %s too deep: depth[%d] max_depth[%d]" % (root_path, depth, max_depth))
        return
    if is_git_dir(root_path):
        update_git_repo_thread(git_cmd_type, root_path, git_stash_if_have_uncommitted_changes,
                               dirty_git_repo_dirs, git_update_thread_pools)
        if not continue_when_meet_git:
            # print("jump subdirs for %s meet git" % (root_path))
            return
    depth = depth + 1
    for root_dir, sub_dirs, sub_files in os.walk(root_path):
        for sub_dir in sub_dirs:
            walk_and_update(git_cmd_type, os.path.join(root_dir, sub_dir), continue_when_meet_git,
                            depth, max_depth, git_stash_if_have_uncommitted_changes,
                            dirty_git_repo_dirs, git_update_thread_pools)
        # Clear in place so os.walk does not descend further itself;
        # recursion above handles the subtree.
        sub_dirs.clear()
        sub_files.clear()


class Usage(Exception):
    """Raised for command-line usage errors; carries the message to display."""

    def __init__(self, msg):
        self.msg = msg


def main(argv=None):
    """CLI entry point. Returns 2 on usage errors, None otherwise."""
    if argv is None:
        argv = sys.argv
    try:
        try:
            g_git_cmd_type: GitCommandType = GitCommandType.nop
            g_walk_paths: list = ["."]
            g_git_stash_if_have_uncommitted_changes: bool = False
            g_continue_when_meet_git: bool = False
            g_stop_when_meet_max_depth: int = 10
            opts, args = getopt.getopt(argv[1:], "hycd:",
                                       ["help", "path", "git_stash_if_have_uncommitted_changes",
                                        "continue_when_meet_git",
                                        "stop_when_meet_max_depth=10"])
            if len(args) > 0:
                g_git_cmd_type = GitCommandType[args[0]]
            if len(args) > 1:
                g_walk_paths = args[1:]
            for op, value in opts:
                if op == "-y":
                    g_git_stash_if_have_uncommitted_changes = True
                elif op == "-c":
                    g_continue_when_meet_git = True
                elif op == "-d":
                    # FIX: getopt hands back a string; depth comparison needs int.
                    g_stop_when_meet_max_depth = int(value)
                elif op == "-h":
                    print("=======""Usage:")
                    print("python git_pull_all.py pull|push .")
                    print("python git_pull_all.py -y -c -d 10 pull|push YourPath")
                    print("python git_pull_all.py"
                          " --git_stash_if_have_uncommitted_changes "
                          "--continue_when_meet_git "
                          "--stop_when_meet_max_depth=10 pull|push YourPath")
                    print("=======")
                    # NOTE(review): constructed but never raised in the original;
                    # kept as-is since sys.exit() below terminates anyway.
                    Usage("-h")
                    sys.exit()
            g_dirty_git_repo_dirs = []
            g_git_update_thread_pools = []
            for walk_path in g_walk_paths:
                walk_and_update(g_git_cmd_type, walk_path, g_continue_when_meet_git, 0,
                                g_stop_when_meet_max_depth, g_git_stash_if_have_uncommitted_changes,
                                g_dirty_git_repo_dirs, g_git_update_thread_pools)
            for git_update_thread in g_git_update_thread_pools:
                git_update_thread.join(30)
            if len(g_dirty_git_repo_dirs) != 0:
                print('these repos have uncommitted changes or conflicts:\r\n')
                for dirty_repo_dir in g_dirty_git_repo_dirs:
                    print('dir %s has uncommited changes or conflicts, please check\r\n' % (dirty_repo_dir))
            print("Done git " + g_git_cmd_type.name + " all")
        except getopt.error as msg:
            raise Usage(msg)
    except Usage as err:
        # FIX: was Python-2 "print >> sys.stderr" syntax.
        print(err.msg, file=sys.stderr)
        print("for help use --help", file=sys.stderr)
        return 2


if __name__ == "__main__":
    sys.exit(main())
39.256831
121
0.579065
921
7,184
4.127036
0.156352
0.060773
0.047356
0.044199
0.46935
0.36964
0.280716
0.213102
0.202052
0.191002
0
0.004607
0.335329
7,184
182
122
39.472527
0.791414
0.016425
0
0.224359
0
0
0.114682
0.025202
0
0
0
0
0
1
0.044872
false
0.025641
0.044872
0
0.192308
0.108974
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
6bac63dfef6fa2d50e75637d8cb0e279922534d7
1,943
py
Python
03-dearsanta/dateconvert.py
hugovk/NaNoGenMo-2016
e71b333173b221066f56adcdea4fe8cfdfd4e7c7
[ "FTL" ]
null
null
null
03-dearsanta/dateconvert.py
hugovk/NaNoGenMo-2016
e71b333173b221066f56adcdea4fe8cfdfd4e7c7
[ "FTL" ]
null
null
null
03-dearsanta/dateconvert.py
hugovk/NaNoGenMo-2016
e71b333173b221066f56adcdea4fe8cfdfd4e7c7
[ "FTL" ]
null
null
null
#!/usr/bin/env python3
"""
Take a timestamp like: 25/11/2016 23:05:03
Convert it to:
25 November 2016, 13:05 PST
25 November 2016, 16:05 EST
25 November 2016, 21:05 GMT
25 November 2016, 21:05 UTC
25 November 2016, 23:05 EET
26 November 2016, 02:35 IST
26 November 2016, 05:05 CST
26 November 2016, 06:05 JST
26 November 2016, 08:05 AEDT
"""
import argparse

import pytz  # pip install pytz
from dateutil.parser import parse  # pip install python-dateutil


def utc_to_local(utc_dt, local_tz):
    """Treat utc_dt as UTC and express it in local_tz."""
    shifted = utc_dt.replace(tzinfo=pytz.utc).astimezone(local_tz)
    # normalize() fixes DST edge cases; it may be redundant here.
    return local_tz.normalize(shifted)


if __name__ == "__main__":
    parser = argparse.ArgumentParser(
        description="Convert a timestamp into eight others.",
        formatter_class=argparse.ArgumentDefaultsHelpFormatter,
    )
    parser.add_argument("timestamp", help="Input timestamp")
    args = parser.parse_args()

    # Parse with day-first convention (25/11/2016 = 25 November).
    indate = parse(args.timestamp, dayfirst=True, yearfirst=False)
    local_tz = pytz.timezone("Europe/Helsinki")
    localdt = local_tz.localize(indate)

    # Target zones, west to east; UTC and the local zone sit in the middle.
    zones = [pytz.timezone(name) for name in ("US/Pacific", "US/Eastern", "Europe/London")]
    zones.append(pytz.UTC)
    zones.append(local_tz)
    zones.extend(pytz.timezone(name) for name in
                 ("Asia/Calcutta", "Asia/Shanghai", "Asia/Tokyo", "Australia/Sydney"))

    for tz in zones:
        timezone_name = tz.localize(indate).tzname()
        local_date = localdt.astimezone(tz).strftime("%d %B %Y, %H:%M")
        print(f"{local_date} {timezone_name}")
25.906667
74
0.662378
261
1,943
4.808429
0.43295
0.086056
0.055777
0.025498
0.028685
0
0
0
0
0
0
0.068898
0.215646
1,943
74
75
26.256757
0.754593
0.288214
0
0
0
0
0.15593
0
0
0
0
0
0
1
0.027027
false
0
0.081081
0
0.135135
0.027027
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
6bad66e09e43cf21ce9d5331e922f80d18955982
799
py
Python
priv_tube/database/repositories/system_flags.py
ActionCactus/prive_tube
d6d53b87e1a91248e32532a86b23f2d2f3196c58
[ "MIT" ]
null
null
null
priv_tube/database/repositories/system_flags.py
ActionCactus/prive_tube
d6d53b87e1a91248e32532a86b23f2d2f3196c58
[ "MIT" ]
null
null
null
priv_tube/database/repositories/system_flags.py
ActionCactus/prive_tube
d6d53b87e1a91248e32532a86b23f2d2f3196c58
[ "MIT" ]
null
null
null
from priv_tube.database.models.system_flags import SystemFlags as Model
from priv_tube.database import db


class SystemFlags:
    """
    Repository for interacting with the `system_flags` database table
    responsible for system-wide toggles.
    """

    @staticmethod
    def _find(setting_name: str):
        # Single lookup point shared by every repository method.
        return Model.query.filter_by(flag_name=setting_name).first()

    @staticmethod
    def is_enabled(setting_name: str) -> bool:
        return bool(SystemFlags._find(setting_name).value)

    @staticmethod
    def _set(setting_name: str, state: bool):
        # Flip the flag to the requested state and persist immediately.
        record = SystemFlags._find(setting_name)
        record.value = state
        db.session.commit()

    @staticmethod
    def enable(setting_name: str):
        SystemFlags._set(setting_name, True)

    @staticmethod
    def disable(setting_name: str):
        SystemFlags._set(setting_name, False)
29.592593
106
0.693367
102
799
5.264706
0.421569
0.122905
0.078212
0.117318
0.370577
0.370577
0.370577
0.370577
0.370577
0.370577
0
0
0.210263
799
26
107
30.730769
0.85103
0.12766
0
0.411765
0
0
0
0
0
0
0
0
0
1
0.176471
false
0
0.117647
0
0.411765
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
6baec59b9ec0b23b54791668f80cdb75f7f78fe2
2,899
py
Python
borisat/models/rd/unnested.py
CircleOnCircles/borisat
4f170ee1a0b11f06b1c3e99f42823061d7e0028e
[ "MIT" ]
null
null
null
borisat/models/rd/unnested.py
CircleOnCircles/borisat
4f170ee1a0b11f06b1c3e99f42823061d7e0028e
[ "MIT" ]
1
2020-10-10T08:18:29.000Z
2020-10-10T08:18:29.000Z
borisat/models/rd/unnested.py
CircleOnCircles/borisat
4f170ee1a0b11f06b1c3e99f42823061d7e0028e
[ "MIT" ]
null
null
null
"""Flatten SOAP-style nested payloads from the RD service.

Example input::

    {'vNID': {'anyType': ['0105558096348']},
     'vtin': None,
     'vtitleName': {'anyType': ['บริษัท']},
     'vName': {'anyType': ['โฟลว์แอคเคาท์ จำกัด']},
     'vSurname': {'anyType': ['-']},
     'vBranchTitleName': {'anyType': ['บริษัท']},
     'vBranchName': {'anyType': ['โฟลว์แอคเคาท์ จำกัด']},
     'vBranchNumber': {'anyType': [0]},
     'vBuildingName': {'anyType': ['ชุดสกุลไทย สุรวงศ์ ทาวเวอร์']},
     'vFloorNumber': {'anyType': ['11']},
     'vVillageName': {'anyType': ['-']},
     'vRoomNumber': {'anyType': ['12B']},
     'vHouseNumber': {'anyType': ['141/12']},
     'vMooNumber': {'anyType': ['-']},
     'vSoiName': {'anyType': ['-']},
     'vStreetName': {'anyType': ['สุรวงศ์']},
     'vThambol': {'anyType': ['สุริยวงศ์']},
     'vAmphur': {'anyType': ['บางรัก']},
     'vProvince': {'anyType': ['กรุงเทพมหานคร']},
     'vPostCode': {'anyType': ['10500']},
     'vBusinessFirstDate': {'anyType': ['2016/04/07']},
     'vmsgerr': None}
"""
from typing import Any
from typing import Dict
from typing import List
from typing import Optional
from typing import Union

import stringcase as stringcase
from loguru import logger


def unnest(soap_data: Dict[str, Optional[Dict[str, List[Union[str, int]]]]], nonull: bool = True):
    """Flatten a SOAP response dict into a plain snake_case dict.

    Each value of the form ``{'anyType': [x, ...]}`` becomes ``x``; a leading
    'v' is stripped from each key and the remainder snake_cased.

    :param soap_data: raw SOAP payload keyed by 'v...'-prefixed names
    :param nonull: when True (default), drop entries with falsy values first
    :return: flattened dict of snake_case key -> first anyType element
    """
    # drop falsy entries unless the caller opted out
    if nonull:
        notnull = {k: v for k, v in soap_data.items() if v}
    else:
        # BUGFIX: `notnull` was previously undefined when nonull=False,
        # raising NameError; fall back to the raw payload.
        notnull = soap_data

    # anytype flatten
    flatten = {}
    for k, v in notnull.items():
        if k.startswith('v'):
            k = k[1:]
        k = stringcase.snakecase(k)
        try:
            flatten[k] = v['anyType'][0]
            if len(v['anyType']) > 1:
                # Extra elements are silently dropped; surface the case upstream.
                logger.info(
                    "please let dev. know this case exists. by creating an issue on https://github.com/CircleOnCircles/borisat/issues.")
        except Exception as e:
            logger.exception("unseen format")
    return flatten


def get_error(unnested: Dict[str, Any]) -> Optional[str]:
    """ get error if any"""
    if error_message := unnested.get('msgerr'):
        return error_message
    # BUGFIX: return None (not False) to honour the Optional[str] contract;
    # both are falsy, so truthiness-based callers are unaffected.
    return None
19.993103
137
0.397378
255
2,899
4.592157
0.494118
0.042699
0.068318
0.018787
0.06661
0.06661
0.044406
0.044406
0.044406
0.044406
0
0.024809
0.457744
2,899
144
138
20.131944
0.704835
0.588479
0
0
0
0.035714
0.142166
0
0
0
0
0
0
1
0.071429
false
0
0.25
0
0.428571
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
6bb2d695f2072ac119b9190737ac6f9ea82daea3
7,969
py
Python
Kernelized-Rank-Learning-master/dataset.py
zsyasd/drug-response-test
65bff3423a0cc972dd4664b7f547767c1f10f635
[ "MIT" ]
1
2021-01-22T04:06:57.000Z
2021-01-22T04:06:57.000Z
Kernelized-Rank-Learning-master/dataset.py
zsyasd/drug-response-test
65bff3423a0cc972dd4664b7f547767c1f10f635
[ "MIT" ]
null
null
null
Kernelized-Rank-Learning-master/dataset.py
zsyasd/drug-response-test
65bff3423a0cc972dd4664b7f547767c1f10f635
[ "MIT" ]
null
null
null
#!/usr/bin/env python
# Download the GDSC drug-response datasets listed in a URLs file and preprocess
# them into .npz archives, pairing each feature type (GEX / WES / CNV / MET)
# with threshold-normalized LOG_IC50 drug-response values.
# NOTE(review): this script is Python 2 (print statements, urllib2).

import os
import sys
import urllib2
import zipfile
import numpy as np
import pandas as pd
from openpyxl import load_workbook
from misc import intersect_index


def main():
    # Read URLs to GDSC datasets (first CLI argument is the URLs file;
    # only http/https lines are kept).
    urls_file = sys.argv[1]
    urls = []
    with open(urls_file) as f:
        for line in f:
            if line.startswith('http://') or line.startswith('https://'):
                urls.append(line[:-1])  # strip the trailing newline

    # Create data folder under the current working directory
    directory = '%s/data' % os.getcwd()
    if not os.path.exists(directory):
        os.makedirs(directory)

    # Download datasets; zip archives are extracted in place
    for url in urls:
        print 'Downloading %s' % url
        local_fn = os.path.join(directory, os.path.basename(url))
        remote_file = urllib2.urlopen(url)
        with open(local_fn, 'wb') as local_file:
            local_file.write(remote_file.read())
        remote_file.close()
        if local_fn.endswith('.zip'):
            with zipfile.ZipFile(local_fn, 'r') as zip_ref:
                zip_ref.extractall(directory)

    print 'Preprocessing the GDSC dataset...'

    # Read Gene expression dataset; transpose so rows are cell lines
    GEX_file = '%s/Cell_line_RMA_proc_basalExp.txt' % directory
    GEX = pd.read_csv(GEX_file, sep='\t')
    GEX_gene_symbols = np.array(GEX['GENE_SYMBOLS'], dtype='str')
    GEX = GEX.drop(['GENE_SYMBOLS', 'GENE_title'], axis=1)
    GEX_cell_ids = np.array(GEX.columns, dtype='str')
    for i, cell_id in enumerate(GEX_cell_ids):
        GEX_cell_ids[i] = cell_id[5:]  # drops a 5-char column prefix — presumably 'DATA.'; confirm against the file
    GEX = np.array(GEX.values, dtype=np.float).T

    # Read Exome sequencing dataset (binary event matrix, transposed)
    WES_file = '%s/CellLines_CG_BEMs/PANCAN_SEQ_BEM.txt' % directory
    WES = pd.read_csv(WES_file, sep='\t')
    WES_CG = np.array(WES['CG'], dtype='str')
    WES = WES.drop(['CG'], axis=1)
    WES_cell_ids = np.array(WES.columns, dtype='str')
    WES = np.array(WES.values, dtype=np.int).T

    # Read Copy number dataset (cell lines already on rows — no transpose)
    CNV_file = '%s/CellLine_CNV_BEMs/PANCAN_CNA_BEM.rdata.txt' % directory
    CNV = pd.read_csv(CNV_file, sep='\t')
    CNV_cell_ids = np.array(CNV['Unnamed: 0'], dtype='str')
    CNV = CNV.drop(['Unnamed: 0'], axis=1)
    CNV_cna = np.array(CNV.columns, dtype='str')
    CNV = np.array(CNV.values, dtype=int)

    # Read Methylation dataset (transposed so rows are cell lines)
    MET_file = '%s/METH_CELLLINES_BEMs/PANCAN.txt' % directory
    MET = pd.read_csv(MET_file, sep='\t')
    MET_met = np.array(MET['Unnamed: 0'], dtype='str')
    MET = MET.drop(['Unnamed: 0'], axis=1)
    MET_cell_ids = np.array(MET.columns, dtype='str')
    MET = np.array(MET.values, dtype=int).T

    # Read LOG_IC50 dataset from the supplementary spreadsheet.
    # Rows 7..996 hold cell lines (col A: id, col B: name); rows 5/6 hold
    # drug ids/names starting at the third column.
    IC50_file = '%s/TableS4A.xlsx' % directory
    wb = load_workbook(filename=IC50_file)
    sheet = wb['TableS4A-IC50s']
    IC50_cell_ids, IC50_cell_names = [], []
    for i in range(7, 997):
        IC50_cell_ids.append('%s' % sheet['A%s' % i].value)
        IC50_cell_names.append(('%s' % sheet['B%s' % i].value).strip())
    IC50_cell_ids = np.array(IC50_cell_ids, dtype='str')
    IC50_cell_names = np.array(IC50_cell_names, dtype='str')
    IC50_drug_ids, IC50_drug_names = [], []
    for i, (cell_row5, cell_row6) in enumerate(zip(sheet[5], sheet[6])):
        if i > 1:
            IC50_drug_ids.append('%s' % cell_row5.value)
            IC50_drug_names.append(('%s' % cell_row6.value).strip())
    IC50_drug_ids = np.array(IC50_drug_ids, dtype='str')
    IC50_drug_names = np.array(IC50_drug_names, dtype='str')
    # Missing measurements ('NA') stay NaN
    IC50 = np.ones([IC50_cell_ids.shape[0], IC50_drug_ids.shape[0]]) * np.nan
    for i in range(7, 997):
        for j, cell in enumerate(sheet[i]):
            if j > 1:
                if cell.value != 'NA':
                    IC50[i - 7, j - 2] = cell.value

    # Read LOG_IC50 Threshold (per-drug binarization thresholds, row 7)
    threshold_file = '%s/TableS5C.xlsx' % directory
    wb = load_workbook(filename=threshold_file)
    sheet = wb['Table-S5C binaryIC50s']
    threshold = []
    for i, cell in enumerate(sheet[7]):
        if i > 1:
            threshold.append(cell.value)
    threshold = np.array(threshold)
    # Drug ids matching the threshold columns come from a third spreadsheet
    drug_ids_file = '%s/TableS1F.xlsx' % directory
    wb = load_workbook(filename=drug_ids_file)
    sheet = wb['TableS1F_ScreenedCompounds']
    threshold_drug_ids = []
    for i in range(4, 269):
        threshold_drug_ids.append('%s' % sheet['B%s'%i].value)
    threshold_drug_ids = np.array(threshold_drug_ids)

    # Normalize IC50 by the threshold: keep drugs present in both tables,
    # negate (threshold-relative) and shift so the minimum becomes zero.
    merged = intersect_index(IC50_drug_ids, threshold_drug_ids)
    IC50_keep_index = np.array(merged['index1'].values, dtype=np.int)
    IC50_drug_ids = IC50_drug_ids[IC50_keep_index]
    IC50 = IC50[:, IC50_keep_index]
    threshold_keep_index = np.array(merged['index2'].values, dtype=np.int)
    threshold_drug_ids = threshold_drug_ids[threshold_keep_index]
    threshold = threshold[threshold_keep_index]
    IC50_norm = - (IC50 - threshold)
    IC50_norm_min = np.min(IC50_norm[~np.isnan(IC50_norm)])
    IC50_norm = IC50_norm - IC50_norm_min

    # Save the GEX features and normalized IC50 dataset
    # (cell lines intersected between the feature table and IC50 table)
    merged = intersect_index(GEX_cell_ids, IC50_cell_ids)
    GEX_keep_index = np.array(merged['index1'].values, dtype=np.int)
    IC50_keep_index = np.array(merged['index2'].values, dtype=np.int)
    GEX = GEX[GEX_keep_index]
    GEX_cell_ids = GEX_cell_ids[GEX_keep_index]
    GEX_cell_names = IC50_cell_names[IC50_keep_index]
    IC50 = IC50_norm[IC50_keep_index]
    np.savez('%s/GDSC_GEX.npz' % directory, X=GEX, Y=IC50, cell_ids=GEX_cell_ids,
             cell_names=GEX_cell_names, drug_ids=IC50_drug_ids,
             drug_names=IC50_drug_names, GEX_gene_symbols=GEX_gene_symbols)
    print 'Gene expression (GEX) dataset: {} cell lines, {} features, {} drugs'.format(GEX.shape[0], GEX.shape[1], IC50.shape[1])

    # Save the WES features and normalized IC50 dataset
    merged = intersect_index(WES_cell_ids, IC50_cell_ids)
    WES_keep_index = np.array(merged['index1'].values, dtype=np.int)
    IC50_keep_index = np.array(merged['index2'].values, dtype=np.int)
    WES = WES[WES_keep_index]
    WES_cell_ids = WES_cell_ids[WES_keep_index]
    WES_cell_names = IC50_cell_names[IC50_keep_index]
    IC50 = IC50_norm[IC50_keep_index]
    np.savez('%s/GDSC_WES.npz' % directory, X=WES, Y=IC50, cell_ids=WES_cell_ids,
             cell_names=WES_cell_names, drug_ids=IC50_drug_ids,
             drug_names=IC50_drug_names, WES_CG=WES_CG)
    print 'Whole-exome sequencing (WES) dataset: {} cell lines, {} features, {} drugs'.format(WES.shape[0], WES.shape[1], IC50.shape[1])

    # Save the CNV features and normalized IC50 dataset
    merged = intersect_index(CNV_cell_ids, IC50_cell_ids)
    CNV_keep_index = np.array(merged['index1'].values, dtype=np.int)
    IC50_keep_index = np.array(merged['index2'].values, dtype=np.int)
    CNV = CNV[CNV_keep_index]
    CNV_cell_ids = CNV_cell_ids[CNV_keep_index]
    CNV_cell_names = IC50_cell_names[IC50_keep_index]
    IC50 = IC50_norm[IC50_keep_index]
    np.savez('%s/GDSC_CNV.npz' % directory, X=CNV, Y=IC50, cell_ids=CNV_cell_ids,
             cell_names=CNV_cell_names, drug_ids=IC50_drug_ids,
             drug_names=IC50_drug_names, CNV_cna=CNV_cna)
    print 'Copy number variation (CNV) dataset: {} cell lines, {} features, {} drugs'.format(CNV.shape[0], CNV.shape[1], IC50.shape[1])

    # Save the MET features and normalized IC50 dataset
    merged = intersect_index(MET_cell_ids, IC50_cell_ids)
    MET_keep_index = np.array(merged['index1'].values, dtype=np.int)
    IC50_keep_index = np.array(merged['index2'].values, dtype=np.int)
    MET = MET[MET_keep_index]
    MET_cell_ids = MET_cell_ids[MET_keep_index]
    MET_cell_names = IC50_cell_names[IC50_keep_index]
    IC50 = IC50_norm[IC50_keep_index]
    np.savez('%s/GDSC_MET.npz' % directory, X=MET, Y=IC50, cell_ids=MET_cell_ids,
             cell_names=MET_cell_names, drug_ids=IC50_drug_ids,
             drug_names=IC50_drug_names, MET_met=MET_met)
    print 'Methylation (MET) dataset: {} cell lines, {} features, {} drugs'.format(MET.shape[0], MET.shape[1], IC50.shape[1])

    print 'Finished.'


if __name__ == '__main__':
    main()
40.866667
136
0.677626
1,228
7,969
4.126222
0.145765
0.048352
0.038484
0.034735
0.420762
0.318137
0.248865
0.227353
0.186304
0.186304
0
0.038569
0.189861
7,969
194
137
41.07732
0.746283
0.059104
0
0.082759
0
0
0.119401
0.023666
0
0
0
0
0
0
null
null
0
0.055172
null
null
0.048276
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
1
0
0
0
0
0
0
0
0
1
6bb3065e877b04d04efa39f8cb2dec584e2b65df
3,696
py
Python
app/utils/logger.py
janaSunrise/ZeroCOM
7197684ce708f080fe215b0a6e57c12836e4c0ab
[ "Apache-2.0" ]
6
2021-03-27T08:58:04.000Z
2021-05-23T17:07:09.000Z
app/utils/logger.py
janaSunrise/ZeroCOM
7197684ce708f080fe215b0a6e57c12836e4c0ab
[ "Apache-2.0" ]
2
2021-05-30T08:06:53.000Z
2021-06-02T17:02:06.000Z
app/utils/logger.py
janaSunrise/ZeroCOM
7197684ce708f080fe215b0a6e57c12836e4c0ab
[ "Apache-2.0" ]
null
null
null
# -- Imports --
from datetime import datetime

from colorama import Back
from rich.console import Console

from .colors import get_bright_color, get_color

# -- Mappings --
# Per-level ANSI color for the message body.
log_color_mapping = {
    "error": get_bright_color("RED"),
    "warning": get_bright_color("YELLOW"),
    "message": get_color("CYAN"),
    "success": get_bright_color("GREEN"),
    "info": get_bright_color("MAGENTA"),
    "critical": get_bright_color("RED") + Back.YELLOW,
    "flash": get_bright_color("BLUE"),
}

# Per-level "[x]" marker printed before the message.
log_mapping = {
    "error": f"[{log_color_mapping['error']}%{get_color('RESET')}]",
    "warning": f"[{log_color_mapping['warning']}!{get_color('RESET')}]",
    "message": f"[{log_color_mapping['message']}>{get_color('RESET')}]",
    "success": f"[{log_color_mapping['success']}+{get_color('RESET')}]",
    "info": f"[{log_color_mapping['info']}#{get_color('RESET')}]",
    "critical": f"[{log_color_mapping['critical']}X{get_color('RESET')}{Back.RESET}]",
    "flash": f"[{log_color_mapping['flash']}-{get_color('RESET')}]",
}


class Logger:
    """Colored console logger with one method per severity level.

    Refactor: the seven previously copy-pasted level methods now share a
    single private `_log` helper; the public interface is unchanged.
    """

    def __init__(self):
        self._console = Console()

    @staticmethod
    def _append_date(message: str) -> str:
        """Prefix message with the current H:M:S timestamp in bright cyan."""
        timestamp = datetime.now()
        timestamp = (
            f"{get_bright_color('CYAN')}"
            f"{timestamp.hour}:{timestamp.minute}:{timestamp.second}"
            f"{get_bright_color('RESET')}"
        )
        return f"[{timestamp}]{message}"

    def _log(self, log_type: str, message: str, date: bool) -> None:
        # Shared implementation for every plain level: level marker,
        # level color, optional timestamp prefix.
        message_prefix = log_mapping[log_type]
        message = f"{message_prefix} {log_color_mapping[log_type]}{message}"
        if date:
            message = self._append_date(message)
        print(message)

    def error(self, message: str, date: bool = True) -> None:
        self._log("error", message, date)

    def warning(self, message: str, date: bool = True) -> None:
        self._log("warning", message, date)

    def message(self, username: str, message: str, date: bool = True, **kwargs) -> None:
        """Chat-style line: colored username prefix, body rendered via rich."""
        message_prefix = log_mapping["message"]
        message_pre = (
            f"{get_bright_color('YELLOW')} {username}{get_color('RESET')} "
            f"{message_prefix} "
        )
        if date:
            message_pre = self._append_date(message_pre)
        print(message_pre, end="")
        self._console.print(message, **kwargs)

    def success(self, message: str, date: bool = True) -> None:
        self._log("success", message, date)

    def info(self, message: str, date: bool = True) -> None:
        self._log("info", message, date)

    def critical(self, message: str, date: bool = True) -> None:
        self._log("critical", message, date)

    def flash(self, message: str, date: bool = True) -> None:
        self._log("flash", message, date)
29.806452
102
0.619589
444
3,696
4.873874
0.128378
0.064695
0.097043
0.126155
0.507394
0.47597
0.47597
0.458872
0.458872
0.356285
0
0
0.232413
3,696
123
103
30.04878
0.762778
0.007576
0
0.380952
0
0
0.293042
0.216098
0
0
0
0
0
1
0.107143
false
0
0.047619
0
0.178571
0.095238
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
6bb44465b2c1e375804bcf23757d647344be8b01
1,506
py
Python
modasci/data_connector.py
ArashLab/MoDaSci
3f173a9ca686edf00a2f143814b603d4e3be9de2
[ "MIT" ]
null
null
null
modasci/data_connector.py
ArashLab/MoDaSci
3f173a9ca686edf00a2f143814b603d4e3be9de2
[ "MIT" ]
null
null
null
modasci/data_connector.py
ArashLab/MoDaSci
3f173a9ca686edf00a2f143814b603d4e3be9de2
[ "MIT" ]
1
2021-09-11T13:59:13.000Z
2021-09-11T13:59:13.000Z
from munch import Munch

from .utils import import_class
from .serialization import YAMLMixin
from .micro_task import import_class


class DataConnector(YAMLMixin):
    """Pairs a data handler with an ordered chain of micro tasks.

    On the first read through `values`, the micro tasks run (in declaration
    order) over the handler's data and the augmented result is cached
    internally. Subsequent reads return the cached, augmented data; the
    original data handler is never modified. Writes go straight through to
    the handler.
    """

    def __init__(self, plainDataConnector, dataHandler):
        self.dataHandler = dataHandler
        self.augmentedVolatileData = None
        # Instantiate each configured micro task from its 'spec' class path.
        self.microTasks = []
        for taskConfig in plainDataConnector.get('microTasks', Munch({})):
            taskClass = import_class(taskConfig.spec)
            self.microTasks.append(taskClass(taskConfig))

    @property
    def values(self):
        if self.augmentedVolatileData is None:
            # First access: the data handler reads from the source, then each
            # micro task transforms the volatile copy in turn.
            self.augmentedVolatileData = self.dataHandler.values
            for microTask in self.microTasks:
                self.augmentedVolatileData = microTask.execute(self.augmentedVolatileData)
        return self.augmentedVolatileData

    @values.setter
    def values(self, newValue):
        # Writes bypass the cache; the handler persists back to the source.
        self.dataHandler.values = newValue

    def toDict(self):
        return {}  # ToDo
40.702703
117
0.717131
175
1,506
6.125714
0.434286
0.139925
0.031716
0
0
0
0
0
0
0
0
0
0.224436
1,506
36
118
41.833333
0.917808
0.347278
0
0
0
0
0.010482
0
0
0
0
0.027778
0
1
0.181818
false
0
0.227273
0.045455
0.545455
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
1
0
0
0
0
0
0
0
1
0
0
2
6bb46fd7a793cdd2673a191ac0ee65bd9e96f260
682
py
Python
gallery/migrations/0011_auto_20161126_1821.py
danielsinaga1/djangonewsfix
04b84240daabb840c1e715fbade8ae15b3b0f22c
[ "BSD-3-Clause" ]
19
2018-01-28T14:35:40.000Z
2020-12-04T03:04:02.000Z
gallery/migrations/0011_auto_20161126_1821.py
danielsinaga1/djangonewsfix
04b84240daabb840c1e715fbade8ae15b3b0f22c
[ "BSD-3-Clause" ]
8
2018-06-02T14:28:28.000Z
2021-08-06T10:22:37.000Z
gallery/migrations/0011_auto_20161126_1821.py
danielsinaga1/djangonewsfix
04b84240daabb840c1e715fbade8ae15b3b0f22c
[ "BSD-3-Clause" ]
21
2018-02-25T14:07:48.000Z
2020-05-28T23:10:52.000Z
# -*- coding: utf-8 -*- # Generated by Django 1.9.6 on 2016-11-26 16:21 from __future__ import unicode_literals from django.db import migrations, models class Migration(migrations.Migration): dependencies = [ ('gallery', '0010_auto_20161126_1543'), ] operations = [ migrations.AlterField( model_name='photograph', name='copyright', field=models.TextField(blank=True, help_text='Leave blank for default'), ), migrations.AlterField( model_name='photograph', name='credit', field=models.TextField(blank=True, help_text='Leave blank for default'), ), ]
26.230769
84
0.612903
73
682
5.561644
0.643836
0.098522
0.123153
0.142857
0.492611
0.492611
0.280788
0.280788
0.280788
0.280788
0
0.064386
0.271261
682
25
85
27.28
0.752515
0.09824
0
0.444444
1
0
0.181373
0.037582
0
0
0
0
0
1
0
false
0
0.111111
0
0.277778
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
6bb5785201bd7a9127918db4d2b10a17a6eb105d
1,213
py
Python
mechanic/drive_navigate_boost/drive_navigate_boost.py
oxrock/DisasterBot
36260e9ef8730edbae018ba87aa19aaad72c8814
[ "MIT" ]
11
2018-11-18T09:30:39.000Z
2021-08-24T18:47:48.000Z
mechanic/drive_navigate_boost/drive_navigate_boost.py
oxrock/DisasterBot
36260e9ef8730edbae018ba87aa19aaad72c8814
[ "MIT" ]
6
2020-01-31T11:37:56.000Z
2020-05-01T19:13:54.000Z
mechanic/drive_navigate_boost/drive_navigate_boost.py
oxrock/DisasterBot
36260e9ef8730edbae018ba87aa19aaad72c8814
[ "MIT" ]
7
2018-11-17T20:02:19.000Z
2020-05-01T15:07:13.000Z
from rlbot.agents.base_agent import SimpleControllerState

from mechanic.base_mechanic import BaseMechanic
from mechanic.drive_arrive_in_time import DriveArriveInTime
from skeleton.util.structure import Player
from util.linear_algebra import norm
from util.path_finder import find_fastest_path, first_target, optional_boost_target


class DriveNavigateBoost(BaseMechanic):
    """Drives toward a target location, optionally detouring through a boost pad."""

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.mechanic = DriveArriveInTime(self.agent, rendering_enabled=self.rendering_enabled)

    def step(self, car: Player, boost_pads, target_loc, target_dt=0) -> SimpleControllerState:
        # Pick the next waypoint: either the final target or a worthwhile
        # boost pad on the way.
        waypoint = optional_boost_target(boost_pads, car.location, target_loc, car.velocity, car.boost)

        # Only honour the arrival deadline when heading straight for the
        # final target; a boost detour gets no time constraint.
        arrival_time = target_dt if (waypoint == target_loc).all() else 0

        # Finished once within 25 units of the target and nearly on time.
        self.finished = norm(car.location - target_loc) < 25 and abs(target_dt) < 0.05

        return self.mechanic.step(car, waypoint, arrival_time)
40.433333
101
0.73042
156
1,213
5.423077
0.384615
0.06383
0.060284
0.070922
0.113475
0.113475
0.113475
0.113475
0.113475
0.113475
0
0.007056
0.182193
1,213
29
102
41.827586
0.845766
0.127782
0
0
0
0
0
0
0
0
0
0
0
1
0.111111
false
0
0.333333
0
0.555556
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
1
0
1
0
0
2
6bb5a3ae1541a6323233d61a349487d06b8c5714
1,907
py
Python
Simplex_Files/Dual_Problem.py
c-randall/Primal-Simplex-Method
620d7598691ed9717d2d18706c44e462f75e85c5
[ "BSD-3-Clause" ]
1
2021-12-04T12:18:17.000Z
2021-12-04T12:18:17.000Z
Simplex_Files/Dual_Problem.py
c-randall/Primal-Simplex-Method
620d7598691ed9717d2d18706c44e462f75e85c5
[ "BSD-3-Clause" ]
null
null
null
Simplex_Files/Dual_Problem.py
c-randall/Primal-Simplex-Method
620d7598691ed9717d2d18706c44e462f75e85c5
[ "BSD-3-Clause" ]
2
2020-05-30T16:38:37.000Z
2022-01-22T19:50:42.000Z
"""
Created on Wed Apr 3 13:07:18 2019

Author: Corey R. Randall

Summary: If the user wishes to solve the Dual Problem over the Primal one,
this function provides support to appropriately convert the problem into its
alternate form.
"""

""" Import needed modules """
"-----------------------------------------------------------------------------"
import numpy as np

""" Function definition """
"-----------------------------------------------------------------------------"
def dual_problem(user_inputs, conversion):
    """Build the dual LP data dictionary from a primal `conversion` dict.

    Expects conversion['A'] (m x n), conversion['b'] (m x 1 column vector),
    conversion['c_coeff'] (1 x n row vector) plus the sizes 'n', 'm',
    'n_slack'. NOTE: conversion['c_coeff'] is negated in place for any
    negative entries, matching the original behavior.
    """
    # Unpack the primal problem for readability:
    primal_A = conversion['A']
    primal_b = conversion['b']
    primal_c = conversion['c_coeff']
    n_vars = conversion['n']
    n_cons = conversion['m']
    n_slack = conversion['n_slack']

    # Unrestricted dual variables are split as y = y' - y'': duplicate every
    # entry of A^T and negate the second copy of each pair, then append an
    # identity block for the dual slack variables.
    doubled = np.repeat(primal_A.T, 2)
    doubled[1::2] = -doubled[1::2]
    dual_A = np.reshape(doubled, [primal_A.shape[1], 2 * primal_A.shape[0]])
    dual_A = np.hstack([dual_A, np.identity(dual_A.shape[0])])

    # The primal RHS b becomes the dual objective coefficients (same split).
    obj_doubled = np.repeat(primal_b.T, 2)
    obj_doubled[1::2] = -obj_doubled[1::2]
    dual_obj = np.reshape(obj_doubled, [primal_b.shape[1], 2 * primal_b.shape[0]])
    dual_obj = np.hstack([dual_obj, np.zeros([1, dual_A.shape[0]])])

    # The primal objective becomes the dual RHS; flip any constraint row
    # whose RHS entry is negative so the RHS stays non-negative.
    for row in range(primal_c.shape[1]):
        if primal_c[0, row] < 0:
            dual_A[row, :] = -dual_A[row, :]
            primal_c[0, row] = -primal_c[0, row]

    # Assemble the dual problem in the same dictionary layout:
    return {
        'A': dual_A,
        'b': primal_c.T,
        'c_coeff': -dual_obj,
        'n': 2 * n_cons,
        'm': n_vars + n_slack,
        'n_slack': n_vars + n_slack,
        'n_prim': n_vars,
        'n_slack_prim': n_slack,
    }
31.262295
80
0.529628
273
1,907
3.56044
0.322344
0.144033
0.024691
0.024691
0.09465
0.061728
0
0
0
0
0
0.025228
0.251704
1,907
60
81
31.783333
0.655922
0.262192
0
0.0625
0
0
0.162882
0.120595
0
0
0
0
0
1
0.03125
false
0
0.03125
0
0.09375
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
6bb5e552c9b4052b2b3d89711be844490a830473
1,764
py
Python
sgcc-client/api/my_api.py
zhanwei33/baai-federated-learning
838507f2344139e66385e1ef475a148fdeaf5c62
[ "Apache-2.0" ]
26
2020-11-30T09:42:17.000Z
2022-03-05T02:20:45.000Z
sgcc-client/api/my_api.py
AIOpenData/baai-federated-learning-helmet-baseline
8d9a1eded43a945f790d415f3ee37cda78b73c52
[ "Apache-2.0" ]
null
null
null
sgcc-client/api/my_api.py
AIOpenData/baai-federated-learning-helmet-baseline
8d9a1eded43a945f790d415f3ee37cda78b73c52
[ "Apache-2.0" ]
6
2020-12-01T13:01:15.000Z
2021-11-16T10:51:39.000Z
from api import my_api from service.federated.client import Client from utils.common_utils import Common from flask import request @my_api.route('/') def index(): return '<h1>Hello, this is client!</h1>' @my_api.route("/federated_train_size", methods=["GET", "POST"]) def federated_train_size(): return Client.get_federated_train_size() @my_api.route("/federated_train", methods=["GET", "POST"]) def federated_train(): # receive the server training epoch and initial or federated averaging model pickled_server_epoch = request.files["server_epoch"].read() pickled_server_model_params = request.files["server_model_params"].read() server_epoch = Common.get_object_by_pickle_bytes_func(pickled_server_epoch) server_model_params = Common.get_object_by_pickle_bytes_func(pickled_server_model_params) # return the local model after training of current client to server return Client.train(server_model_params=server_model_params, epoch=server_epoch) @my_api.route("/federated_test", methods=["GET", "POST"]) def federated_test(): # receive the final best model from server and do the evaluating pickled_best_model_params = request.files["best_model_params"].read() best_model_params = Common.get_object_by_pickle_bytes_func(pickled_best_model_params) return Client.test(test_model_params=best_model_params, mode="test") @my_api.route("/federated_detect", methods=["GET", "POST"]) def federated_detect(): # receive the final best model from server and do the evaluating pickled_best_model_params = request.files["best_model_params"].read() best_model_params = Common.get_object_by_pickle_bytes_func(pickled_best_model_params) return Client.detect(detect_model_params=best_model_params)
36.75
93
0.781746
253
1,764
5.098814
0.217391
0.153488
0.116279
0.058915
0.532558
0.414729
0.366667
0.366667
0.366667
0.327132
0
0.001287
0.119048
1,764
47
94
37.531915
0.828829
0.150794
0
0.148148
0
0
0.132708
0.014075
0
0
0
0
0
1
0.185185
false
0
0.148148
0.074074
0.518519
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
1
0
0
1
6bba5f186b30e976cabf28a647d3b87bff42e9f0
5,954
py
Python
backend/tracim_backend/tests/library/test_application_api.py
lezardrouge/tracim
713ff6066767554333e7e0b1de608ec1a7e4229c
[ "MIT" ]
null
null
null
backend/tracim_backend/tests/library/test_application_api.py
lezardrouge/tracim
713ff6066767554333e7e0b1de608ec1a7e4229c
[ "MIT" ]
null
null
null
backend/tracim_backend/tests/library/test_application_api.py
lezardrouge/tracim
713ff6066767554333e7e0b1de608ec1a7e4229c
[ "MIT" ]
null
null
null
from mock import Mock from tracim_backend.app_models.applications import Application from tracim_backend.app_models.contents import content_status_list from tracim_backend.app_models.workspace_menu_entries import all_content_menu_entry from tracim_backend.app_models.workspace_menu_entries import dashboard_menu_entry from tracim_backend.lib.core.application import ApplicationApi from tracim_backend.models.roles import WorkspaceRoles from tracim_backend.tests import DefaultTest class TestApplicationApi(DefaultTest): def test_get_default_workspace_menu_entry__ok__nominal_case(self): """ Show only enabled app """ app_config = Mock() app_config.APPS_COLORS = {} app_config.APPS_COLORS["primary"] = "#fff" thread = Application( label="Threads", slug="contents/thread", fa_icon="comments-o", is_active=True, config={}, main_route="/ui/workspaces/{workspace_id}/contents?type=thread", app_config=app_config, ) thread.add_content_type( slug="thread", label="Thread", creation_label="Start a topic", available_statuses=content_status_list.get_all(), file_extension=".thread.html", ) markdownpluspage = Application( label="Markdown Plus Documents", # TODO - G.M - 24-05-2018 - Check label slug="contents/markdownpluspage", fa_icon="file-code-o", is_active=False, config={}, main_route="/ui/workspaces/{workspace_id}/contents?type=markdownpluspage", app_config=app_config, ) markdownpluspage.add_content_type( slug="markdownpage", label="Rich Markdown File", creation_label="Create a Markdown document", available_statuses=content_status_list.get_all(), ) app_api = ApplicationApi(app_list=[thread, markdownpluspage], show_all=False) workspace = Mock() workspace.workspace_id = 12 workspace.agenda_enabled = True default_workspace_menu_entry = app_api.get_default_workspace_menu_entry(workspace=workspace) assert len(default_workspace_menu_entry) == 3 assert default_workspace_menu_entry[0].label == dashboard_menu_entry.label assert default_workspace_menu_entry[1].label == all_content_menu_entry.label 
assert default_workspace_menu_entry[2].label == thread.label def test_get_default_workspace_menu_entry__ok__folder_case(self): """ main route for folder is empty, that why it should not be included in default_menu entry :return: """ app_config = Mock() app_config.APPS_COLORS = {} app_config.APPS_COLORS["primary"] = "#fff" folder = Application( label="Folder", slug="contents/folder", fa_icon="folder-o", is_active=True, config={}, main_route="", app_config=app_config, ) folder.add_content_type( slug="folder", label="Folder", creation_label="Create a folder", available_statuses=content_status_list.get_all(), allow_sub_content=True, minimal_role_content_creation=WorkspaceRoles.CONTENT_MANAGER, ) app_api = ApplicationApi(app_list=[folder], show_all=False) workspace = Mock() workspace.workspace_id = 12 workspace.agenda_enabled = True default_workspace_menu_entry = app_api.get_default_workspace_menu_entry(workspace=workspace) assert len(default_workspace_menu_entry) == 2 assert default_workspace_menu_entry[0].label == dashboard_menu_entry.label assert default_workspace_menu_entry[1].label == all_content_menu_entry.label def test_get_default_workspace_menu_entry__ok__agenda_enabled_workspace_case(self): app_config = Mock() app_config.APPS_COLORS = {} app_config.APPS_COLORS["primary"] = "#fff" agenda = Application( label="Agenda", slug="agenda", fa_icon="calendar", is_active=True, config={}, main_route="/ui/workspaces/{workspace_id}/agenda", app_config=app_config, ) app_api = ApplicationApi(app_list=[agenda], show_all=False) workspace = Mock() workspace.workspace_id = 12 workspace.agenda_enabled = True default_workspace_menu_entry = app_api.get_default_workspace_menu_entry(workspace=workspace) assert len(default_workspace_menu_entry) == 3 assert default_workspace_menu_entry[0].label == dashboard_menu_entry.label assert default_workspace_menu_entry[1].label == all_content_menu_entry.label assert default_workspace_menu_entry[2].label == agenda.label def 
test_get_default_workspace_menu_entry__ok__agenda_disabled_workspace_case(self): app_config = Mock() app_config.APPS_COLORS = {} app_config.APPS_COLORS["primary"] = "#fff" agenda = Application( label="Agenda", slug="agenda", fa_icon="calendar", is_active=True, config={}, main_route="/ui/workspaces/{workspace_id}/agenda", app_config=app_config, ) app_api = ApplicationApi(app_list=[agenda], show_all=False) workspace = Mock() workspace.workspace_id = 12 workspace.agenda_enabled = False default_workspace_menu_entry = app_api.get_default_workspace_menu_entry(workspace=workspace) assert len(default_workspace_menu_entry) == 2 assert default_workspace_menu_entry[0].label == dashboard_menu_entry.label assert default_workspace_menu_entry[1].label == all_content_menu_entry.label
41.062069
100
0.658885
674
5,954
5.437685
0.172107
0.090859
0.141883
0.177353
0.700136
0.661664
0.661664
0.62101
0.610914
0.554161
0
0.006763
0.254955
5,954
144
101
41.347222
0.819432
0.026537
0
0.56
0
0
0.088261
0.036107
0
0
0
0.006944
0.112
1
0.032
false
0
0.064
0
0.104
0
0
0
0
null
0
0
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
6bbbb5f359c8e40427bcd1ee484f14b132a99a62
2,593
py
Python
gallery/m_cardloader.py
kengoon/KvGallery
4d946fa06479636411e027bfdebbb15c58c176cf
[ "MIT" ]
2
2021-05-28T13:37:07.000Z
2021-06-20T06:47:20.000Z
gallery/m_cardloader.py
kengoon/KvGallery
4d946fa06479636411e027bfdebbb15c58c176cf
[ "MIT" ]
null
null
null
gallery/m_cardloader.py
kengoon/KvGallery
4d946fa06479636411e027bfdebbb15c58c176cf
[ "MIT" ]
null
null
null
from kivy.event import EventDispatcher from kivy.metrics import dp from kivy.properties import ListProperty, StringProperty from kivymd.uix.card import MDCard from kivy.lang import Builder __all__ = "M_CardLoader" Builder.load_string( """ # kv_start <M_CardLoader>: md_bg_color: 0, 0, 0, 0 radius: [dp(10), ] ripple_behavior: True RelativeLayout: AsyncImage: id: image color: 0,0,0,0 source: root.source anim_delay: .1 allow_stretch: True keep_ratio: False nocache: True on_load: root.dispatch("on_load") canvas.before: StencilPush RoundedRectangle: pos: self.pos size: self.size radius: root.radius StencilUse canvas.after: StencilUnUse RoundedRectangle: size: self.size pos: self.pos radius: root.radius StencilPop M_AKImageLoader: id: loader radius: root.radius circle: False MDBoxLayout: id:box opacity: 0 padding: dp(10) adaptive_height: True md_bg_color: 0, 0, 0, .6 radius: [0, 0, root.radius[0], root.radius[0]] M_AKLabelLoader: text: root.text radius: root.text_radius size_hint_y: None theme_text_color: "Custom" text_color: root.text_color height: dp(20) if not self.text else self.texture_size[1] font_style: "Money" font_size: dp(16) halign:"center" # kv_end """ ) class M_CardLoader(MDCard): text = StringProperty("") text_radius = ListProperty([dp(5), ]) text_color = ListProperty([1, 1, 1, 1]) source = StringProperty("") def __init__(self, **kwargs): super().__init__(**kwargs) self.register_event_type("on_load") def on_load(self): self.ids.loader.opacity = 0 self.ids.image.color = [1, 1, 1, 1] def on_touch_down(self, touch): self.root.pause_clock() def on_touch_up(self, touch): timer = touch.time_end - touch.time_start if timer < 0.2: self.root.ids.raw.switch_tab("feeds") self.root.resume_clock() def on_release(self): self.root.ids.feeds.dispatch("on_tab_release")
27.585106
73
0.529117
288
2,593
4.559028
0.388889
0.013709
0.011424
0.018279
0.025895
0.018279
0
0
0
0
0
0.024284
0.38064
2,593
93
74
27.88172
0.793275
0
0
0
0
0
0.038657
0
0
0
0
0
0
1
0.178571
false
0
0.178571
0
0.535714
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
6bbc545ddb8b337163afbee7b7359f2bf1545ca8
763
py
Python
setup.py
OpenTAI/pre-commit-hooks
e123691fa26ff26d1a5f3513ee419bec6eef02ab
[ "MIT" ]
null
null
null
setup.py
OpenTAI/pre-commit-hooks
e123691fa26ff26d1a5f3513ee419bec6eef02ab
[ "MIT" ]
1
2022-02-16T10:19:25.000Z
2022-02-16T10:19:26.000Z
setup.py
OpenTAI/pre-commit-hooks
e123691fa26ff26d1a5f3513ee419bec6eef02ab
[ "MIT" ]
null
null
null
from setuptools import find_packages, setup # type: ignore def readme(): with open('./README.md', encoding='utf-8') as f: content = f.read() return content setup( name='pre_commit_hooks', version='0.1.0', description='A pre-commit hook for OpenTAI projects', long_description=readme(), long_description_content_type='text/markdown', url='https://github.com/OpenTAI/pre-commit-hooks', author='OpenTAI Team', author_email='', packages=find_packages(), python_requires='>=3.6', install_requires=['PyYAML'], entry_points={ 'console_scripts': [ 'say-hello=pre_commit_hooks.say_hello:main', 'check-copyright=pre_commit_hooks.check_copyright:main', ], }, )
26.310345
68
0.647444
93
763
5.107527
0.634409
0.094737
0.117895
0
0
0
0
0
0
0
0
0.009934
0.208388
763
28
69
27.25
0.77649
0.015727
0
0
0
0
0.351135
0.125501
0
0
0
0
0
1
0.041667
false
0
0.041667
0
0.125
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
6bbdd01acf71fc3e4336063b1dbd03093b010571
6,037
py
Python
baselines/ppo2/defaults.py
jinala/RLbaselines
3594c1edae49e1bb997057912cfb9b07531d41f4
[ "MIT" ]
null
null
null
baselines/ppo2/defaults.py
jinala/RLbaselines
3594c1edae49e1bb997057912cfb9b07531d41f4
[ "MIT" ]
null
null
null
baselines/ppo2/defaults.py
jinala/RLbaselines
3594c1edae49e1bb997057912cfb9b07531d41f4
[ "MIT" ]
1
2021-04-27T17:21:28.000Z
2021-04-27T17:21:28.000Z
import random import numpy as np def unif_range(a, b): return random.random() * (b - a) + a def rand_elem(xs): return xs[random.randrange(len(xs))] def rand_int_linspace(start, stop, num = 50): return rand_elem([int(x) for x in np.linspace(start, stop, num)]) def mujoco(): return dict( nsteps=2048, nminibatches=32, lam=0.95, gamma=0.99, noptepochs=10, log_interval=1, ent_coef=0.0, lr=lambda f: 3e-4 * f, cliprange=0.2, value_network='copy' ) def atari(): return dict( nsteps=128, nminibatches=4, lam=0.95, gamma=0.99, noptepochs=4, log_interval=1, ent_coef=.01, lr=lambda f : f * 2.5e-4, cliprange=0.1, ) def retro(): return atari() def car_retrieval_train(): lr = unif_range(0.003, 5e-6) print("lr: ", lr) return dict( # horizon = rand_int_linspace(32, 500), #nminibatches = rand_elem([1, 2, 4, 8, 16, 32, 64, 128, 256]), nminibatches = 1, # for lstm ent_coef = rand_elem([0.0, 0.01, 0.05, 0.1]), noptepochs = rand_int_linspace(3, 36), cliprange = rand_elem([0.1, 0.2, 0.3]), gamma = 0.99, lr = lambda f : f * lr ) ''' # best params for car retrieval bench def car_retrieval_train(): lr = 0.002 print("lr: ", lr) return dict( # horizon = rand_int_linspace(32, 500), nminibatches = 128, ent_coef = 0.01, noptepochs = 33, cliprange = 0.1, gamma = 0.99, lr = lambda f : f * lr ) def car_retrieval_train1(): lr = 0.002 print("lr: ", lr) return dict( # horizon = rand_int_linspace(32, 500), nminibatches = 128, ent_coef = 0.01, noptepochs = 33, cliprange = 0.1, gamma = 0.99, lr = lambda f : f * lr ) def car_retrieval_train2(): lr = 0.002 print("lr: ", lr) return dict( # horizon = rand_int_linspace(32, 500), nminibatches = 128, ent_coef = 0.01, noptepochs = 33, cliprange = 0.1, gamma = 0.99, lr = lambda f : f * lr ) def car_retrieval_train3(): lr = 0.002 print("lr: ", lr) return dict( # horizon = rand_int_linspace(32, 500), nminibatches = 128, ent_coef = 0.01, noptepochs = 33, cliprange = 0.1, gamma = 0.99, lr = lambda f : f * lr ) def car_retrieval_train4(): lr = 0.002 print("lr: ", lr) 
return dict( # horizon = rand_int_linspace(32, 500), nminibatches = 128, ent_coef = 0.01, noptepochs = 33, cliprange = 0.1, gamma = 0.99, lr = lambda f : f * lr ) def car_retrieval_train5(): lr = 0.002 print("lr: ", lr) return dict( # horizon = rand_int_linspace(32, 500), nminibatches = 128, ent_coef = 0.01, noptepochs = 33, cliprange = 0.1, gamma = 0.99, lr = lambda f : f * lr )''' def pendulum_train(): lr = unif_range(0.003, 5e-6) print("lr: ", lr) return dict( # horizon = rand_int_linspace(32, 500), #nminibatches = rand_elem([1, 2, 4, 8, 16, 32, 64, 128, 256, 512, 1024, 2048]), nminibatches = 1, #for lstm ent_coef = rand_elem([0.0, 0.01, 0.05, 0.1]), noptepochs = rand_int_linspace(3, 36), cliprange = rand_elem([0.1, 0.2, 0.3]), gamma = 0.99, lr = lambda f : f * lr ) ''' # best version for pendulum def pendulum_train(): lr = 0.0003 return dict( # horizon = rand_int_linspace(32, 500), nminibatches = 1, ent_coef = 0.01, noptepochs = 28, cliprange = 0.1, gamma = 0.99, lr = lambda f : f * lr )''' def mountain_car_train(): lr = unif_range(0.003, 5e-6) print("lr: ", lr) return dict( # horizon = rand_int_linspace(32, 500), nminibatches = 1, #rand_elem([1, 2, 4, 8, 16, 32, 64, 128, 256, 512, 1024, 2048]), ent_coef = rand_elem([0.0, 0.01, 0.05, 0.1]), noptepochs = rand_int_linspace(3, 36), cliprange = rand_elem([0.1, 0.2, 0.3]), gamma = 0.99, lr = lambda f : f * lr ) def quad_train(): lr = unif_range(0.003, 5e-6) print("lr: ", lr) return dict( # horizon = rand_int_linspace(32, 500), #nminibatches = rand_elem([1, 2, 4, 8, 16, 32, 64, 128, 256]), nminibatches=1, # for lstm ent_coef = rand_elem([0.0, 0.01, 0.05, 0.1]), noptepochs = rand_int_linspace(3, 36), cliprange = rand_elem([0.1, 0.2, 0.3]), gamma = 0.99, lr = lambda f : f * lr ) def quad_r_train(): lr = unif_range(0.003, 5e-6) print("lr: ", lr) return dict( # horizon = rand_int_linspace(32, 500), nminibatches = 1, #rand_elem([1, 2, 4, 8, 16, 32, 64, 128, 256]), ent_coef = rand_elem([0.0, 0.01, 0.05, 0.1]), noptepochs = 
rand_int_linspace(3, 36), cliprange = rand_elem([0.1, 0.2, 0.3]), gamma = 0.99, lr = lambda f : f * lr ) def acrobot_train(): lr = unif_range(0.003, 5e-6) print("lr: ", lr) return dict( # horizon = rand_int_linspace(32, 500), nminibatches = 1, #rand_elem([1, 2, 4, 8, 16, 32, 64, 128, 256, 512, 1024, 2048]), ent_coef = rand_elem([0.0, 0.01, 0.05, 0.1]), noptepochs = rand_int_linspace(3, 36), cliprange = rand_elem([0.1, 0.2, 0.3]), gamma = 0.99, lr = lambda f : f * lr ) def cartpole_train(): lr = unif_range(0.003, 5e-6) print("lr: ", lr) return dict( # horizon = rand_int_linspace(32, 500), nminibatches = 1, #rand_elem([1, 2, 4, 8, 16, 32, 64, 128, 256, 512, 1024, 2048]), ent_coef = rand_elem([0.0, 0.01, 0.05, 0.1]), noptepochs = rand_int_linspace(3, 36), cliprange = rand_elem([0.1, 0.2, 0.3]), gamma = 0.99, lr = lambda f : f * lr )
27.193694
90
0.525427
891
6,037
3.428732
0.106622
0.060229
0.10802
0.0491
0.839935
0.809493
0.809493
0.793781
0.793781
0.793781
0
0.145982
0.32599
6,037
222
91
27.193694
0.604817
0.120258
0
0.611111
0
0
0.009997
0
0
0
0
0
0
1
0.12037
false
0
0.018519
0.055556
0.259259
0.064815
0
0
0
null
0
0
0
1
1
1
1
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
6
6bc269d33439bd018df37c790bc02091c42db872
3,339
py
Python
pysnmp-with-texts/DLINK-3100-JUMBOFRAMES-MIB.py
agustinhenze/mibs.snmplabs.com
1fc5c07860542b89212f4c8ab807057d9a9206c7
[ "Apache-2.0" ]
8
2019-05-09T17:04:00.000Z
2021-06-09T06:50:51.000Z
pysnmp-with-texts/DLINK-3100-JUMBOFRAMES-MIB.py
agustinhenze/mibs.snmplabs.com
1fc5c07860542b89212f4c8ab807057d9a9206c7
[ "Apache-2.0" ]
4
2019-05-31T16:42:59.000Z
2020-01-31T21:57:17.000Z
pysnmp-with-texts/DLINK-3100-JUMBOFRAMES-MIB.py
agustinhenze/mibs.snmplabs.com
1fc5c07860542b89212f4c8ab807057d9a9206c7
[ "Apache-2.0" ]
10
2019-04-30T05:51:36.000Z
2022-02-16T03:33:41.000Z
# # PySNMP MIB module DLINK-3100-JUMBOFRAMES-MIB (http://snmplabs.com/pysmi) # ASN.1 source file:///Users/davwang4/Dev/mibs.snmplabs.com/asn1/DLINK-3100-JUMBOFRAMES-MIB # Produced by pysmi-0.3.4 at Wed May 1 12:48:33 2019 # On host DAVWANG4-M-1475 platform Darwin version 18.5.0 by user davwang4 # Using Python version 3.7.3 (default, Mar 27 2019, 09:23:15) # ObjectIdentifier, OctetString, Integer = mibBuilder.importSymbols("ASN1", "ObjectIdentifier", "OctetString", "Integer") NamedValues, = mibBuilder.importSymbols("ASN1-ENUMERATION", "NamedValues") ConstraintsUnion, ValueRangeConstraint, ConstraintsIntersection, ValueSizeConstraint, SingleValueConstraint = mibBuilder.importSymbols("ASN1-REFINEMENT", "ConstraintsUnion", "ValueRangeConstraint", "ConstraintsIntersection", "ValueSizeConstraint", "SingleValueConstraint") rnd, = mibBuilder.importSymbols("DLINK-3100-MIB", "rnd") NotificationGroup, ModuleCompliance = mibBuilder.importSymbols("SNMPv2-CONF", "NotificationGroup", "ModuleCompliance") IpAddress, Bits, ModuleIdentity, iso, NotificationType, Integer32, MibScalar, MibTable, MibTableRow, MibTableColumn, Unsigned32, TimeTicks, ObjectIdentity, Counter64, Counter32, Gauge32, MibIdentifier = mibBuilder.importSymbols("SNMPv2-SMI", "IpAddress", "Bits", "ModuleIdentity", "iso", "NotificationType", "Integer32", "MibScalar", "MibTable", "MibTableRow", "MibTableColumn", "Unsigned32", "TimeTicks", "ObjectIdentity", "Counter64", "Counter32", "Gauge32", "MibIdentifier") DisplayString, TextualConvention = mibBuilder.importSymbols("SNMPv2-TC", "DisplayString", "TextualConvention") rlJumboFrames = ModuleIdentity((1, 3, 6, 1, 4, 1, 171, 10, 94, 89, 89, 91)) rlJumboFrames.setRevisions(('2007-01-02 00:00',)) if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0): if mibBuilder.loadTexts: rlJumboFrames.setRevisionsDescriptions(('Initial revision.',)) if mibBuilder.loadTexts: rlJumboFrames.setLastUpdated('200701020000Z') if mibBuilder.loadTexts: rlJumboFrames.setOrganization('Dlink, 
Inc. Dlink Semiconductor, Inc.') if mibBuilder.loadTexts: rlJumboFrames.setContactInfo('www.dlink.com') if mibBuilder.loadTexts: rlJumboFrames.setDescription('This private MIB module defines Jumbo Frames private MIBs.') rlJumboFramesCurrentStatus = MibScalar((1, 3, 6, 1, 4, 1, 171, 10, 94, 89, 89, 91, 1), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("enable", 1), ("disable", 2)))).setMaxAccess("readonly") if mibBuilder.loadTexts: rlJumboFramesCurrentStatus.setStatus('current') if mibBuilder.loadTexts: rlJumboFramesCurrentStatus.setDescription('Show the current Jumbo Frames status') rlJumboFramesStatusAfterReset = MibScalar((1, 3, 6, 1, 4, 1, 171, 10, 94, 89, 89, 91, 2), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("enable", 1), ("disable", 2)))).setMaxAccess("readwrite") if mibBuilder.loadTexts: rlJumboFramesStatusAfterReset.setStatus('current') if mibBuilder.loadTexts: rlJumboFramesStatusAfterReset.setDescription('Set the Jumbo Frames status after reset') mibBuilder.exportSymbols("DLINK-3100-JUMBOFRAMES-MIB", rlJumboFramesCurrentStatus=rlJumboFramesCurrentStatus, rlJumboFramesStatusAfterReset=rlJumboFramesStatusAfterReset, PYSNMP_MODULE_ID=rlJumboFrames, rlJumboFrames=rlJumboFrames)
107.709677
477
0.790356
346
3,339
7.621387
0.407514
0.040956
0.071672
0.064467
0.350019
0.246871
0.246871
0.246871
0.246871
0.246871
0
0.059851
0.074274
3,339
30
478
111.3
0.793271
0.103624
0
0
0
0
0.260054
0.023458
0
0
0
0
0
1
0
false
0
0.318182
0
0.318182
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
1
0
0
0
0
2
6bc31e0e8c79fc64c7885e7fdab75aafe565a7f9
204
py
Python
WebFrameDocs/src/demo/flask-demo/apps/api/common/response.py
Bean-jun/LearnGuide
30a8567b222d18b15d3e9027a435b5bfe640a046
[ "MIT" ]
1
2022-02-23T13:42:01.000Z
2022-02-23T13:42:01.000Z
WebFrameDocs/src/demo/flask-demo/apps/api/common/response.py
Bean-jun/LearnGuide
30a8567b222d18b15d3e9027a435b5bfe640a046
[ "MIT" ]
null
null
null
WebFrameDocs/src/demo/flask-demo/apps/api/common/response.py
Bean-jun/LearnGuide
30a8567b222d18b15d3e9027a435b5bfe640a046
[ "MIT" ]
null
null
null
def response(code=200, msg=None, data=None): # 请求响应 msg = msg if msg else "" data = data if data else "" return { "code": code, "message": msg, "data": data }
18.545455
44
0.5
26
204
3.923077
0.461538
0.156863
0
0
0
0
0
0
0
0
0
0.023077
0.362745
204
10
45
20.4
0.761538
0.019608
0
0
0
0
0.075758
0
0
0
0
0
0
1
0.125
false
0
0
0
0.25
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
6bc370acc905253ff8d26ccf15fd3f3f085e6141
110
py
Python
contrib/pyln-spec/bolt7/pyln/spec/bolt7/gen_version.py
rsouvik/bitblue
a205fb77695a26512c7eb9ba33a7379f246ec357
[ "MIT" ]
4
2019-12-08T21:02:32.000Z
2020-08-06T12:14:37.000Z
contrib/pyln-spec/bolt7/pyln/spec/bolt7/gen_version.py
rsouvik/bitblue
a205fb77695a26512c7eb9ba33a7379f246ec357
[ "MIT" ]
5
2021-02-20T02:41:55.000Z
2021-06-01T20:04:08.000Z
contrib/pyln-spec/bolt7/pyln/spec/bolt7/gen_version.py
rsouvik/bitblue
a205fb77695a26512c7eb9ba33a7379f246ec357
[ "MIT" ]
7
2019-10-07T23:53:49.000Z
2021-11-23T18:26:30.000Z
__base_version__ = "1.0" __post_version__ = "137" __gitversion__ = "9e8e29af9b9a922eb114b2c716205d0772946e56"
27.5
59
0.827273
9
110
8.555556
0.888889
0
0
0
0
0
0
0
0
0
0
0.326733
0.081818
110
3
60
36.666667
0.435644
0
0
0
0
0
0.418182
0.363636
0
0
0
0
0
1
0
false
0
0
0
0
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
1
0
0
1
0
0
1
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
4
6bc548fb933264d47e42737add228df3e1a66805
3,509
py
Python
main.py
lhs9842/KNUTNoticeBot
cfc83f2abc079a660177d00da1eab288cad021b4
[ "MIT" ]
1
2022-02-23T01:54:07.000Z
2022-02-23T01:54:07.000Z
main.py
lhs9842/KNUTNoticeBot
cfc83f2abc079a660177d00da1eab288cad021b4
[ "MIT" ]
null
null
null
main.py
lhs9842/KNUTNoticeBot
cfc83f2abc079a660177d00da1eab288cad021b4
[ "MIT" ]
1
2022-02-23T07:17:31.000Z
2022-02-23T07:17:31.000Z
import setting import requests import threading import time import sqlite3 from bs4 import BeautifulSoup from urllib import parse public_board = [["BBSMSTR_000000000059", "일반소식"], ["BBSMSTR_000000000060", "장학안내"], ["BBSMSTR_000000000055", "학사공지사항"]] # [boardId, 게시판 명칭] db_conn = sqlite3.connect("NoticeBot.db", check_same_thread=False) db_cur = db_conn.cursor() db_cur.execute('SELECT * FROM sqlite_master WHERE type="table" AND name="final_ntt"') # 테이블 존재 여부 확인 r = db_cur.fetchall() if r: print("기존 데이터를 불러옵니다.") else: print("새로 데이터베이스를 구축합니다.") db_conn.execute('CREATE TABLE final_ntt(boardId TEXT, final_nttId TEXT)') for n in public_board: db_conn.execute('INSERT INTO final_ntt VALUES ("' + n[0] + '", "1049241")') # 초기값 부여 시 검색 대상 게시판 중 하나의 게시글 하나를 적당히 선택하여 그 게시글의 nttId로 지정할 것. 제대로 지정하지 않으면 최초 구동 시 Many Request로 텔레그램 API 서버가 오류 발생시킴. db_conn.commit() def send_message(channel, message): encode_message = parse.quote(message) url = 'https://api.telegram.org/bot' + setting.bot_token + '/sendmessage?chat_id=' + channel + '&text=' + encode_message response = requests.get(url) if response.status_code != 200: print("ERROR!!" 
+ str(response.status_code)) def find_new_ntt(board_info): try: url = 'https://www.ut.ac.kr/cop/bbs/' + board_info[0] + '/selectBoardList.do' response = requests.get(url) if response.status_code == 200: db_cur.execute("SELECT final_nttId FROM final_ntt WHERE boardId='" + board_info[0] + "'") rows = db_cur.fetchall() final = int(rows[0][0]) html = response.text soup = BeautifulSoup(html, 'html.parser') result_id = soup.findAll('input', {'name':'nttId', 'type':'hidden'}) r_n = soup.findAll('input', {'type':'submit'}) result_name = [] for n in r_n: na = n.get('value') if (na != "검색") & (na != "등록하기"): # 최상부 검색 버튼 및 최하부 페이지 만족도 조사 부분의 submit 버튼 예외 처리 result_name.append(na) count = 0 result_name.reverse() result_id.reverse() for n in result_id: i = int(n.get('value')) if i == 0: # 최상부 검색 버튼 부분에 지정된 nttId 값 0에 대한 예외처리 break if i <= final: count += 1 continue send_message(setting.all_notice_channel, "[" + board_info[1] + "] " + result_name[count] + " : http://www.ut.ac.kr/cop/bbs/" + board_info[0] + "/selectBoardArticle.do?nttId=" + str(i)) db_conn.execute("UPDATE final_ntt SET final_nttId='" + str(i) + "' WHERE boardId='" + board_info[0] + "'") count += 1 db_conn.commit() except: now = time.localtime() message = "EXCEPT!! " + board_info[1] message += "%04d/%02d/%02d %02d:%02d:%02d" % (now.tm_year, now.tm_mon, now.tm_mday, now.tm_hour, now.tm_min, now.tm_sec) encode_message = parse.quote(message) url = 'https://api.telegram.org/bot' + setting.bot_token + '/sendmessage?chat_id=' + setting.admin_channel + '&text=' + encode_message response = requests.get(url) if response.status_code != 200: print("NETWORK ERROR!!" + str(response.status_code) + "\n" + message) find_new_ntt(board_info) def Bot_Start(): for c in public_board: find_new_ntt(c) threading.Timer(30, Bot_Start).start() Bot_Start()
44.987179
205
0.596751
477
3,509
4.224319
0.404612
0.035732
0.044665
0.032754
0.273449
0.206948
0.206948
0.206948
0.206948
0.159801
0
0.032133
0.263893
3,509
78
206
44.987179
0.747967
0.066686
0
0.152778
0
0
0.224159
0.021713
0
0
0
0
0
1
0.041667
false
0
0.097222
0
0.138889
0.055556
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
6bc75bfe6f3b314f3347fa4156e55b1ddf54d5f2
785
py
Python
migrate_dns/destructo.py
jlin/inventory
c098c98e570c3bf9fadfd811eb75e1213f6ea428
[ "BSD-3-Clause" ]
22
2015-01-16T01:36:32.000Z
2020-06-08T00:46:18.000Z
migrate_dns/destructo.py
jlin/inventory
c098c98e570c3bf9fadfd811eb75e1213f6ea428
[ "BSD-3-Clause" ]
9
2019-03-15T11:39:32.000Z
2019-04-30T00:59:50.000Z
migrate_dns/destructo.py
jlin/inventory
c098c98e570c3bf9fadfd811eb75e1213f6ea428
[ "BSD-3-Clause" ]
13
2015-01-13T20:56:22.000Z
2022-02-23T06:01:17.000Z
from mozdns.address_record.models import AddressRecord from mozdns.cname.models import CNAME from mozdns.domain.models import Domain from mozdns.mx.models import MX from mozdns.nameserver.models import Nameserver from mozdns.ptr.models import PTR from mozdns.soa.models import SOA from mozdns.srv.models import SRV from mozdns.txt.models import TXT from reversion.models import Version, Revision def destroy(): Version.objects.all().delete() Revision.objects.all().delete() TXT.objects.all().delete() SRV.objects.all().delete() CNAME.objects.all().delete() Nameserver.objects.all().delete() PTR.objects.all().delete() MX.objects.all().delete() AddressRecord.objects.all().delete() SOA.objects.all().delete() Domain.objects.all().delete()
30.192308
54
0.743949
107
785
5.448598
0.214953
0.188679
0.301887
0
0
0
0
0
0
0
0
0
0.128662
785
25
55
31.4
0.852339
0
0
0
0
0
0
0
0
0
0
0
0
1
0.045455
true
0
0.454545
0
0.5
0
0
0
0
null
0
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
0
0
0
3
6bc7b78982d279f5295d2beb48fb63642b3d9439
255
py
Python
mundo-1/condicoes/aumentos_multiplos.py
Murilloalves/estudos-python
804a8f646efd86840a3183b65f775168ebb6cf37
[ "MIT" ]
null
null
null
mundo-1/condicoes/aumentos_multiplos.py
Murilloalves/estudos-python
804a8f646efd86840a3183b65f775168ebb6cf37
[ "MIT" ]
null
null
null
mundo-1/condicoes/aumentos_multiplos.py
Murilloalves/estudos-python
804a8f646efd86840a3183b65f775168ebb6cf37
[ "MIT" ]
null
null
null
salario = float(input('Qual é salário do funcionário? R$')) if salario <= 1250: aumento = salario + (salario*15/100) else: aumento = salario + (salario * 10 / 100) print('Quem ganhava R${} passa a ganhar R${:.2f} agora.'.format(salario, aumento))
36.428571
82
0.662745
36
255
4.694444
0.694444
0.16568
0.248521
0
0
0
0
0
0
0
0
0.07109
0.172549
255
6
83
42.5
0.729858
0
0
0
0
0
0.317647
0
0
0
0
0
0
1
0
false
0.166667
0
0
0
0.166667
0
0
0
null
0
1
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
1
0
0
0
0
0
3
6bc804671bae5a9914e56977a2eb91970281e7fd
319
py
Python
mysite/myapp/migrations/0003_remove_input_input2.py
Omnibyte27/Web-Collab
8f18b2f62f5497669a0f95cfbcfb128738db8c9e
[ "MIT" ]
1
2019-11-09T00:17:00.000Z
2019-11-09T00:17:00.000Z
mysite/myapp/migrations/0003_remove_input_input2.py
Omnibyte27/Web-Collab
8f18b2f62f5497669a0f95cfbcfb128738db8c9e
[ "MIT" ]
7
2020-06-06T00:19:39.000Z
2022-02-10T14:08:37.000Z
mysite/myapp/migrations/0003_remove_input_input2.py
Omnibyte27/Web-Collab
8f18b2f62f5497669a0f95cfbcfb128738db8c9e
[ "MIT" ]
1
2019-11-10T23:12:42.000Z
2019-11-10T23:12:42.000Z
# Generated by Django 2.2.5 on 2019-09-29 18:57 from django.db import migrations class Migration(migrations.Migration): dependencies = [ ('myapp', '0002_input_input2'), ] operations = [ migrations.RemoveField( model_name='input', name='input2', ), ]
17.722222
47
0.579937
34
319
5.352941
0.764706
0
0
0
0
0
0
0
0
0
0
0.094595
0.304075
319
17
48
18.764706
0.725225
0.141066
0
0
1
0
0.121324
0
0
0
0
0
0
1
0
false
0
0.090909
0
0.363636
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
6bc8119faa11d3034406f5e04fb3d2f0570e36ee
127
py
Python
django_frontend_presets/presets/__init__.py
mikemenard/django-frontend-presets
0d1837415282ae43488b3e6e66889bc94f1a45b4
[ "BSD-3-Clause" ]
null
null
null
django_frontend_presets/presets/__init__.py
mikemenard/django-frontend-presets
0d1837415282ae43488b3e6e66889bc94f1a45b4
[ "BSD-3-Clause" ]
null
null
null
django_frontend_presets/presets/__init__.py
mikemenard/django-frontend-presets
0d1837415282ae43488b3e6e66889bc94f1a45b4
[ "BSD-3-Clause" ]
null
null
null
from .Bootstrap import Bootstrap from .Init import Init from .React import React from .Reset import Reset from .Vue import Vue
21.166667
32
0.80315
20
127
5.1
0.35
0
0
0
0
0
0
0
0
0
0
0
0.15748
127
5
33
25.4
0.953271
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
5
6bc83f48988080bc745090b0af2be2b40f9b6a5e
2,041
py
Python
inference_methods_local.py
Yaakoubi/Struct-CKN
fa007fa71310866584bdf2e5b038e6663b94e965
[ "MIT" ]
1
2021-05-30T13:42:56.000Z
2021-05-30T13:42:56.000Z
inference_methods_local.py
Yaakoubi/Struct-CKN
fa007fa71310866584bdf2e5b038e6663b94e965
[ "MIT" ]
null
null
null
inference_methods_local.py
Yaakoubi/Struct-CKN
fa007fa71310866584bdf2e5b038e6663b94e965
[ "MIT" ]
2
2022-03-16T22:00:30.000Z
2022-03-29T20:08:57.000Z
import ad3 import numpy as np from pystruct.inference.common import _validate_params class InferenceException(Exception): pass def inference_ad3_local(unary_potentials, pairwise_potentials, edges, relaxed=False, verbose=0, return_energy=False, branch_and_bound=False, inference_exception=None, return_marginals=False): b_multi_type = isinstance(unary_potentials, list) if b_multi_type: res = ad3.general_graph(unary_potentials, edges, pairwise_potentials, verbose=verbose, n_iterations=4000, exact=branch_and_bound) else: n_states, pairwise_potentials = \ _validate_params(unary_potentials, pairwise_potentials, edges) unaries = unary_potentials.reshape(-1, n_states) res = ad3.general_graph(unaries, edges, pairwise_potentials, verbose=verbose, n_iterations=4000, exact=branch_and_bound) unary_marginals, pairwise_marginals, energy, solver_status = res if verbose: print(solver_status) if solver_status in ["fractional", "unsolved"] and relaxed: if b_multi_type: y = (unary_marginals, pairwise_marginals) else: unary_marginals = unary_marginals.reshape(unary_potentials.shape) y = (unary_marginals, pairwise_marginals) else: if b_multi_type: if inference_exception and solver_status in ["fractional", "unsolved"]: raise InferenceException(solver_status) ly = list() _cum_n_states = 0 for unary_marg in unary_marginals: ly.append(_cum_n_states + np.argmax(unary_marg, axis=-1)) _cum_n_states += unary_marg.shape[1] y = np.hstack(ly) else: y = np.argmax(unary_marginals, axis=-1) if return_energy: return y, -energy if return_marginals: return y, unary_marginals return y
39.25
92
0.632533
227
2,041
5.374449
0.295154
0.091803
0.032787
0.029508
0.290164
0.17541
0.116393
0.116393
0.116393
0.116393
0
0.012544
0.296913
2,041
51
93
40.019608
0.837631
0
0
0.25
0
0
0.01809
0
0
0
0
0
0
1
0.022727
false
0.022727
0.068182
0
0.181818
0.022727
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
6bc877d2ff907a625b44f72caf624fa66d922ec9
839
py
Python
examples/dhc/deduplication_approximated_example.py
fruttasecca/hay_checker
2bbf4e8e90e0abc590dd74080fb6e4f445056354
[ "MIT" ]
2
2019-05-22T08:24:38.000Z
2020-12-04T13:36:30.000Z
examples/dhc/deduplication_approximated_example.py
fruttasecca/hay_checker
2bbf4e8e90e0abc590dd74080fb6e4f445056354
[ "MIT" ]
null
null
null
examples/dhc/deduplication_approximated_example.py
fruttasecca/hay_checker
2bbf4e8e90e0abc590dd74080fb6e4f445056354
[ "MIT" ]
3
2018-09-15T13:40:40.000Z
2021-06-29T23:31:18.000Z
#!/usr/bin/python3 from pyspark.sql import SparkSession from haychecker.dhc.metrics import deduplication_approximated spark = SparkSession.builder.appName("Deduplication_approximated_example").getOrCreate() df = spark.read.format("csv").option("header", "true").load("examples/resources/employees.csv") df.show() r1, r2 = deduplication_approximated(["title", "city"], df) print("Deduplication_approximated title: {}, deduplication_approximated city: {}".format(r1, r2)) task1 = deduplication_approximated(["title", "city"]) task2 = deduplication_approximated(["lastName"]) task3 = task1.add(task2) result = task3.run(df) r1, r2 = result[0]["scores"] r3 = result[1]["scores"][0] print("Deduplication_approximated title: {}, deduplication_approximated city: {}, " "deduplication_approximated lastName: {}".format(r1, r2, r3))
32.269231
97
0.747318
96
839
6.416667
0.479167
0.405844
0.194805
0.11039
0.207792
0.207792
0.207792
0
0
0
0
0.026247
0.091776
839
26
98
32.269231
0.782152
0.020262
0
0
0
0
0.36983
0.238443
0
0
0
0
0
1
0
false
0
0.133333
0
0.133333
0.133333
0
0
0
null
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
2
6bcafe8a760a06ba575713e54df859c88a35ea32
2,671
py
Python
test.py
Tang-Jie-Chang/keras-maskrcnn
2ad1380005265c4e3158858ae030b0039fd16eec
[ "Apache-2.0" ]
null
null
null
test.py
Tang-Jie-Chang/keras-maskrcnn
2ad1380005265c4e3158858ae030b0039fd16eec
[ "Apache-2.0" ]
null
null
null
test.py
Tang-Jie-Chang/keras-maskrcnn
2ad1380005265c4e3158858ae030b0039fd16eec
[ "Apache-2.0" ]
null
null
null
# import keras import keras # import keras_retinanet from keras_maskrcnn import models from keras_maskrcnn.utils.visualization import draw_mask from keras_retinanet.utils.visualization import draw_box, draw_caption, draw_annotations from keras_retinanet.utils.image import read_image_bgr, preprocess_image, resize_image from keras_retinanet.utils.colors import label_color # import miscellaneous modules import matplotlib.pyplot as plt import cv2 import os import numpy as np import time from pycocotools.coco import COCO from pycocotools.cocoeval import COCOeval from utils import binary_mask_to_rle # set tf backend to allow memory to grow, instead of claiming everything import tensorflow as tf def get_session(): config = tf.ConfigProto() config.gpu_options.allow_growth = True return tf.Session(config=config) # use this environment flag to change which GPU to use #os.environ["CUDA_VISIBLE_DEVICES"] = "1" # set the modified tf session as backend in keras keras.backend.tensorflow_backend.set_session(get_session()) # adjust this to point to your downloaded/trained model model_path = os.path.join('..', 'snapshots', 'resnet50_coco_v0.2.0.h5') # load retinanet model model = models.load_model(model_path, backbone_name='resnet50') #print(model.summary()) # load label to names mapping for visualization purposes labels_to_names = {0: 'aeroplane', 1: 'bicycle', 2: 'bird', 3: 'boat', 4: 'bottle', 5: 'bus', 6: 'car', 7: 'cat', 8: 'chair', 9: 'cow', 10: 'diningtable', 11: 'dog', 12: 'horse', 13: 'motorbike', 14: 'person', 15: 'pottedplant', 16: 'sheep', 17: 'sofa', 18: 'train', 19: 'tvmonitor'} coco_dt = [] for imgid in range(100): image = read_image_bgr("test_images/" + coco.loadImgs(ids=imgid)[0]['file_name']) image = preprocess_image(image) image, scale = resize_image(image) outputs = model.predict_on_batch(np.expand_dims(image, axis=0)) boxes = outputs[-4][0] scores = outputs[-3][0] labels = outputs[-2][0] masks = outputs[-1][0] # correct for image scale boxes /= scale # visualize 
detections for box, score, label, mask in zip(boxes, scores, labels, masks): if score < 0.5: break pred = {} pred['image_id'] = imgid # this imgid must be same as the key of test.json pred['category_id'] = label pred['segmentation'] = binary_mask_to_rle(mask) # save binary mask to RLE, e.g. 512x512 -> rle pred['score'] = score coco_dt.append(pred) with open("submission.json", "w") as f: json.dump(coco_dt, f)
34.688312
167
0.681767
376
2,671
4.707447
0.481383
0.025424
0.030508
0.038983
0
0
0
0
0
0
0
0.028787
0.206664
2,671
77
168
34.688312
0.806513
0.211906
0
0
0
0
0.114201
0.01142
0
0
0
0
0
1
0.021739
false
0
0.326087
0
0.369565
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
1
0
0
0
0
1
6bcb30fd29a6ef624f4b1ad7d00a496c9b08cdb4
6,611
py
Python
leo/modes/lotos.py
ATikhonov2/leo-editor
225aac990a9b2804aaa9dea29574d6e072e30474
[ "MIT" ]
1,550
2015-01-14T16:30:37.000Z
2022-03-31T08:55:58.000Z
leo/modes/lotos.py
ATikhonov2/leo-editor
225aac990a9b2804aaa9dea29574d6e072e30474
[ "MIT" ]
2,009
2015-01-13T16:28:52.000Z
2022-03-31T18:21:48.000Z
leo/modes/lotos.py
ATikhonov2/leo-editor
225aac990a9b2804aaa9dea29574d6e072e30474
[ "MIT" ]
200
2015-01-05T15:07:41.000Z
2022-03-07T17:05:01.000Z
# Leo colorizer control file for lotos mode. # This file is in the public domain. # Properties for lotos mode. properties = { "commentEnd": "*)", "commentStart": "(*", "indentNextLines": "\\s*(let|library|process|specification|type|>>).*|\\s*(\\(|\\[\\]|\\[>|\\|\\||\\|\\|\\||\\|\\[.*\\]\\||\\[.*\\]\\s*->)\\s*", } # Attributes dict for lotos_main ruleset. lotos_main_attributes_dict = { "default": "null", "digit_re": "", "escape": "", "highlight_digits": "false", "ignore_case": "true", "no_word_sep": "", } # Dictionary of attributes dictionaries for lotos mode. attributesDictDict = { "lotos_main": lotos_main_attributes_dict, } # Keywords dict for lotos_main ruleset. lotos_main_keywords_dict = { "accept": "keyword1", "actualizedby": "keyword1", "any": "keyword1", "basicnaturalnumber": "keyword2", "basicnonemptystring": "keyword2", "behavior": "keyword1", "behaviour": "keyword1", "bit": "keyword2", "bitnatrepr": "keyword2", "bitstring": "keyword2", "bool": "keyword2", "boolean": "keyword2", "choice": "keyword1", "decdigit": "keyword2", "decnatrepr": "keyword2", "decstring": "keyword2", "element": "keyword2", "endlib": "keyword1", "endproc": "keyword1", "endspec": "keyword1", "endtype": "keyword1", "eqns": "keyword1", "exit": "keyword1", "false": "literal1", "fbool": "keyword2", "fboolean": "keyword2", "for": "keyword1", "forall": "keyword1", "formaleqns": "keyword1", "formalopns": "keyword1", "formalsorts": "keyword1", "hexdigit": "keyword2", "hexnatrepr": "keyword2", "hexstring": "keyword2", "hide": "keyword1", "i": "keyword1", "in": "keyword1", "is": "keyword1", "let": "keyword1", "library": "keyword1", "nat": "keyword2", "natrepresentations": "keyword2", "naturalnumber": "keyword2", "noexit": "keyword1", "nonemptystring": "keyword2", "octdigit": "keyword2", "octet": "keyword2", "octetstring": "keyword2", "octnatrepr": "keyword2", "octstring": "keyword2", "of": "keyword1", "ofsort": "keyword1", "opnnames": "keyword1", "opns": "keyword1", "par": "keyword1", "process": 
"keyword1", "renamedby": "keyword1", "richernonemptystring": "keyword2", "set": "keyword2", "sortnames": "keyword1", "sorts": "keyword1", "specification": "keyword1", "stop": "keyword1", "string": "keyword2", "string0": "keyword2", "string1": "keyword2", "true": "literal1", "type": "keyword1", "using": "keyword1", "where": "keyword1", } # Dictionary of keywords dictionaries for lotos mode. keywordsDictDict = { "lotos_main": lotos_main_keywords_dict, } # Rules for lotos_main ruleset. def lotos_rule0(colorer, s, i): return colorer.match_span(s, i, kind="comment1", begin="(*", end="*)", at_line_start=False, at_whitespace_end=False, at_word_start=False, delegate="",exclude_match=False, no_escape=False, no_line_break=False, no_word_break=False) def lotos_rule1(colorer, s, i): return colorer.match_seq(s, i, kind="operator", seq=">>", at_line_start=False, at_whitespace_end=False, at_word_start=False, delegate="") def lotos_rule2(colorer, s, i): return colorer.match_seq(s, i, kind="operator", seq="[>", at_line_start=False, at_whitespace_end=False, at_word_start=False, delegate="") def lotos_rule3(colorer, s, i): return colorer.match_seq(s, i, kind="operator", seq="|||", at_line_start=False, at_whitespace_end=False, at_word_start=False, delegate="") def lotos_rule4(colorer, s, i): return colorer.match_seq(s, i, kind="operator", seq="||", at_line_start=False, at_whitespace_end=False, at_word_start=False, delegate="") def lotos_rule5(colorer, s, i): return colorer.match_seq(s, i, kind="operator", seq="|[", at_line_start=False, at_whitespace_end=False, at_word_start=False, delegate="") def lotos_rule6(colorer, s, i): return colorer.match_seq(s, i, kind="operator", seq="]|", at_line_start=False, at_whitespace_end=False, at_word_start=False, delegate="") def lotos_rule7(colorer, s, i): return colorer.match_seq(s, i, kind="operator", seq="[]", at_line_start=False, at_whitespace_end=False, at_word_start=False, delegate="") def lotos_rule8(colorer, s, i): return 
colorer.match_keywords(s, i) # Rules dict for lotos_main ruleset. rulesDict1 = { "(": [lotos_rule0,], "0": [lotos_rule8,], "1": [lotos_rule8,], "2": [lotos_rule8,], "3": [lotos_rule8,], "4": [lotos_rule8,], "5": [lotos_rule8,], "6": [lotos_rule8,], "7": [lotos_rule8,], "8": [lotos_rule8,], "9": [lotos_rule8,], ">": [lotos_rule1,], "@": [lotos_rule8,], "A": [lotos_rule8,], "B": [lotos_rule8,], "C": [lotos_rule8,], "D": [lotos_rule8,], "E": [lotos_rule8,], "F": [lotos_rule8,], "G": [lotos_rule8,], "H": [lotos_rule8,], "I": [lotos_rule8,], "J": [lotos_rule8,], "K": [lotos_rule8,], "L": [lotos_rule8,], "M": [lotos_rule8,], "N": [lotos_rule8,], "O": [lotos_rule8,], "P": [lotos_rule8,], "Q": [lotos_rule8,], "R": [lotos_rule8,], "S": [lotos_rule8,], "T": [lotos_rule8,], "U": [lotos_rule8,], "V": [lotos_rule8,], "W": [lotos_rule8,], "X": [lotos_rule8,], "Y": [lotos_rule8,], "Z": [lotos_rule8,], "[": [lotos_rule2,lotos_rule7,], "]": [lotos_rule6,], "a": [lotos_rule8,], "b": [lotos_rule8,], "c": [lotos_rule8,], "d": [lotos_rule8,], "e": [lotos_rule8,], "f": [lotos_rule8,], "g": [lotos_rule8,], "h": [lotos_rule8,], "i": [lotos_rule8,], "j": [lotos_rule8,], "k": [lotos_rule8,], "l": [lotos_rule8,], "m": [lotos_rule8,], "n": [lotos_rule8,], "o": [lotos_rule8,], "p": [lotos_rule8,], "q": [lotos_rule8,], "r": [lotos_rule8,], "s": [lotos_rule8,], "t": [lotos_rule8,], "u": [lotos_rule8,], "v": [lotos_rule8,], "w": [lotos_rule8,], "x": [lotos_rule8,], "y": [lotos_rule8,], "z": [lotos_rule8,], "|": [lotos_rule3,lotos_rule4,lotos_rule5,], } # x.rulesDictDict for lotos mode. rulesDictDict = { "lotos_main": rulesDict1, } # Import dict for lotos mode. importDict = {}
29.513393
149
0.570867
715
6,611
5.044755
0.244755
0.177433
0.022456
0.037427
0.440255
0.433879
0.418908
0.401164
0.401164
0.401164
0
0.032126
0.223113
6,611
223
150
29.64574
0.670171
0.062472
0
0.036269
0
0
0.250084
0.020463
0
0
0
0
0
1
0.046632
false
0
0.005181
0.046632
0.098446
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
6bce53c9abe42e1145ff3c9feca2ddc25c7666a1
275
py
Python
src/ProblemSolving/DiagonalDifference.py
Feng-Zhao/hackerrankPy
fc04f0a11cf543ad3697860eca774103593abcd5
[ "Apache-2.0" ]
null
null
null
src/ProblemSolving/DiagonalDifference.py
Feng-Zhao/hackerrankPy
fc04f0a11cf543ad3697860eca774103593abcd5
[ "Apache-2.0" ]
null
null
null
src/ProblemSolving/DiagonalDifference.py
Feng-Zhao/hackerrankPy
fc04f0a11cf543ad3697860eca774103593abcd5
[ "Apache-2.0" ]
null
null
null
def diagonalDifference(arr): a = 0 b = 0 for i in range(0, len(arr)): a += arr[i][i] b += arr[i][len(arr) - i - 1] return abs(a - b) if __name__ == '__main__': arr = [[11, 2, 4],[4, 5, 6],[10, 8, -12]] print(diagonalDifference(arr))
21.153846
45
0.490909
45
275
2.822222
0.577778
0.094488
0
0
0
0
0
0
0
0
0
0.083333
0.301818
275
12
46
22.916667
0.578125
0
0
0
0
0
0.029091
0
0
0
0
0
0
1
0.1
false
0
0
0
0.2
0.1
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
6bcf37239c800dc8e2d66410da87a160446a390a
296
py
Python
robot/Cumulus/resources/BaseObjects.py
SFDO-Alliances/NPSP
3711a3cf8e3124bc2d7e61644d6abecb4042004e
[ "BSD-3-Clause" ]
413
2015-01-02T09:53:04.000Z
2019-12-05T15:31:25.000Z
robot/Cumulus/resources/BaseObjects.py
SFDO-Alliances/NPSP
3711a3cf8e3124bc2d7e61644d6abecb4042004e
[ "BSD-3-Clause" ]
2,471
2015-01-02T03:33:55.000Z
2019-12-13T17:55:10.000Z
robot/Cumulus/resources/BaseObjects.py
SFDO-Alliances/NPSP
3711a3cf8e3124bc2d7e61644d6abecb4042004e
[ "BSD-3-Clause" ]
296
2015-01-06T13:03:33.000Z
2019-12-11T14:19:31.000Z
from robot.libraries.BuiltIn import BuiltIn class BaseNPSPPage: @property def npsp(self): return self.builtin.get_library_instance('NPSP') @property def pageobjects(self): return self.builtin.get_library_instance("cumulusci.robotframework.PageObjects")
24.666667
88
0.716216
32
296
6.5
0.5625
0.105769
0.134615
0.201923
0.375
0.375
0.375
0
0
0
0
0
0.199324
296
12
88
24.666667
0.877637
0
0
0.25
0
0
0.13468
0.121212
0
0
0
0
0
1
0.25
false
0
0.125
0.25
0.75
0
0
0
0
null
0
0
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
1
0
0
0
1
1
0
0
4
6bd0d6670874846404621cb14ddcf0728b11d685
1,395
py
Python
balltze_simulation/balltze_pybullet/balltze/balltze.py
Kotochleb/Balltze
55b15cb57d20f7f212293bf838e1d6cf874bb4c2
[ "MIT" ]
1
2021-09-04T03:59:01.000Z
2021-09-04T03:59:01.000Z
balltze_simulation/balltze_pybullet/balltze/balltze.py
Kotochleb/Balltze
55b15cb57d20f7f212293bf838e1d6cf874bb4c2
[ "MIT" ]
null
null
null
balltze_simulation/balltze_pybullet/balltze/balltze.py
Kotochleb/Balltze
55b15cb57d20f7f212293bf838e1d6cf874bb4c2
[ "MIT" ]
null
null
null
import pybullet as p import time import numpy as np import pybullet_data from balltze_description import Balltze, BalltzeKinematics import math if __name__ == '__main__': time_step = 1./240. physicsClient = p.connect(p.GUI) p.setAdditionalSearchPath(pybullet_data.getDataPath()) p.setGravity(0,0,-9.81) p.setTimeStep(time_step) planeId = p.loadURDF('plane.urdf') robot = Balltze('../../../balltze_description/balltze_description/urdf/balltze.urdf', p, position=[0,0,0.11]) kinematics = BalltzeKinematics(None) i = 0.0 dir = 1 while True: try: ends = kinematics.body_inverse([0.0,0.0,i], [0.0,i/10,0.02], [[0.1, -0.1, -0.06],[0.1, 0.06, -0.02],[-0.1, -0.06, -0.06],[-0.1, 0.06, -0.06]]) joints = kinematics.inverse(ends) robot.set_joint_arr(np.array(joints.T).reshape(1,12)[0]) # print((kinematics.forward_leg(joints)*1000).astype(np.int64)/1000) # print(joints) # print(ends) except Exception as e: print(e) i += dir*0.0007 if i >= np.pi/10: dir = -1 if i <= -np.pi/10: dir = 1 # robot.set_joint_arr([0, -np.pi/2, np.pi/2]*4) p.stepSimulation() time.sleep(time_step) cubePos, cubeOrn = p.getBasePositionAndOrientation(robot) print(cubePos,cubeOrn) p.disconnect()
32.44186
154
0.597133
200
1,395
4.055
0.38
0.019729
0.018496
0.024661
0.073983
0.062885
0.054254
0
0
0
0
0.083491
0.244444
1,395
42
155
33.214286
0.685958
0.098925
0
0.058824
0
0
0.067093
0.052716
0
0
0
0
0
1
0
false
0
0.176471
0
0.176471
0.058824
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
6bd20797291e733d3485db7e9a7d16d42673718a
8,850
py
Python
urbit_sniffer.py
laanwj/urbit-tools
b3823d50d5ab84c0852593e3255c0d7c51de6d1c
[ "MIT" ]
18
2015-02-03T19:27:18.000Z
2021-04-04T03:03:57.000Z
urbit_sniffer.py
laanwj/urbit-tools
b3823d50d5ab84c0852593e3255c0d7c51de6d1c
[ "MIT" ]
null
null
null
urbit_sniffer.py
laanwj/urbit-tools
b3823d50d5ab84c0852593e3255c0d7c51de6d1c
[ "MIT" ]
2
2015-10-02T01:37:13.000Z
2017-06-04T03:41:49.000Z
#!/usr/bin/python3 # Copyright (c) 2014 Wladimir J. van der Laan, Visucore # Distributed under the MIT software license, see # http://www.opensource.org/licenses/mit-license.php. ''' urbit UDP sniffer Usage: urbit_sniffer.py [-p <port1>-<port2>,<port3>,...] [-i <interface>] ''' import struct, sys, io, argparse, datetime from struct import pack,unpack from binascii import b2a_hex from urbit.util import format_hexnum,from_le,to_le,dump_noun from urbit.cue import cue from urbit.pname import pname from urbit.crua import de_crua from misc.sniffer import Sniffer, PCapLoader if sys.version_info[0:2] < (3,0): print("Requires python3", file=sys.stderr) exit(1) class Args: # default args # interface we're interested in interface = b'eth0' # ports we're interested in ports = set(list(range(4000,4008)) + [13337, 41954]) # known keys for decrypting packets keys = {} # dump entire nouns show_nouns = True # show hex for decrypted packets show_raw = False # show timestamps show_timestamps = False # show keyhashes for decrypted packets always_show_keyhashes = False # constants... CRYPTOS = {0:'%none', 1:'%open', 2:'%fast', 3:'%full'} # utilities... def ipv4str(addr): '''Bytes to IPv4 address''' return '.'.join(['%i' % i for i in addr]) def crypto_name(x): '''Name for crypto algo''' if x in CRYPTOS: return CRYPTOS[x] else: return 'unk%02i' % x def hexstr(x): '''Bytes to hex string''' return b2a_hex(x).decode() def colorize(str, col): return ('\x1b[38;5;%im' % col) + str + ('\x1b[0m') # cli colors and glyphs COLOR_TIMESTAMP = 38 COLOR_RECIPIENT = 51 COLOR_IP = 21 COLOR_HEADER = 27 COLOR_VALUE = 33 COLOR_DATA = 250 COLOR_DATA_ENC = 245 v_arrow = colorize('→', 240) v_attention = colorize('>', 34) + colorize('>', 82) + colorize('>', 118) v_colon = colorize(':', 240) v_equal = colorize('=', 245) def parse_args(): args = Args() parser = argparse.ArgumentParser(description='Urbit sniffer. 
Dump incoming and outgoing urbit packets.') pdefault = '4000-4007,13337,41954' # update this when Args changes... idefault = args.interface.decode() parser.add_argument('-p, --ports', dest='ports', help='Ports to listen on (default: '+pdefault+')') parser.add_argument('-i, --interface', dest='interface', help='Interface to listen on (default:'+idefault+')', default=idefault) parser.add_argument('-k, --keys', dest='keys', help='Import keys from file (with <keyhash> <key> per line)', default=None) parser.add_argument('-n, --no-show-nouns', dest='show_nouns', action='store_false', help='Don\'t show full noun representation of decoded packets', default=True) parser.add_argument('-r, --show-raw', dest='show_raw', action='store_true', help='Show raw hex representation of decoded packets', default=False) parser.add_argument('-t, --show-timestamp', dest='show_timestamps', action='store_true', help='Show timestamps', default=False) parser.add_argument('-l, --read', dest='read_dump', help='Read a pcap dump file (eg from tcpdump)', default=None) parser.add_argument('--always-show-keyhashes', dest='always_show_keyhashes', help='Show keyhashes even for decrypted packets (more spammy)', default=False) r = parser.parse_args() if r.read_dump is not None: args.packet_source = PCapLoader(r.read_dump) else: args.packet_source = Sniffer(r.interface.encode()) if r.ports is not None: args.ports = set() for t in r.ports.split(','): (a,_,b) = t.partition('-') ai = int(a) bi = int(b) if b else ai args.ports.update(list(range(int(ai), int(bi)+1))) if r.keys is not None: args.keys = {} print(v_attention + ' Loading decryption keys from ' + r.keys) with open(r.keys, 'r') as f: for line in f: line = line.strip() if not line or line.startswith('#'): continue l = line.split() # filter out '.' 
so that keys can be copied directly args.keys[int(l[0].replace('.',''))] = int(l[1].replace('.','')) args.show_nouns = r.show_nouns args.show_raw = r.show_raw args.show_timestamps = r.show_timestamps args.always_show_keyhashes = r.always_show_keyhashes return args def dump_urbit_packet(args, timestamp, srcaddr, sport, dstaddr, dport, data): try: # Urbit header and payload urhdr = unpack('<L', data[0:4])[0] proto = urhdr & 7 mug = (urhdr >> 3) & 0xfffff yax = (urhdr >> 23) & 3 yax_bytes = 1<<(yax+1) qax = (urhdr >> 25) & 3 qax_bytes = 1<<(qax+1) crypto = (urhdr >> 27) sender = from_le(data[4:4+yax_bytes]) receiver = from_le(data[4+yax_bytes:4+yax_bytes+qax_bytes]) payload = data[4+yax_bytes+qax_bytes:] if crypto == 2: # %fast keyhash = from_le(payload[0:16]) payload = payload[16:] else: keyhash = None except (IndexError, struct.error): print('Warn: invpkt') return # Decode packet if crypto known decrypted = False if crypto in [0,1]: # %none %open decrypted = True if crypto == 2 and keyhash in args.keys: # %fast payload = from_le(payload) payload = de_crua(args.keys[keyhash], payload) payload = to_le(payload) decrypted = True # Print packet hdata = [('proto', str(proto)), ('mug', '%05x' % mug), ('crypto', crypto_name(crypto))] if keyhash is not None and (args.always_show_keyhashes or not decrypted): hdata += [('keyhash', format_hexnum(keyhash))] if srcaddr is not None: metadata = '' if args.show_timestamps: metadata += colorize(datetime.datetime.utcfromtimestamp(timestamp).strftime('%H%M%S.%f'), COLOR_TIMESTAMP) + ' ' metadata += (colorize(ipv4str(srcaddr), COLOR_IP) + v_colon + colorize(str(sport), COLOR_IP) + ' ' + colorize(pname(sender), COLOR_RECIPIENT) + ' ' + v_arrow + ' ' + colorize(ipv4str(dstaddr), COLOR_IP) + v_colon + colorize(str(dport), COLOR_IP) + ' ' + colorize(pname(receiver), COLOR_RECIPIENT)) else: metadata = (' %fore ' + # nested packet colorize(pname(sender), COLOR_RECIPIENT) + ' ' + v_arrow + ' ' + colorize(pname(receiver), COLOR_RECIPIENT)) 
print( metadata + v_colon + ' ' + ' '.join(colorize(key, COLOR_HEADER) + v_equal + colorize(value, COLOR_VALUE) for (key,value) in hdata)) if decrypted: # decrypted or unencrypted data if args.show_raw: print(' ' + colorize(hexstr(payload), COLOR_DATA)) cake = cue(from_le(payload)) if cake[0] == 1701998438: # %fore subpacket = to_le(cake[1][1][1]) dump_urbit_packet(args, None, None, None, None, None, subpacket) else: if args.show_nouns: sys.stdout.write(' ') dump_noun(cake, sys.stdout) sys.stdout.write('\n') else: # [sealed] print(' [' + colorize(hexstr(payload), COLOR_DATA_ENC)+']') def main(args): print(v_attention + ' Listening on ' + args.packet_source.name + ' ports ' + (',').join(str(x) for x in args.ports)) for timestamp,packet in args.packet_source: try: # IP header iph = unpack('!BBHHHBBH4s4s', packet[0:20]) ihl = (iph[0] & 15)*4 if ihl < 20: # cannot handle IP headers <20 bytes # print("Warn: invhdr") continue protocol = iph[6] srcaddr = iph[8] dstaddr = iph[9] if protocol != 17: # not UDP #print("Warn: invproto") continue # UDP header (sport, dport, ulength, uchecksum) = unpack('!HHHH', packet[ihl:ihl+8]) data = packet[ihl+8:ihl+ulength] if len(data) != (ulength-8): print("Warn: invlength") continue # invalid length packet if dport not in args.ports and sport not in args.ports: # only urbit ports continue except (IndexError, struct.error): print('Warn: invpkt') continue dump_urbit_packet(args, timestamp, srcaddr, sport, dstaddr, dport, data) if __name__ == '__main__': # Force UTF8 out sys.stdout = io.TextIOWrapper(sys.stdout.detach(), encoding='utf8', line_buffering=True) sys.stderr = io.TextIOWrapper(sys.stderr.detach(), encoding='utf8', line_buffering=True) try: main(parse_args()) except KeyboardInterrupt: pass
38.146552
165
0.599887
1,135
8,850
4.567401
0.274009
0.020062
0.026235
0.007523
0.154514
0.092207
0.055941
0.039738
0.021605
0.021605
0
0.026981
0.262938
8,850
231
166
38.311688
0.767592
0.110508
0
0.152542
0
0.011299
0.110585
0.008319
0
0
0.000896
0
0
1
0.039548
false
0.00565
0.050847
0.00565
0.175141
0.050847
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
6bd258b97e010833aa36fdeb89edf6a7e0ee7073
2,556
py
Python
Assignment_2_Pi/Pi_Archimedes/Pi_Archimedes.py
osccal/python-course
e815fd647fc5e5248188ed3976b62a181831e9ba
[ "MIT" ]
null
null
null
Assignment_2_Pi/Pi_Archimedes/Pi_Archimedes.py
osccal/python-course
e815fd647fc5e5248188ed3976b62a181831e9ba
[ "MIT" ]
null
null
null
Assignment_2_Pi/Pi_Archimedes/Pi_Archimedes.py
osccal/python-course
e815fd647fc5e5248188ed3976b62a181831e9ba
[ "MIT" ]
null
null
null
""" Python3 program to calculate pi to 100 places using Archimedes method """ from decimal import Decimal, getcontext from time import time def pi_archimedes(n): """ Calculate n iterations of Archimedes PI """ polygon_edge_length_squared = Decimal(2) # Decimal for precision polygon_sides = 2 for i in range(n): polygon_edge_length_squared = 2 - 2 * (1 - polygon_edge_length_squared / 4).sqrt() polygon_sides *= 2 return polygon_sides * polygon_edge_length_squared.sqrt() def main(): """ Try the series """ # Pi to 1000 places for reference Pi = 3.1415926535897932384626433832795028841971693993751058209749445923078164062862089986280348253421170679821480865132823066470938446095505822317253594081284811174502841027019385211055596446229489549303819644288109756659334461284756482337867831652712019091456485669234603486104543266482133936072602491412737245870066063155881748815209209628292540917153643678925903600113305305488204665213841469519415116094330572703657595919530921861173819326117931051185480744623799627495673518857527248912279381830119491298336733624406566430860213949463952247371907021798609437027705392171762931767523846748184676694051320005681271452635608277857713427577896091736371787214684409012249534301465495853710507922796892589235420199561121290219608640344181598136297747713099605187072113499999983729780499510597317328160963185950244594553469083026425223082533446850352619311881710100031378387528865875332083814206171776691473035982534904287554687311595628638823537875937519577818577805321712268066130019278766111959092164201989 places = 100 old_result = None start = time() # Timestamp for timing for n in range(10*places): # Do calculations with double precision getcontext().prec = 2*places # Sets precision to 2*places. 
This is to reduce loss due to rounding result = pi_archimedes(n) # Print the result with single precision getcontext().prec = places result = +result # Do the rounding on result from 2*places to 1*places (ie the number we want) error = result - Decimal(Pi) # Simple error calculation #print("Result: ", result) #print("Error: ", Decimal(error)) if result == old_result: # If the numbers we get are the same, break. This is as close as we'll get break old_result = result print("Result: ", result) print("Error: ", Decimal(error)) print("Time: ", time()-start) if __name__ == "__main__": main()
54.382979
1,011
0.777778
208
2,556
9.423077
0.389423
0.022449
0.034694
0.04898
0.039796
0.039796
0.039796
0
0
0
0
0.480544
0.165493
2,556
46
1,012
55.565217
0.43835
0.225352
0
0
0
0
0.01501
0
0
0
0
0
0
1
0.071429
false
0
0.071429
0
0.178571
0.107143
0
0
0
null
0
0
0
0
0
0
0
0
0
0
1
0
0
0
1
0
1
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
3
6bd4306cbba78d516d0027f40cedd02fdc18d8f6
1,942
py
Python
tests/test_transforms.py
luiarthur/advi
e461a08dcbd32836d15a3c276d27417cf619298c
[ "MIT" ]
1
2020-04-04T12:58:18.000Z
2020-04-04T12:58:18.000Z
tests/test_transforms.py
luiarthur/advi
e461a08dcbd32836d15a3c276d27417cf619298c
[ "MIT" ]
null
null
null
tests/test_transforms.py
luiarthur/advi
e461a08dcbd32836d15a3c276d27417cf619298c
[ "MIT" ]
null
null
null
import unittest import torch import advi.transformations as trans class Test_Transformations(unittest.TestCase): def approx(self, a, b, eps=1E-8): self.assertTrue(abs(a - b) < eps) def test_bounded(self): p = torch.tensor(.5) self.assertTrue(trans.logit(p) == 0) q = torch.tensor(10.0) self.approx(trans.logit(q, 5, 16), -.1823216, 1e-5) self.approx(trans.invlogit(trans.logit(q, 5, 16), 5, 16), q, 1e-5) def test_simplex(self): p_orig = [.1, .3, .6] p = torch.tensor(p_orig) x = trans.invsoftmax(p) p_tti = torch.softmax(x, 0) self.assertTrue(p_tti.sum() == 1) for j in range(len(p_orig)): self.approx(p_tti[j].item(), p_orig[j], 1e-6) def test_lpdf_logx(self): gam = torch.distributions.gamma.Gamma(2, 3) x = torch.tensor(3.) z = trans.lpdf_logx(torch.log(x), gam.log_prob) print(z) self.approx(z, -4.6055508, eps=1e-6) def test_lpdf_logitx(self): beta = torch.distributions.beta.Beta(2, 3) x = torch.tensor(.6) z = trans.lpdf_logitx(trans.logit(x), beta.log_prob, a=torch.tensor(0.), b=torch.tensor(1.)) print(z) self.approx(z, -1.285616793366446, eps=1e-6) def test_lpdf_real_dirichlet(self): # This tests if the dirichlet in the two-dimensional case # (which is essentially a beta) works properly. # TODO: Higher dimensional cases are harder to check, but should # be done eventually. alpha = torch.tensor([2., 3.]) dirichlet = torch.distributions.dirichlet.Dirichlet(alpha) p = torch.tensor([.6, .4]) r = trans.invsoftmax(p) z = trans.lpdf_real_dirichlet(r, dirichlet.log_prob) print(z) self.approx(z, -1.285616793366446, eps=1e-6) if __name__ == '__main__': unittest.main()
33.482759
74
0.589083
279
1,942
3.985663
0.329749
0.089029
0.032374
0.026978
0.186151
0.123201
0.098022
0.070144
0.070144
0.070144
0
0.06511
0.2724
1,942
57
75
34.070175
0.721868
0.094748
0
0.116279
0
0
0.004564
0
0
0
0
0.017544
0.069767
1
0.139535
false
0
0.069767
0
0.232558
0.069767
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
1
0
0
0
0
0
0
0
0
0
0
1
6bd54aeda1cf3aa43806abaf7f0e2dafeca01c0d
1,638
py
Python
app/tests/v1/test_product.py
owezzy/StoreManager
821856c0d502b55bd499cfe9188cd4951c5b0b75
[ "MIT" ]
null
null
null
app/tests/v1/test_product.py
owezzy/StoreManager
821856c0d502b55bd499cfe9188cd4951c5b0b75
[ "MIT" ]
2
2018-10-10T22:32:35.000Z
2021-06-01T22:50:56.000Z
app/tests/v1/test_product.py
owezzy/StoreManager
821856c0d502b55bd499cfe9188cd4951c5b0b75
[ "MIT" ]
1
2018-10-25T12:42:41.000Z
2018-10-25T12:42:41.000Z
import unittest
import json

from app.app import create_app

POST_PRODUCT_URL = '/api/v1/products'
GET_A_SINGLE_PRODUCT = '/api/v1/product/1'
GET_ALL_PRODUCTS = '/api/v1/products'


class TestProduct(unittest.TestCase):
    """Tests for the v1 product endpoints.

    FIX: ``test_get_products`` previously double-encoded the request body
    (``self.create_product`` is already a JSON string) and used
    ``json.dumps`` instead of ``json.loads`` on the response; debug
    ``print`` calls removed.
    """

    def setUp(self):
        """Initialize the api with test variable"""
        self.app = create_app('testing')
        self.client = self.app.test_client()
        # Already JSON-encoded: passed verbatim as the request body.
        self.create_product = json.dumps(dict(
            product_name="shoes",
            stock=2,
            price=3000
        ))

    def test_add_product(self):
        """Test for post product"""
        resource = self.client.post(
            POST_PRODUCT_URL,
            data=self.create_product,
            content_type='application/json')
        # Parsing the body also validates that the response is JSON.
        data = json.loads(resource.data.decode())
        self.assertEqual(resource.status_code, 201, msg='CREATED')
        self.assertEqual(resource.content_type, 'application/json')

    def test_get_products(self):
        """test we can get products"""
        resource = self.client.get(POST_PRODUCT_URL,
                                   data=self.create_product,
                                   content_type='application/json')
        # Decode (not re-encode) the response body.
        get_data = json.loads(resource.data.decode())
        self.assertEqual(resource.content_type, 'application/json')
        self.assertEqual(resource.status_code, 200)

    def test_get(self):
        """test we can get a single products"""
        # No product seeded for this id, so a 404 is expected.
        resource = self.client.get(GET_A_SINGLE_PRODUCT)
        self.assertEqual(resource.status_code, 404)


if __name__ == '__main__':
    unittest.main()
30.333333
72
0.623321
194
1,638
5.041237
0.309278
0.076687
0.117587
0.106339
0.381391
0.188139
0.188139
0
0
0
0
0.01495
0.264957
1,638
53
73
30.90566
0.797342
0.072039
0
0.108108
0
0
0.093396
0
0
0
0
0
0.135135
1
0.108108
false
0
0.081081
0
0.216216
0.054054
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
6bd88438849c3a76c7d468249205380ab8ab8c38
1,346
py
Python
cryptoshredding/s3/client.py
hupe1980/cryptoshredding
1ab5ee452c4435f486006aa2cc1a7bee440d91fe
[ "MIT" ]
null
null
null
cryptoshredding/s3/client.py
hupe1980/cryptoshredding
1ab5ee452c4435f486006aa2cc1a7bee440d91fe
[ "MIT" ]
null
null
null
cryptoshredding/s3/client.py
hupe1980/cryptoshredding
1ab5ee452c4435f486006aa2cc1a7bee440d91fe
[ "MIT" ]
null
null
null
import boto3
from botocore.client import BaseClient

from ..key_store import KeyStore
from .object import CryptoObject
from .stream_body_wrapper import StreamBodyWrapper


class CryptoS3(object):
    """S3 client facade that routes object bodies through a key store.

    Unknown attributes fall through to the wrapped boto client, so this
    class can be used as a drop-in replacement for it.
    """

    def __init__(
        self,
        client: BaseClient,
        key_store: KeyStore,
    ) -> None:
        self._client = client
        self._key_store = key_store

    def put_object(self, CSEKeyId: str, Bucket: str, Key: str, **kwargs):
        """Upload an object via CryptoObject under the given CSE key id."""
        crypto_obj = CryptoObject(
            key_store=self._key_store,
            object=boto3.resource("s3").Object(Bucket, Key),
        )
        return crypto_obj.put(CSEKeyId=CSEKeyId, **kwargs)

    def get_object(self, **kwargs):
        """Fetch an object and wrap its streaming body with the key-store wrapper."""
        response = self._client.get_object(**kwargs)
        response["Body"] = StreamBodyWrapper(
            key_store=self._key_store,
            stream_body=response["Body"],
            metadata=response["Metadata"],
        )
        return response

    def __getattr__(self, name: str):
        """Catch any method/attribute lookups that are not defined in this class
        and try to find them on the provided bridge object.

        :param str name: Attribute name
        :returns: Result of asking the provided client object for that attribute name
        :raises AttributeError: if attribute is not found on provided bridge object
        """
        return getattr(self._client, name)
31.302326
88
0.641902
161
1,346
5.192547
0.403727
0.076555
0.043062
0.035885
0.047847
0
0
0
0
0
0
0.004082
0.271917
1,346
42
89
32.047619
0.84898
0.228083
0
0.068966
0
0
0.018145
0
0
0
0
0
0
1
0.137931
false
0
0.172414
0
0.448276
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
6bd9272def1931c3aead22a640fadc1a05f50b8f
5,627
py
Python
tests/infra/test_subnet.py
bretttegart/treadmill
812109e31c503a6eddaee2d3f2e1faf2833b6aaf
[ "Apache-2.0" ]
2
2017-10-31T18:48:20.000Z
2018-03-04T20:35:20.000Z
tests/infra/test_subnet.py
bretttegart/treadmill
812109e31c503a6eddaee2d3f2e1faf2833b6aaf
[ "Apache-2.0" ]
null
null
null
tests/infra/test_subnet.py
bretttegart/treadmill
812109e31c503a6eddaee2d3f2e1faf2833b6aaf
[ "Apache-2.0" ]
null
null
null
""" Unit test for EC2 subnet. """ import unittest import mock from treadmill.infra.subnet import Subnet class SubnetTest(unittest.TestCase): @mock.patch('treadmill.infra.connection.Connection') def test_init(self, ConnectionMock): conn_mock = ConnectionMock() Subnet.ec2_conn = Subnet.route53_conn = conn_mock subnet = Subnet( id=1, vpc_id='vpc-id', metadata={ 'Tags': [{ 'Key': 'Name', 'Value': 'goo' }] } ) self.assertEquals(subnet.vpc_id, 'vpc-id') self.assertEquals(subnet.name, 'goo') self.assertEquals(subnet.ec2_conn, conn_mock) @mock.patch('treadmill.infra.connection.Connection') def test_create_tags(self, ConnectionMock): conn_mock = ConnectionMock() conn_mock.create_tags = mock.Mock() Subnet.ec2_conn = Subnet.route53_conn = conn_mock subnet = Subnet( name='foo', id='1', vpc_id='vpc-id' ) subnet.create_tags() conn_mock.create_tags.assert_called_once_with( Resources=['1'], Tags=[{ 'Key': 'Name', 'Value': 'foo' }] ) @mock.patch('treadmill.infra.connection.Connection') def test_create(self, ConnectionMock): ConnectionMock.context.region_name = 'us-east-1' conn_mock = ConnectionMock() subnet_json_mock = { 'SubnetId': '1' } conn_mock.create_subnet = mock.Mock(return_value={ 'Subnet': subnet_json_mock }) conn_mock.create_route_table = mock.Mock(return_value={ 'RouteTable': {'RouteTableId': 'route-table-id'} }) Subnet.ec2_conn = Subnet.route53_conn = conn_mock _subnet = Subnet.create( cidr_block='172.23.0.0/24', vpc_id='vpc-id', name='foo', gateway_id='gateway-id' ) self.assertEqual(_subnet.id, '1') self.assertEqual(_subnet.name, 'foo') self.assertEqual(_subnet.metadata, subnet_json_mock) conn_mock.create_subnet.assert_called_once_with( VpcId='vpc-id', CidrBlock='172.23.0.0/24', AvailabilityZone='us-east-1a' ) conn_mock.create_tags.assert_called_once_with( Resources=['1'], Tags=[{ 'Key': 'Name', 'Value': 'foo' }] ) conn_mock.create_route_table.assert_called_once_with( VpcId='vpc-id' ) conn_mock.create_route.assert_called_once_with( RouteTableId='route-table-id', 
DestinationCidrBlock='0.0.0.0/0', GatewayId='gateway-id' ) conn_mock.associate_route_table.assert_called_once_with( RouteTableId='route-table-id', SubnetId='1', ) @mock.patch('treadmill.infra.connection.Connection') def test_refresh(self, ConnectionMock): conn_mock = ConnectionMock() subnet_json_mock = { 'VpcId': 'vpc-id', 'Foo': 'bar' } conn_mock.describe_subnets = mock.Mock(return_value={ 'Subnets': [subnet_json_mock] }) Subnet.ec2_conn = Subnet.route53_conn = conn_mock _subnet = Subnet(id='subnet-id', vpc_id=None, metadata=None) _subnet.refresh() self.assertEqual(_subnet.vpc_id, 'vpc-id') self.assertEqual(_subnet.metadata, subnet_json_mock) @mock.patch.object(Subnet, 'refresh') @mock.patch.object(Subnet, 'get_instances') @mock.patch('treadmill.infra.connection.Connection') def test_show(self, ConnectionMock, get_instances_mock, refresh_mock): conn_mock = ConnectionMock() Subnet.ec2_conn = Subnet.route53_conn = conn_mock _subnet = Subnet(id='subnet-id', vpc_id='vpc-id', metadata=None) _subnet.instances = None result = _subnet.show() self.assertEqual( result, { 'VpcId': 'vpc-id', 'SubnetId': 'subnet-id', 'Instances': None } ) get_instances_mock.assert_called_once_with(refresh=True, role=None) refresh_mock.assert_called_once() @mock.patch('treadmill.infra.connection.Connection') def test_persisted(self, ConnectionMock): _subnet = Subnet(id='subnet-id', metadata={'foo': 'goo'}) self.assertFalse(_subnet.persisted) _subnet.metadata['SubnetId'] = 'subnet-id' self.assertTrue(_subnet.persisted) @mock.patch('treadmill.infra.connection.Connection') def test_persist(self, ConnectionMock): ConnectionMock.context.region_name = 'us-east-1' conn_mock = ConnectionMock() Subnet.ec2_conn = Subnet.route53_conn = conn_mock conn_mock.create_subnet = mock.Mock( return_value={ 'Subnet': { 'foo': 'bar' } } ) _subnet = Subnet( id='subnet-id', metadata=None, vpc_id='vpc-id', name='subnet-name' ) _subnet.persist( cidr_block='cidr-block', gateway_id='gateway-id', ) 
self.assertEqual(_subnet.metadata, {'foo': 'bar'}) conn_mock.create_subnet.assert_called_once_with( VpcId='vpc-id', CidrBlock='cidr-block', AvailabilityZone='us-east-1a' )
30.090909
78
0.563355
582
5,627
5.201031
0.139175
0.066072
0.04625
0.052858
0.629997
0.584077
0.502147
0.438057
0.326396
0.259663
0
0.014048
0.316865
5,627
186
79
30.252688
0.773413
0.004443
0
0.385621
0
0
0.135145
0.0463
0
0
0
0
0.137255
1
0.045752
false
0
0.019608
0
0.071895
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
6bdac7bc37d6267a61f05383477b3f0ca7a95eab
5,138
py
Python
tests/building/test_tokenization.py
fossabot/langumo
2d8b30979878bb27fb07cc31879c13c5c186582c
[ "Apache-2.0" ]
7
2020-09-05T08:30:25.000Z
2021-11-01T14:07:58.000Z
tests/building/test_tokenization.py
fossabot/langumo
2d8b30979878bb27fb07cc31879c13c5c186582c
[ "Apache-2.0" ]
2
2020-09-11T14:19:47.000Z
2021-03-05T17:22:21.000Z
tests/building/test_tokenization.py
fossabot/langumo
2d8b30979878bb27fb07cc31879c13c5c186582c
[ "Apache-2.0" ]
3
2020-09-11T14:16:06.000Z
2021-10-31T14:18:10.000Z
import tempfile

from langumo.building import TrainTokenizer, TokenizeSentences
from langumo.utils import AuxiliaryFileManager

# A small fixed corpus (three Wikipedia-style paragraphs) used by the
# tokenizer-training and tokenization tests below.
_dummy_corpus_content = (
    'Wikipedia is a multilingual online encyclopedia created and maintained '
    'as an open collaboration project by a community of volunteer editors '
    'using a wiki-based editing system. It is the largest and most popular '
    'general reference work on the World Wide Web. It is also one of the 15 '
    'most popular websites ranked by Alexa, as of August 2020. It features '
    'exclusively free content and no commercial ads. It is hosted by the '
    'Wikimedia Foundation, a non-profit organization funded primarily through '
    'donations.\n'
    'Wikipedia was launched on January 15, 2001, and was created by Jimmy '
    'Wales and Larry Sanger. Sanger coined its name as a portmanteau of the '
    'terms "wiki" and "encyclopedia". Initially an English-language '
    'encyclopedia, versions of Wikipedia in other languages were quickly '
    'developed. With 6.1 million articles, the English Wikipedia is the '
    'largest of the more than 300 Wikipedia encyclopedias. Overall, Wikipedia '
    'comprises more than 54 million articles attracting 1.5 billion unique '
    'visitors per month.\n'
    'In 2005, Nature published a peer review comparing 42 hard science '
    'articles from Encyclopædia Britannica and Wikipedia and found that '
    'Wikipedia\'s level of accuracy approached that of Britannica, although '
    'critics suggested that it might not have fared so well in a similar '
    'study of a random sampling of all articles or one focused on social '
    'science or contentious social issues. '
    'The following year, Time stated '
    'that the open-door policy of allowing anyone to edit had made Wikipedia '
    'the biggest and possibly the best encyclopedia in the world, and was a '
    'testament to the vision of Jimmy Wales.\n'
    'Wikipedia has been criticized for exhibiting systemic bias and for being '
    'subject to manipulation and spin in controversial topics; Edwin Black '
    'has criticized Wikipedia for presenting a mixture of "truth, half truth, '
    'and some falsehoods". Wikipedia has also been criticized for gender '
    'bias, particularly on its English-language version, where the dominant '
    'majority of editors are male. However, edit-a-thons have been held to '
    'encourage female editors and increase the coverage of women\'s topics. '
    'Facebook announced that by 2017 it would help readers detect fake news '
    'by suggesting links to related Wikipedia articles. YouTube announced a '
    'similar plan in 2018.'
)


def test_subset_file_creation():
    """Subset files should contain as many lines as fit in `subset_size` bytes."""
    with tempfile.TemporaryDirectory() as tdir, \
            AuxiliaryFileManager(f'{tdir}/workspace') as afm:
        corpus = afm.create()
        with corpus.open('w') as fp:
            fp.write('hello world!\n' * 100)

        with (TrainTokenizer(subset_size=1024)
              ._create_subset_file(afm, corpus)
              .open('r')) as fp:
            assert len(fp.readlines()) == 79

        with (TrainTokenizer(subset_size=128)
              ._create_subset_file(afm, corpus)
              .open('r')) as fp:
            assert len(fp.readlines()) == 10

        # A subset larger than the corpus yields the whole corpus.
        with (TrainTokenizer(subset_size=2000)
              ._create_subset_file(afm, corpus)
              .open('r')) as fp:
            assert len(fp.readlines()) == 100


def test_training_wordpiece_tokenizer():
    """Training should produce a vocabulary of the requested size."""
    with tempfile.TemporaryDirectory() as tdir, \
            AuxiliaryFileManager(f'{tdir}/workspace') as afm:
        corpus = afm.create()
        with corpus.open('w') as fp:
            fp.write(_dummy_corpus_content)

        # Train WordPiece tokenizer and get vocabulary file.
        vocab = (TrainTokenizer(vocab_size=128,
                                limit_alphabet=64,
                                unk_token='[UNK]')
                 .build(afm, corpus))

        # Read subwords from the vocabulary file.
        with vocab.open('r') as fp:
            words = fp.readlines()

        # Check if the number of total words equals to vocabulary size and the
        # vocabulary contains unknown token.
        assert len(words) == 128
        assert words[0].strip() == '[UNK]'


def test_subword_tokenization():
    """Tokenizing then stripping subword markers should recover the corpus."""
    with tempfile.TemporaryDirectory() as tdir, \
            AuxiliaryFileManager(f'{tdir}/workspace') as afm:
        corpus = afm.create()
        with corpus.open('w') as fp:
            fp.write(_dummy_corpus_content)

        # Train WordPiece vocabulary and tokenize sentences.
        vocab = (TrainTokenizer(vocab_size=128, limit_alphabet=64)
                 .build(afm, corpus))
        tokenized = (TokenizeSentences(unk_token='[UNK]')
                     .build(afm, corpus, vocab))

        # Test if the tokenization is correctly applied to the corpus. Note
        # that the tokenizer model will normalize the sentences.
        with tokenized.open('r') as fp:
            assert (fp.read().strip().replace('##', '').replace(' ', '')
                    == _dummy_corpus_content.lower().replace(' ', ''))
47.137615
79
0.665434
650
5,138
5.201538
0.436923
0.023957
0.010352
0.01331
0.216208
0.211772
0.196983
0.196983
0.169772
0.169772
0
0.018243
0.253211
5,138
108
80
47.574074
0.862914
0.071234
0
0.258824
0
0
0.47775
0
0
0
0
0
0.070588
1
0.035294
false
0
0.035294
0
0.070588
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
6bdb7b37ea55baaa1973a1fff39476ce6ea71851
12,819
py
Python
examples/BertNewsClassification/news_classifier.py
mlflow/mlflow-torchserve
91663b630ef12313da3ad821767faf3fc409345b
[ "Apache-2.0" ]
40
2020-11-13T02:08:10.000Z
2022-03-27T07:41:57.000Z
examples/BertNewsClassification/news_classifier.py
Ideas2IT/mlflow-torchserve
d6300fb73f16d74ee2c7718c249faf485c4f3b62
[ "Apache-2.0" ]
23
2020-11-16T11:28:01.000Z
2021-09-23T11:28:24.000Z
examples/BertNewsClassification/news_classifier.py
Ideas2IT/mlflow-torchserve
d6300fb73f16d74ee2c7718c249faf485c4f3b62
[ "Apache-2.0" ]
15
2020-11-13T10:25:25.000Z
2022-02-01T10:13:20.000Z
# pylint: disable=W0221
# pylint: disable=W0613
# pylint: disable=E1102
# pylint: disable=W0223
import shutil
from collections import defaultdict

import numpy as np
import pandas as pd
import torch
import torch.nn.functional as F
from sklearn.model_selection import train_test_split
from torch import nn
from torch.utils.data import Dataset, DataLoader
from transformers import (
    BertModel,
    BertTokenizer,
    AdamW,
    get_linear_schedule_with_warmup,
)
import argparse
import os
from tqdm import tqdm
import requests
import torchtext.datasets as td
import mlflow.pytorch

class_names = ["World", "Sports", "Business", "Sci/Tech"]


class AGNewsDataset(Dataset):
    """
    Constructs the encoding with the dataset
    """

    def __init__(self, reviews, targets, tokenizer, max_len):
        self.reviews = reviews
        self.targets = targets
        self.tokenizer = tokenizer
        self.max_len = max_len

    def __len__(self):
        return len(self.reviews)

    def __getitem__(self, item):
        review = str(self.reviews[item])
        target = self.targets[item]

        encoding = self.tokenizer.encode_plus(
            review,
            add_special_tokens=True,
            max_length=self.max_len,
            return_token_type_ids=False,
            padding="max_length",
            return_attention_mask=True,
            return_tensors="pt",
            truncation=True,
        )

        return {
            "review_text": review,
            "input_ids": encoding["input_ids"].flatten(),
            "attention_mask": encoding["attention_mask"].flatten(),
            "targets": torch.tensor(target, dtype=torch.long),
        }


class NewsClassifier(nn.Module):
    """BERT-based AG News classifier with a frozen backbone and a small head.

    FIXES relative to the original:
      * ``setOptimizer`` used the module-level global ``model`` instead of
        ``self``, so the method only worked from the ``__main__`` script.
      * ``prepare_data`` called ``self.df.sample(frac=1)`` and discarded the
        result, so the intended shuffle was never applied.
    """

    def __init__(self, args):
        super(NewsClassifier, self).__init__()
        self.device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
        self.PRE_TRAINED_MODEL_NAME = "bert-base-uncased"
        self.EPOCHS = args.max_epochs
        self.df = None
        self.tokenizer = None
        self.df_train = None
        self.df_val = None
        self.df_test = None
        self.train_data_loader = None
        self.val_data_loader = None
        self.test_data_loader = None
        self.optimizer = None
        self.total_steps = None
        self.scheduler = None
        self.loss_fn = None
        self.BATCH_SIZE = 16
        self.MAX_LEN = 160
        self.NUM_SAMPLES_COUNT = args.num_samples
        n_classes = len(class_names)
        self.VOCAB_FILE_URL = args.vocab_file
        self.VOCAB_FILE = "bert_base_uncased_vocab.txt"

        self.drop = nn.Dropout(p=0.2)
        self.bert = BertModel.from_pretrained(self.PRE_TRAINED_MODEL_NAME)
        # Freeze the BERT backbone; only the classification head is trained.
        for param in self.bert.parameters():
            param.requires_grad = False
        self.fc1 = nn.Linear(self.bert.config.hidden_size, 512)
        self.out = nn.Linear(512, n_classes)

    def forward(self, input_ids, attention_mask):
        """
        :param input_ids: Input sentences from the batch
        :param attention_mask: Attention mask returned by the encoder
        :return: output - label for the input text
        """
        pooled_output = self.bert(
            input_ids=input_ids, attention_mask=attention_mask
        ).pooler_output
        output = F.relu(self.fc1(pooled_output))
        output = self.drop(output)
        output = self.out(output)
        return output

    @staticmethod
    def process_label(rating):
        # AG News labels are 1-based in the csv; shift to 0-based class ids.
        rating = int(rating)
        return rating - 1

    def create_data_loader(self, df, tokenizer, max_len, batch_size):
        """
        :param df: DataFrame input
        :param tokenizer: Bert tokenizer
        :param max_len: maximum length of the input sentence
        :param batch_size: Input batch size
        :return: output - Corresponding data loader for the given input
        """
        ds = AGNewsDataset(
            reviews=df.description.to_numpy(),
            targets=df.label.to_numpy(),
            tokenizer=tokenizer,
            max_len=max_len,
        )

        return DataLoader(ds, batch_size=batch_size, num_workers=4)

    def prepare_data(self):
        """
        Creates train, valid and test dataloaders from the csv data
        """
        td.AG_NEWS(root="data", split=("train", "test"))
        extracted_files = os.listdir("data/AG_NEWS")

        train_csv_path = None
        for fname in extracted_files:
            if fname.endswith("train.csv"):
                train_csv_path = os.path.join(os.getcwd(), "data/AG_NEWS", fname)

        self.df = pd.read_csv(train_csv_path)
        self.df.columns = ["label", "title", "description"]
        # BUG FIX: sample(frac=1) returns a new frame; the original discarded
        # it, so the shuffle silently never happened.
        self.df = self.df.sample(frac=1)
        self.df = self.df.iloc[: self.NUM_SAMPLES_COUNT]
        self.df["label"] = self.df.label.apply(self.process_label)

        # Download the vocabulary file once; reuse it on later runs.
        if not os.path.isfile(self.VOCAB_FILE):
            filePointer = requests.get(self.VOCAB_FILE_URL, allow_redirects=True)
            if filePointer.ok:
                with open(self.VOCAB_FILE, "wb") as f:
                    f.write(filePointer.content)
            else:
                raise RuntimeError("Error in fetching the vocab file")

        self.tokenizer = BertTokenizer(self.VOCAB_FILE)

        RANDOM_SEED = 42
        np.random.seed(RANDOM_SEED)
        torch.manual_seed(RANDOM_SEED)

        # 90/5/5 stratified train/val/test split.
        self.df_train, self.df_test = train_test_split(
            self.df, test_size=0.1, random_state=RANDOM_SEED, stratify=self.df["label"]
        )
        self.df_val, self.df_test = train_test_split(
            self.df_test,
            test_size=0.5,
            random_state=RANDOM_SEED,
            stratify=self.df_test["label"],
        )

        self.train_data_loader = self.create_data_loader(
            self.df_train, self.tokenizer, self.MAX_LEN, self.BATCH_SIZE
        )
        self.val_data_loader = self.create_data_loader(
            self.df_val, self.tokenizer, self.MAX_LEN, self.BATCH_SIZE
        )
        self.test_data_loader = self.create_data_loader(
            self.df_test, self.tokenizer, self.MAX_LEN, self.BATCH_SIZE
        )

    def setOptimizer(self):
        """
        Sets the optimizer and scheduler functions
        """
        # BUG FIX: the original referenced the module-level global ``model``;
        # use ``self`` so this works on any instance.
        self.optimizer = AdamW(self.parameters(), lr=1e-3, correct_bias=False)
        self.total_steps = len(self.train_data_loader) * self.EPOCHS

        self.scheduler = get_linear_schedule_with_warmup(
            self.optimizer, num_warmup_steps=0, num_training_steps=self.total_steps
        )

        self.loss_fn = nn.CrossEntropyLoss().to(self.device)

    def startTraining(self, model):
        """
        Initialzes the Traning step with the model initialized

        :param model: Instance of the NewsClassifier class
        """
        history = defaultdict(list)
        best_accuracy = 0

        for epoch in range(self.EPOCHS):
            print(f"Epoch {epoch + 1}/{self.EPOCHS}")

            train_acc, train_loss = self.train_epoch(model)
            print(f"Train loss {train_loss} accuracy {train_acc}")

            val_acc, val_loss = self.eval_model(model, self.val_data_loader)
            print(f"Val loss {val_loss} accuracy {val_acc}")

            history["train_acc"].append(train_acc)
            history["train_loss"].append(train_loss)
            history["val_acc"].append(val_acc)
            history["val_loss"].append(val_loss)

            # Checkpoint only when validation accuracy improves.
            if val_acc > best_accuracy:
                torch.save(model.state_dict(), "best_model_state.bin")
                best_accuracy = val_acc

    def train_epoch(self, model):
        """
        Training process happens and accuracy is returned as output

        :param model: Instance of the NewsClassifier class

        :result: output - Accuracy of the model after training
        """
        model = model.train()
        losses = []
        correct_predictions = 0

        for data in tqdm(self.train_data_loader):
            input_ids = data["input_ids"].to(self.device)
            attention_mask = data["attention_mask"].to(self.device)
            targets = data["targets"].to(self.device)

            outputs = model(input_ids=input_ids, attention_mask=attention_mask)
            _, preds = torch.max(outputs, dim=1)
            loss = self.loss_fn(outputs, targets)
            correct_predictions += torch.sum(preds == targets)
            losses.append(loss.item())
            loss.backward()
            nn.utils.clip_grad_norm_(model.parameters(), max_norm=1.0)
            self.optimizer.step()
            self.scheduler.step()
            self.optimizer.zero_grad()

        return (
            correct_predictions.double() / len(self.train_data_loader) / self.BATCH_SIZE,
            np.mean(losses),
        )

    def eval_model(self, model, data_loader):
        """
        Validation process happens and validation / test accuracy is returned as output

        :param model: Instance of the NewsClassifier class
        :param data_loader: Data loader for either test / validation dataset

        :result: output - Accuracy of the model after testing
        """
        model = model.eval()
        losses = []
        correct_predictions = 0

        with torch.no_grad():
            for d in data_loader:
                input_ids = d["input_ids"].to(self.device)
                attention_mask = d["attention_mask"].to(self.device)
                targets = d["targets"].to(self.device)

                outputs = model(input_ids=input_ids, attention_mask=attention_mask)
                _, preds = torch.max(outputs, dim=1)
                loss = self.loss_fn(outputs, targets)
                correct_predictions += torch.sum(preds == targets)
                losses.append(loss.item())

        return correct_predictions.double() / len(data_loader) / self.BATCH_SIZE, np.mean(losses)

    def get_predictions(self, model, data_loader):
        """
        Prediction after the training step is over

        :param model: Instance of the NewsClassifier class
        :param data_loader: Data loader for either test / validation dataset

        :result: output - Returns prediction results,
                 prediction probablities and corresponding values
        """
        model = model.eval()
        review_texts = []
        predictions = []
        prediction_probs = []
        real_values = []

        with torch.no_grad():
            for d in data_loader:
                texts = d["review_text"]
                input_ids = d["input_ids"].to(self.device)
                attention_mask = d["attention_mask"].to(self.device)
                targets = d["targets"].to(self.device)

                outputs = model(input_ids=input_ids, attention_mask=attention_mask)
                _, preds = torch.max(outputs, dim=1)
                probs = F.softmax(outputs, dim=1)

                review_texts.extend(texts)
                predictions.extend(preds)
                prediction_probs.extend(probs)
                real_values.extend(targets)

        predictions = torch.stack(predictions).cpu()
        prediction_probs = torch.stack(prediction_probs).cpu()
        real_values = torch.stack(real_values).cpu()
        return review_texts, predictions, prediction_probs, real_values


if __name__ == "__main__":
    parser = argparse.ArgumentParser(description="PyTorch BERT Example")
    parser.add_argument(
        "--max_epochs",
        type=int,
        default=5,
        metavar="N",
        help="number of epochs to train (default: 14)",
    )
    parser.add_argument(
        "--num_samples",
        type=int,
        default=15000,
        metavar="N",
        help="Number of samples to be used for training "
        "and evaluation steps (default: 15000) Maximum:100000",
    )
    parser.add_argument(
        "--vocab_file",
        default="https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-uncased-vocab.txt",
        help="Custom vocab file",
    )
    parser.add_argument(
        "--model_save_path", type=str, default="models", help="Path to save mlflow model"
    )

    args = parser.parse_args()
    mlflow.start_run()

    model = NewsClassifier(args)
    model = model.to(model.device)
    model.prepare_data()
    model.setOptimizer()
    model.startTraining(model)

    print("TRAINING COMPLETED!!!")

    test_acc, _ = model.eval_model(model, model.test_data_loader)
    print(test_acc.item())

    y_review_texts, y_pred, y_pred_probs, y_test = model.get_predictions(
        model, model.test_data_loader
    )

    print("\n\n\n SAVING MODEL")

    if os.path.exists(args.model_save_path):
        shutil.rmtree(args.model_save_path)

    mlflow.pytorch.save_model(
        model,
        path=args.model_save_path,
        requirements_file="requirements.txt",
        extra_files=["class_mapping.json", "bert_base_uncased_vocab.txt"],
    )

    mlflow.end_run()
32.289673
99
0.620095
1,548
12,819
4.917959
0.209302
0.034152
0.01839
0.012479
0.283068
0.23959
0.222777
0.183765
0.159333
0.121765
0
0.007908
0.279897
12,819
396
100
32.371212
0.816813
0.109993
0
0.135338
0
0.003759
0.088915
0.004875
0
0
0
0
0
1
0.048872
false
0
0.06015
0.003759
0.146617
0.022556
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
6bdbc0070f7b68477be910727f103f97f2f7c5f9
23
py
Python
nerddiary/__init__.py
mishamsk/nerddiary
2d0981c5034460f353c2994347fb95a5c94a55bd
[ "Apache-2.0" ]
null
null
null
nerddiary/__init__.py
mishamsk/nerddiary
2d0981c5034460f353c2994347fb95a5c94a55bd
[ "Apache-2.0" ]
5
2022-02-20T06:10:28.000Z
2022-03-28T03:22:41.000Z
nerddiary/__init__.py
mishamsk/nerddiary
2d0981c5034460f353c2994347fb95a5c94a55bd
[ "Apache-2.0" ]
null
null
null
__version__ = "0.2.2b"
11.5
22
0.652174
4
23
2.75
1
0
0
0
0
0
0
0
0
0
0
0.15
0.130435
23
1
23
23
0.4
0
0
0
0
0
0.26087
0
0
0
0
0
0
1
0
false
0
0
0
0
0
1
1
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
1
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
4
6bdcdf872b440de0263c9a18d6e09d85b322ab4c
272
py
Python
src/lib/fibonacci.py
lastnamevega/fastapi-demo
c2911c3e4d764c27e5ccf540ddc2d3c9bbc624a6
[ "MIT" ]
null
null
null
src/lib/fibonacci.py
lastnamevega/fastapi-demo
c2911c3e4d764c27e5ccf540ddc2d3c9bbc624a6
[ "MIT" ]
null
null
null
src/lib/fibonacci.py
lastnamevega/fastapi-demo
c2911c3e4d764c27e5ccf540ddc2d3c9bbc624a6
[ "MIT" ]
null
null
null
def fibonacci(desired_index: int):
    """Return the Fibonacci number at *desired_index* (0-indexed, F(0)=0, F(1)=1).

    :raises ValueError: if *desired_index* is negative.
    """
    if desired_index < 0:
        raise ValueError(f'{desired_index} is less than 0')
    # Two-variable iteration instead of materializing the whole sequence.
    current, following = 0, 1
    for _ in range(desired_index):
        current, following = following, current + following
    return current
24.727273
59
0.628676
41
272
4.04878
0.536585
0.361446
0
0
0
0
0
0
0
0
0
0.039216
0.25
272
10
60
27.2
0.77451
0
0
0
0
0
0.110294
0
0
0
0
0
0
1
0.142857
false
0
0
0
0.285714
0
0
0
0
null
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
6bde8a8095cceac04979671e29124bd410698f7c
3,716
py
Python
src/toil/lib/aws/utils.py
rupertnash/toil
fd805d5fa14cca98f2bc64b322a4b546e163d6c9
[ "Apache-2.0" ]
6
2018-05-27T05:09:11.000Z
2020-07-01T17:02:40.000Z
src/toil/lib/aws/utils.py
rupertnash/toil
fd805d5fa14cca98f2bc64b322a4b546e163d6c9
[ "Apache-2.0" ]
1
2020-07-01T18:31:30.000Z
2020-07-08T14:03:39.000Z
src/toil/lib/aws/utils.py
rupertnash/toil
fd805d5fa14cca98f2bc64b322a4b546e163d6c9
[ "Apache-2.0" ]
1
2020-04-06T15:04:44.000Z
2020-04-06T15:04:44.000Z
# Copyright (C) 2015-2021 Regents of the University of California
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
from typing import Optional

from toil.lib.misc import printq
from toil.lib.retry import retry
from toil.lib import aws

try:
    from boto.exception import BotoServerError
except ImportError:
    BotoServerError = None  # AWS/boto extra is not installed

logger = logging.getLogger(__name__)


@retry(errors=[BotoServerError])
def delete_iam_role(role_name: str, region: Optional[str] = None, quiet: bool = True):
    """Detach all policies from an IAM role, then delete the role itself."""
    from boto.iam.connection import IAMConnection
    iam_client = aws.client('iam', region_name=region)
    iam_resource = aws.resource('iam', region_name=region)
    boto_iam_connection = IAMConnection()
    role = iam_resource.Role(role_name)
    # normal policies
    for attached_policy in role.attached_policies.all():
        printq(f'Now dissociating policy: {attached_policy.name} from role {role.name}', quiet)
        role.detach_policy(PolicyName=attached_policy.name)
    # inline policies
    for attached_policy in role.policies.all():
        printq(f'Deleting inline policy: {attached_policy.name} from role {role.name}', quiet)
        # couldn't find an easy way to remove inline policies with boto3; use boto
        boto_iam_connection.delete_role_policy(role.name, attached_policy.name)
    iam_client.delete_role(RoleName=role_name)
    printq(f'Role {role_name} successfully deleted.', quiet)


@retry(errors=[BotoServerError])
def delete_iam_instance_profile(instance_profile_name: str,
                                region: Optional[str] = None,
                                quiet: bool = True):
    """Remove every role from an instance profile, then delete the profile."""
    iam_resource = aws.resource('iam', region_name=region)
    instance_profile = iam_resource.InstanceProfile(instance_profile_name)
    for role in instance_profile.roles:
        printq(f'Now dissociating role: {role.name} from instance profile {instance_profile_name}', quiet)
        instance_profile.remove_role(RoleName=role.name)
    instance_profile.delete()
    printq(f'Instance profile "{instance_profile_name}" successfully deleted.', quiet)


@retry(errors=[BotoServerError])
def delete_sdb_domain(sdb_domain_name: str, region: Optional[str] = None, quiet: bool = True):
    """Delete a SimpleDB domain by name."""
    sdb_client = aws.client('sdb', region_name=region)
    sdb_client.delete_domain(DomainName=sdb_domain_name)
    printq(f'SBD Domain: "{sdb_domain_name}" successfully deleted.', quiet)


@retry(errors=[BotoServerError])
def delete_s3_bucket(bucket: str, region: Optional[str], quiet: bool = True):
    """Delete every object version in an S3 bucket, then the bucket itself."""
    printq(f'Deleting s3 bucket in region "{region}": {bucket}', quiet)
    s3_client = aws.client('s3', region_name=region)
    s3_resource = aws.resource('s3', region_name=region)

    # Versioned buckets keep deleted objects around as versions/delete
    # markers; every one must be removed before the bucket can be deleted.
    paginator = s3_client.get_paginator('list_object_versions')
    for response in paginator.paginate(Bucket=bucket):
        versions = response.get('Versions', []) + response.get('DeleteMarkers', [])
        for version in versions:
            printq(f"    Deleting {version['Key']} version {version['VersionId']}", quiet)
            s3_client.delete_object(Bucket=bucket,
                                    Key=version['Key'],
                                    VersionId=version['VersionId'])
    s3_resource.Bucket(bucket).delete()
    printq(f'\n * Deleted s3 bucket successfully: {bucket}\n\n', quiet)
45.876543
110
0.741389
503
3,716
5.328032
0.28827
0.061567
0.026866
0.043284
0.254104
0.21903
0.180597
0.180597
0.15
0
0
0.007318
0.154198
3,716
80
111
46.45
0.845371
0.194295
0
0.115385
0
0
0.201277
0.038306
0
0
0
0
0
1
0.076923
false
0
0.153846
0
0.230769
0.192308
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
6bdeffb0d14dc1d4dca20f695831236be20df06b
3,389
py
Python
src/data/processed_data.py
Victoradukwu/titanic
18a4e8fe7dbe755a946512ca71b1d2a2f5932c64
[ "MIT" ]
null
null
null
src/data/processed_data.py
Victoradukwu/titanic
18a4e8fe7dbe755a946512ca71b1d2a2f5932c64
[ "MIT" ]
null
null
null
src/data/processed_data.py
Victoradukwu/titanic
18a4e8fe7dbe755a946512ca71b1d2a2f5932c64
[ "MIT" ]
null
null
null
import os

import numpy as np
import pandas as pd


def read_data():
    """Read the raw train/test CSVs and concatenate them into one DataFrame.

    Test rows get a sentinel ``Survived`` value of -100 so the combined
    frame can be split back apart later in ``write_data``.
    """
    raw_data_path = os.path.join(os.path.pardir, 'data', 'raw')
    train_file_path = os.path.join(raw_data_path, 'train.csv')
    test_file_path = os.path.join(raw_data_path, 'test.csv')
    train_df = pd.read_csv(train_file_path, index_col='PassengerId')
    test_df = pd.read_csv(test_file_path, index_col='PassengerId')
    test_df['Survived'] = -100
    # FIX: the original passed sort=-False (i.e. the int 0); use the
    # intended boolean explicitly.
    df = pd.concat([train_df, test_df], sort=False, axis=0)
    return df


def process_data(df):
    """Run the full feature-engineering pipeline on the combined DataFrame.

    Adds Title, Fare_Bin, AgeState, FamilySize, IsMother, Deck and IsMale
    features, one-hot encodes the categoricals, drops raw columns no longer
    needed, and moves ``Survived`` to the first column.
    """
    return (df
            .assign(Title=lambda x: x.Name.map(get_title))
            .pipe(fill_missing_values)
            .assign(Fare_Bin=lambda x: pd.qcut(x.Fare, 4,
                                               labels=['very_low', 'low', 'high', 'very_high']))
            .assign(AgeState=lambda x: np.where(x.Age >= 18, 'Adult', 'Child'))
            .assign(FamilySize=lambda x: x.Parch + x.SibSp + 1)
            .assign(IsMother=lambda x: np.where(((x.Age > 18) &
                                                 (x.Parch > 0) &
                                                 (x.Title != 'Miss') &
                                                 (x.Sex == 'female')), 1, 0))
            # Cabin 'T' is treated as an outlier and nulled so it falls into
            # the 'Z' (unknown) deck below.
            .assign(Cabin=lambda x: np.where(x.Cabin == 'T', np.nan, x.Cabin))
            .assign(Deck=lambda x: x.Cabin.map(get_deck))
            .assign(IsMale=lambda x: np.where(x.Sex == 'male', 1, 0))
            .pipe(pd.get_dummies,
                  columns=['Deck', 'Pclass', 'Title', 'Fare_Bin', 'Embarked', 'AgeState'])
            .drop(['Cabin', 'Name', 'Ticket', 'Parch', 'SibSp', 'Sex'], axis=1)
            .pipe(reorder_columns)
            )


# modify the function to reduce number of titles and return more meaningful functions
def get_title(name):
    """Extract the honorific from a 'Last, Title. First' passenger name and
    collapse it into a small set of canonical titles.

    Raises KeyError for a title not present in the map (intentional: an
    unknown title means unexpected input data).
    """
    title_map = {
        'mr': 'Mr',
        'mrs': 'Mrs',
        'mme': 'Mrs',
        'ms': 'Mrs',
        'miss': 'Miss',
        'mlle': 'Miss',
        'master': 'Master',
        'don': 'Sir',
        'rev': 'Sir',
        'sir': 'Sir',
        'jonkheer': 'Sir',
        'dr': 'Officer',
        'major': 'Officer',
        # FIX: was 'Office' (typo), which produced a spurious Title_Office
        # dummy column distinct from Title_Officer.
        'capt': 'Officer',
        'col': 'Officer',
        'lady': 'Lady',
        'the countess': 'Lady',
        'dona': 'Lady',
    }
    first_name_with_title = name.split(',')[1]
    raw_title = first_name_with_title.split('.')[0]
    title = raw_title.strip().lower()
    return title_map[title]


def get_deck(cabin):
    """Return the deck letter (first character of the cabin code, uppercased),
    or 'Z' when the cabin is missing."""
    return np.where(pd.notnull(cabin), str(cabin)[0].upper(), 'Z')


def fill_missing_values(df):
    """Impute missing Embarked, Fare and Age values in place and return df."""
    # Embarked: default to 'C'
    df.Embarked.fillna('C', inplace=True)
    # Fare: median fare of 3rd-class passengers embarked at 'S'
    median_fare = df[(df.Pclass == 3) & (df.Embarked == 'S')]['Fare'].median()
    df.Fare.fillna(median_fare, inplace=True)
    # Age: median age per Title group
    title_age_median = df.groupby('Title').Age.transform('median')
    df.Age.fillna(title_age_median, inplace=True)
    return df


def reorder_columns(df):
    """Move the 'Survived' column to the front, keeping all other columns."""
    columns = [column for column in df.columns if column != 'Survived']
    columns = ['Survived'] + columns
    df = df[columns]
    return df


def write_data(df):
    """Split the combined frame back into train/test (via the -100 sentinel)
    and write both to the processed-data directory."""
    processed_data_path = os.path.join(os.path.pardir, 'data', 'processed')
    write_train_path = os.path.join(processed_data_path, 'train.csv')
    write_test_path = os.path.join(processed_data_path, 'test.csv')
    df.loc[df.Survived != -100].to_csv(write_train_path)
    # test output drops the sentinel Survived column
    columns = [column for column in df.columns if column != 'Survived']
    df.loc[df.Survived == -100][columns].to_csv(write_test_path)


if __name__ == '__main__':
    df = read_data()
    df = process_data(df)
    write_data(df)
33.554455
130
0.59044
467
3,389
4.104925
0.284797
0.025039
0.031299
0.043818
0.238915
0.204486
0.204486
0.116849
0.086594
0.051122
0
0.010054
0.236943
3,389
100
131
33.89
0.731245
0.029212
0
0.064935
0
0
0.122679
0
0
0
0
0
0
1
0.090909
false
0.025974
0.038961
0.025974
0.194805
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
6be02c0cdca45be2d0933fe3cbd070df05fee26e
59,336
py
Python
clustergrammer/upload_pages/clustergrammer_old.py
delosrogers/clustergrammer-web
14102cfca328214d3bc8285e8331663fe0e5fad4
[ "MIT" ]
5
2018-04-04T16:25:06.000Z
2021-04-10T23:47:20.000Z
clustergrammer/upload_pages/clustergrammer_old.py
delosrogers/clustergrammer-web
14102cfca328214d3bc8285e8331663fe0e5fad4
[ "MIT" ]
8
2016-07-16T02:55:12.000Z
2022-02-02T16:42:17.000Z
clustergrammer/upload_pages/clustergrammer_old.py
delosrogers/clustergrammer-web
14102cfca328214d3bc8285e8331663fe0e5fad4
[ "MIT" ]
4
2019-05-28T08:52:41.000Z
2021-01-11T22:14:48.000Z
# define a class for networks class Network(object): ''' Networks have two states: the data state where they are stored as: matrix and nodes and a viz state where they are stored as: viz.links, viz.row_nodes, viz. col_nodes. The goal is to start in a data-state and produce a viz-state of the network that will be used as input to clustergram.js. ''' def __init__(self): # network: data-state self.dat = {} self.dat['nodes'] = {} self.dat['nodes']['row'] = [] self.dat['nodes']['col'] = [] # node_info holds the orderings (ini, clust, rank), classification ('cl'), # and other general information self.dat['node_info'] = {} for inst_rc in self.dat['nodes']: self.dat['node_info'][inst_rc] = {} self.dat['node_info'][inst_rc]['ini'] = [] self.dat['node_info'][inst_rc]['clust'] = [] self.dat['node_info'][inst_rc]['rank'] = [] self.dat['node_info'][inst_rc]['info'] = [] # classification is specifically used to color the class triangles self.dat['node_info'][inst_rc]['cl'] = [] self.dat['node_info'][inst_rc]['value'] = [] # initialize matrix self.dat['mat'] = [] # mat_info is an optional dictionary # so I'm not including it by default # network: viz-state self.viz = {} self.viz['row_nodes'] = [] self.viz['col_nodes'] = [] self.viz['links'] = [] def load_tsv_to_net(self, filename): f = open(filename,'r') lines = f.readlines() f.close() self.load_lines_from_tsv_to_net(lines) def pandas_load_tsv_to_net(self, file_buffer): ''' A user can add category information to the columns ''' import pandas as pd # get lines and check for category and value info lines = file_buffer.getvalue().split('\n') # check for category info in headers cat_line = lines[1].split('\t') add_cat = False if cat_line[0] == '': add_cat = True tmp_df = {} if add_cat: # read in names and categories tmp_df['mat'] = pd.read_table(file_buffer, index_col=0, header=[0,1]) else: # read in names only tmp_df['mat'] = pd.read_table(file_buffer, index_col=0, header=0) # save to self self.df_to_dat(tmp_df) # add categories if 
necessary if add_cat: cat_line = [i.strip() for i in cat_line] self.dat['node_info']['col']['cl'] = cat_line[1:] # make a dict of columns in categories ########################################## col_in_cat = {} for i in range(len(self.dat['node_info']['col']['cl'])): inst_cat = self.dat['node_info']['col']['cl'][i] inst_col = self.dat['nodes']['col'][i] if inst_cat not in col_in_cat: col_in_cat[inst_cat] = [] # collect col names for categories col_in_cat[inst_cat].append(inst_col) # save to node_info self.dat['node_info']['col_in_cat'] = col_in_cat def load_lines_from_tsv_to_net(self, lines): import numpy as np # get row/col labels and data from lines for i in range(len(lines)): # get inst_line inst_line = lines[i].rstrip().split('\t') # strip each element inst_line = [z.strip() for z in inst_line] # get column labels from first row if i == 0: tmp_col_labels = inst_line # add the labels for inst_elem in range(len(tmp_col_labels)): # skip the first element if inst_elem > 0: # get the column label inst_col_label = tmp_col_labels[inst_elem] # add to network data self.dat['nodes']['col'].append(inst_col_label) # get row info if i > 0: # save row labels self.dat['nodes']['row'].append(inst_line[0]) # get data - still strings inst_data_row = inst_line[1:] # convert to float inst_data_row = [float(tmp_dat) for tmp_dat in inst_data_row] # save the row data as an array inst_data_row = np.asarray(inst_data_row) # initailize matrix if i == 1: self.dat['mat'] = inst_data_row # add rows to matrix if i > 1: self.dat['mat'] = np.vstack( ( self.dat['mat'], inst_data_row ) ) def load_l1000cds2(self, l1000cds2): import scipy import numpy as np # process gene set result if 'upGenes' in l1000cds2['input']['data']: # add the names from all the results all_results = l1000cds2['result'] # grab col nodes - input sig and drugs self.dat['nodes']['col'] = [] for i in range(len(all_results)): inst_result = all_results[i] self.dat['nodes']['col'].append(inst_result['name']+'#'+str(i)) 
self.dat['node_info']['col']['value'].append(inst_result['score']) for type_overlap in inst_result['overlap']: self.dat['nodes']['row'].extend( inst_result['overlap'][type_overlap] ) self.dat['nodes']['row'] = sorted(list(set(self.dat['nodes']['row']))) # initialize the matrix self.dat['mat'] = scipy.zeros([ len(self.dat['nodes']['row']), len(self.dat['nodes']['col']) ]) # fill in the matrix with l10000 data ######################################## # fill in gene sigature as first column for i in range(len(self.dat['nodes']['row'])): inst_gene = self.dat['nodes']['row'][i] # get gene index inst_gene_index = self.dat['nodes']['row'].index(inst_gene) # if gene is in up add 1 otherwise add -1 if inst_gene in l1000cds2['input']['data']['upGenes']: self.dat['node_info']['row']['value'].append(1) else: self.dat['node_info']['row']['value'].append(-1) # save the name as a class for i in range(len(self.dat['nodes']['col'])): self.dat['node_info']['col']['cl'].append(self.dat['nodes']['col'][i]) # swap keys for aggravate and reverse if l1000cds2['input']['aggravate'] == False: # reverse gene set up_type = 'up/dn' dn_type = 'dn/up' else: # mimic gene set up_type = 'up/up' dn_type = 'dn/dn' # loop through drug results for inst_result_index in range(len(all_results)): inst_result = all_results[inst_result_index] # for non-mimic if up/dn then it should be negative since the drug is dn # for mimic if up/up then it should be positive since the drug is up for inst_dn in inst_result['overlap'][up_type]: # get gene index inst_gene_index = self.dat['nodes']['row'].index(inst_dn) # save -1 to gene row and drug column if up_type == 'up/dn': self.dat['mat'][ inst_gene_index, inst_result_index ] = -1 else: self.dat['mat'][ inst_gene_index, inst_result_index ] = 1 # for non-mimic if dn/up then it should be positive since the drug is up # for mimic if dn/dn then it should be negative since the drug is dn for inst_up in inst_result['overlap'][dn_type]: # get gene index inst_gene_index = 
self.dat['nodes']['row'].index(inst_up) # save 1 to gene row and drug column if dn_type == 'dn/up': self.dat['mat'][ inst_gene_index, inst_result_index ] = 1 else: self.dat['mat'][ inst_gene_index, inst_result_index ] = -1 # process a characteristic direction vector result else: all_results = l1000cds2['result'] # get gene names self.dat['nodes']['row'] = l1000cds2['input']['data']['up']['genes'] + l1000cds2['input']['data']['dn']['genes'] # save gene expression values tmp_exp_vect = l1000cds2['input']['data']['up']['vals'] + l1000cds2['input']['data']['dn']['vals'] for i in range(len(self.dat['nodes']['row'])): self.dat['node_info']['row']['value'].append(tmp_exp_vect[i]) # gather result names for i in range(len(all_results)): inst_result = all_results[i] # add result to list self.dat['nodes']['col'].append(inst_result['name']+'#'+str(i)) self.dat['node_info']['col']['cl'].append(inst_result['name']) # reverse signature, score [1,2] if l1000cds2['input']['aggravate'] == False: self.dat['node_info']['col']['value'].append( inst_result['score']-1 ) else: self.dat['node_info']['col']['value'].append( 1 - inst_result['score'] ) # concat up and down lists inst_vect = inst_result['overlap']['up'] + inst_result['overlap']['dn'] inst_vect = np.transpose(np.asarray(inst_vect)) inst_vect = inst_vect.reshape(-1,1) # initialize or add to matrix if type(self.dat['mat']) is list: self.dat['mat'] = inst_vect else: self.dat['mat'] = np.hstack(( self.dat['mat'], inst_vect)) def load_vect_post_to_net(self, vect_post): import numpy as np # get all signatures (a.k.a. 
columns) sigs = vect_post['columns'] # get all rows from signatures all_rows = [] all_sigs = [] for inst_sig in sigs: # gather sig names all_sigs.append(inst_sig['col_name']) # get column col_data = inst_sig['data'] # gather row names for inst_row_data in col_data: # get gene name all_rows.append( inst_row_data['row_name'] ) # get unique sorted list of genes all_rows = sorted(list(set(all_rows))) all_sigs = sorted(list(set(all_sigs))) print( 'found ' + str(len(all_rows)) + ' rows' ) print( 'found ' + str(len(all_sigs)) + ' columns\n' ) # save genes and sigs to nodes self.dat['nodes']['row'] = all_rows self.dat['nodes']['col'] = all_sigs # initialize numpy matrix of nans self.dat['mat'] = np.empty((len(all_rows),len(all_sigs))) self.dat['mat'][:] = np.nan is_up_down = False if 'is_up_down' in vect_post: if vect_post['is_up_down'] == True: is_up_down = True if is_up_down == True: self.dat['mat_up'] = np.empty((len(all_rows),len(all_sigs))) self.dat['mat_up'][:] = np.nan self.dat['mat_dn'] = np.empty((len(all_rows),len(all_sigs))) self.dat['mat_dn'][:] = np.nan # loop through all signatures and rows # and place information into self.dat for inst_sig in sigs: # get sig name inst_sig_name = inst_sig['col_name'] # get row data col_data = inst_sig['data'] # loop through column for inst_row_data in col_data: # add row data to signature matrix inst_row = inst_row_data['row_name'] inst_value = inst_row_data['val'] # find index of row and sig in matrix row_index = all_rows.index(inst_row) col_index = all_sigs.index(inst_sig_name) # save inst_value to matrix self.dat['mat'][row_index, col_index] = inst_value if is_up_down == True: self.dat['mat_up'][row_index, col_index] = inst_row_data['val_up'] self.dat['mat_dn'][row_index, col_index] = inst_row_data['val_dn'] def load_data_file_to_net(self, filename): # load json from file to new dictionary inst_dat = self.load_json_to_dict(filename) # convert dat['mat'] to numpy array and add to network self.load_data_to_net(inst_dat) def 
load_data_to_net(self, inst_net): ''' load data into nodes and mat, also convert mat to numpy array''' self.dat['nodes'] = inst_net['nodes'] self.dat['mat'] = inst_net['mat'] # convert to numpy array self.mat_to_numpy_arr() def export_net_json(self, net_type, indent='no-indent'): ''' export json string of dat ''' import json from copy import deepcopy if net_type == 'dat': exp_dict = deepcopy(self.dat) # convert numpy array to list if type(exp_dict['mat']) is not list: exp_dict['mat'] = exp_dict['mat'].tolist() elif net_type == 'viz': exp_dict = self.viz # make json if indent == 'indent': exp_json = json.dumps(exp_dict, indent=2) else: exp_json = json.dumps(exp_dict) return exp_json def write_json_to_file(self, net_type, filename, indent='no-indent'): import json # get dat or viz representation as json string if net_type == 'dat': exp_json = self.export_net_json('dat', indent) elif net_type == 'viz': exp_json = self.export_net_json('viz', indent) # save to file fw = open(filename, 'w') fw.write( exp_json ) fw.close() def set_node_names(self, row_name, col_name): '''give names to the rows and columns''' self.dat['node_names'] = {} self.dat['node_names']['row'] = row_name self.dat['node_names']['col'] = col_name def mat_to_numpy_arr(self): ''' convert list to numpy array - numpy arrays can not be saved as json ''' import numpy as np self.dat['mat'] = np.asarray( self.dat['mat'] ) def swap_nan_for_zero(self): import numpy as np self.dat['mat'][ np.isnan( self.dat['mat'] ) ] = 0 def filter_row_thresh( self, row_filt_int, filter_type='value' ): ''' Remove rows from matrix that do not meet some threshold value: The default filtering is value, in that at least one value in the row has to be higher than some threshold. num: Rows can be filtered by the number of non-zero values it has. 
sum: Rows can be filtered by the sum of the values ''' import scipy import numpy as np # max vlue in matrix mat = self.dat['mat'] max_mat = abs(max(mat.min(), mat.max(), key=abs)) # maximum number of measurements max_num = len(self.dat['nodes']['col']) mat_abs = abs(mat) sum_row = np.sum(mat_abs, axis=1) max_sum = max(sum_row) # transfer the nodes nodes = {} nodes['row'] = [] nodes['col'] = self.dat['nodes']['col'] # transfer the 'info' part of node_info if necessary node_info = {} node_info['row'] = [] node_info['col'] = self.dat['node_info']['col']['info'] # filter rows ################################# for i in range(len(self.dat['nodes']['row'])): # get row name inst_nodes_row = self.dat['nodes']['row'][i] # get node info - disregard ini, clust, and rank orders if len(self.dat['node_info']['row']['info']) > 0: inst_node_info = self.dat['node_info']['row']['info'][i] # get absolute value of row data row_vect = np.absolute(self.dat['mat'][i,:]) # value: is there at least one value over cutoff ################################################## if filter_type == 'value': # calc cutoff cutoff = row_filt_int * max_mat # count the number of values above some thresh found_tuple = np.where(row_vect >= cutoff) if len(found_tuple[0])>=1: # add name nodes['row'].append(inst_nodes_row) # add info if necessary if len(self.dat['node_info']['row']['info']) > 0: node_info['row'].append(inst_node_info) elif filter_type == 'num': num_nonzero = np.count_nonzero(row_vect) # use integer number of non-zero measurements cutoff = row_filt_int * 10 if num_nonzero>= cutoff: # add name nodes['row'].append(inst_nodes_row) # add info if necessary if len(self.dat['node_info']['row']['info']) > 0: node_info['row'].append(inst_node_info) elif filter_type == 'sum': inst_row_sum = sum(abs(row_vect)) if inst_row_sum > row_filt_int*max_sum: # add name nodes['row'].append(inst_nodes_row) # add info if necessary if len(self.dat['node_info']['row']['info']) > 0: 
node_info['row'].append(inst_node_info) # cherrypick data from self.dat['mat'] ################################## # filtered matrix filt_mat = scipy.zeros([ len(nodes['row']), len(nodes['col']) ]) if 'mat_up' in self.dat: filt_mat_up = scipy.zeros([ len(nodes['row']), len(nodes['col']) ]) filt_mat_dn = scipy.zeros([ len(nodes['row']), len(nodes['col']) ]) if 'mat_info' in self.dat: # initialize filtered mat_info dictionary with tuple keys filt_mat_info = {} # loop through the rows for i in range(len(nodes['row'])): inst_row = nodes['row'][i] # loop through the cols for j in range(len(nodes['col'])): inst_col = nodes['col'][j] # get row and col index pick_row = self.dat['nodes']['row'].index(inst_row) pick_col = self.dat['nodes']['col'].index(inst_col) # cherrypick ############### filt_mat[i,j] = self.dat['mat'][pick_row, pick_col] if 'mat_up' in self.dat: filt_mat_up[i,j] = self.dat['mat_up'][pick_row, pick_col] filt_mat_dn[i,j] = self.dat['mat_dn'][pick_row, pick_col] if 'mat_info' in self.dat: filt_mat_info[str((i,j))] = self.dat['mat_info'][str((pick_row,pick_col))] # save nodes array - list of node names self.dat['nodes'] = nodes # save node_info array - list of node infos self.dat['node_info']['row']['info'] = node_info['row'] self.dat['node_info']['col']['info'] = node_info['col'] # overwrite with new filtered data self.dat['mat'] = filt_mat # overwrite with up/dn data if necessary if 'mat_up' in self.dat: self.dat['mat_up'] = filt_mat_up self.dat['mat_dn'] = filt_mat_dn # overwrite mat_info if necessary if 'mat_info' in self.dat: self.dat['mat_info'] = filt_mat_info print( 'final mat shape' + str(self.dat['mat'].shape ) + '\n') def filter_col_thresh( self, cutoff, min_num_meet ): ''' remove rows and columns from matrix that do not have at least min_num_meet instances of a value with an absolute value above cutoff ''' import scipy import numpy as np # transfer the nodes nodes = {} nodes['row'] = self.dat['nodes']['row'] nodes['col'] = [] # transfer the 'info' 
part of node_info if necessary node_info = {} node_info['row'] = self.dat['node_info']['row']['info'] node_info['col'] = [] # add cols with non-zero values ################################# for i in range(len(self.dat['nodes']['col'])): # get col name inst_nodes_col = self.dat['nodes']['col'][i] # get node info - disregard ini, clust, and rank orders if len(self.dat['node_info']['col']['info']) > 0: inst_node_info = self.dat['node_info']['col']['info'][i] # get col vect col_vect = np.absolute(self.dat['mat'][:,i]) # check if there are nonzero values found_tuple = np.where(col_vect >= cutoff) if len(found_tuple[0])>=min_num_meet: # add name nodes['col'].append(inst_nodes_col) # add info if necessary if len(self.dat['node_info']['col']['info']) > 0: node_info['col'].append(inst_node_info) # cherrypick data from self.dat['mat'] ################################## # filtered matrix filt_mat = scipy.zeros([ len(nodes['row']), len(nodes['col']) ]) if 'mat_up' in self.dat: filt_mat_up = scipy.zeros([ len(nodes['row']), len(nodes['col']) ]) filt_mat_dn = scipy.zeros([ len(nodes['row']), len(nodes['col']) ]) if 'mat_info' in self.dat: # initialize filtered mat_info dictionary with tuple keys filt_mat_info = {} # loop through the rows for i in range(len(nodes['row'])): inst_row = nodes['row'][i] # loop through the cols for j in range(len(nodes['col'])): inst_col = nodes['col'][j] # get row and col index pick_row = self.dat['nodes']['row'].index(inst_row) pick_col = self.dat['nodes']['col'].index(inst_col) # cherrypick ############### filt_mat[i,j] = self.dat['mat'][pick_row, pick_col] if 'mat_up' in self.dat: filt_mat_up[i,j] = self.dat['mat_up'][pick_row, pick_col] filt_mat_dn[i,j] = self.dat['mat_dn'][pick_row, pick_col] if 'mat_info' in self.dat: filt_mat_info[str((i,j))] = self.dat['mat_info'][str((pick_row,pick_col))] # save nodes array - list of node names self.dat['nodes'] = nodes # save node_info array - list of node infos self.dat['node_info']['row']['info'] = 
node_info['row'] self.dat['node_info']['col']['info'] = node_info['col'] # overwrite with new filtered data self.dat['mat'] = filt_mat # overwrite with up/dn data if necessary if 'mat_up' in self.dat: self.dat['mat_up'] = filt_mat_up self.dat['mat_dn'] = filt_mat_dn # overwrite mat_info if necessary if 'mat_info' in self.dat: self.dat['mat_info'] = filt_mat_info print( 'final mat shape' + str(self.dat['mat'].shape ) + '\n') def filter_network_thresh( self, cutoff, min_num_meet ): ''' remove rows and columns from matrix that do not have at least min_num_meet instances of a value with an absolute value above cutoff ''' import scipy import numpy as np # transfer the nodes nodes = {} nodes['row'] = [] nodes['col'] = [] # transfer the 'info' part of node_info if necessary node_info = {} node_info['row'] = [] node_info['col'] = [] # add rows with non-zero values ################################# for i in range(len(self.dat['nodes']['row'])): # get row name inst_nodes_row = self.dat['nodes']['row'][i] # get node info - disregard ini, clust, and rank orders if len(self.dat['node_info']['row']['info']) > 0: inst_node_info = self.dat['node_info']['row']['info'][i] # get row vect row_vect = np.absolute(self.dat['mat'][i,:]) # check if there are nonzero values found_tuple = np.where(row_vect >= cutoff) if len(found_tuple[0])>=min_num_meet: # add name nodes['row'].append(inst_nodes_row) # add info if necessary if len(self.dat['node_info']['row']['info']) > 0: node_info['row'].append(inst_node_info) # add cols with non-zero values ################################# for i in range(len(self.dat['nodes']['col'])): # get col name inst_nodes_col = self.dat['nodes']['col'][i] # get node info - disregard ini, clust, and rank orders if len(self.dat['node_info']['col']['info']) > 0: inst_node_info = self.dat['node_info']['col']['info'][i] # get col vect col_vect = np.absolute(self.dat['mat'][:,i]) # check if there are nonzero values found_tuple = np.where(col_vect >= cutoff) if 
len(found_tuple[0])>=min_num_meet: # add name nodes['col'].append(inst_nodes_col) # add info if necessary if len(self.dat['node_info']['col']['info']) > 0: node_info['col'].append(inst_node_info) # cherrypick data from self.dat['mat'] ################################## # filtered matrix filt_mat = scipy.zeros([ len(nodes['row']), len(nodes['col']) ]) if 'mat_up' in self.dat: filt_mat_up = scipy.zeros([ len(nodes['row']), len(nodes['col']) ]) filt_mat_dn = scipy.zeros([ len(nodes['row']), len(nodes['col']) ]) if 'mat_info' in self.dat: # initialize filtered mat_info dictionary with tuple keys filt_mat_info = {} # loop through the rows for i in range(len(nodes['row'])): inst_row = nodes['row'][i] # loop through the cols for j in range(len(nodes['col'])): inst_col = nodes['col'][j] # get row and col index pick_row = self.dat['nodes']['row'].index(inst_row) pick_col = self.dat['nodes']['col'].index(inst_col) # cherrypick ############### filt_mat[i,j] = self.dat['mat'][pick_row, pick_col] if 'mat_up' in self.dat: filt_mat_up[i,j] = self.dat['mat_up'][pick_row, pick_col] filt_mat_dn[i,j] = self.dat['mat_dn'][pick_row, pick_col] if 'mat_info' in self.dat: filt_mat_info[str((i,j))] = self.dat['mat_info'][str((pick_row,pick_col))] # save nodes array - list of node names self.dat['nodes'] = nodes # save node_info array - list of node infos self.dat['node_info']['row']['info'] = node_info['row'] self.dat['node_info']['col']['info'] = node_info['col'] # overwrite with new filtered data self.dat['mat'] = filt_mat # overwrite with up/dn data if necessary if 'mat_up' in self.dat: self.dat['mat_up'] = filt_mat_up self.dat['mat_dn'] = filt_mat_dn # overwrite mat_info if necessary if 'mat_info' in self.dat: self.dat['mat_info'] = filt_mat_info print( 'final mat shape' + str(self.dat['mat'].shape ) + '\n') def keep_max_num_links(self, keep_num_links): print('\trun keep_max_num_links') max_mat_value = abs(self.dat['mat']).max() # check the total number of links inst_thresh = 0 
inst_pct_max = 0 inst_num_links = (abs(self.dat['mat'])>inst_thresh).sum() print('initially there are '+str(inst_num_links)+' links ') print('there are initially '+str(inst_num_links)+'\n') thresh_fraction = 100 while (inst_num_links > keep_num_links): # increase the threshold as a pct of max value in mat inst_pct_max = inst_pct_max + 1 # increase threshold inst_thresh = max_mat_value*(float(inst_pct_max)/thresh_fraction) # check the number of links above the curr threshold inst_num_links = (abs(self.dat['mat'])>inst_thresh).sum() print('there are '+str(inst_num_links)+ ' links at threshold '+str(inst_pct_max)+'pct and value of ' +str(inst_thresh)+'\n') # if there are no links then increas thresh back up if inst_num_links == 0: inst_pct_max = inst_pct_max - 1 inst_thresh = max_mat_value*(float(inst_pct_max)/thresh_fraction) print('final number of links '+str(inst_num_links)) # replace values that are less than thresh with zero self.dat['mat'][ abs(self.dat['mat']) < inst_thresh] = 0 # return number of links return (abs(self.dat['mat'])>inst_thresh).sum() def cluster_row_and_col(self, dist_type='cosine', linkage_type='average', dendro=True, \ run_clustering=True, run_rank=True): ''' cluster net.dat and make visualization json, net.viz. 
optionally leave out dendrogram colorbar groups with dendro argument ''' import scipy import numpy as np from scipy.spatial.distance import pdist from copy import deepcopy # do not make dendrogram is you are not running clusttering if run_clustering == False: dendro = False # make distance matrices ########################## # get number of rows and columns from self.dat num_row = len(self.dat['nodes']['row']) num_col = len(self.dat['nodes']['col']) # initialize distance matrices row_dm = scipy.zeros([num_row,num_row]) col_dm = scipy.zeros([num_col,num_col]) # make copy of matrix tmp_mat = deepcopy(self.dat['mat']) # calculate distance matrix row_dm = pdist( tmp_mat, metric=dist_type ) col_dm = pdist( tmp_mat.transpose(), metric=dist_type ) # prevent negative values row_dm[row_dm < 0] = float(0) col_dm[col_dm < 0] = float(0) # initialize clust order clust_order = self.ini_clust_order() # initial ordering ################### clust_order['row']['ini'] = range(num_row, -1, -1) clust_order['col']['ini'] = range(num_col, -1, -1) # cluster if run_clustering == True: clust_order['row']['clust'], clust_order['row']['group'] = \ self.clust_and_group(row_dm, linkage_type=linkage_type) clust_order['col']['clust'], clust_order['col']['group'] = \ self.clust_and_group(col_dm, linkage_type=linkage_type) # rank if run_rank == True: clust_order['row']['rank'] = self.sort_rank_nodes('row') clust_order['col']['rank'] = self.sort_rank_nodes('col') # save clustering orders to node_info if run_clustering == True: self.dat['node_info']['row']['clust'] = clust_order['row']['clust'] self.dat['node_info']['col']['clust'] = clust_order['col']['clust'] else: self.dat['node_info']['row']['clust'] = clust_order['row']['ini'] self.dat['node_info']['col']['clust'] = clust_order['col']['ini'] if run_rank == True: self.dat['node_info']['row']['rank'] = clust_order['row']['rank'] self.dat['node_info']['col']['rank'] = clust_order['col']['rank'] else: self.dat['node_info']['row']['rank'] = 
clust_order['row']['ini'] self.dat['node_info']['col']['rank'] = clust_order['col']['ini'] # transfer ordereings # row self.dat['node_info']['row']['ini'] = clust_order['row']['ini'] self.dat['node_info']['row']['group'] = clust_order['row']['group'] # col self.dat['node_info']['col']['ini'] = clust_order['col']['ini'] self.dat['node_info']['col']['group'] = clust_order['col']['group'] #!! disabled temporarily # if len(self.dat['node_info']['col']['cl']) > 0: # self.calc_cat_clust_order() # make the viz json - can optionally leave out dendrogram self.viz_json(dendro) def calc_cat_clust_order(self): from clustergrammer import Network from copy import deepcopy col_in_cat = self.dat['node_info']['col_in_cat'] # alpha order categories all_cats = sorted(col_in_cat.keys()) # cluster each category ############################## # calc clustering of each category all_cat_orders = [] # this is the ordering of the columns based on their category, not # including their clustering order on top of their category tmp_col_names_list = [] for inst_cat in all_cats: inst_cols = col_in_cat[inst_cat] # keep a list of the columns tmp_col_names_list.extend(inst_cols) cat_net = deepcopy(Network()) cat_net.dat['mat'] = deepcopy(self.dat['mat']) cat_net.dat['nodes'] = deepcopy(self.dat['nodes']) # get dataframe, to simplify column filtering cat_df = cat_net.dat_to_df() # get subset of dataframe sub_df = {} sub_df['mat'] = cat_df['mat'][inst_cols] # load back to dat cat_net.df_to_dat(sub_df) try: cat_net.cluster_row_and_col('cos') inst_cat_order = cat_net.dat['node_info']['col']['clust'] except: inst_cat_order = range(len(cat_net.dat['nodes']['col'])) prev_order_len = len(all_cat_orders) # add previous order length to the current order number inst_cat_order = [i+prev_order_len for i in inst_cat_order] all_cat_orders.extend(inst_cat_order) # sort tmp_col_names_lust by the integers in all_cat_orders names_col_cat_clust = [x for (y,x) in sorted(zip(all_cat_orders,tmp_col_names_list))] # calc 
category-cluster order ############################## final_order = [] for i in range(len(self.dat['nodes']['col'])): # get the rank of the col in the order of col_nodes inst_col_name = self.dat['nodes']['col'][i] inst_col_num = names_col_cat_clust.index(inst_col_name) final_order.append(inst_col_num) self.dat['node_info']['col']['cl_index'] = final_order def clust_and_group( self, dm, linkage_type='average' ): import scipy.cluster.hierarchy as hier # calculate linkage Y = hier.linkage( dm, method=linkage_type ) Z = hier.dendrogram( Y, no_plot=True ) # get ordering inst_clust_order = Z['leaves'] all_dist = self.group_cutoffs() # generate distance cutoffs inst_groups = {} for inst_dist in all_dist: inst_key = str(inst_dist).replace('.','') inst_groups[inst_key] = hier.fcluster(Y, inst_dist*dm.max(), 'distance') inst_groups[inst_key] = inst_groups[inst_key].tolist() return inst_clust_order, inst_groups def sort_rank_node_values( self, rowcol ): import numpy as np from operator import itemgetter from copy import deepcopy # make a copy of nodes and node_info inst_nodes = deepcopy(self.dat['nodes'][rowcol]) inst_vals = deepcopy(self.dat['node_info'][rowcol]['value']) tmp_arr = [] for i in range(len(inst_nodes)): inst_dict = {} # get name of the node inst_dict['name'] = inst_nodes[i] # get value inst_dict['value'] = inst_vals[i] tmp_arr.append(inst_dict) # sort dictionary by value tmp_arr = sorted( tmp_arr, key=itemgetter('value') ) # get list of sorted nodes tmp_sort_nodes = [] for inst_dict in tmp_arr: tmp_sort_nodes.append( inst_dict['name'] ) # get the sorted index sort_index = [] for inst_node in inst_nodes: sort_index.append( tmp_sort_nodes.index(inst_node) ) return sort_index def sort_rank_nodes( self, rowcol ): import numpy as np from operator import itemgetter from copy import deepcopy # make a copy of node information inst_nodes = deepcopy(self.dat['nodes'][rowcol]) inst_mat = deepcopy(self.dat['mat']) sum_term = [] for i in range(len(inst_nodes)): inst_dict = 
{} # get name of the node inst_dict['name'] = inst_nodes[i] # sum values of the node if rowcol == 'row': inst_dict['total'] = np.sum(inst_mat[i,:]) else: inst_dict['total'] = np.sum(inst_mat[:,i]) # add this to the list of dicts sum_term.append(inst_dict) # sort dictionary by number of terms sum_term = sorted( sum_term, key=itemgetter('total'), reverse=False ) # get list of sorted nodes tmp_sort_nodes = [] for inst_dict in sum_term: tmp_sort_nodes.append(inst_dict['name']) # get the sorted index sort_index = [] for inst_node in inst_nodes: sort_index.append( tmp_sort_nodes.index(inst_node) ) return sort_index def viz_json(self, dendro=True): ''' make the dictionary for the clustergram.js visualization ''' # get dendrogram cutoff distances all_dist = self.group_cutoffs() # make nodes for viz ##################### # make rows and cols for inst_rc in self.dat['nodes']: for i in range(len( self.dat['nodes'][inst_rc] )): inst_dict = {} inst_dict['name'] = self.dat['nodes'][inst_rc][i] inst_dict['ini'] = self.dat['node_info'][inst_rc]['ini'][i] #!! 
clean this up so I do not have to get the index here inst_dict['clust'] = self.dat['node_info'][inst_rc]['clust'].index(i) inst_dict['rank'] = self.dat['node_info'][inst_rc]['rank'][i] # add node class cl if len(self.dat['node_info'][inst_rc]['cl']) > 0: inst_dict['cl'] = self.dat['node_info'][inst_rc]['cl'][i] # add node class cl_index if 'cl_index' in self.dat['node_info'][inst_rc] > 0: inst_dict['cl_index'] = self.dat['node_info'][inst_rc]['cl_index'][i] # add node class val if len(self.dat['node_info'][inst_rc]['value']) > 0: inst_dict['value'] = self.dat['node_info'][inst_rc]['value'][i] # add node information # if 'info' in self.dat['node_info'][inst_rc]: if len(self.dat['node_info'][inst_rc]['info']) > 0: inst_dict['info'] = self.dat['node_info'][inst_rc]['info'][i] # group info if dendro==True: inst_dict['group'] = [] for tmp_dist in all_dist: # read group info in correct order tmp_dist = str(tmp_dist).replace('.','') inst_dict['group'].append( float( self.dat['node_info'][inst_rc]['group'][tmp_dist][i] ) ) # append dictionary to list of nodes self.viz[inst_rc+'_nodes'].append(inst_dict) # links ######## for i in range(len( self.dat['nodes']['row'] )): for j in range(len( self.dat['nodes']['col'] )): if abs( self.dat['mat'][i,j] ) > 0: inst_dict = {} inst_dict['source'] = i inst_dict['target'] = j inst_dict['value'] = self.dat['mat'][i,j] # add up/dn values if necessary if 'mat_up' in self.dat: inst_dict['value_up'] = self.dat['mat_up'][i,j] if 'mat_up' in self.dat: inst_dict['value_dn'] = self.dat['mat_dn'][i,j] # add information if necessary - use dictionary with tuple key # each element of the matrix needs to have information if 'mat_info' in self.dat: # use tuple string inst_dict['info'] = self.dat['mat_info'][str((i,j))] # add highlight if necessary - use dictionary with tuple key if 'mat_hl' in self.dat: inst_dict['highlight'] = self.dat['mat_hl'][i,j] # append link self.viz['links'].append( inst_dict ) def df_to_dat(self, df): import numpy as np 
import pandas as pd self.dat['mat'] = df['mat'].values self.dat['nodes']['row'] = df['mat'].index.tolist() self.dat['nodes']['col'] = df['mat'].columns.tolist() # check if there is category information in the column names if type(self.dat['nodes']['col'][0]) is tuple: self.dat['nodes']['col'] = [i[0] for i in self.dat['nodes']['col']] if 'mat_up' in df: self.dat['mat_up'] = df['mat_up'].values self.dat['mat_dn'] = df['mat_dn'].values def dat_to_df(self): import numpy as np import pandas as pd df = {} # always return 'mat' dataframe df['mat'] = pd.DataFrame(data = self.dat['mat'], columns=self.dat['nodes']['col'], index=self.dat['nodes']['row']) if 'mat_up' in self.dat: df['mat_up'] = pd.DataFrame(data = self.dat['mat_up'], columns=self.dat['nodes']['col'], index=self.dat['nodes']['row']) df['mat_dn'] = pd.DataFrame(data = self.dat['mat_dn'], columns=self.dat['nodes']['col'], index=self.dat['nodes']['row']) return df def make_filtered_views(self, dist_type='cosine', run_clustering=True, \ dendro=True, views=['filter_row_sum','N_row_sum'], calc_col_cats=True, \ linkage_type='average'): from copy import deepcopy ''' This will calculate multiple views of a clustergram by filtering the data and clustering after each filtering. This filtering will keep the top N rows based on some quantity (sum, num-non-zero, etc). 
''' print('running make_filtered_views') print('dist_type '+str(dist_type)) # get dataframe dictionary of network and remove rows/cols with all zero values df = self.dat_to_df() # each row or column must have at least one non-zero value threshold = 0.0001 df = self.df_filter_row(df, threshold) df = self.df_filter_col(df, threshold) # calculate initial view with no row filtering ################################################## # swap back in the filtered df to dat self.df_to_dat(df) # cluster initial view self.cluster_row_and_col(dist_type=dist_type, linkage_type=linkage_type, \ run_clustering=run_clustering, dendro=dendro) # set up views all_views = [] # generate views for each column category (default to only one) all_col_cat = ['all_category'] # check for column categories and check whether category specific clustering # should be calculated if len(self.dat['node_info']['col']['cl']) > 0 and calc_col_cats: tmp_cats = sorted(list(set(self.dat['node_info']['col']['cl']))) # gather all col_cats all_col_cat.extend(tmp_cats) for inst_col_cat in all_col_cat: # make a copy of df to send to filters send_df = deepcopy(df) # add N_row_sum views if 'N_row_sum' in views: print('add N top views') all_views = self.add_N_top_views( send_df, all_views, dist_type=dist_type, current_col_cat=inst_col_cat ) if 'filter_row_sum' in views: all_views = self.add_pct_top_views( send_df, all_views, dist_type=dist_type, current_col_cat=inst_col_cat ) # add views to viz self.viz['views'] = all_views print('finished make_filtered_views') def add_pct_top_views(self, df, all_views, dist_type='cosine', \ current_col_cat='all_category'): from clustergrammer import Network from copy import deepcopy import numpy as np # make a copy of the network so that filtering is not propagated copy_net = deepcopy(self) # filter columns by category if necessary - do this on df, which is a copy if current_col_cat != 'all_category': keep_cols = copy_net.dat['node_info']['col_in_cat'][current_col_cat] df['mat'] 
= copy_net.grab_df_subset(df['mat'], keep_rows='all', keep_cols=keep_cols) # gather category key is_col_cat = False if len(self.dat['node_info']['col']['cl']) > 0 and current_col_cat=='all_category': is_col_cat = True cat_key_col = {} for i in range(len(self.dat['nodes']['col'])): cat_key_col[ self.dat['nodes']['col'][i] ] = self.dat['node_info']['col']['cl'][i] # filter between 0% and 90% of some threshoold all_filt = range(10) all_filt = [i/float(10) for i in all_filt] # row filtering values mat = deepcopy(df['mat']) sum_row = np.sum(mat, axis=1) max_sum = max(sum_row) for inst_filt in all_filt: cutoff = inst_filt * max_sum # make a copy of the network so that filtering is not propagated copy_net = deepcopy(self) # make copy of df inst_df = deepcopy(df) # filter row in df inst_df = copy_net.df_filter_row(inst_df, cutoff, take_abs=False) # filter columns by category if necessary if current_col_cat != 'all_category': keep_cols = copy_net.dat['node_info']['col_in_cat'][current_col_cat] inst_df['mat'] = copy_net.grab_df_subset(inst_df['mat'], keep_rows='all', keep_cols=keep_cols) if 'mat_up' in inst_df: # grab up and down data inst_df['mat_up'] = copy_net.grab_df_subset(inst_df['mat_up'], keep_rows='all', keep_cols=keep_cols) inst_df['mat_dn'] = copy_net.grab_df_subset(inst_df['mat_dn'], keep_rows='all', keep_cols=keep_cols) # ini net net = deepcopy(Network()) # transfer to dat net.df_to_dat(inst_df) # add col categories if necessary if is_col_cat: inst_col_cats = [] for inst_col_name in copy_net.dat['nodes']['col']: inst_col_cats.append( cat_key_col[inst_col_name] ) # transfer category information net.dat['node_info']['col']['cl'] = inst_col_cats # add col_in_cat net.dat['node_info']['col_in_cat'] = copy_net.dat['node_info']['col_in_cat'] # try to cluster try: try: # cluster net.cluster_row_and_col(dist_type=dist_type,run_clustering=True) except: # cluster net.cluster_row_and_col(dist_type=dist_type,run_clustering=False) # add view inst_view = {} 
inst_view['filter_row_sum'] = inst_filt inst_view['dist'] = 'cos' inst_view['col_cat'] = current_col_cat inst_view['nodes'] = {} inst_view['nodes']['row_nodes'] = net.viz['row_nodes'] inst_view['nodes']['col_nodes'] = net.viz['col_nodes'] all_views.append(inst_view) except: print('\t*** did not cluster pct filtered view') return all_views def add_N_top_views(self, df, all_views, dist_type='cosine',\ current_col_cat='all_category'): from clustergrammer import Network from copy import deepcopy # make a copy of hte network copy_net = deepcopy(self) # filter columns by category if necessary if current_col_cat != 'all_category': keep_cols = copy_net.dat['node_info']['col_in_cat'][current_col_cat] df['mat'] = copy_net.grab_df_subset(df['mat'], keep_rows='all', keep_cols=keep_cols) # gather category key is_col_cat = False if len(self.dat['node_info']['col']['cl']) > 0 and current_col_cat=='all_category': is_col_cat = True cat_key_col = {} for i in range(len(self.dat['nodes']['col'])): cat_key_col[ self.dat['nodes']['col'][i] ] = self.dat['node_info']['col']['cl'][i] # keep the following number of top rows keep_top = ['all',500,400,300,200,100,90,80,70,60,50,40,30,20,10] # get copy of df and take abs value, cell line cols and gene rows df_abs = deepcopy(df['mat']) # transpose to get gene columns df_abs = df_abs.transpose() # sum the values of the genes in the cell lines tmp_sum = df_abs.sum(axis=0) # take absolute value to keep most positive and most negative rows tmp_sum = tmp_sum.abs() # sort rows by value tmp_sum.sort(ascending=False) rows_sorted = tmp_sum.index.values.tolist() for inst_keep in keep_top: # initialize df tmp_df = deepcopy(df) # filter columns by category if necessary if current_col_cat != 'all_category': keep_cols = copy_net.dat['node_info']['col_in_cat'][current_col_cat] tmp_df['mat'] = copy_net.grab_df_subset(tmp_df['mat'], keep_rows='all', keep_cols=keep_cols) if 'mat_up' in df: # grab up and down data tmp_df['mat_up'] = 
copy_net.grab_df_subset(tmp_df['mat_up'], keep_rows='all', keep_cols=keep_cols) tmp_df['mat_dn'] = copy_net.grab_df_subset(tmp_df['mat_dn'], keep_rows='all', keep_cols=keep_cols) if inst_keep < len(rows_sorted) or inst_keep == 'all': # initialize netowrk net = deepcopy(Network()) # filter the rows if inst_keep != 'all': # get the labels of the rows that will be kept keep_rows = rows_sorted[0:inst_keep] # filter the matrix tmp_df['mat'] = tmp_df['mat'].ix[keep_rows] if 'mat_up' in tmp_df: tmp_df['mat_up'] = tmp_df['mat_up'].ix[keep_rows] tmp_df['mat_dn'] = tmp_df['mat_dn'].ix[keep_rows] # filter columns - some columns may have all zero values tmp_df = self.df_filter_col(tmp_df,0.001) # transfer to dat net.df_to_dat(tmp_df) else: net.df_to_dat(tmp_df) # add col categories if necessary if is_col_cat: inst_col_cats = [] for inst_col_name in self.dat['nodes']['col']: inst_col_cats.append( cat_key_col[inst_col_name] ) # transfer category information net.dat['node_info']['col']['cl'] = inst_col_cats # add col_in_cat net.dat['node_info']['col_in_cat'] = copy_net.dat['node_info']['col_in_cat'] # try to cluster try: try: # cluster net.cluster_row_and_col(dist_type,run_clustering=True) except: # cluster net.cluster_row_and_col(dist_type,run_clustering=False) # add view inst_view = {} inst_view['N_row_sum'] = inst_keep inst_view['dist'] = 'cos' inst_view['col_cat'] = current_col_cat inst_view['nodes'] = {} inst_view['nodes']['row_nodes'] = net.viz['row_nodes'] inst_view['nodes']['col_nodes'] = net.viz['col_nodes'] all_views.append(inst_view) except: print('\t*** did not cluster N filtered view') return all_views def fast_mult_views(self, dist_type='cos', run_clustering=True, dendro=True): import numpy as np import pandas as pd from clustergrammer import Network from copy import deepcopy ''' This will use Pandas to calculte multiple views of a clustergram Currently, it is only filtering based on row-sum and it is disregarding link information (used to add click functionality). 
''' # gather category key is_col_cat = False if len(self.dat['node_info']['col']['cl']) > 0: is_col_cat = True cat_key_col = {} for i in range(len(self.dat['nodes']['col'])): cat_key_col[ self.dat['nodes']['col'][i] ] = self.dat['node_info']['col']['cl'][i] # get dataframe dictionary of network and remove rows/cols with all zero values df = self.dat_to_df() # each row or column must have at least one non-zero value threshold = 0.001 df = self.df_filter_row(df, threshold) df = self.df_filter_col(df, threshold) # calculate initial view with no row filtering ################################################# # swap back in filtered df to dat self.df_to_dat(df) # cluster initial view self.cluster_row_and_col('cos',run_clustering=run_clustering, dendro=dendro) # set up views all_views = [] # set up initial view inst_view = {} inst_view['filter_row_sum'] = 0 inst_view['dist'] = 'cos' inst_view['nodes'] = {} inst_view['nodes']['row_nodes'] = self.viz['row_nodes'] inst_view['nodes']['col_nodes'] = self.viz['col_nodes'] # add view with no filtering all_views.append(inst_view) # filter between 0% and 90% of some threshoold all_filt = range(10) all_filt = [i/float(10) for i in all_filt] # row filtering values mat = self.dat['mat'] mat_abs = abs(mat) sum_row = np.sum(mat_abs, axis=1) max_sum = max(sum_row) for inst_filt in all_filt: # skip zero filtering if inst_filt > 0: cutoff = inst_filt * max_sum # filter row df = self.df_filter_row(df, cutoff, take_abs=True) print('\tfiltering at cutoff ' + str(inst_filt) + ' mat shape: ' + str(df['mat'].shape)) # ini net net = deepcopy(Network()) # transfer to dat net.df_to_dat(df) # add col categories if necessary if is_col_cat: inst_col_cats = [] for inst_col_name in self.dat['nodes']['col']: inst_col_cats.append( cat_key_col[inst_col_name] ) net.dat['node_info']['col']['cl'] = inst_col_cats # try to cluster try: # cluster net.cluster_row_and_col('cos') # add view inst_view = {} inst_view['filter_row_sum'] = inst_filt inst_view['dist'] 
= 'cos' inst_view['nodes'] = {} inst_view['nodes']['row_nodes'] = net.viz['row_nodes'] inst_view['nodes']['col_nodes'] = net.viz['col_nodes'] all_views.append(inst_view) except: print('\t*** did not cluster filtered view') # add views to viz self.viz['views'] = all_views print('\tfinished fast_mult_views') def make_mult_views(self, dist_type='cos',filter_row=['value'], filter_col=False, run_clustering=True, dendro=True): ''' This will calculate multiple views of a clustergram by filtering the data and clustering after each fitlering. By default row filtering will be turned on and column filteirng will not. The filtering steps are defined as a percentage of the maximum value found in the network. ''' from clustergrammer import Network from copy import deepcopy # filter between 0% and 90% of some to be determined value all_filt = range(10) all_filt = [i/float(10) for i in all_filt] # cluster default view self.cluster_row_and_col('cos', run_clustering=run_clustering, dendro=dendro) self.viz['views'] = [] all_views = [] # Perform row filterings ########################### if len(filter_row) > 0: # perform multiple types of row filtering ########################################### for inst_type in filter_row: for row_filt_int in all_filt: # initialize new net net = deepcopy(Network()) net.dat = deepcopy(self.dat) # filter rows net.filter_row_thresh(row_filt_int, filter_type=inst_type) # filter columns since some columns might be all zero net.filter_col_thresh(0.001,1) # try to cluster - will not work if there is one row try: # cluster net.cluster_row_and_col('cos') inst_name = 'filter_row'+'_'+inst_type # add view inst_view = {} inst_view[inst_name] = row_filt_int inst_view['dist'] = 'cos' inst_view['nodes'] = {} inst_view['nodes']['row_nodes'] = net.viz['row_nodes'] inst_view['nodes']['col_nodes'] = net.viz['col_nodes'] all_views.append(inst_view) except: print('\t***did not cluster filtered view') # Default col Filtering ########################### inst_meet = 1 if 
filter_col == True: # col filtering ##################### for col_filt in all_filt: # print(col_filt) # initialize new net net = deepcopy(Network()) net.dat = deepcopy(self.dat) filt_value = col_filt * max_mat # filter cols net.filter_col_thresh(filt_value, inst_meet) # try to cluster - will not work if there is one col try: # cluster net.cluster_row_and_col('cos') # add view inst_view = {} inst_view['filter_col'] = col_filt inst_view['dist'] = 'cos' inst_view['nodes'] = {} inst_view['nodes']['row_nodes'] = net.viz['row_nodes'] inst_view['nodes']['col_nodes'] = net.viz['col_nodes'] all_views.append(inst_view) except: print('did not cluster filtered view') # add views to viz self.viz['views'] = all_views @staticmethod def df_filter_row(df, threshold, take_abs=True): ''' filter rows in matrix at some threshold and remove columns that have a sum below this threshold ''' import pandas as pd from copy import deepcopy from clustergrammer import Network net = Network() # take absolute value if necessary if take_abs == True: df_copy = deepcopy(df['mat'].abs()) else: df_copy = deepcopy(df['mat']) ini_rows = df_copy.index.values.tolist() # transpose df df_copy = df_copy.transpose() # sum the values of the rows tmp_sum = df_copy.sum(axis=0) # take absolute value to keep most positive and most negative rows tmp_sum = tmp_sum.abs() # sort rows by value tmp_sum.sort(ascending=False) # filter series using threshold tmp_sum = tmp_sum[tmp_sum>threshold] # get keep_row names keep_rows = sorted(tmp_sum.index.values.tolist()) if len(keep_rows) < len(ini_rows): # grab the subset of the data df['mat'] = net.grab_df_subset(df['mat'], keep_rows=keep_rows) if 'mat_up' in df: # grab up and down data df['mat_up'] = net.grab_df_subset(df['mat_up'], keep_rows=keep_rows) df['mat_dn'] = net.grab_df_subset(df['mat_dn'], keep_rows=keep_rows) return df @staticmethod def df_filter_col(df, threshold, take_abs=True): ''' filter columns in matrix at some threshold and remove rows that have all zero 
values ''' import pandas from copy import deepcopy from clustergrammer import Network net = Network() # take absolute value if necessary if take_abs == True: df_copy = deepcopy(df['mat'].abs()) else: df_copy = deepcopy(df['mat']) # filter columns to remove columns with all zero values # transpose df_copy = df_copy.transpose() df_copy = df_copy[df_copy.sum(axis=1) > threshold] # transpose back df_copy = df_copy.transpose() # filter rows df_copy = df_copy[df_copy.sum(axis=1) > 0] # get df ready for export if take_abs == True: inst_rows = df_copy.index.tolist() inst_cols = df_copy.columns.tolist() df['mat'] = net.grab_df_subset(df['mat'], inst_rows, inst_cols) else: # just transfer the copied data df['mat'] = df_copy return df @staticmethod def grab_df_subset(df, keep_rows='all', keep_cols='all'): if keep_cols != 'all': # filter columns df = df[keep_cols] if keep_rows != 'all': # filter rows df = df.ix[keep_rows] return df @staticmethod def load_gmt(filename): f = open(filename, 'r') lines = f.readlines() f.close() gmt = {} # loop through the lines of the gmt for i in range(len(lines)): # get the inst line, strip off the new line character inst_line = lines[i].rstrip() inst_term = inst_line.split('\t')[0] # get the elements inst_elems = inst_line.split('\t')[2:] # save the drug-kinase sets gmt[inst_term] = inst_elems return gmt @staticmethod def load_json_to_dict(filename): ''' load json to python dict and return dict ''' import json f = open(filename, 'r') inst_dict = json.load(f) f.close() return inst_dict @staticmethod def save_dict_to_json(inst_dict, filename, indent='no-indent'): import json # save as a json fw = open(filename, 'w') if indent == 'indent': fw.write( json.dumps(inst_dict, indent=2) ) else: fw.write( json.dumps(inst_dict) ) fw.close() @staticmethod def ini_clust_order(): rowcol = ['row','col'] orderings = ['clust','rank','group','ini'] clust_order = {} for inst_node in rowcol: clust_order[inst_node] = {} for inst_order in orderings: 
clust_order[inst_node][inst_order] = [] return clust_order @staticmethod def threshold_vect_comparison(x, y, cutoff): import numpy as np # x vector ############ # take absolute value of x x_abs = np.absolute(x) # this returns a tuple found_tuple = np.where(x_abs >= cutoff) # get index array found_index_x = found_tuple[0] # y vector ############ # take absolute value of y y_abs = np.absolute(y) # this returns a tuple found_tuple = np.where(y_abs >= cutoff) # get index array found_index_y = found_tuple[0] # get common intersection found_common = np.intersect1d(found_index_x, found_index_y) # apply cutoff thresh_x = x[found_common] thresh_y = y[found_common] # return the threshold data return thresh_x, thresh_y @staticmethod def group_cutoffs(): # generate distance cutoffs all_dist = [] for i in range(11): all_dist.append(float(i)/10) return all_dist @staticmethod def find_dict_in_list(list_dict, search_value, search_string): ''' find a dict in a list of dicts by searching for a value ''' # get all the possible values of search_value all_values = [d[search_value] for d in list_dict] # check if the search value is in the keys if search_string in all_values: # find the dict found_dict = (item for item in list_dict if item[search_value] == search_string).next() else: found_dict = {} # return the found dictionary return found_dict
31.528162
130
0.603546
8,648
59,336
3.928885
0.056545
0.056656
0.029461
0.034877
0.601054
0.542279
0.497042
0.459605
0.423875
0.402625
0
0.005392
0.253017
59,336
1,882
131
31.528162
0.761185
0.217254
0
0.508351
0
0
0.11193
0
0.001044
0
0
0
0
1
0.041754
false
0
0.056367
0
0.11691
0.021921
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
6be28791480695f18cfb74b2706fdb1c647fff80
523
py
Python
May 2019/brick_sort.py
Gern-Yataro/Daily-CS
194e8c2b23964300f39a8ed4f06d355360197766
[ "MIT" ]
1
2019-04-15T01:09:04.000Z
2019-04-15T01:09:04.000Z
May 2019/brick_sort.py
Gern-Yataro/Daily-CS
194e8c2b23964300f39a8ed4f06d355360197766
[ "MIT" ]
null
null
null
May 2019/brick_sort.py
Gern-Yataro/Daily-CS
194e8c2b23964300f39a8ed4f06d355360197766
[ "MIT" ]
null
null
null
# Daily exercise: odd-even (brick) sort demonstration.

numbers = [9, 8, 7, 6, 5, 4, 3, 2, 1]


def brick_sort(array):
    """Sort *array* in place using odd-even (brick) sort and return it.

    Alternates a pass over even-indexed adjacent pairs with a pass over
    odd-indexed pairs, repeating until a full double-pass makes no swap.
    O(n^2) worst case. The input list is mutated and also returned.
    """
    n = len(array) - 1
    swapped = True
    while swapped:
        swapped = False
        # Even phase: compare pairs (0,1), (2,3), ...
        for index in range(0, n, 2):
            if array[index] > array[index + 1]:
                array[index], array[index + 1] = array[index + 1], array[index]
                swapped = True
        # Odd phase: compare pairs (1,2), (3,4), ...
        for index in range(1, n, 2):
            if array[index] > array[index + 1]:
                array[index], array[index + 1] = array[index + 1], array[index]
                swapped = True
    return array


if __name__ == '__main__':
    # Guarded so importing this module no longer prints as a side effect.
    print(brick_sort(numbers))
23.772727
67
0.581262
81
523
3.728395
0.345679
0.397351
0.218543
0.317881
0.516556
0.516556
0.516556
0.516556
0.516556
0.516556
0
0.052219
0.267686
523
21
68
24.904762
0.736292
0
0
0.4375
0
0
0
0
0
0
0
0
0
1
0.0625
false
0
0
0
0.125
0.0625
0
0
0
null
1
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
3
6be3244eac9c140c3d63372c0ad3149ef60a64c1
1,676
py
Python
sanctuary/topic/api.py
20CM/Sanctuary
14694d9bd6376bdc05248741a91df778400e9f66
[ "BSD-3-Clause" ]
1
2017-05-29T11:53:06.000Z
2017-05-29T11:53:06.000Z
sanctuary/topic/api.py
20CM/Sanctuary
14694d9bd6376bdc05248741a91df778400e9f66
[ "BSD-3-Clause" ]
null
null
null
sanctuary/topic/api.py
20CM/Sanctuary
14694d9bd6376bdc05248741a91df778400e9f66
[ "BSD-3-Clause" ]
null
null
null
from rest_framework import status
from rest_framework.response import Response
from rest_framework.permissions import BasePermission, SAFE_METHODS, IsAuthenticatedOrReadOnly

from .models import Topic, Reply
from sanctuary.viewsets import NoDestroyModelViewSet
from .serializers import TopicSerializer, ReplySerializer


class CreateWithAuthorMixin(object):
    """
    Create a model instance, forcing the author to the requesting user.
    """
    def create(self, request, *args, **kwargs):
        # Overwrite any client-supplied "author" with the authenticated
        # user's id so clients cannot create content on someone's behalf.
        serializer = self.get_serializer(data=request.data)
        serializer.initial_data["author"] = self.request.user.id
        serializer.is_valid(raise_exception=True)
        self.perform_create(serializer)
        headers = self.get_success_headers(serializer.data)
        return Response(serializer.data, status=status.HTTP_201_CREATED,
                        headers=headers)

    def perform_create(self, serializer):
        # Persist with the request user recorded as the author.
        serializer.save(author=self.request.user)


class IsSuperAdminOrAuthor(BasePermission):
    """Object-level permission: reads open to all; writes only for
    superusers or the object's author."""

    def has_object_permission(self, request, view, obj):
        if request.method in SAFE_METHODS:
            return True
        user = request.user
        return user.is_superuser or user == obj.author


class TopicViewSet(CreateWithAuthorMixin, NoDestroyModelViewSet):
    """Topic endpoints (no destroy); filterable by author and tags."""
    queryset = Topic.objects.all()
    serializer_class = TopicSerializer
    permission_classes = (IsAuthenticatedOrReadOnly, IsSuperAdminOrAuthor)
    filter_fields = ('author', 'tags')


class ReplyViewSet(CreateWithAuthorMixin, NoDestroyModelViewSet):
    """Reply endpoints (no destroy); filterable by topic and author."""
    queryset = Reply.objects.all()
    serializer_class = ReplySerializer
    permission_classes = (IsAuthenticatedOrReadOnly, IsSuperAdminOrAuthor)
    filter_fields = ('topic', 'author')
35.659574
94
0.755967
170
1,676
7.311765
0.417647
0.035398
0.04103
0.033789
0.119067
0.119067
0
0
0
0
0
0.002141
0.164081
1,676
46
95
36.434783
0.885082
0.01432
0
0.0625
0
0
0.016504
0
0
0
0
0
0
1
0.09375
false
0
0.1875
0
0.75
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
1
0
0
1
6be3f12ed534c88956efb0cde9bfba8da5449ad9
1,113
py
Python
server/face_recogniser.py
fvalle1/ai_server
0bbb2b842a329ca8bbeb6529a2008b61dcc77cc0
[ "MIT" ]
1
2021-03-10T15:37:21.000Z
2021-03-10T15:37:21.000Z
server/face_recogniser.py
fvalle1/ai_server
0bbb2b842a329ca8bbeb6529a2008b61dcc77cc0
[ "MIT" ]
null
null
null
server/face_recogniser.py
fvalle1/ai_server
0bbb2b842a329ca8bbeb6529a2008b61dcc77cc0
[ "MIT" ]
null
null
null
import cv2 as cv
from model import model


class face_recogniser(model):
    """Face-detection model backed by OpenCV's DNN module.

    Loads the OpenVINO 'face-detection-adas-0001' IR model from fixed
    paths and draws a green rectangle around each detected face.
    """

    def __init__(self):
        # NOTE(review): bare super() does not call the base initializer;
        # this was likely meant to be super().__init__() — confirm.
        super()
        self.net = cv.dnn.readNet('/home/pi/inception/face-detection-adas-0001.xml','/home/pi/inception/face-detection-adas-0001.bin')
        # Target the Intel Myriad VPU (e.g. Neural Compute Stick).
        self.net.setPreferableTarget(cv.dnn.DNN_TARGET_MYRIAD)

    def add_face_rectangle(self, frame):
        """Run face detection on *frame* and draw boxes; returns the frame (mutated in place)."""
        # Prepare input blob and perform an inference.
        blob = cv.dnn.blobFromImage(frame, size=(672, 384), ddepth=cv.CV_8U)
        self.net.setInput(blob)
        out = self.net.forward()
        # Draw detected faces on the frame. Each 7-element detection row
        # is read as [.., .., confidence, xmin, ymin, xmax, ymax], with
        # coordinates scaled by the frame's width/height (i.e. assumed
        # normalized to [0, 1] — TODO confirm against the model spec).
        for detection in out.reshape(-1, 7):
            confidence = float(detection[2])
            xmin = int(detection[3] * frame.shape[1])
            ymin = int(detection[4] * frame.shape[0])
            xmax = int(detection[5] * frame.shape[1])
            ymax = int(detection[6] * frame.shape[0])
            if confidence > 0.5:  # keep only confident detections
                cv.rectangle(frame, (xmin, ymin), (xmax, ymax), color=(0, 255, 0))
        return frame

    def process(self, frame):
        """Model-interface hook: annotate faces on *frame*."""
        return self.add_face_rectangle(frame)
39.75
134
0.615454
150
1,113
4.486667
0.5
0.041605
0.044577
0.056464
0.106984
0.106984
0.106984
0
0
0
0
0.040964
0.254268
1,113
27
135
41.222222
0.76988
0.070081
0
0
0
0
0.091085
0.091085
0
0
0
0
0
1
0.136364
false
0
0.090909
0.045455
0.363636
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
6be456b6f75f64cea006c0e15110ea8fb947a3b6
503
py
Python
code/countries_centroids.py
loris2222/ChocoData
d5b8c49f201228dd55db03dbdbafcbe846e36200
[ "MIT" ]
null
null
null
code/countries_centroids.py
loris2222/ChocoData
d5b8c49f201228dd55db03dbdbafcbe846e36200
[ "MIT" ]
null
null
null
code/countries_centroids.py
loris2222/ChocoData
d5b8c49f201228dd55db03dbdbafcbe846e36200
[ "MIT" ]
null
null
null
from geopy.exc import GeocoderTimedOut
from geopy.geocoders import Nominatim
import numpy as np
import pandas as pd


# function to find the coordinate of a given city
def find_geocode(city):
    """Return the geopy Location for *city*, or None on timeout.

    Uses the free Nominatim geocoder. The user_agent should identify
    your application (required by Nominatim's usage policy).
    """
    try:
        geolocator = Nominatim(user_agent="your_app_name")
        # geolocator.geocode() itself may return None for unknown places.
        return geolocator.geocode(city)
    except GeocoderTimedOut:
        return None


if __name__ == '__main__':
    loc = find_geocode('italy')
    # BUG FIX: find_geocode returns None on timeout (and geocode() can
    # return None for an unknown place); the original dereferenced
    # loc.latitude unconditionally and crashed with AttributeError.
    if loc is not None:
        print(loc.latitude)
        print(loc.longitude)
    else:
        print('geocoding failed')
23.952381
58
0.72167
70
503
5.1
0.614286
0.05042
0.061625
0
0
0
0
0
0
0
0
0
0.2167
503
21
59
23.952381
0.906091
0.2167
0
0
0
0
0.046154
0
0
0
0
0
0
1
0.076923
false
0
0.307692
0
0.538462
0.153846
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
1
0
1
0
0
2
6be4827cb6db3797b4e0960a8f9afb82862b44ab
2,783
py
Python
tests/test_cpymadtools/test_generators.py
fsoubelet/PyhDToolk
aef9d5f1fac2d4c4307d50ba5c53f6a5eb635a66
[ "MIT" ]
5
2020-05-28T09:16:01.000Z
2021-12-27T18:59:15.000Z
tests/test_cpymadtools/test_generators.py
fsoubelet/PyhDToolk
aef9d5f1fac2d4c4307d50ba5c53f6a5eb635a66
[ "MIT" ]
71
2020-02-20T20:32:43.000Z
2022-03-24T17:04:28.000Z
tests/test_cpymadtools/test_generators.py
fsoubelet/PyhDToolk
aef9d5f1fac2d4c4307d50ba5c53f6a5eb635a66
[ "MIT" ]
2
2021-09-28T16:01:06.000Z
2022-03-16T19:04:23.000Z
import random

import pytest

from pyhdtoolkit.cpymadtools.generators import LatticeGenerator


class TestLatticeGenerator:
    """Checks for the static MAD-X lattice / job string generators."""

    def test_base_cas_lattice_generation(self):
        # The generated lattice is a fixed string; pin type and exact length.
        base_cas_lattice = LatticeGenerator.generate_base_cas_lattice()
        assert isinstance(base_cas_lattice, str)
        assert len(base_cas_lattice) == 1493

    def test_onesext_cas_lattice(self):
        onesext_cas_lattice = LatticeGenerator.generate_onesext_cas_lattice()
        assert isinstance(onesext_cas_lattice, str)
        assert len(onesext_cas_lattice) == 2051

    def test_oneoct_cas_lattice(self):
        oneoct_cas_lattice = LatticeGenerator.generate_oneoct_cas_lattice()
        assert isinstance(oneoct_cas_lattice, str)
        assert len(oneoct_cas_lattice) == 2050

    def test_tripleterrors_study_reference(self):
        tripleterrors_study_reference = LatticeGenerator.generate_tripleterrors_study_reference()
        assert isinstance(tripleterrors_study_reference, str)
        assert len(tripleterrors_study_reference) == 1617

    # Seed / error values may be str or int; they are interpolated
    # verbatim, so total length grows by their str() lengths.
    @pytest.mark.parametrize(
        "randseed, tferror",
        [
            ("", ""),
            ("95", "195"),
            ("105038", "0.001"),
            (str(random.randint(0, 1e7)), str(random.randint(0, 1e7))),
            (random.randint(0, 1e7), random.randint(0, 1e7)),
        ],
    )
    def test_tripleterrors_study_tferror_job(self, randseed, tferror):
        tripleterrors_study_tferror_job = LatticeGenerator.generate_tripleterrors_study_tferror_job(
            rand_seed=randseed,
            tf_error=tferror,
        )
        assert isinstance(tripleterrors_study_tferror_job, str)
        assert len(tripleterrors_study_tferror_job) == 2521 + len(str(randseed)) + len(str(tferror))
        assert f"eoption, add, seed = {randseed};" in tripleterrors_study_tferror_job
        assert f"B2r = {tferror};" in tripleterrors_study_tferror_job

    @pytest.mark.parametrize(
        "randseed, mserror",
        [
            ("", ""),
            ("95", "195"),
            ("105038", "0.001"),
            (str(random.randint(0, 1e7)), str(random.randint(0, 1e7))),
            (random.randint(0, 1e7), random.randint(0, 1e7)),
        ],
    )
    def test_tripleterrors_study_mserror_job(self, randseed, mserror):
        tripleterrors_study_mserror_job = LatticeGenerator.generate_tripleterrors_study_mserror_job(
            rand_seed=randseed,
            ms_error=mserror,
        )
        assert isinstance(tripleterrors_study_mserror_job, str)
        assert len(tripleterrors_study_mserror_job) == 2384 + len(str(randseed)) + len(str(mserror))
        assert f"eoption, add, seed = {randseed};" in tripleterrors_study_mserror_job
        assert f"ealign, ds := {mserror} * 1E-3 * TGAUSS(GCUTR);" in tripleterrors_study_mserror_job
42.166667
100
0.680201
308
2,783
5.824675
0.201299
0.190635
0.06243
0.075808
0.389075
0.218506
0.181717
0.181717
0.181717
0.12709
0
0.037207
0.217751
2,783
65
101
42.815385
0.786863
0
0
0.25
0
0
0.06935
0
0
0
0
0
0.285714
1
0.107143
false
0
0.053571
0
0.178571
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
6be4e81e9d40d83f06f1cfa243fa8007e8370a2f
1,789
py
Python
preprocessing/act_bed_construction.py
shtoneyan/sea-lion
7e1ce9a18a147eea42e6172a2329d696f6e6aef9
[ "MIT" ]
1
2022-02-10T21:21:32.000Z
2022-02-10T21:21:32.000Z
preprocessing/act_bed_construction.py
shtoneyan/sea-lion
7e1ce9a18a147eea42e6172a2329d696f6e6aef9
[ "MIT" ]
null
null
null
preprocessing/act_bed_construction.py
shtoneyan/sea-lion
7e1ce9a18a147eea42e6172a2329d696f6e6aef9
[ "MIT" ]
null
null
null
import re
import sys

import numpy as np  # BUG FIX: np.vstack is used below but numpy was never imported
import pandas as pd

# read bed file
# construct activity table
# output tfr file


def main():
    """Convert an activity table (.txt) into a BED-style file.

    Usage: act_bed_construction.py <bed_file> <act_table>

    The activity table's first (unnamed) column holds loci formatted
    like 'chrom:start-end(strand)...'. It is split into chrom/start/
    end/strand columns, which are moved to the front, then the table
    is written tab-separated to '<act_table base>.bed'.
    """
    bed_file = sys.argv[1]  # NOTE(review): read but never used — kept for CLI compatibility
    act_table = sys.argv[2]

    data = pd.read_csv(act_table, sep='\t')
    data.rename(columns={'Unnamed: 0': 'loci'}, inplace=True)

    # Parse the 'chrom:start-end(strand)...' loci strings.
    chrom = [i.split(':')[0] for i in list(data.loci)]
    coord = [re.split(':()', i)[-1] for i in list(data.loci)]
    start = [i.split('(')[0].split('-')[0] for i in coord]
    end = [i.split('(')[0].split('-')[1] for i in coord]
    strand = [i[-2] for i in coord]
    data = data.drop(columns=['loci'])

    data['chrom'] = chrom
    data['start'] = start
    data['end'] = end
    data['strand'] = strand

    # Move the four new columns (appended last) to the front.
    cols = data.columns.tolist()
    cols = cols[-4:] + cols[:-4]
    data = data[cols]

    output_act = act_table.split('.txt')[0] + '.bed'
    data.to_csv(output_act, sep='\t', index=False)


def align_seqs_scores_1hot(seq_vecs, seq_scores, sort=True):
    """Stack per-sequence vectors and scores into row-aligned matrices.

    Parameters
    ----------
    seq_vecs : dict mapping header -> 1-D array-like of sequence vectors
    seq_scores : dict mapping header -> 1-D array-like of scores
    sort : if True, rows are ordered by sorted header name; otherwise
        they follow seq_vecs' key iteration order.

    Returns
    -------
    (train_seqs, train_scores) : numpy arrays whose rows correspond to
        the same header order.
    """
    if sort:
        seq_headers = sorted(seq_vecs.keys())
    else:
        seq_headers = seq_vecs.keys()

    # Construct lists of vectors in header order.
    train_scores = []
    train_seqs = []
    for header in seq_headers:
        train_seqs.append(seq_vecs[header])
        train_scores.append(seq_scores[header])

    # Stack into matrices.
    train_seqs = np.vstack(train_seqs)
    train_scores = np.vstack(train_scores)

    return train_seqs, train_scores


if __name__ == '__main__':
    main()
28.854839
66
0.567915
260
1,789
3.757692
0.296154
0.040942
0.061412
0.051177
0.188332
0.174002
0.126919
0.116684
0.116684
0.116684
0
0.013768
0.228619
1,789
61
67
29.327869
0.694203
0.196758
0
0
0
0
0.047619
0
0
0
0
0
0
1
0.052632
false
0
0.078947
0
0.157895
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
6be8cef9f2811735b313ed8611fa3362dad56bc1
2,072
py
Python
aiserver/backup/simpleserver2.py
hirasaki1985/Oreilly_deepLearning
378b60ccec67dc616669fcd65ad14c7eddae6767
[ "MIT" ]
null
null
null
aiserver/backup/simpleserver2.py
hirasaki1985/Oreilly_deepLearning
378b60ccec67dc616669fcd65ad14c7eddae6767
[ "MIT" ]
null
null
null
aiserver/backup/simpleserver2.py
hirasaki1985/Oreilly_deepLearning
378b60ccec67dc616669fcd65ad14c7eddae6767
[ "MIT" ]
null
null
null
import sys, socket import json import cgi try: from StringIO import StringIO except ImportError: from io import StringIO import numpy as np from http.server import BaseHTTPRequestHandler, HTTPServer from modules.controller import Controller # setting host = '' port = 8000 class MyHandler(BaseHTTPRequestHandler): def do_POST(self): print("simpleserver do_POST exec()") if self.path.endswith('favicon.ico'): return; self.controller = Controller() # request form = self.getRequestData() print(type(form)) # logic #logicResult = "" logicResult = self.controller.webLogic(form) # make result result = self.makeResponseData(logicResult) # send self.sendResponse(result) return def getRequestData(self): # POST されたフォームデータを解析する form = cgi.FieldStorage( fp=self.rfile, headers=self.headers, environ={'REQUEST_METHOD':'POST', 'CONTENT_TYPE':'png', }) print(form) #image = {"test":"requestData"} return form def makeResponseData(self, result): print("### simpleserver makeResponseData exec") #result = {"test":"responseData"} print(result) print(type(result)) return result def sendResponse(self, result): print("### simpleserver sendResponse exec") self.send_response(200) self.send_header('Content-type', 'text/json') self.send_header('Access-Control-Allow-Origin', 'http://deeplearning.local.com') self.end_headers() #self.wfile.flush() self.wfile.write(str(result).encode('UTF-8')) self.wfile.close() return try: server = HTTPServer((host, port), MyHandler) server.serve_forever() except KeyboardInterrupt: print ('^C received, shutting down the web server') server.socket.close()
25.9
89
0.586873
200
2,072
6.035
0.465
0.042254
0.024855
0.044739
0
0
0
0
0
0
0
0.005587
0.30888
2,072
79
90
26.227848
0.837291
0.074807
0
0.075472
0
0
0.145594
0.014778
0
0
0
0
0
1
0.075472
false
0
0.169811
0
0.339623
0.150943
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
6bedb7df070733efc849ec3f70b009e7c5d82ea3
1,214
py
Python
get_machine_id.py
Server-Factory/Parallels-Utils
9b5c724b59832abf0506c5f632b0122573e71cd7
[ "Apache-2.0" ]
1
2021-01-01T23:24:31.000Z
2021-01-01T23:24:31.000Z
get_machine_id.py
Server-Factory/Parallels-Utils
9b5c724b59832abf0506c5f632b0122573e71cd7
[ "Apache-2.0" ]
null
null
null
get_machine_id.py
Server-Factory/Parallels-Utils
9b5c724b59832abf0506c5f632b0122573e71cd7
[ "Apache-2.0" ]
null
null
null
#!/usr/bin/python import subprocess import sys def main(): if len(sys.argv) > 1: machines = dict() image = sys.argv[1] row_home = "Home:" row_id_open = "{" row_id_close = "}" output = subprocess.check_output(['prlctl', 'list', '-a', '-i']) items = str(output).split("ID:") for item in items: if row_home in item: home = "" machine_id = "" rows = item.strip().split('\\n') for row in rows: if row_id_open in row.strip() and row_id_close in row.strip(): machine_id = row.replace(row_id_open, "").replace(row_id_close, "").strip() if row_home in row: home = row.replace(row_home, "").strip() machines[home] = machine_id for machine_id in machines: if machine_id.startswith(image): machine = machines[machine_id] print(machine) sys.exit(0) print("Unknown_ID") sys.exit(1) else: print("No image path provided") sys.exit(1) if __name__ == "__main__": main()
28.904762
99
0.490939
142
1,214
3.971831
0.338028
0.053191
0.047872
0.039007
0
0
0
0
0
0
0
0.006684
0.383855
1,214
41
100
29.609756
0.747326
0.01318
0
0.058824
0
0
0.055973
0
0
0
0
0
0
1
0.029412
false
0
0.058824
0
0.088235
0.088235
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
6bf3184d75c0305e0ed63aa9eb50baca989c89e5
1,813
py
Python
Books/DeepLearningLearningFromTheFounderOfKeras/chapter7/sub7_2_2.py
Tim232/Python-Things
05f0f373a4cf298e70d9668c88a6e3a9d1cd8146
[ "MIT" ]
2
2020-12-05T07:42:55.000Z
2021-01-06T23:23:18.000Z
Books/DeepLearningLearningFromTheFounderOfKeras/chapter7/sub7_2_2.py
Tim232/Python-Things
05f0f373a4cf298e70d9668c88a6e3a9d1cd8146
[ "MIT" ]
null
null
null
Books/DeepLearningLearningFromTheFounderOfKeras/chapter7/sub7_2_2.py
Tim232/Python-Things
05f0f373a4cf298e70d9668c88a6e3a9d1cd8146
[ "MIT" ]
null
null
null
#-*-coding: utf-8-*- #todo p.333 ~ p.340 #todo code 7-7 ~ code 7-9 #todo 7.2.2 텐서보드 소개: 텐서플로의 시각화 프레임워크 import os import keras from keras import layers from keras.datasets import imdb from keras.preprocessing import sequence max_features = 2000 # 특성으로 사용할 단어의 수 max_len = 500 # 사용할 텍스트의 길이 (x_train, y_train), (x_test, y_test) = imdb.load_data(num_words=max_features) x_train = sequence.pad_sequences(sequences=x_train, maxlen=max_len) x_test = sequence.pad_sequences(sequences=x_test, maxlen=max_len) model = keras.models.Sequential() model.add(layers.Embedding(input_dim=max_features, output_dim=128, input_length=max_len, name='embed')) model.add(layers.Conv1D(filters=32, kernel_size=7, activation='relu')) model.add(layers.MaxPooling1D(pool_size=5)) model.add(layers.Conv1D(filters=32, kernel_size=7, activation='relu')) model.add(layers.GlobalAveragePooling1D()) model.add(layers.Dense(1)) model.summary() model.compile(optimizer='rmsprop', loss='binary_crossentropy', metrics=['acc']) # ▣ keras.utils.plot_model # - 모델의 층 그래프를 그려 주는 기능 from keras.utils import plot_model plot_model(model=model, to_file=os.path.join(os.path.dirname(os.path.abspath(__file__)), 'model_graph', 'model_graph.png'), show_shapes=True) callbacks = [ keras.callbacks.TensorBoard( log_dir=os.path.join(os.path.dirname(os.path.abspath(__file__)), 'tensorboard_log'), histogram_freq=1, # 1 에포크마다 활성화 출력의 히스토그램을 기록 embeddings_freq=1 # 1 에포크마다 임베딩 데이터를 기록 ) ] history = model.fit( x=x_train, y=y_train, epochs=1, batch_size=128, validation_split=0.2, callbacks=callbacks )
31.258621
110
0.661335
258
1,813
4.46124
0.472868
0.041703
0.07298
0.050391
0.239791
0.187663
0.187663
0.187663
0.187663
0.187663
0
0.032532
0.220077
1,813
58
111
31.258621
0.780764
0.11914
0
0.088889
0
0
0.052267
0
0
0
0
0.017241
0
1
0
false
0
0.133333
0
0.133333
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
1
0
0
0
0
0
0
0
0
0
0
1
6bf3a11f4eaf5ef256dd41b159fcdf1ed04aaca8
12,158
py
Python
transform/preprocess/student_preprocess.py
WillianFuks/papis19
479c5460218c8f02716dbd5c2b0b9121a4328ab0
[ "Apache-2.0" ]
4
2019-06-24T13:20:22.000Z
2020-11-12T01:19:02.000Z
transform/preprocess/student_preprocess.py
WillianFuks/papis19
479c5460218c8f02716dbd5c2b0b9121a4328ab0
[ "Apache-2.0" ]
7
2019-12-16T21:55:20.000Z
2022-02-10T00:16:54.000Z
transform/preprocess/student_preprocess.py
WillianFuks/papis19
479c5460218c8f02716dbd5c2b0b9121a4328ab0
[ "Apache-2.0" ]
8
2019-06-24T12:27:51.000Z
2021-04-20T18:33:24.000Z
# Copyright 2019 Willian Fuks # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from __future__ import absolute_import, division, print_function import os import sys import argparse import six import tensorflow as tf import tensorflow_transform as tft import apache_beam as beam from apache_beam.options.pipeline_options import PipelineOptions import tensorflow_transform.beam.impl as tft_beam from tensorflow_transform.tf_metadata import dataset_metadata, dataset_schema from tensorflow_transform.beam import impl as beam_impl from tensorflow_transform.coders import example_proto_coder from tensorflow_transform.beam.tft_beam_io import transform_fn_io import ast import six import preprocess.metadata as metadata import tempfile if not six.PY2: sys.exit("ERROR: Must use Python2.7") def build_bq_query(filename, project_id, init_date, end_date): query = open(filename).read().format(project_id=project_id, init_date=init_date, end_date=end_date) return query def build_pipeline_options(args): """ Apache Beam Pipelines must receive a set of options for setting how the engine should run. Args ---- args: argparse.Namespace Returns ------- pipeline_options: defines how to run beam job. 
""" options = {} options['runner'] = args.runner if args.temp_location: options['temp_location'] = args.temp_location if args.project: options['project'] = args.project if args.staging_location: options['staging_location'] = args.staging_location if args.job_name: options['job_name'] = args.job_name if args.max_num_workers: options['max_num_workers'] = args.max_num_workers if args.machine_type: options['machine_type'] = args.machine_type options.update({'save_main_session': True}) options.update({'setup_file': './setup.py'}) pipeline_options = PipelineOptions(**options) return pipeline_options class FlattenInteractionsFn(beam.DoFn): def process(self, element): """ flattens table """ for hit in element[1]: yield {'customer_id': element[0], 'sku': hit['sku'], 'action': hit['action']} def preprocess_fn(dictrow): return { 'customer_id': tft.string_to_int(dictrow['customer_id'], vocab_filename='customers_mapping'), 'sku': tft.string_to_int(dictrow['sku'], vocab_filename='skus_mapping'), 'action': dictrow['action'] } def aggregate_customers_sessions(sessions): """ Receives as input what products customers interacted with and returns their final aggregation. Args ---- sessions: list of list of dicts. List where each element is a list of dict of type: [{'action': '', 'sku': ''}] Returns ------- results: list of dicts Each resulting dict is aggregated on the sku and action level (repeating clauses are filtered out). 
""" result = [] for session in sessions: for hit in session: result.append(hit) return [dict(t) for t in {tuple(d.items()) for d in result}] def build_final_results(row): """ row = (customer_id, [{sku:, action}, {sku:, action}]) """ skus_list = [e['sku'] for e in row[1]] actions_list = [e['action'] for e in row[1]] return { 'customer_id': row[0], 'skus_list': skus_list, 'actions_list': actions_list } def build_test_results(row): """ ('customer2', {'test': [{'skus_list': [1, 1], 'actions_list': ['AddedToBasket', 'Browsed'], 'customer_id': 'customer2'}], 'train': [{'skus_list': [1, 1], 'actions_list': ['AddedToBasket', 'Browsed'], 'customer_id': 'customer2'}]}) """ result = {} result['customer_id'] = row[0] inner_dicts = row[1] # customers that had empty interactions after filtering out test dataset. if not inner_dicts['test']: return # customers that were not present in training data. if not inner_dicts['train']: return test_dict = inner_dicts['test'][0] result['skus_list'] = test_dict['skus_list'] result['actions_list'] = test_dict['actions_list'] train_dict = inner_dicts['train'][0] result['trained_skus_list'] = train_dict['skus_list'] result['trained_actions_list'] = train_dict['actions_list'] return result def read_input_data(args, pipeline, flag): """ Reads train and test pipelines. args: input args. pipeline: input pipeline where all transformations will take place. flag: either train or test. 
""" if args.input_sql: train_query = build_bq_query(args.input_sql, args.project, args.train_init_date, args.train_end_date) test_query = build_bq_query(args.input_sql, args.project, args.test_init_date, args.test_end_date) data = ( pipeline | '{} read'.format(flag) >> beam.io.Read(beam.io.BigQuerySource( query=train_query if flag == 'train' else test_query, use_standard_sql=True) ) ) else: data = ( pipeline | '{} read'.format(flag) >> beam.io.ReadFromText( args.input_train_data if flag == 'train' else args.input_test_data ) | '{} to json'.format(flag) >> beam.Map(lambda x: ast.literal_eval(x)) ) data = ( data | '{} filter empty hits'.format(flag) >> beam.Filter(lambda x: x['hits']) | '{} prepare customer grouping'.format(flag) >> beam.Map(lambda x: ( x['customer_id'], [{'action': e['action'], 'sku': e['productSku']} for e in x['hits'] if e['action'] in ['Browsed', 'AddedToBasket']]) ) | '{} group customers'.format(flag) >> beam.GroupByKey() | '{} aggregate customers sessions'.format(flag) >> beam.Map(lambda x: ( x[0], aggregate_customers_sessions(x[1])) ) | '{} flatten'.format(flag) >> beam.ParDo(FlattenInteractionsFn()) ) return data def write_total_distinct_keys_to_file(data, filename, key): """ Counts how many distinct items of "key" is present in data. Key here is either sku or customer_id. Args ---- data: pcollection. filename: where to write results to. key: on which value to count for. """ _ = ( data | 'get {}'.format(key) >> beam.Map(lambda x: x[key]) | 'group {}'.format(key) >> beam.RemoveDuplicates() | 'count {}'.format(key) >> beam.combiners.Count.Globally() | 'write {}'.format(key) >> beam.io.WriteToText(filename) ) def write_tfrecords(data, schema, filename, name): """ Converts input pcollection into a file of tfrecords following schema. Args ---- data: pcollection. schema: dataset_schema from tensorflow transform. name: str to identify operations. 
""" _ = ( data | '{} tfrecords write'.format(name) >> beam.io.tfrecordio.WriteToTFRecord( filename, coder=example_proto_coder.ExampleProtoCoder(dataset_schema.Schema(schema))) ) def aggregate_transformed_data(transformed_data, flag): """ One of the final steps into our pipelining transformations where data that has been transformed (in our case, skus went from string names to integer indices) is aggregated on the user level. transformed_data: pcollection. flag: identifies train or test Returns ------- transformed_data aggregated on user level. """ if flag == 'test': transformed_data = ( transformed_data | 'test filter out invalid skus' >> beam.Filter(lambda x: x['sku'] != -1) ) transformed_agg_data = ( transformed_data | '{} prepare grouping'.format(flag) >> beam.Map(lambda x: ( x['customer_id'], {'sku': x['sku'], 'action': x['action']}) ) | '{} transformed agg group'.format(flag) >> beam.GroupByKey() | '{} final results'.format(flag) >> beam.Map(lambda x: build_final_results(x)) ) return transformed_agg_data def aggregate_final_test_data(train_data, test_data): """ Joins train dataset with test so that only customers that we can make recommendations are present in final dataset. Remember that, in order to make them, we need to know a priori what customers interacted with. That's why we join the train data so we know customers preferences when we need to interact with them with our system. """ data = ( { 'train': train_data | 'train prepare customer key' >> beam.Map(lambda x: ( x['customer_id'], x)), 'test': test_data | 'test prepare customer key' >> beam.Map(lambda x: ( x['customer_id'], x)) } | 'cogroup' >> beam.CoGroupByKey() | 'build final rows' >> beam.Map(build_test_results) | 'filter customers out of test' >> beam.Filter(lambda x: x) ) return data def run_tft_pipeline(args): """ This is where all the data we have available in our database is processed and transformed into Tensorflow tfrecords for later training and testing. 
The code runs in distributed manner automatically in the engine choosen by the `runner` argument in input. """ pipeline_options = build_pipeline_options(args) temp_tft_folder = ( tempfile.mkdtemp(dir='/tmp/') if not args.tft_temp else args.tft_temp ) tft_transform_folder = ( tempfile.mkdtemp(dir='/tmp/') if not args.tft_transform else args.tft_transform ) with beam.Pipeline(options=pipeline_options) as pipeline: with beam_impl.Context(temp_dir=temp_tft_folder): train_data = read_input_data(args, pipeline, 'train') write_total_distinct_keys_to_file(train_data, args.nitems_filename, 'sku') train_dataset = (train_data, metadata.RAW_DATA_METADATA) (train_data, transformed_train_metadata), transform_fn = ( train_dataset | beam_impl.AnalyzeAndTransformDataset(preprocess_fn) ) _ = ( transform_fn | 'WriteTransformFn' >> transform_fn_io.WriteTransformFn(tft_transform_folder) ) train_data = aggregate_transformed_data( train_data, 'train' ) write_tfrecords(train_data, metadata.OUTPUT_TRAIN_SCHEMA, args.output_train_filename, 'output train') test_data = read_input_data(args, pipeline, 'test') test_dataset = (test_data, metadata.RAW_DATA_METADATA) (test_data, _) = ( (test_dataset, transform_fn) | beam_impl.TransformDataset()) test_data = aggregate_transformed_data( test_data, 'test' ) test_data = aggregate_final_test_data( train_data, test_data ) write_tfrecords(test_data, metadata.OUTPUT_TEST_SCHEMA, args.output_test_filename, 'output test') def main(): args = parse_args() run_tft_pipeline(args) if __name__ == '__main__': main()
31.661458
89
0.618358
1,450
12,158
4.991034
0.230345
0.017963
0.02128
0.015476
0.148404
0.11151
0.086638
0.07434
0.063562
0.052784
0
0.003408
0.276032
12,158
383
90
31.744125
0.818791
0.246833
0
0.122642
0
0
0.113934
0
0
0
0
0
0
1
0.066038
false
0
0.084906
0.004717
0.207547
0.004717
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
6bf3c5d43692793eab438e1d78d5c5d9f2609b80
114
py
Python
material/assignment1/csv_example.py
mpambasange/MachineLearning
8b813345264513a57934317b01e1311628dc5b01
[ "MIT" ]
16
2016-09-01T08:50:59.000Z
2022-02-15T20:56:07.000Z
material/assignment1/csv_example.py
olehermanse/INF3490-PythonAI
8b813345264513a57934317b01e1311628dc5b01
[ "MIT" ]
2
2016-10-20T09:36:19.000Z
2017-08-29T00:28:54.000Z
material/assignment1/csv_example.py
olehermanse/INF3490-PythonAI
8b813345264513a57934317b01e1311628dc5b01
[ "MIT" ]
15
2016-10-31T12:30:37.000Z
2021-03-15T12:12:50.000Z
import csv with open("european_cities.csv", "r") as f: data = list(csv.reader(f, delimiter=';')) print(data)
19
45
0.657895
18
114
4.111111
0.777778
0
0
0
0
0
0
0
0
0
0
0
0.149123
114
5
46
22.8
0.762887
0
0
0
0
0
0.184211
0
0
0
0
0
0
1
0
false
0
0.25
0
0.25
0.25
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
2
6bf4ad195f97897af7bd9a29940a148f1e9542dc
271
py
Python
spacy_lookup/about.py
BasileDjigg/spacy-lookup
16929120d03fa1215f574286072377f10b7e089b
[ "MIT" ]
null
null
null
spacy_lookup/about.py
BasileDjigg/spacy-lookup
16929120d03fa1215f574286072377f10b7e089b
[ "MIT" ]
null
null
null
spacy_lookup/about.py
BasileDjigg/spacy-lookup
16929120d03fa1215f574286072377f10b7e089b
[ "MIT" ]
null
null
null
__title__ = 'spacy-lookup' __version__ = '0.0.1' __summary__ = 'spaCy pipeline component for Named Entity Recognition based on dictionaries.' __url__ = 'https://github.com/mpuig/spacy-lookup' __author__ = 'Marc Puig' __email__ = 'marc.puig@gmail.com' __license__ = 'MIT'
33.875
92
0.756458
35
271
5.057143
0.8
0.124294
0
0
0
0
0
0
0
0
0
0.0125
0.114391
271
7
93
38.714286
0.725
0
0
0
0
0
0.594096
0
0
0
0
0
0
1
0
false
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
6bf66439de1fb3ae4352db93cc562648f32d838f
751
py
Python
leetcode/p690.py
mythnc/lab
9f69482a063e3cfce2ce8832c2ef1425658c31b9
[ "MIT" ]
null
null
null
leetcode/p690.py
mythnc/lab
9f69482a063e3cfce2ce8832c2ef1425658c31b9
[ "MIT" ]
null
null
null
leetcode/p690.py
mythnc/lab
9f69482a063e3cfce2ce8832c2ef1425658c31b9
[ "MIT" ]
null
null
null
# https://leetcode.com/problems/employee-importance/ """ # Definition for Employee. class Employee: def __init__(self, id: int, importance: int, subordinates: List[int]): self.id = id self.importance = importance self.subordinates = subordinates """ class Solution: def getImportance(self, employees: List['Employee'], id: int) -> int: table = {} for e in employees: table[e.id] = e result = table[id].importance q = [] q.append(table[id].subordinates) while len(q) > 0: ids = q.pop() for id_ in ids: result += table[id_].importance q.append(table[id_].subordinates) return result
28.884615
74
0.559254
82
751
5.036585
0.378049
0.067797
0.062954
0.11138
0.239709
0
0
0
0
0
0
0.001961
0.320905
751
25
75
30.04
0.807843
0.356858
0
0
0
0
0.016842
0
0
0
0
0
0
1
0.071429
false
0
0.214286
0
0.428571
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
6bf727625ea059c4f0a7f91f758f96269dcdd254
3,916
py
Python
python/ranking/examples/fess/train_model.py
codelibs/logana
48b475e9fd5224821bfba7d41e755d8d64806651
[ "Apache-2.0" ]
2
2020-09-30T12:42:28.000Z
2020-11-04T01:34:20.000Z
python/ranking/examples/fess/train_model.py
codelibs/logana
48b475e9fd5224821bfba7d41e755d8d64806651
[ "Apache-2.0" ]
null
null
null
python/ranking/examples/fess/train_model.py
codelibs/logana
48b475e9fd5224821bfba7d41e755d8d64806651
[ "Apache-2.0" ]
null
null
null
import dataclasses import datetime import gzip import json import logging import os from typing import Any, Dict import numpy as np import tensorflow as tf from absl import flags from loganary.ranking.common import NumpyJsonEncoder, setup_logging, setup_seed from loganary.ranking.model import ( RankingModel, RankingModelConfig, RankingModelEmbeddingField, RankingModelField, ) flags.DEFINE_string("train_path", None, "Path of .tfrecords file for training.") flags.DEFINE_string("eval_path", None, "Path of .tfrecords file for evaluation.") flags.DEFINE_string("keyword_path", None, "Path of vocabulary file for keyword field.") flags.DEFINE_string("title_path", None, "Path of vocabulary file for title field.") flags.DEFINE_string("model_path", None, "Path of trained model files.") flags.DEFINE_integer("num_train_steps", 15000, "The number of train steps.") flags.DEFINE_list("hidden_layer_dims", ["64", "32", "16"], "Sizes for hidden layers.") flags.DEFINE_integer( "keyword_embedding_dim", 20, "Dimention of an embedding for keyword field." ) flags.DEFINE_integer( "title_embedding_dim", 20, "Dimention of an embedding for title field." 
) flags.DEFINE_integer("batch_size", 32, "Batch size.") flags.DEFINE_integer("list_size", 100, "List size.") flags.DEFINE_float("learning_rate", 0.05, "Learning rate.") flags.DEFINE_integer("group_size", 10, "Group size.") flags.DEFINE_float("dropout_rate", 0.8, "Dropout rate.") flags.DEFINE_bool("verbose", False, "Set a logging level as debug.") FLAGS = flags.FLAGS logger = logging.getLogger(__name__) def main(_) -> None: setup_seed() setup_logging(FLAGS.verbose) now_str = datetime.datetime.now().strftime("%Y%m%d%H%M") model_path: str = f"{FLAGS.model_path}/{now_str}" config: RankingModelConfig = RankingModelConfig( model_path=model_path, train_path=FLAGS.train_path, eval_path=FLAGS.eval_path, context_fields=[ RankingModelEmbeddingField( name="keyword", vocabulary_file=FLAGS.keyword_path, dimension=FLAGS.keyword_embedding_dim, ), ], example_fields=[ RankingModelEmbeddingField( name="title", vocabulary_file=FLAGS.title_path, dimension=FLAGS.title_embedding_dim, ), ], label_field=RankingModelField( name="relevance", column_type="numeric", default_value=-1, ), num_train_steps=FLAGS.num_train_steps, hidden_layer_dims=FLAGS.hidden_layer_dims, batch_size=FLAGS.batch_size, list_size=FLAGS.list_size, learning_rate=FLAGS.learning_rate, group_size=FLAGS.group_size, dropout_rate=FLAGS.dropout_rate, ) logger.info(f"Config: {config}") model: RankingModel = RankingModel(config) result = model.train() logger.info(f"Result: {result}") export_model_path: str = model.save_model() saved_model_path: str = f"{model_path}/saved_model" os.rename(export_model_path, saved_model_path) logger.info(f"Output Model Path: {saved_model_path}") with gzip.open(f"{model_path}/result.json.gz", mode="wt", encoding="utf-8") as f: config_dict: Dict[str, Any] = dataclasses.asdict(config) del config_dict["eval_metric"] f.write( json.dumps( { "config": config_dict, "result": result, }, ensure_ascii=False, cls=NumpyJsonEncoder, ) ) if __name__ == "__main__": flags.mark_flag_as_required("train_path") 
flags.mark_flag_as_required("eval_path") flags.mark_flag_as_required("keyword_path") flags.mark_flag_as_required("title_path") flags.mark_flag_as_required("model_path") tf.compat.v1.app.run()
34.350877
87
0.671348
484
3,916
5.163223
0.293388
0.066026
0.043217
0.028011
0.186074
0.123249
0.080032
0.031212
0
0
0
0.009785
0.217058
3,916
113
88
34.654867
0.805284
0
0
0.088235
0
0
0.223442
0.025536
0
0
0
0
0
1
0.009804
false
0
0.117647
0
0.127451
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
6bffc6fb525922ad40eb7287ee43188f6f55ec06
615
py
Python
shortner/migrations/0005_auto_20200407_1237.py
OakNinja/shortner
8b051f4d74dc70bccd62e983ce300f8115cc72c8
[ "BSD-3-Clause" ]
1
2020-04-29T15:02:14.000Z
2020-04-29T15:02:14.000Z
shortner/migrations/0005_auto_20200407_1237.py
OakNinja/shortner
8b051f4d74dc70bccd62e983ce300f8115cc72c8
[ "BSD-3-Clause" ]
null
null
null
shortner/migrations/0005_auto_20200407_1237.py
OakNinja/shortner
8b051f4d74dc70bccd62e983ce300f8115cc72c8
[ "BSD-3-Clause" ]
1
2020-04-29T07:05:00.000Z
2020-04-29T07:05:00.000Z
# Generated by Django 3.0.5 on 2020-04-07 12:37 from django.conf import settings from django.db import migrations, models import django.db.models.deletion class Migration(migrations.Migration): dependencies = [ migrations.swappable_dependency(settings.AUTH_USER_MODEL), ('shortner', '0004_auto_20200407_0905'), ] operations = [ migrations.AlterField( model_name='entry', name='creator', field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, related_name='entries', to=settings.AUTH_USER_MODEL), ), ]
27.954545
145
0.682927
73
615
5.60274
0.643836
0.05868
0.06846
0.107579
0
0
0
0
0
0
0
0.063786
0.209756
615
21
146
29.285714
0.777778
0.073171
0
0
1
0
0.088028
0.040493
0
0
0
0
0
1
0
false
0
0.2
0
0.4
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
d40116f3963560e5bc86c79d514d9d85b3020138
22,630
py
Python
build-fortress.py
DFIRmadness/infosec-fortress
cc20c5c5ecf5194fdd270e7accdf927b71ed2952
[ "MIT" ]
33
2021-06-22T01:42:06.000Z
2022-03-27T14:41:44.000Z
build-fortress.py
ED-209-MK7/infosec-fortress
cc20c5c5ecf5194fdd270e7accdf927b71ed2952
[ "MIT" ]
1
2021-06-24T09:10:03.000Z
2021-06-28T13:25:59.000Z
build-fortress.py
ED-209-MK7/infosec-fortress
cc20c5c5ecf5194fdd270e7accdf927b71ed2952
[ "MIT" ]
5
2021-06-23T08:04:22.000Z
2022-03-27T14:41:45.000Z
#!/bin/python3 ''' Title: build-fortress.py Purpose: Build the infosec-fortress Author: James Smith (DFIRmadness) Contributors: Check the github page. Notes: Beta Version: 0.5 Usage: ./build-fortress.py Functions: + apt update + dist upgrade + install base packages + create /opt/infosec-fortress + start log + install starter packages (min. pkgs to let script run) + install the REMnux Distribution + install SIFT + install base security packages + install Metasploit Framework + install wordlists + install and update exploitdb (searchsploit) + log2Timeline + elasticsearch containers + powershell Core (turns out its part of REMnux) + install impacket + install enum4linux + enum4linux https://github.com/cddmp/enum4linux-ng + display message about updating ZAP and Burp after reboot ''' # Globals PKG_MGR = 'apt' FORTRESS_DIR = '/opt/infosec-fortress/' BUILD_LOG = 'build-fortress.log' LOG = FORTRESS_DIR + BUILD_LOG # Minimal Package list to get started starterPackagesList = [ 'net-tools', 'curl', 'git' ] # List of packages to have APT install. Change if you want. You break it you buy it. aptPackageList = [ 'tmux', 'torbrowser-launcher', 'nmap', 'smbclient', 'locate', 'radare2-cutter', 'snort', 'dirb', 'gobuster', 'medusa', 'masscan', 'whois', 'libjenkins-htmlunit-core-js-java', 'autopsy', 'hashcat', 'kismet', 'kismet-plugins', 'airgraph-ng', 'wifite', 'dnsenum', 'dnsmap', 'ettercap-common', 'ettercap-graphical', 'netdiscover', 'sqsh', 'install nfs-common' ] # List of packages to have SNAP install. Change if you want. You break it you buy it. snapPackageList = [ 'chromium', 'sqlmap', 'john-the-ripper' ] # Snaps that need --classic # Avoid these. It's better to scrape a git for the latest and install. Zaproxy is a great example. 
snapClassicPackageList =[ #'zaproxy' ] ######################################################## # Colors GREEN = '\033[32m' RED = '\033[31m' YELLOW = '\033[33m' NOCOLOR = '\033[m' from datetime import datetime from getpass import getpass from hashlib import sha1 from os import geteuid,path,makedirs from os.path import expanduser from subprocess import run from urllib.request import urlopen from requests import get from re import search # Check that the user is root def checkIfRoot(): if geteuid() != 0: print(RED + '[!] You need sudo/root permissions to run this... exiting.' + NOCOLOR) exit(0) # Check for internet connection def checkForInternet(): try: check = urlopen('https://www.google.com', timeout=3.0) print(GREEN +'[+] Internet connection looks good!' + NOCOLOR) except: print(RED + '[-] Internet connection looks down. You will need internet for this to run (most likely). Fix and try again.' + NOCOLOR) exit(1) def initNotice(): print('[!] This script requires user input once or twice.\n\ [!] It is not completely "Set and Forget".') nullInput = input('Hit Enter.') # Get starting Disk Room def freeSpaceStart(): # Needs Regex Impovement with RE Search. Non Gig sized systems will break this. global FREE_SPACE_START_INT freeSpaceStart = run(['df -h /'],shell=True,capture_output=True).stdout.decode().split('G')[2].strip() writeToLog('[i] Gigs of Free Space on / at the Start of the build: ' + freeSpaceStart + 'G') FREE_SPACE_START_INT = float(freeSpaceStart) return(FREE_SPACE_START_INT) def freeSpaceEnd(): # Needs Regex Impovement with RE Search. Non Gig sized systems will break this. 
freeSpaceEnd = run(['df -h /'],shell=True,capture_output=True).stdout.decode().split('G')[2].strip() writeToLog('[i] Gigs of Free Space on / at the Start of the build: ' + freeSpaceEnd + 'G') freeSpaceEndInt = float(freeSpaceEnd) spaceUsed = FREE_SPACE_START_INT - freeSpaceEndInt writeToLog('[i] Gigs of Space used for InfoSec-Fortress Buildout: ' + str(spaceUsed) + 'G') # Check/Inform about for unattended upgrade def informAboutUnattendedUpgade(): print('[!][!][!][!][!][!][!][!]\nUnattended Upgades firing while this script is running will break it.\ \nKill or complete the upgrades if you recently booted or rebooted. Then continue.\ \nIT MAY REQUIRE A REBOOT! If so, kill this script. Reboot. Run the updates. Run this script again.') nullInput = input('Hit any key to continue.') def createFortressDir(FORTRESS_DIR): print('[*] Creating InfoSec Fortress Dir at:',FORTRESS_DIR) try: makedirs(FORTRESS_DIR, exist_ok=True) except FileExistsError: print('[i] ' + FORTRESS_DIR + ' already exists. Continuing.') except Exception as e: print('[-] Error creating the ' + FORTRESS_DIR + '. Error ' + str(e)) def startLogFile(): try: now = datetime.utcnow().strftime("%Y-%m-%dT%H:%M:%SZ") if not path.isfile(LOG): with open(LOG, 'a') as log: log.write(now + " - Log Started.\n") return('Succeeded') else: with open(LOG, 'a') as log: log.write(now + " - Log Started. Strange, the log file appears to exist already? 
Continuing anyways.\n") return('Succeeded') except: return('Failed') # For now just simply exit here exit(1) def writeToLog(stringToLog): now = datetime.utcnow().strftime("%Y-%m-%dT%H:%M:%SZ") with open(LOG, 'a') as log: log.write(now + " - " + stringToLog + '\n') if '[+]' in stringToLog: print('\n' + GREEN + stringToLog + NOCOLOR + '\n----------------------------------------------------------\n') elif '[-]' in stringToLog: print('\n' + RED + stringToLog + NOCOLOR + '\n----------------------------------------------------------\n') elif '[i]' in stringToLog + NOCOLOR: print('\n' + YELLOW + stringToLog + NOCOLOR + '\n----------------------------------------------------------\n') else: print('\n' + stringToLog + '\n----------------------------------------------------------\n') def buildStarterPackageList(): listOfPackagesCommand = '' for package in starterPackagesList: listOfPackagesCommand = (listOfPackagesCommand + ' ' + package).strip() return(listOfPackagesCommand) def buildAptPackageList(): listOfPackagesCommand = '' for package in aptPackageList: listOfPackagesCommand = (listOfPackagesCommand + ' ' + package).strip() return(listOfPackagesCommand) def buildSnapPackageList(): listOfPackagesCommand = '' for package in snapPackageList: listOfPackagesCommand = (listOfPackagesCommand + ' ' + package).strip() return(listOfPackagesCommand) def buildSnapClassicPackagesList(): listOfPackagesCommand = '' for package in snapClassicPackageList: listOfPackagesCommand = (listOfPackagesCommand + ' ' + package).strip() return(listOfPackagesCommand) # apt update def updateOS(): #writeToLog('[+] Beginning OS updates...') try: run(['/usr/bin/apt','update']) except Exception as e: writeToLog('[-] APT Updating failed. Fix and try again. Error:',str(e)) exit(1) try: run(['/usr/bin/apt','upgrade','-y']) except Exception as e: writeToLog('[-] APT Updating failed. Fix and try again. 
Error:',str(e)) exit(1) try: run(['/usr/bin/apt','dist-upgrade','-y']) except Exception as e: writeToLog('[-] APT Updating failed. Fix and try again. Error:',str(e)) exit(1) # Minimal packages def installStarterPackages(): starterPackages = buildStarterPackageList() writeToLog('[*] Attempting installation of the following starter packages: ' + starterPackages) try: run(['/usr/bin/apt install -y ' + starterPackages],shell=True) writeToLog('[+] Starter Packages installed.') except Exception as e: writeToLog('[-] Starter Packages installation failed:',str(e)) # the REMnux Distribution def installREMnux(): writeToLog('[+] Installing REMnux. This will take quite awhile. Verify the hash from the site later.') try: run(['/usr/bin/wget https://REMnux.org/remnux-cli'],shell=True) run(['/usr/bin/mv remnux-cli remnux'],shell=True) run(['/usr/bin/chmod +x remnux'],shell=True) run(['/usr/bin/mv remnux /usr/local/bin'],shell=True) run(['/usr/local/bin/remnux install --mode=addon'],shell=True) writeToLog('[+] REMnux Added On (downloaded and ran).') except Exception as e: writeToLog('[-] Something went wrong during the REMnux install. Error: ' + str(e)) # Install SIFT def installSIFTPackages(): writeToLog('[*] Finding latest SIFT Release.') try: latestLinkPage = get('https://github.com/sans-dfir/sift-cli/releases/latest').text.splitlines() latestSIFTBinLine = [match for match in latestLinkPage if "sift-cli-linux" in match][0].split('"')[1] latestSIFTBin = 'https://github.com/' + latestSIFTBinLine #latestSIFTBin = search('https:.*sift-cli-linux',latestSIFTBinLine)[0] writeToLog('[+] latest SIFT BIN: ' + latestSIFTBin) except Exception as e: writeToLog('[-] latest SIFT Bin not found. 
Error: ' + str(e)) return writeToLog('[*] Installing SIFT Packages.') try: run(['/usr/bin/curl -Lo /usr/local/bin/sift ' + latestSIFTBin],shell=True) run(['/usr/bin/chmod +x /usr/local/bin/sift'],shell=True) run(['/usr/local/bin/sift install --mode=packages-only'],shell=True) writeToLog('[+] SIFT Packages installed (downloaded and ran).') except Exception as e: writeToLog('[-] Installation of SIFT Packages had an error. Error: '+str(e)) # install base packages def installAPTandSNAPPackages(): print('[i] If Wireshark asks - say YES non-super users can capture packets.\n\n\ [i] When snort asks about a monitoring interface enter lo.\n\ [i] Setting the interface to "lo" (no quotes) sets it for local use.\n\ [i] Set any private network for the "home" network.\n\n\ [i] KISMET - Say YES to the sticky bit. Add your username to the Kismet Goup at the prompt.') nullInput = input('Hit Enter.') aptPackages = buildAptPackageList() snapPackages = buildSnapPackageList() snapClassicPackages = buildSnapClassicPackagesList() writeToLog('[*] Attempting installation of the following ATP packages: ' + aptPackages) try: run(['/usr/bin/apt install -y ' + aptPackages],shell=True) writeToLog('[+] APT Packages installed.') except Exception as e: writeToLog('[-] APT Packages installation failed:',str(e)) writeToLog('[*] Attempting installation of the following Snap Packages: ' + snapPackages) try: run(['/usr/bin/snap install ' + snapPackages],shell=True) writeToLog('[+] Snap Packages installed.') except Exception as e: writeToLog('[-] Snap packages installation failed:',str(e)) if len(snapClassicPackages) == 0: writeToLog('[*] No snap classics to install.') return writeToLog('[*] Attempting installation of the following Snap Classic Packages: ' + snapClassicPackages) for package in snapClassicPackageList: try: run(['/usr/bin/snap install --classic ' + package],shell=True) writeToLog('[+] Snap Classic ' + package + ' installed.') except Exception as e: writeToLog('[-] Snap packages ' + 
package + ' failed:',str(e)) # Swap Netcats # Change out netcat-bsd for netcat-traditional def swapNetcat(): writeToLog('[*] Attempting to trade out netcat-bsd for netcat-traditional') try: run(['/usr/bin/apt purge -y netcat-openbsd'],shell=True) run(['/usr/bin/apt install -y netcat-traditional'],shell=True) writeToLog('[+] netcat-traditional installed.') except Exception as e: writeToLog('[-] Installation of netcat-traditional failed. Error: '+str(e)) # Metasploit Framework def installMSF(): writeToLog('[+] Installing Metasploit Framework.') try: run(['/usr/bin/curl https://raw.githubusercontent.com/rapid7/metasploit-omnibus/master/config/templates/metasploit-framework-wrappers/msfupdate.erb > msfinstall'],shell=True) run(['/usr/bin/chmod 755 msfinstall'],shell=True) run(['./msfinstall'],shell=True) writeToLog('[+] MSF Installed Successfully.') except Exception as e: writeToLog('[-] Something went wrong during the MSF install. Error: ' + str(e)) # Install wordlists # Git clone the default wordlists # Add Rockyou2021 # Add fuzzing list for burp/SQLI (xplatform.txt) def installWordlists(): # Error handling using git in this way (with run) sucks. writeToLog('[*] Installing Wordlists to /usr/share/wordlists') makedirs('/usr/share/wordlists/', exist_ok=True) try: run(['/usr/bin/git clone https://github.com/3ndG4me/KaliLists.git /usr/share/wordlists/'],shell=True) run(['/usr/bin/rm /usr/share/wordlists/README.md'],shell=True) run(['/usr/bin/gunzip /usr/share/wordlists/rockyou.txt.gz'],shell=True) writeToLog('[+] Kali default wordlists added and unpacked.') except Exception as e: writeToLog('[-] There was an error installing Kali default wordlists. 
Error: ' + str(e)) try: run(['/usr/bin/wget https://raw.githubusercontent.com/fuzzdb-project/fuzzdb/master/attack/sql-injection/detect/xplatform.txt \ -O /usr/share/wordlists/xplatform.txt'],shell=True) writeToLog('[+] Xplatform.txt SQLI Validation list added.') except Exception as e: writeToLog('[-] There was an error adding xplatform.txt. Error: ' + str(e)) #Install exploit-db def installExploitDb(): writeToLog('[*] Installing ExploitDB.') try: run(['/usr/bin/git clone https://github.com/offensive-security/exploitdb.git /opt/exploitdb'],shell=True) run(['/usr/bin/ln -sf /opt/exploitdb/searchsploit /usr/local/bin/searchsploit'],shell=True) writeToLog('[+] Exploit DB Added.') except Exception as e: writeToLog('[-] There was an error installing ExploitDB. Error: ' + str(e)) try: writeToLog('[*] Updating ExploitDB...') run(['/usr/local/bin/searchsploit -u'],shell=True) writeToLog('[+] Exploit DB Updated.') except Exception as e: writeToLog('[-] There was an error updating ExploitDB. Error: ' + str(e)) # elasticsearch containers? # powershell Core # REMnux already installs it. #def installPosh(): # writeToLog('[*] Installing Powershell.') # try: # run(['/usr/bin/apt-get update\ # && /usr/bin/apt-get install -y wget apt-transport-https software-properties-common\ # && /usr/bin/wget -q https://packages.microsoft.com/config/ubuntu/20.04/packages-microsoft-prod.deb\ # && /usr/bin/dpkg -i packages-microsoft-prod.deb\ # && /usr/bin/apt-get update\ # && /usr/bin/add-apt-repository universe\ # && /usr/bin/apt-get install -y powershell'],shell=True) # writeToLog('[+] Powershell installed.') # except Exception as e: # writeToLog('[-] There was an error installing Powershell. 
Error: ' + str(e)) # Install Impacket def installImpacket(): writeToLog('[*] Installing Impacket.') try: run(['/usr/bin/git clone https://github.com/SecureAuthCorp/impacket.git /opt/impacket'],shell=True) run(['/usr/bin/python3 -m pip install /opt/impacket/.'],shell=True) # It seems that it takes running this twice to get it to complete run(['/usr/bin/python3 -m pip install /opt/impacket/.'],shell=True) writeToLog('[+] Impacket Installed.') except Exception as e: writeToLog('[-] There was an error installing Impacket. Error: ' + str(e)) # enum4Linux def installEnum(): writeToLog('[*] Installing Enum4Linux.') try: run(['/usr/bin/git clone https://github.com/CiscoCXSecurity/enum4linux.git /opt/enum4linux'],shell=True) run(['/usr/bin/ln -sf /opt/enum4linux/enum4linux.pl /usr/local/bin/enum4linux.pl'],shell=True) writeToLog('[+] Enum4Linux Installed.') except Exception as e: writeToLog('[-] There was an error installing Enum4Linux. Error: ' + str(e)) # enum4linux def installEnumNG(): writeToLog('[*] Installing Enum4Linux-ng.') try: run(['/usr/bin/git clone https://github.com/cddmp/enum4linux-ng /opt/enum4linux-ng'],shell=True) run(['/usr/bin/ln -sf /opt/enum4linux-ng/enum4linux-ng.py /usr/local/bin/enum4linux-ng.py'],shell=True) writeToLog('[+] Enum4Linux-ng Installed.') except Exception as e: writeToLog('[-] There was an error installing Enum4Linux-ng. Error: ' + str(e)) # Install WebShells def installWebShells(): writeToLog('[*] Installing Kali\'s Webshells') try: run(['/usr/bin/git clone https://gitlab.com/kalilinux/packages/webshells.git /usr/share/webshells'],shell=True) writeToLog('[+] Kali\'s WebShells Cloned to /usr/share/webshells') except Exception as e: writeToLog('[-] There was an error installing Enum4Linux. 
Error: ' + str(e)) # Install Windows Resources def installWindowsResources(): writeToLog('[*] Installing Kali\'s Windows Resources') try: run(['/usr/bin/git clone https://gitlab.com/kalilinux/packages/windows-binaries.git /usr/share/windows-resources'],shell=True) writeToLog('[+] Kali\'s Windows Resources Cloned to /usr/share/webshells') except Exception as e: writeToLog('[-] There was an error installing Enum4Linux. Error: ' + str(e)) # Install Bloodhound def installBloodhound(): writeToLog('[*] Finding latest Blood Hound Release.') try: latestLinkPage = get('https://github.com/BloodHoundAD/BloodHound/releases/latest').text.splitlines() latestBloodHoundZip = [match for match in latestLinkPage if "BloodHound-linux-x64.zip" in match][0].split('"')[1] writeToLog('[+] latest Blood Hound Zip at: ' + latestBloodHoundZip) except Exception as e: writeToLog('[-] latest Blood Hound Zip not found. Error: ' + str(e)) return writeToLog('[*] Installing Bloodhound...') try: run(['/usr/bin/curl -Lo /tmp/bloodhound.zip https://github.com' + latestBloodHoundZip],shell=True) run(['/usr/bin/unzip -o /tmp/bloodhound.zip -d /opt/'],shell=True) except Exception as e: writeToLog('[-] Bloodhound not installed. Error: ' + str(e)) # Find and install latest Zaproxy def installZaproxy(): writeToLog('[*] Finding latest Zaproxy Release.') try: latestLinkPage = get('https://github.com/zaproxy/zaproxy/releases/latest').text.splitlines() latestZapDeb = [match for match in latestLinkPage if "_all.deb" in match][0].split('"')[1] writeToLog('[+] latest Zaproxy Zip at: ' + latestZapDeb) except Exception as e: writeToLog('[-] latest Zaproxy Zip not found. Error: ' + str(e)) return writeToLog('[*] Installing Zaproxy...') try: run(['/usr/bin/curl -Lo /tmp/zaproxy.deb ' + latestZapDeb],shell=True) run(['/usr/bin/dpkg -i /tmp/zaproxy.deb'],shell=True) except Exception as e: writeToLog('[-] Zaproxy not installed. 
Error: ' + str(e)) def installZeek(): # instll Zeek writeToLog('[*] Installing Zeek...') try: run(['/usr/bin/echo \'deb http://download.opensuse.org/repositories/security:/zeek/xUbuntu_20.04/ /\' | sudo tee /etc/apt/sources.list.d/security:zeek.list'],shell=True) run(['/usr/bin/curl -fsSL https://download.opensuse.org/repositories/security:zeek/xUbuntu_20.04/Release.key | gpg --dearmor | sudo tee /etc/apt/trusted.gpg.d/security_zeek.gpg > /dev/null'],shell=True) run(['/usr/bin/apt update'],shell=True) run(['/usr/bin/apt -y install zeek'],shell=True) except Exception as e: writeToLog('[-] Zeek not installed. Error: ' + str(e)) # add /opt/zeek/bin to the path permanently try: writeToLog('[i] Writing Zeeks path to the current users bashrc. You may need to manually add: \'export PATH=$PATH:/opt/zeek/bin\' to yours.') run(['/usr/bin/echo "export PATH=$PATH:/opt/zeek/bin" >> ~/.bashrc'],shell=True) run(['export PATH=$PATH:/opt/zeek/bin'],shell=True) except Exception as e: writeToLog('[-] Zeek path not added. Error: ' + str(e)) # display log def displayLog(): print('[*] The following activities were logged:\n') with open(LOG,'r') as log: allLines = log.readlines() for line in allLines: print(line.strip()) # display fortress artwork def displayImage(): try: run(['/usr/bin/curl -Lo ' + FORTRESS_DIR + 'fortress.jpg https://dfirmadness.com/wp-content/uploads/2021/06/infosec-fortress-2500.jpg'],shell=True) run(['/usr/bin/eog ' + FORTRESS_DIR + 'fortress.jpg'],shell=True) run(['/usr/bin/rm ' + FORTRESS_DIR + 'fortress.jpg'],shell=True) except: return # display message about updating ZAP and Burp after reboot def giveUserNextSteps(): print(GREEN + '[+]' + '-----------------------------------------------------------------------------------' + NOCOLOR) print(GREEN + '[+]' + '------------------------ ! Script Complete ! --------------------------------------' + NOCOLOR) print('\n\n[!] REBOOT the system. After Reboot you will want to run Burp, Zap and Ghidra. 
Each will ask you to update.\ \n You should update these. If they have you download a .deb file you simple run ' + GREEN + 'dpkg -i foo.deb' + NOCOLOR + '.\ \n Don\'t forget to run: \'echo "export PATH=$PATH:/opt/zeek/bin" >> ~/.bashrc\' to add the Zeek bins to your user (non-root) path') nullInput = input('Hit Enter.') # Re-enable unattended upgrade #Only needed if auto kill of unattended upgrades is added def main(): checkIfRoot() checkForInternet() initNotice() informAboutUnattendedUpgade() createFortressDir(FORTRESS_DIR) startLogFile() freeSpaceStart() updateOS() installStarterPackages() installREMnux() installSIFTPackages() installAPTandSNAPPackages() swapNetcat() installMSF() installWordlists() installExploitDb() installImpacket() installEnum() installEnumNG() installWebShells() installWindowsResources() installBloodhound() installZaproxy() installZeek() freeSpaceEnd() displayLog() displayImage() giveUserNextSteps() exit(0) main() if __name__== "__main__": main()
40.996377
210
0.644057
2,677
22,630
5.428838
0.220022
0.021056
0.027868
0.035918
0.369573
0.328287
0.25824
0.1895
0.163628
0.121861
0
0.005682
0.198939
22,630
551
211
41.07078
0.796006
0.136677
0
0.26699
0
0.046117
0.376436
0.050796
0
0
0
0
0
1
0.082524
false
0.002427
0.021845
0
0.116505
0.043689
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
d4051d0a2b00e9998b74b852734dd381524320f3
955
py
Python
substance/constants.py
philraj/substance
c68c8343e22fd2ac1e83b7567140c2a20f417984
[ "Apache-2.0" ]
null
null
null
substance/constants.py
philraj/substance
c68c8343e22fd2ac1e83b7567140c2a20f417984
[ "Apache-2.0" ]
null
null
null
substance/constants.py
philraj/substance
c68c8343e22fd2ac1e83b7567140c2a20f417984
[ "Apache-2.0" ]
null
null
null
class Constants(object): class ConstError(TypeError): pass def __init__(self, **kwargs): for name, value in list(kwargs.items()): super(Constants, self).__setattr__(name, value) def __setattr__(self, name, value): if name in self.__dist__: raise self.ConstError("Can't rebind const(%s)" % name) self.__dict__[name] = value def __delattr__(self, name): if name in self.__dict__: raise self.ConstError("Can't unbind const(%s)" % name) raise NameError(name) Tables = Constants( BOXES="boxes" ) DefaultEngineBox = 'turbulent/substance-box:1.0' EngineStates = Constants( RUNNING="running", STOPPED="stopped", SUSPENDED="suspended", UNKNOWN="unknown", INEXISTENT="inexistent" ) Syncher = Constants( UP=">>", DOWN="<<", BOTH="<>" ) Orchestrators = Constants( DOCKWRKR="dockwrkr", COMPOSE="docker-compose" )
20.76087
66
0.618848
102
955
5.519608
0.509804
0.063943
0.042629
0.042629
0.081705
0
0
0
0
0
0
0.00277
0.243979
955
45
67
21.222222
0.777008
0
0
0
0
0
0.150943
0.028302
0
0
0
0
0
1
0.088235
false
0.029412
0
0
0.147059
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
d4051f0da6a3085bed81c035499b45d737816f30
12,482
py
Python
scripts/identify_taxonomic_trees.py
AtilioA/wikidata-evaluation-based-on-ontologies
b726cc40a80312e92e7aa42fc24f1eee21bc40be
[ "Unlicense" ]
2
2020-12-06T21:57:36.000Z
2020-12-11T16:07:00.000Z
scripts/identify_taxonomic_trees.py
AtilioA/wikidata-evaluation-based-on-ontologies
b726cc40a80312e92e7aa42fc24f1eee21bc40be
[ "Unlicense" ]
null
null
null
scripts/identify_taxonomic_trees.py
AtilioA/wikidata-evaluation-based-on-ontologies
b726cc40a80312e92e7aa42fc24f1eee21bc40be
[ "Unlicense" ]
null
null
null
import time import json import sys from pathlib import Path from pprint import pprint import wikidata_utils from graphviz import Digraph NL = "\n" def find_subclasses_between(subclass, superclass): # Query Stardog for subclasses subclassesJSON = wikidata_utils.query_subclasses_stardog(superclass, subclass)[ "results" ]["bindings"] subclassesList = [] try: # Parse JSON for results subclassesList = [result["entity"]["value"] for result in subclassesJSON] # Look for QID in all the strings subclassesList = wikidata_utils.regex_match_QID(subclassesList) except: pass print(f"Subclasses between '{subclass}' and '{superclass}':\n{subclassesList}") # print(subclassLabels) try: # Remove superclass from the list (it is included by SPARQL) subclassesList.remove(superclass) except: pass # Return reversed list so we can use it immediately in the right order with graphviz return list(reversed(subclassesList)) def graph_from_superclasses_dict(treesDictFilename, **kwargs): # PROBLEM: Given a dictionary with entities, their superclasses and subclasses, create a "maximal" graph that displays the relation between entities dotsTime = int(time.time()) # Optional argument; if it exists, will include only entities from the ranking rankingEntities = kwargs.get("rankingEntities", None) useRandomColors = kwargs.get("useRandomColors", None) remainingEntities = set(rankingEntities) totalEntities = len(remainingEntities) with open(Path(treesDictFilename), "r+", encoding="utf8") as dictFile: entitiesDict = json.load(dictFile) # Filter out entities without any subclasses in the ranking # Entities of interest here are entities without superclasses or whose superclasses are themselves entitiesDict = dict( filter( lambda x: x[1]["subclasses"] != [] and (x[1]["superclasses"] == [] or [x[0]] == x[1]["superclasses"]), entitiesDict.items(), ) ) keepEntity = "1" keptDict = {} pprint(entitiesDict.keys()) while(len(keepEntity) > 0): if not keptDict: keepEntity = input("What entity to generate graphs for? 
[Enter] for All: ") else: keepEntity = input("What entity to generate graphs for? [Enter] to leave: ") if keepEntity: kept = entitiesDict.pop(keepEntity) keptDict[keepEntity] = kept else: break print(f"Kept {keepEntity}") if keptDict: entitiesDict = keptDict # Number of entities to be processed print(f"{len(entitiesDict)} superclasses") nodesDict = {} for entity in entitiesDict.items(): # Get label for each main entity entityLabel = wikidata_utils.get_entity_label(entity[0]) nSubclasses = len(entity[1]["subclasses"]) print(f"\nBuilding graph for {entity[0]} ({entityLabel}).") print(f"{entityLabel.capitalize()} has at least {nSubclasses} subclasses from the ranking.\n") # Create graph for each main entity nodesep = "0.1" ranksep = "0.5" if nSubclasses > 50: nodesep = "0.15" ranksep = "1" dot = Digraph( comment=entityLabel, strict=True, encoding="utf8", graph_attr={"nodesep": nodesep, "ranksep": ranksep, "rankdir": "BT"}, ) # Create a bigger node for each main entity dot.node(f"{entityLabel}\n{entity[0]}", fontsize="24") # Add entity QID to nodes' dict nodesDict[entity[0]] = True print( f"{totalEntities - len(remainingEntities)} entities (of {totalEntities}) from the ranking processed so far." ) for subclass in entity[1]["subclasses"]: # Get label for each subclass subclassLabel = wikidata_utils.get_entity_label(subclass) # If label is unavailable, use ID if subclassLabel != "Label unavailable": subclassNodeLabel = f"{subclassLabel}\n{subclass}" else: subclassNodeLabel = subclass print( f'Finding subclasses between "{subclassLabel}" and "{entityLabel}"...' 
) # Get random color for nodes and edges argsColor = "#111111" if useRandomColors: argsColor = wikidata_utils.random_color_hex() edgeLabel = None if not nodesDict.get(subclass, False): # Create subclass node dot.node(f"{subclassLabel}\n{subclass}", color=argsColor) # Add subclass QID to nodes' dict nodesDict[subclass] = True # Query intermediary entities between "subclass" and "entity" (returns ordered list) subclassesBetween = find_subclasses_between(subclass, entity[0]) # Default styling for intermediary subclasses subclassNodeArgs = { "shape": "square", "color": "#777777", "fontsize": "10", "fontcolor": "#555555", } # remainingEntitiesLastIteration = {totalEntities - len(remainingEntities)} if rankingEntities: # Filter out subclasses that aren't from the ranking subclassesBetween = { subclass: True for subclass in subclassesBetween if subclass in rankingEntities } print(f"Subclasses between: {subclassesBetween}") # Use no particular styling instead subclassNodeArgs = {} # edgeLabel = "P279+" if subclassesBetween: # Get labels for each subclass in between subclassLabels = [ wikidata_utils.get_entity_label(subclass) for subclass in list(subclassesBetween) ] # Connect "main" subclass to its immediate superclass print( f"(First) Marking {subclassNodeLabel.split(NL)[0]} ({subclassNodeLabel.split(NL)[1]}) as subclass of {subclassLabels[-1]} ({list(subclassesBetween)[-1]})" ) dot.edge( subclassNodeLabel, f"{subclassLabels[-1]}\n{list(subclassesBetween)[-1]}", label=edgeLabel, color=argsColor, arrowhead="o", ) try: remainingEntities.remove(list(subclassesBetween)[-1]) except KeyError: pass for i, subclassBetween in enumerate(subclassesBetween): if not nodesDict.get(subclassBetween, False): # Create node for each subclass dot.node( f"{subclassLabels[i]}\n{subclassBetween}", **subclassNodeArgs, color=argsColor, ) # Add intermediary entity QID to nodes' dict nodesDict[subclassBetween] = True for i, subclassBetween in enumerate(list(subclassesBetween)[:-1]): # Connect each 
subclass to its immediate superclass # First, check if they should be connected for j, entityAbove in enumerate(list(subclassesBetween)[i:]): checkSubclass = list(subclassesBetween)[i] checkSubclassLabel = subclassLabels[i] if i == 0: checkSubclass = subclass checkSubclassLabel = subclassLabel isSubclass = wikidata_utils.query_subclass_stardog( entityAbove, checkSubclass, transitive=True )["results"]["bindings"][0]["isSubclass0"]["value"] isSubclass = isSubclass.lower() == "true" print( f" (For) Is {checkSubclass} subclass of {entityAbove}? {isSubclass}" ) if isSubclass: print( f" Marking {checkSubclassLabel} ({checkSubclass}) as subclass of {subclassLabels[i + j]} ({entityAbove})" ) dot.edge( f"{checkSubclassLabel}\n{checkSubclass}", f"{subclassLabels[i + j]}\n{entityAbove}", label=edgeLabel, color=argsColor, arrowhead="o", ) try: remainingEntities.remove(checkSubclass) except KeyError: pass try: remainingEntities.remove(entityAbove) except KeyError: pass # if totalEntities - len(remainingEntities) > remainingEntitiesLastIteration: print( f"{totalEntities - len(remainingEntities)} entities (of {totalEntities}) from the ranking processed so far." 
) # Connect the topmost superclass to the main superclass, i.e., the entity print( f"(Last) Marking {subclassLabels[0]} as subclass of {entityLabel}" ) dot.edge( f"{subclassLabels[0]}\n{list(subclassesBetween)[0]}", f"{entityLabel}\n{entity[0]}", label=edgeLabel, color=argsColor, arrowhead="o", ) else: # If there are no subclasses in between, connect subclass and entity directly print( f"Joining {subclassNodeLabel.split(NL)[0]} ({subclassNodeLabel.split(NL)[1]}) and {entityLabel} ({entity[0]})" ) dot.edge( subclassNodeLabel, f"{entityLabel}\n{entity[0]}", label=edgeLabel, color=argsColor, arrowhead="o", ) try: remainingEntities.remove(subclass) except KeyError: pass # Not having graphviz properly installed might raise an exception try: if rankingEntities: u = dot.unflatten(stagger=5) # Break graphs into more lines u.render(f"output/dots/dots_{dotsTime}/AP1_{dot.comment}.gv") else: u = dot.unflatten(stagger=5) # Break graphs into more lines u.render( f"output/dots/dots_{dotsTime}/AP1_{dot.comment}_intermediary.gv" ) except: print("\nVerify your Graphviz installation or Digraph args!\n") pass try: remainingEntities.remove(entity[0]) except KeyError: pass print(remainingEntities) def get_ranking_entity_set(rankingFile): entityList = parse_ranking_file(rankingFile) return set(entityList) def parse_ranking_file(rankingFile): lines = rankingFile.readlines() lines = list(map(lambda line: line.strip(), lines)) # Look for the QID in all strings rankEntities = wikidata_utils.regex_match_QID(lines) return rankEntities if __name__ == "__main__": try: fileIn = Path(sys.argv[2]) except: fileIn = Path("output/ranking/AP1_minus_Q23958852_ranking.txt") with open(fileIn, "r") as rankingFile: entities = parse_ranking_file(rankingFile) # entitiesSet = get_ranking_entity_set(rankingFile) # graph_from_superclasses_dict( # "output/AP1_occurrence.json", rankingEntities=entities # ) graph_from_superclasses_dict( "output/AP1_trees.json", rankingEntities=entities )
37.371257
174
0.539337
1,098
12,482
6.068306
0.250455
0.012607
0.010506
0.016809
0.199009
0.153084
0.123668
0.123668
0.10866
0.070839
0
0.011031
0.375421
12,482
333
175
37.483483
0.843638
0.162875
0
0.284483
0
0.008621
0.194041
0.072177
0
0
0
0
0
1
0.017241
false
0.034483
0.030172
0
0.060345
0.077586
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
d405da66dfc30f0e0e30d4963515d7ec89e1c3ae
3,620
py
Python
harakiri/model.py
ManjiKR/Harakiri
dba9fd7c63e54cf319549a15795e7a4782df8fd0
[ "Apache-2.0" ]
null
null
null
harakiri/model.py
ManjiKR/Harakiri
dba9fd7c63e54cf319549a15795e7a4782df8fd0
[ "Apache-2.0" ]
null
null
null
harakiri/model.py
ManjiKR/Harakiri
dba9fd7c63e54cf319549a15795e7a4782df8fd0
[ "Apache-2.0" ]
null
null
null
from typing import Any, Iterator, List class SkillInfo: def __init__(self, **data: Any) -> None: self.__command = data["Command"] self.__hit_level = data["Hit level"] self.__damage = data["Damage"] self.__startup_frame = data["Start up frame"] self.__block_frame = data["Block frame"] self.__hit_frame = data["Hit frame"] self.__counter_hit_frame = data["Counter hit frame"] self.__notes = data["Notes"] @property def command(self) -> str: return self.__command @property def hit_level(self) -> str: return self.__hit_level @property def damage(self) -> str: return self.__damage @property def start_up_frame(self) -> str: return self.__startup_frame @property def block_frame(self) -> str: return self.__block_frame @property def hit_frame(self) -> str: return self.__hit_frame @property def counter_hit_frame(self) -> str: return self.__counter_hit_frame @property def notes(self) -> str: return self.__notes class SkillData: def __init__(self, **data: Any) -> None: self.__status = data["status"] self.__info = data["info"] @property def status(self) -> int: return self.__status @property def info(self) -> SkillInfo: return SkillInfo(**self.__info) class AllSkillsData: def __init__(self, **data: Any) -> None: self.__status = data["status"] self.__skill_list = data["skill_list"] @property def status(self) -> int: return self.__status @property def skill_list(self) -> Iterator[SkillInfo]: for skill in self.__skill_list: yield SkillInfo(**skill) class GalleryPost: def __init__(self, **data: Any) -> None: self.__status = data["status"] self.__title = data["content"]["title"] self.__auhor = data["content"]["auhor"] self.__content = data["content"]["content"] @property def status(self) -> int: return self.__status @property def title(self) -> str: return self.__title @property def author(self) -> str: return self.__auhor @property def content(self) -> List[str]: return self.__content class ListsPostInfo: def __init__(self, **data: Any) -> None: self.__id = data["id"] self.__title = 
data["title"] self.__writer = data["writer"] self.__date = data["date"] self.__recommend = data["recommend"] self.__reply = data["reply"] self.__views = data["views"] @property def id(self) -> int: return self.__id @property def title(self) -> str: return self.__title @property def writer(self) -> str: return self.__writer @property def date(self) -> str: return self.__date @property def recommend(self) -> int: return self.__recommend @property def reply(self) -> int: return self.__reply @property def views(self) -> int: return self.__views class GalleryList: def __init__(self, **data: Any) -> None: self.__status = data["status"] self.__total = data["total"] self.__lists = data["lists"] @property def status(self) -> int: return self.__status @property def total(self) -> int: return self.__total @property def lists(self) -> Iterator[ListsPostInfo]: for post in self.__lists: yield ListsPostInfo(post)
22.911392
60
0.594199
413
3,620
4.842615
0.125908
0.143
0.091
0.1105
0.322
0.2885
0.2635
0.2375
0.2375
0.2375
0
0
0.287017
3,620
157
61
23.057325
0.774893
0
0
0.403361
0
0
0.055249
0
0
0
0
0
0
1
0.268908
false
0
0.008403
0.201681
0.529412
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
1
0
0
0
1
1
0
0
3
d405f66eb96aad36fd4897d6d6488e602b579c70
162
py
Python
eviction_tracker/detainer_warrants/caselink/constants/ids.py
gregziegan/eviction-tracker
4db4bacb9675f985cf2f4a747855491e9036ad28
[ "BSD-3-Clause" ]
5
2021-09-15T08:06:59.000Z
2022-01-26T21:25:50.000Z
eviction_tracker/detainer_warrants/caselink/constants/ids.py
gregziegan/eviction-tracker
4db4bacb9675f985cf2f4a747855491e9036ad28
[ "BSD-3-Clause" ]
18
2022-01-14T17:15:53.000Z
2022-02-14T07:33:53.000Z
eviction_tracker/detainer_warrants/caselink/constants/ids.py
thebritican/eviction-tracker
1e34d509b0410d61de6abd6be521c53951fa038a
[ "BSD-3-Clause" ]
1
2021-09-15T01:46:57.000Z
2021-09-15T01:46:57.000Z
# Frames UPDATE_FRAME = 'update' POSTBACK_FRAME = 'postback' # Login USERNAME_LOGIN_FIELD = 'OPERCODE' PASSWORD_LOGIN_FIELD = 'PASSWD' LOGIN_BUTTON = 'LogInSub'
18
33
0.771605
19
162
6.210526
0.631579
0.169492
0
0
0
0
0
0
0
0
0
0
0.123457
162
8
34
20.25
0.830986
0.074074
0
0
0
0
0.244898
0
0
0
0
0
0
1
0
false
0.2
0
0
0
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
1
0
0
0
0
0
3
d4064fd610eae924f03892b2d599dd0687c7269d
913
py
Python
dist/weewx-4.0.0b5/bin/weewx/junk.py
v0rts/docker-weewx
70b2f252051dfead4fcb74e74662b297831e6342
[ "Apache-2.0" ]
10
2017-01-05T17:30:48.000Z
2021-09-18T15:04:20.000Z
dist/weewx-4.0.0b5/bin/weewx/junk.py
v0rts/docker-weewx
70b2f252051dfead4fcb74e74662b297831e6342
[ "Apache-2.0" ]
2
2019-07-21T10:48:42.000Z
2022-02-16T20:36:45.000Z
dist/weewx-4.0.0b5/bin/weewx/junk.py
v0rts/docker-weewx
70b2f252051dfead4fcb74e74662b297831e6342
[ "Apache-2.0" ]
12
2017-01-05T18:50:30.000Z
2021-10-05T07:35:45.000Z
import weewx class MyTypes(object): def get_value(self, obs_type, record, db_manager): if obs_type == 'dewpoint': if record['usUnits'] == weewx.US: return weewx.wxformulas.dewpointF(record.get('outTemp'), record.get('outHumidity')) elif record['usUnits'] == weewx.METRIC or record['usUnits'] == weewx.METRICWX: return weewx.wxformulas.dewpointC(record.get('outTemp'), record.get('outHumidity')) else: raise ValueError("Unknown unit system %s" % record['usUnits']) else: raise weewx.UnknownType(obs_type) class MyVector(object): def get_aggregate(self, obs_type, timespan, aggregate_type=None, aggregate_interval=None): if obs_type.starts_with('ch'): "something" else: raise weewx.UnknownType(obs_type)
32.607143
99
0.591457
97
913
5.443299
0.463918
0.079545
0.102273
0.083333
0.257576
0.257576
0
0
0
0
0
0
0.292443
913
28
100
32.607143
0.817337
0
0
0.25
0
0
0.11488
0
0
0
0
0
0
1
0.1
false
0
0.05
0
0.35
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0