hexsha
string
size
int64
ext
string
lang
string
max_stars_repo_path
string
max_stars_repo_name
string
max_stars_repo_head_hexsha
string
max_stars_repo_licenses
list
max_stars_count
int64
max_stars_repo_stars_event_min_datetime
string
max_stars_repo_stars_event_max_datetime
string
max_issues_repo_path
string
max_issues_repo_name
string
max_issues_repo_head_hexsha
string
max_issues_repo_licenses
list
max_issues_count
int64
max_issues_repo_issues_event_min_datetime
string
max_issues_repo_issues_event_max_datetime
string
max_forks_repo_path
string
max_forks_repo_name
string
max_forks_repo_head_hexsha
string
max_forks_repo_licenses
list
max_forks_count
int64
max_forks_repo_forks_event_min_datetime
string
max_forks_repo_forks_event_max_datetime
string
content
string
avg_line_length
float64
max_line_length
int64
alphanum_fraction
float64
qsc_code_num_words_quality_signal
int64
qsc_code_num_chars_quality_signal
float64
qsc_code_mean_word_length_quality_signal
float64
qsc_code_frac_words_unique_quality_signal
float64
qsc_code_frac_chars_top_2grams_quality_signal
float64
qsc_code_frac_chars_top_3grams_quality_signal
float64
qsc_code_frac_chars_top_4grams_quality_signal
float64
qsc_code_frac_chars_dupe_5grams_quality_signal
float64
qsc_code_frac_chars_dupe_6grams_quality_signal
float64
qsc_code_frac_chars_dupe_7grams_quality_signal
float64
qsc_code_frac_chars_dupe_8grams_quality_signal
float64
qsc_code_frac_chars_dupe_9grams_quality_signal
float64
qsc_code_frac_chars_dupe_10grams_quality_signal
float64
qsc_code_frac_chars_replacement_symbols_quality_signal
float64
qsc_code_frac_chars_digital_quality_signal
float64
qsc_code_frac_chars_whitespace_quality_signal
float64
qsc_code_size_file_byte_quality_signal
float64
qsc_code_num_lines_quality_signal
float64
qsc_code_num_chars_line_max_quality_signal
float64
qsc_code_num_chars_line_mean_quality_signal
float64
qsc_code_frac_chars_alphabet_quality_signal
float64
qsc_code_frac_chars_comments_quality_signal
float64
qsc_code_cate_xml_start_quality_signal
float64
qsc_code_frac_lines_dupe_lines_quality_signal
float64
qsc_code_cate_autogen_quality_signal
float64
qsc_code_frac_lines_long_string_quality_signal
float64
qsc_code_frac_chars_string_length_quality_signal
float64
qsc_code_frac_chars_long_word_length_quality_signal
float64
qsc_code_frac_lines_string_concat_quality_signal
float64
qsc_code_cate_encoded_data_quality_signal
float64
qsc_code_frac_chars_hex_words_quality_signal
float64
qsc_code_frac_lines_prompt_comments_quality_signal
float64
qsc_code_frac_lines_assert_quality_signal
float64
qsc_codepython_cate_ast_quality_signal
float64
qsc_codepython_frac_lines_func_ratio_quality_signal
float64
qsc_codepython_cate_var_zero_quality_signal
bool
qsc_codepython_frac_lines_pass_quality_signal
float64
qsc_codepython_frac_lines_import_quality_signal
float64
qsc_codepython_frac_lines_simplefunc_quality_signal
float64
qsc_codepython_score_lines_no_logic_quality_signal
float64
qsc_codepython_frac_lines_print_quality_signal
float64
qsc_code_num_words
int64
qsc_code_num_chars
int64
qsc_code_mean_word_length
int64
qsc_code_frac_words_unique
null
qsc_code_frac_chars_top_2grams
int64
qsc_code_frac_chars_top_3grams
int64
qsc_code_frac_chars_top_4grams
int64
qsc_code_frac_chars_dupe_5grams
int64
qsc_code_frac_chars_dupe_6grams
int64
qsc_code_frac_chars_dupe_7grams
int64
qsc_code_frac_chars_dupe_8grams
int64
qsc_code_frac_chars_dupe_9grams
int64
qsc_code_frac_chars_dupe_10grams
int64
qsc_code_frac_chars_replacement_symbols
int64
qsc_code_frac_chars_digital
int64
qsc_code_frac_chars_whitespace
int64
qsc_code_size_file_byte
int64
qsc_code_num_lines
int64
qsc_code_num_chars_line_max
int64
qsc_code_num_chars_line_mean
int64
qsc_code_frac_chars_alphabet
int64
qsc_code_frac_chars_comments
int64
qsc_code_cate_xml_start
int64
qsc_code_frac_lines_dupe_lines
int64
qsc_code_cate_autogen
int64
qsc_code_frac_lines_long_string
int64
qsc_code_frac_chars_string_length
int64
qsc_code_frac_chars_long_word_length
int64
qsc_code_frac_lines_string_concat
null
qsc_code_cate_encoded_data
int64
qsc_code_frac_chars_hex_words
int64
qsc_code_frac_lines_prompt_comments
int64
qsc_code_frac_lines_assert
int64
qsc_codepython_cate_ast
int64
qsc_codepython_frac_lines_func_ratio
int64
qsc_codepython_cate_var_zero
int64
qsc_codepython_frac_lines_pass
int64
qsc_codepython_frac_lines_import
int64
qsc_codepython_frac_lines_simplefunc
int64
qsc_codepython_score_lines_no_logic
int64
qsc_codepython_frac_lines_print
int64
effective
string
hits
int64
2e64c50e5e3b01bee5233b429195480822031b6d
1,407
py
Python
examples/dopamine/train.py
jurgisp/xmanager
7c59aed661b93817589bb1904e7d099d26afa62f
[ "Apache-2.0" ]
392
2021-10-07T00:12:17.000Z
2022-03-30T11:06:35.000Z
examples/dopamine/train.py
jurgisp/xmanager
7c59aed661b93817589bb1904e7d099d26afa62f
[ "Apache-2.0" ]
8
2021-10-20T14:59:07.000Z
2022-03-31T10:52:47.000Z
examples/dopamine/train.py
jurgisp/xmanager
7c59aed661b93817589bb1904e7d099d26afa62f
[ "Apache-2.0" ]
11
2021-11-16T13:54:18.000Z
2022-03-30T03:13:50.000Z
# Copyright 2021 DeepMind Technologies Limited # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """The entry point for running a Dopamine agent.""" import os from absl import app from absl import flags from absl import logging from dopamine.discrete_domains import run_experiment import tensorflow as tf FLAGS = flags.FLAGS flags.DEFINE_multi_string( 'gin_files', [], 'List of paths to gin configuration files (e.g.' '"dopamine/agents/dqn/dqn.gin").') # When using Vertex Tensorboard, the tensorboard will be present as a # environment variable. BASE_DIR = os.environ.get('AIP_TENSORBOARD_LOG_DIR', '/tmp/dopamine_runs') def main(unused_argv): logging.set_verbosity(logging.INFO) tf.compat.v1.disable_v2_behavior() run_experiment.load_gin_configs(FLAGS.gin_files, []) runner = run_experiment.create_runner(BASE_DIR) runner.run_experiment() if __name__ == '__main__': app.run(main)
31.977273
74
0.766169
211
1,407
4.966825
0.611374
0.057252
0.040076
0.030534
0
0
0
0
0
0
0
0.00834
0.147832
1,407
43
75
32.72093
0.865721
0.500355
0
0
0
0
0.197657
0.079063
0
0
0
0
0
1
0.052632
false
0
0.315789
0
0.368421
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
1
0
0
0
0
1
2e651a2fe83d07fb048afcf923ef6a66a310e429
2,814
py
Python
APS.py
PabloGramos/APS
a7825628f8ce7ef46da413948c40d03c8118717e
[ "MIT" ]
null
null
null
APS.py
PabloGramos/APS
a7825628f8ce7ef46da413948c40d03c8118717e
[ "MIT" ]
null
null
null
APS.py
PabloGramos/APS
a7825628f8ce7ef46da413948c40d03c8118717e
[ "MIT" ]
null
null
null
def soma(): r=1 while r>0: n1 = int(input("Valor: ")) n2 = int(input(f"{n1} + ")) soma = n1 + n2 print(f"\n{n1} + {n2} = {soma}\n") r=int(input("1-Continuar 0-Sair: ")) if r > 1: print("Opção Inválida!....Saindo") r=0 def sub(): r = 1 while r > 0: n1 = int(input("Valor: ")) n2 = int(input(f"{n1} - ")) sub = n1 - n2 print(f"\n{n1} - {n2} = {sub}\n") r = int(input("1-Continuar 0-Sair: ")) if r > 1: print("Opção Inválida!....Saindo") r = 0 def mult(): r = 1 while r > 0: n1 = int(input("Valor: ")) n2 = int(input(f"{n1} X ")) mult = n1 * n2 print(f"\n{n1} X {n2} = {mult}\n") r = int(input("1-Continuar 0-Sair: ")) if r > 1: print("Opção Inválida!....Saindo") r = 0 def div(): r = 1 while r > 0: n1 = int(input("Valor: ")) n2 = int(input(f"{n1}/ ")) if n2 == 0: print("Não existe divisão por 0! ") break div = n1 / n2 print("\n{} / {} = {:.2f}\n".format(n1, n2, div)) r = int(input("1-Continuar 0-Sair: ")) if r > 1: print("Opção Inválida!....Saindo") r = 0 def raiz(): import math r = 1 while r > 0: n = int(input("Digite o valor: ")) if n < 0: print("Não existe raiz de números negativos!") break raiz = math.sqrt(n) print("\nRaiz de {} = {:.2f}\n".format(n, raiz)) r = int(input("1-Continuar 0-Sair: ")) if r > 1: print("Opção Inválida!....Saindo") r = 0 def sct(): import math r = 1 while r > 0: n = float(input("Digite o valor: ")) seno = math.sin(math.radians(n)) cosseno = math.cos(math.radians(n)) tang = math.tan(math.radians(n)) print("\nO valor {} possui Seno = {:.2f}, Cosseno = {:.2f} e Tangente = {:.2f}\n".format(n, seno, cosseno, tang)) r = int(input("1-Continuar 0-Sair: ")) if r > 1: print("Opção Inválida!....Saindo") r = 0 print(""" -------------CALCULADORA-------------- -----------Pablo--Vinícius------------ """) corpo=True while corpo == True: print(""" MENU 1 - Soma 2 - Subtração 3 - Multiplicação 4 - Divisão 5 - Raiz Quadrada 6 - Seno, Cosseno, Tangente 0 - Sair """) op = int(input("Escolha a operação: ")) if op == 1: soma() elif op == 2: sub() elif op == 3: mult() 
elif op == 4: div() elif op == 5: raiz() elif op == 6: sct() elif op == 0: break else: print("Opção inválida!")
26.299065
121
0.429638
367
2,814
3.294278
0.201635
0.105873
0.104218
0.039702
0.473945
0.473945
0.463193
0.438379
0.405294
0.405294
0
0.049743
0.378465
2,814
107
122
26.299065
0.641509
0
0
0.457944
0
0.009346
0.314387
0.026998
0
0
0
0
0
1
0.056075
false
0
0.018692
0
0.074766
0.158879
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
2e673f4da5b6854fbcd837a5389934a4ea9a7719
1,452
py
Python
day_2.py
kurtrm/advent_of_code_2018
a3db9df31de730479c8c2b3f2869b274a0e024e9
[ "MIT" ]
null
null
null
day_2.py
kurtrm/advent_of_code_2018
a3db9df31de730479c8c2b3f2869b274a0e024e9
[ "MIT" ]
null
null
null
day_2.py
kurtrm/advent_of_code_2018
a3db9df31de730479c8c2b3f2869b274a0e024e9
[ "MIT" ]
null
null
null
""" """ from collections import Counter from read_file import read_input def checksum(filename): """ """ threes = 0 twos = 0 ids = read_input(filename) for label_id in ids: counts = {value: key for key, value in Counter(label_id).items()} try: counts[2] twos += 1 except KeyError: pass try: counts[3] threes += 1 except KeyError: pass return threes * twos def differ(): """ """ ids = read_input('input_2.txt') # correct_ids = [] # for label_id in ids: # counts = {value: key for key, value in Counter(label_id).items()} # try: # counts[2] # correct_ids.append(label_id) # except KeyError: # pass # try: # counts[3] # if correct_ids[-1] == label_id: # continue # correct_ids.append(label_id) # except KeyError: # pass for idx, first_label in enumerate(ids, 1): for second_label in ids[idx:]: total = 0 for i, _ in enumerate(second_label): if first_label[i] != second_label[i]: total += 1 if total == 1: return first_label, second_label if __name__ == '__main__': print(differ())
23.419355
76
0.472452
155
1,452
4.219355
0.290323
0.074924
0.110092
0.036697
0.400612
0.400612
0.342508
0.342508
0.217125
0.217125
0
0.016867
0.428375
1,452
61
77
23.803279
0.771084
0.250689
0
0.193548
0
0
0.019348
0
0
0
0
0
0
1
0.064516
false
0.064516
0.064516
0
0.193548
0.032258
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
1
0
0
0
0
0
1
2e675938f3d093c0365d3aa398c262cefa7433e0
3,357
py
Python
home-assistant-backup.py
scaarup/home-assistant-backup
1310054ebd41550292d45329411500cb08b369a1
[ "MIT" ]
null
null
null
home-assistant-backup.py
scaarup/home-assistant-backup
1310054ebd41550292d45329411500cb08b369a1
[ "MIT" ]
null
null
null
home-assistant-backup.py
scaarup/home-assistant-backup
1310054ebd41550292d45329411500cb08b369a1
[ "MIT" ]
null
null
null
#!/usr/bin/env python # Created by Søren Christian Aarup, sc@aarup.org # https://github.com/scaarup/home-assistant-backup # api ref.: https://developers.home-assistant.io/docs/api/supervisor/endpoints import requests,json,datetime,gzip,sys,datetime from datetime import timedelta, date token = 'Bearer <token>' host = '<url>' retention = 12 # In days, how many backups do you want to keep on Home Assistant (normally in /backup). backupname = 'hassio_backup_full-' date_string = datetime.datetime.now().strftime('%Y%m%d') _d = date.today() - timedelta(retention) oldestbackup = backupname+_d.strftime('%Y%m%d')+'.tar.gz' name = backupname+date_string+'.tar.gz' debug = 1 def debuglog(msg): if debug == 1: print(datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')+' DEBUG: '+msg) def log(msg): print(datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')+' INFO: '+msg) # Ping Supervisor, quit if fail: response = requests.get(host+'/api/hassio/supervisor/ping', headers={'authorization': token}) json_response = response.json() if not json_response['result'] == 'ok': log('Supervisor not responding ok to our ping! 
'+str(response.status_code)+' '+str(response.content)) sys.exit(1) ## def listBackups(name): debuglog('Looping through backups on HA, looking for '+name) response = requests.get( host+'/api/hassio/backups', headers={'authorization': token} ) json_response = response.json() backups = json_response['data']['backups'] for backup in backups: debuglog('\t'+backup['name']+' '+backup['slug']) if (backup['name'] == name): debuglog('Found our backup on HA:') return backup['slug'] def createBackupFull(name): debuglog('Creating backup '+name) response = requests.post( host+'/api/hassio/backups/new/full', json={'name': name}, headers={'authorization': token,'content-type': 'application/json'} ) debuglog(str(response.status_code)+' '+str(response.content)) json_response = response.json() debuglog('Create backup response: '+json_response['result']) return json_response['data']['slug'] def removeBackup(name,slug): debuglog('Removing backup '+name+' on server') response = requests.delete( host+'/api/hassio/backups/'+slug, headers={'authorization': token, 'content-type': 'application/json'} ) debuglog(str(response.status_code)+' '+str(response.content)) json_response = response.json() def getBackup(name,slug): log('Downloading backup '+name) response = requests.get( host+'/api/hassio/backups/'+slug+'/download', headers={'authorization': token} ) output = gzip.open(name, 'wb') # try: output.write(response.content) # finally: output.close() if response.status_code == 200: debuglog('Download ok') else: debuglog('Download response '+str(response.status_code)+' '+str(response.content)) # Create the backup, get the slug: slug = createBackupFull(name) # Download the backup: getBackup(name,slug) # Remove our oldest backup, according to retention slug = listBackups(oldestbackup) if slug is not None: debuglog('Calling removeBackup for '+oldestbackup+' with slug '+slug) removeBackup(name,slug) else: debuglog('Did not find a backup to delete.')
34.96875
105
0.670539
422
3,357
5.2891
0.329384
0.043011
0.029122
0.019713
0.301075
0.28853
0.274194
0.1819
0.143369
0.143369
0
0.002874
0.170688
3,357
95
106
35.336842
0.798851
0.129282
0
0.16
0
0
0.243643
0.0189
0
0
0
0
0
1
0.08
false
0
0.026667
0
0.133333
0.026667
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
2e6cd67c9f0d05ca91537b7f522e588f70c9a9c4
1,319
py
Python
src/features/build_features.py
mikolajsacha/tweetsclassification
33756cf6877f9cec328f08a3c728b26bf123bc8f
[ "MIT" ]
4
2016-11-22T11:26:06.000Z
2017-02-22T12:56:45.000Z
src/features/build_features.py
mikolajsacha/tweetsclassification
33756cf6877f9cec328f08a3c728b26bf123bc8f
[ "MIT" ]
26
2016-11-08T20:04:37.000Z
2017-02-18T13:51:39.000Z
src/features/build_features.py
mikolajsacha/tweetsclassification
33756cf6877f9cec328f08a3c728b26bf123bc8f
[ "MIT" ]
null
null
null
""" Contains class FeatureBuilder for building feature set from given data set and word embedding """ import numpy as np class FeatureBuilder(object): """ Class used for building feature matrix. Field "labels" is a list of categories of sentences Field "features" is a features matrix of shape (training set sixe, vector_length) """ def __init__(self): self.labels = np.empty(0, dtype=np.uint8) self.features = np.empty(0, dtype=float) self.labels.flags.writeable = False self.features.flags.writeable = False def build(self, sentence_embedding, labels, sentences): """ :param sentence_embedding: instance of sentence embedding class implementing ISentenceEmbedding interface :param labels: a numpy vector of labels of sentences :param sentences: a numpy matrix of sentences (rows = sentences, columns = words) """ self.labels = labels sentences_vectors_length = sentence_embedding.target_vector_length self.features = np.empty((sentences.shape[0], sentences_vectors_length), dtype=float) for i in xrange(sentences.shape[0]): self.features[i] = sentence_embedding[sentences[i]] self.labels.flags.writeable = False self.features.flags.writeable = False
36.638889
113
0.690675
162
1,319
5.530864
0.37037
0.066964
0.084821
0.029018
0.133929
0.133929
0.133929
0.133929
0.133929
0.133929
0
0.004892
0.225171
1,319
35
114
37.685714
0.87182
0.38514
0
0.266667
0
0
0
0
0
0
0
0
0
1
0.133333
false
0
0.066667
0
0.266667
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
2e6d28bc002be9af0e517b72024d00a394efa949
1,624
py
Python
json/conanfile.py
kapilsh/conan-scripts
31c55397a2d721c80da5dbd6a6c738accfdbb241
[ "MIT" ]
null
null
null
json/conanfile.py
kapilsh/conan-scripts
31c55397a2d721c80da5dbd6a6c738accfdbb241
[ "MIT" ]
null
null
null
json/conanfile.py
kapilsh/conan-scripts
31c55397a2d721c80da5dbd6a6c738accfdbb241
[ "MIT" ]
null
null
null
import os from conans import ConanFile from conans.tools import download, check_sha256 class NlohmannJsonConan(ConanFile): name = "json" with open(os.path.join(os.path.dirname(os.path.realpath( __file__)), "VERSION.txt"), 'r') as version_file: version = version_file.read() settings = {} description = "JSON for Modern C++" generators = "cmake", "virtualenv" exports = "VERSION.txt" url = "https://github.com/nlohmann/json" license = "https://github.com/nlohmann/json/blob/v2.1.0/LICENSE.MIT" options = {'no_exceptions': [True, False]} default_options = 'no_exceptions=False' def config(self): self.options.remove("os") self.options.remove("compiler") self.options.remove("shared") self.options.remove("build_type") self.options.remove("arch") def source(self): download_url = 'https://github.com/nlohmann/json/releases/' \ 'download/v{!s}/json.hpp'.format(self.version) download(download_url, 'json.hpp') check_sha256('json.hpp', 'a571dee92515b685784fd527e38405cf3f5e13e96edbfe3f03d6df2e' '363a767b') def build(self): return # Nothing to do. Header Only def package(self): self.copy(pattern='json.hpp', dst='include/nlohmann', src=".") def package_info(self): if self.options.no_exceptions: self.cpp_info.defines.append('JSON_NOEXCEPTION=1') self.cpp_info.includedirs = ['include'] self.env_info.CPATH.append("{}/include".format(self.package_folder))
33.142857
79
0.633621
187
1,624
5.390374
0.481283
0.065476
0.084325
0.065476
0.083333
0.05754
0
0
0
0
0
0.039075
0.227833
1,624
48
80
33.833333
0.764753
0.01601
0
0
0
0.026316
0.260652
0.049499
0
0
0
0
0
1
0.131579
false
0
0.078947
0.026316
0.5
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
2e6d68261b931f6d3c99896fa9c575feee129b51
5,958
py
Python
projects/seeker/tasks/dialogue.py
DrMatters/ParlAI
755b9dcb778deb5a82029d69ae3260579c6450f1
[ "MIT" ]
null
null
null
projects/seeker/tasks/dialogue.py
DrMatters/ParlAI
755b9dcb778deb5a82029d69ae3260579c6450f1
[ "MIT" ]
null
null
null
projects/seeker/tasks/dialogue.py
DrMatters/ParlAI
755b9dcb778deb5a82029d69ae3260579c6450f1
[ "MIT" ]
1
2022-01-24T13:22:18.000Z
2022-01-24T13:22:18.000Z
#!/usr/bin/env python3 # Copyright (c) Facebook, Inc. and its affiliates. # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. """ SeeKeR Dialogue Tasks. """ from typing import Optional from parlai.core.opt import Opt from parlai.core.params import ParlaiParser from parlai.core.teachers import MultiTaskTeacher import parlai.tasks.convai2.agents as convai2 import parlai.tasks.blended_skill_talk.agents as bst import parlai.tasks.empathetic_dialogues.agents as ed import parlai.tasks.wizard_of_internet.agents as woi import parlai.tasks.wizard_of_wikipedia.agents as wow import parlai.tasks.msc.agents as msc import parlai.tasks.ms_marco.agents as ms_marco import parlai.utils.logging as logging import projects.seeker.tasks.mutators # type: ignore # noqa: F401 class WoiDialogueTeacher(woi.DefaultTeacher): def __init__(self, opt, shared=None): mutators = '+'.join( [ 'flatten', 'woi_pop_documents_mutator', 'woi_filter_no_passage_used', 'woi_add_checked_sentence_to_input', 'skip_retrieval_mutator', ] ) if opt.get('mutators'): mutators = '+'.join([mutators, opt['mutators']]) logging.warning(f'overriding mutators to {mutators}') opt['mutators'] = mutators super().__init__(opt, shared) self.id = "WoiDialogueTeacher" class WowDialogueTeacher(wow.DefaultTeacher): def __init__(self, opt, shared=None): opt['add_missing_turns'] = 'all' mutators = '+'.join( [ 'flatten', 'wow_filter_no_passage_used', 'wow_add_checked_sentence_to_input', 'skip_retrieval_mutator', 'wow_to_woi', 'woi_pop_documents_mutator', ] ) if opt.get('mutators'): mutators = '+'.join([mutators, opt['mutators']]) logging.warning(f'overriding mutators to {mutators}') opt['mutators'] = mutators super().__init__(opt, shared) self.id = "WowDialogueTeacher" class MsMarcoDialogueTeacher(ms_marco.DefaultTeacher): def __init__(self, opt, shared=None): mutators = '+'.join( [ 'ms_marco_filter_has_answer', 'ms_marco_create_fid_docs', 
'ms_marco_find_selected_sentence_for_response', 'woi_pop_documents_mutator', 'skip_retrieval_mutator', ] ) if opt.get('mutators'): mutators = '+'.join([mutators, opt['mutators']]) logging.warning(f'overriding mutators to {mutators}') opt['mutators'] = mutators super().__init__(opt, shared) self.id = "MsMarcoDialogueTeacher" def get_dialogue_task_mutators(opt: Opt) -> str: """ Set the mutators appropriately for the dialogue tasks. """ mutators = '+'.join( ['flatten', 'extract_entity_for_response_model', 'skip_retrieval_mutator'] ) if opt.get('mutators'): mutators = '+'.join([mutators, opt['mutators']]) logging.warning(f'overriding mutators to {mutators}') return mutators class Convai2DialogueTeacher(convai2.NormalizedTeacher): def __init__(self, opt, shared=None): opt['mutators'] = get_dialogue_task_mutators(opt) opt['task'] += ':no_cands' super().__init__(opt, shared) self.id = 'Convai2DialogueTeacher' class EDDialogueTeacher(ed.DefaultTeacher): def __init__(self, opt, shared=None): opt['mutators'] = get_dialogue_task_mutators(opt) super().__init__(opt, shared) self.id = 'EDDialogueTeacher' class BSTDialogueTeacher(bst.DefaultTeacher): def __init__(self, opt, shared=None): opt['mutators'] = get_dialogue_task_mutators(opt) super().__init__(opt, shared) self.id = 'BSTDialogueTeacher' class MSCDialogueTeacher(msc.DefaultTeacher): def __init__(self, opt, shared=None): opt['mutators'] = get_dialogue_task_mutators(opt) opt['include_session1'] = False super().__init__(opt, shared) self.id = 'MSCDialogueTeacher' class MSCDialogueOverlapTeacher(msc.DefaultTeacher): def __init__(self, opt, shared=None): opt['mutators'] = '+'.join( ['flatten', 'msc_find_selected_sentence_response', 'skip_retrieval_mutator'] ) opt['include_session1'] = False super().__init__(opt, shared) self.id = 'MSCDialogueOverlapTeacher' class DialogueTeacher(MultiTaskTeacher): @classmethod def add_cmdline_args( cls, parser: ParlaiParser, partial_opt: Optional[Opt] = None ) -> ParlaiParser: 
WoiDialogueTeacher.add_cmdline_args(parser, partial_opt) WowDialogueTeacher.add_cmdline_args(parser, partial_opt) MsMarcoDialogueTeacher.add_cmdline_args(parser, partial_opt) Convai2DialogueTeacher.add_cmdline_args(parser, partial_opt) EDDialogueTeacher.add_cmdline_args(parser, partial_opt) BSTDialogueTeacher.add_cmdline_args(parser, partial_opt) MSCDialogueTeacher.add_cmdline_args(parser, partial_opt) MSCDialogueOverlapTeacher.add_cmdline_args(parser, partial_opt) return parser def __init__(self, opt, shared=None): tasks = [ f"projects.seeker.tasks.dialogue:{teacher}" for teacher in [ 'WoiDialogueTeacher', 'WowDialogueTeacher', 'MsMarcoDialogueTeacher', 'Convai2DialogueTeacher', 'EDDialogueTeacher', 'BSTDialogueTeacher', 'MSCDialogueTeacher', 'MSCDialogueOverlapTeacher', ] ] opt['task'] = ','.join(tasks) super().__init__(opt, shared) class DefaultTeacher(DialogueTeacher): pass
34.241379
88
0.643337
609
5,958
5.981938
0.228243
0.044469
0.027175
0.034587
0.444688
0.430963
0.343947
0.343947
0.313478
0.286028
0
0.002912
0.250755
5,958
173
89
34.439306
0.813172
0.049513
0
0.375
0
0
0.2126
0.114108
0
0
0
0
0
1
0.080882
false
0.022059
0.095588
0
0.264706
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
2e6d9f56ad67c28ab101dfa720b2f55910ca38c7
350
py
Python
chrispile/util.py
FNNDSC/chrispile
9eb688b17bd3392c23b5cc2a1e11470d78d6029a
[ "MIT" ]
null
null
null
chrispile/util.py
FNNDSC/chrispile
9eb688b17bd3392c23b5cc2a1e11470d78d6029a
[ "MIT" ]
null
null
null
chrispile/util.py
FNNDSC/chrispile
9eb688b17bd3392c23b5cc2a1e11470d78d6029a
[ "MIT" ]
null
null
null
import abc from argparse import ArgumentParser, Namespace from .config import get_config class CommandProvider(abc.ABC): def __init__(self, parser: ArgumentParser): self.config = get_config() parser.set_defaults(func=self) @abc.abstractmethod def __call__(self, options: Namespace): raise NotImplementedError()
25
47
0.728571
39
350
6.25641
0.564103
0.07377
0
0
0
0
0
0
0
0
0
0
0.191429
350
13
48
26.923077
0.862191
0
0
0
0
0
0
0
0
0
0
0
0
1
0.2
false
0
0.3
0
0.6
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
2e6f1afc0f744ac9404e2211aba6de066e7ef17c
297
py
Python
Part_1_beginner/07_type_dictionary/rozwiazania/exercise_1.py
Mikma03/InfoShareacademy_Python_Courses
3df1008c8c92831bebf1625f960f25b39d6987e6
[ "MIT" ]
null
null
null
Part_1_beginner/07_type_dictionary/rozwiazania/exercise_1.py
Mikma03/InfoShareacademy_Python_Courses
3df1008c8c92831bebf1625f960f25b39d6987e6
[ "MIT" ]
null
null
null
Part_1_beginner/07_type_dictionary/rozwiazania/exercise_1.py
Mikma03/InfoShareacademy_Python_Courses
3df1008c8c92831bebf1625f960f25b39d6987e6
[ "MIT" ]
1
2021-02-20T08:30:56.000Z
2021-02-20T08:30:56.000Z
# Stwórz słownik, w którym kluczami będą różne przedmioty szkolne # a wartościami oceny uzyskane z tych przedmiotów grades = { "Matematyka": [4, 2, 6, 5, 3], "Fizyka": [5, 5, 2, 4, 3], "Chemia": [4, 1, 4, 5, 4], "Biologia": [3, 5, 5, 2, 5], } print("Przedmioty i oceny", grades)
24.75
65
0.599327
46
297
3.869565
0.630435
0.022472
0.033708
0
0
0
0
0
0
0
0
0.087336
0.228956
297
11
66
27
0.689956
0.373737
0
0
0
0
0.263736
0
0
0
0
0
0
1
0
false
0
0
0
0
0.142857
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
2e7040e11ae9ee1dfa29a9acc88975d2a9c16bff
703
py
Python
paper2/figures/calibration_default.py
dfm/mapping_stellar_surfaces
52d4ba1a726c65868e4a1290a801fe046fb2155f
[ "MIT" ]
10
2021-01-21T17:03:26.000Z
2021-12-19T17:49:28.000Z
paper2/figures/calibration_default.py
dfm/mapping_stellar_surfaces
52d4ba1a726c65868e4a1290a801fe046fb2155f
[ "MIT" ]
10
2021-01-21T15:55:53.000Z
2021-03-30T14:35:16.000Z
paper2/figures/calibration_default.py
dfm/mapping_stellar_surfaces
52d4ba1a726c65868e4a1290a801fe046fb2155f
[ "MIT" ]
2
2021-01-21T15:41:58.000Z
2021-01-25T16:26:15.000Z
from starry_process import calibrate import numpy as np import os import shutil # Utility funcs to move figures to this directory abspath = lambda *args: os.path.join( os.path.dirname(os.path.abspath(__file__)), *args ) copy = lambda name, src, dest: shutil.copyfile( abspath("data", name, src), abspath(dest) ) # Run calibrate.run(path=abspath("data/default"), ncols=7, clip=True) # Copy output to this directory copy("default", "data.pdf", "calibration_default_data.pdf") copy("default", "corner_transformed.pdf", "calibration_default_corner.pdf") copy("default", "latitude.pdf", "calibration_default_latitude.pdf") copy("default", "inclination.pdf", "calibration_default_inclination.pdf")
31.954545
75
0.755334
97
703
5.329897
0.443299
0.085106
0.162476
0
0
0
0
0
0
0
0
0.001592
0.106686
703
21
76
33.47619
0.821656
0.11522
0
0
0
0
0.365696
0.237864
0
0
0
0
0
1
0
false
0
0.266667
0
0.266667
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
2e7169ec55a244a64b91496630b7d2210f0c8139
5,922
py
Python
python/ad/spectral_outlier.py
rislam/ad_examples
20e6dd2dbfd111ed5f69a9018180f7ef5ab627f6
[ "MIT" ]
1
2019-02-21T02:28:34.000Z
2019-02-21T02:28:34.000Z
python/ad/spectral_outlier.py
kinect59/ad_examples
bf0bb75faa3f713a2efef04b6b093e6a313825af
[ "MIT" ]
null
null
null
python/ad/spectral_outlier.py
kinect59/ad_examples
bf0bb75faa3f713a2efef04b6b093e6a313825af
[ "MIT" ]
null
null
null
import numpy.random as rnd from sklearn import manifold from sklearn.ensemble import IsolationForest from common.gen_samples import * """ pythonw -m ad.spectral_outlier """ def euclidean_dist(x1, x2): dist = np.sqrt(np.sum((x1 - x2) ** 2)) return dist class LabelDiffusion(object): """ IMPORTANT: The results from Python's Scikit-Learn MDS API are significantly different (and sub-optimal) from R. Strongly recommend R's isoMDS for the last step of converting pair-wise distances to 2D coordinates. """ def __init__(self, n_neighbors=10, k2=0.5, alpha=0.99, n_components=2, eigen_solver='auto', tol=0., max_iter=None, n_jobs=1, metric=True): self.n_neighbors = n_neighbors self.k2 = k2 self.alpha = alpha self.n_components = n_components self.eigen_solver = eigen_solver self.tol = tol self.max_iter = max_iter self.n_jobs = n_jobs self.metric = metric self.alphas_ = None self.lambdas_ = None def fit_transform(self, x_in): n = nrow(x_in) x = normalize_and_center_by_feature_range(x_in) dists = np.zeros(shape=(n, n), dtype=float) for i in range(n): for j in range(i, n): dists[i, j] = euclidean_dist(x[i, :], x[j, :]) dists[j, i] = dists[i, j] logger.debug(dists[0, 0:10]) neighbors = np.zeros(shape=(n, self.n_neighbors), dtype=int) for i in range(n): neighbors[i, :] = np.argsort(dists[i, :])[0:self.n_neighbors] logger.debug(neighbors[0, 0:10]) W = np.zeros(shape=(n, n)) for i in range(n): for j in neighbors[i, :]: # diagonal elements of W will be zeros if i != j: W[i, j] = np.exp(-(dists[i, j] ** 2) / self.k2) W[j, i] = W[i, j] D = W.sum(axis=1) # logger.debug(str(list(D[0:10]))) iDroot = np.diag(np.sqrt(D) ** (-1)) S = iDroot.dot(W.dot(iDroot)) # logger.debug("S: %s" % str(list(S[0, 0:10]))) B = np.eye(n) - self.alpha * S # logger.debug("B: %s" % str(list(B[0, 0:10]))) A = np.linalg.inv(B) tdA = np.diag(np.sqrt(np.diag(A)) ** (-1)) A = tdA.dot(A.dot(tdA)) # logger.debug("A: %s" % str(list(A[0, 0:10]))) d = 1 - A # logger.debug("d: %s" % str(list(d[0, 0:10]))) # 
logger.debug("min(d): %f, max(d): %f" % (np.min(d), np.max(d))) mds = manifold.MDS(self.n_components, metric=self.metric, dissimilarity='precomputed') # using abs below because some zeros are represented as -0; other values are positive. embedding = mds.fit_transform(np.abs(d)) return embedding if __name__ == "__main__": logger = logging.getLogger(__name__) args = get_command_args(debug=True, debug_args=["--debug", "--plot", "--log_file=temp/spectral_outlier.log"]) # print "log file: %s" % args.log_file configure_logger(args) # sample_type = "4_" # sample_type = "donut_" sample_type = "face_" rnd.seed(42) x, y = get_demo_samples(sample_type) n = x.shape[0] xx = yy = x_grid = Z = scores = None if args.plot: plot_sample(x, y, pdfpath="temp/spectral_%ssamples.pdf" % sample_type) n_neighbors = 10 n_components = 2 method = "standard" # ['standard', 'ltsa', 'hessian', 'modified'] # embed_type = "se" # embed_type = "tsne" # embed_type = "isomap" # embed_type = "mds" # embed_type = "lle_%s" % method embed_type = "diffusion" if embed_type == "se": embed = manifold.SpectralEmbedding(n_components=n_components, n_neighbors=n_neighbors) elif embed_type == "tsne": embed = manifold.TSNE(n_components=n_components, init='pca', random_state=0) elif embed_type.startswith("lle_"): embed = manifold.LocallyLinearEmbedding(n_neighbors=n_neighbors, n_components=n_components, eigen_solver='auto', method=method) elif embed_type == "isomap": embed = manifold.Isomap(n_neighbors=n_neighbors, n_components=n_components) elif embed_type == "mds": embed = manifold.MDS(n_components=n_components) elif embed_type == "diffusion": embed = LabelDiffusion(n_neighbors=n_neighbors, n_components=n_components, metric=True) else: raise ValueError("invalid embed type %s" % embed_type) x_tr = embed.fit_transform(x) logger.debug(x_tr) if args.plot: plot_sample(x_tr, y, pdfpath="temp/spectral_%s%s.pdf" % (sample_type, embed_type)) ad_type = 'ifor' outliers_fraction = 0.1 ad = IsolationForest(max_samples=256, 
contamination=outliers_fraction, random_state=None) ad.fit(x_tr) scores = -ad.decision_function(x_tr) top_anoms = np.argsort(-scores)[np.arange(10)] if args.plot: # to plot probability contours xx, yy = np.meshgrid(np.linspace(np.min(x_tr[:, 0]), np.max(x_tr[:, 0]), 50), np.linspace(np.min(x_tr[:, 1]), np.max(x_tr[:, 1]), 50)) x_grid = np.c_[xx.ravel(), yy.ravel()] Z = -ad.decision_function(x_grid) Z = Z.reshape(xx.shape) pdfpath = "temp/spectral_%scontours_%s_%s.pdf" % (sample_type, ad_type, embed_type) dp = DataPlotter(pdfpath=pdfpath, rows=1, cols=1) pl = dp.get_next_plot() pl.contourf(xx, yy, Z, 20, cmap=plt.cm.get_cmap('jet')) dp.plot_points(x_tr, pl, labels=y, lbl_color_map={0: "grey", 1: "red"}, s=25) pl.scatter(x_tr[top_anoms, 0], x_tr[top_anoms, 1], marker='o', s=35, edgecolors='red', facecolors='none') dp.close()
33.647727
99
0.577676
833
5,922
3.92557
0.285714
0.057187
0.026911
0.047095
0.10948
0.088073
0.06422
0.049541
0
0
0
0.020872
0.279973
5,922
175
100
33.84
0.746013
0.148261
0
0.055046
0
0
0.05136
0.023968
0
0
0
0
0
1
0.027523
false
0
0.036697
0
0.091743
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
2e71e3d05682a0aebe1f4b9f321ce88d5da677b1
5,042
py
Python
simchain/vm.py
Frank-gh/simchain
4dec42b6039730e4dcc0068209dd90200ee6b3d3
[ "Unlicense" ]
74
2018-11-14T02:36:13.000Z
2022-03-11T08:24:17.000Z
simchain/vm.py
Frank-gh/simchain
4dec42b6039730e4dcc0068209dd90200ee6b3d3
[ "Unlicense" ]
null
null
null
simchain/vm.py
Frank-gh/simchain
4dec42b6039730e4dcc0068209dd90200ee6b3d3
[ "Unlicense" ]
35
2019-01-16T04:18:24.000Z
2022-03-21T09:05:12.000Z
from .logger import logger from .ecc import convert_pubkey_to_addr,VerifyingKey,sha256d class Stack(list): push = list.append def peek(self): return self[-1] class LittleMachine(object): def __init__(self): self.stack = Stack() self._map = { "OP_ADD": self.add, "OP_MINUS": self.minus, "OP_MUL": self.mul, "OP_EQ": self.equal_check, "OP_EQUAL" : self.equal, "OP_CHECKSIG": self.check_sig, "OP_ADDR": self.calc_addr, "OP_DUP" : self.dup, "OP_NDUP" : self.ndup, "OP_CHECKMULSIG" : self.check_mulsig, "OP_MULHASH": self.calc_mulhash, } def set_script(self,script,message = b''): self.clear() self.result = True self.pointer = 0 self.message = message self.script = script def clear(self): self.stack.clear() def peek(self): return self.stack.peek() def pop(self): return self.stack.pop() def push(self,value): self.stack.push(value) def evaluate(self,op): if op in self._map: self._map[op]() elif isinstance(op,str) or\ isinstance(op,bytes)or\ isinstance(op,int) or\ isinstance(op,bool): self.push(op) else: logger.info('Uknow opcode: '.format(op)) def add(self): self.push(self.pop() + self.pop()) def minus(self): last = self.pop() self.push(self.pop() - last) def mul(self): self.push(self.pop() * self.pop()) def dup(self): self.push(self.peek()) def ndup(self): n = self.pop() for val in self.stack[-n:]: self.push(val) self.push(n) def equal_check(self): flag = self.pop() == self.pop() if not flag: self.result = False def equal(self): self.push(self.pop()==self.pop()) def calc_mulhash(self): n = self.pop() pk_strs = [self.pop() for _ in range(n)] s = b'' for val in pk_strs[::-1]: s += val self.push(sha256d(s)) def check_sig(self): pk_str = self.pop() sig = self.pop() verifying_key = VerifyingKey.from_bytes(pk_str) try: flag = verifying_key.verify(sig,self.message) except Exception: flag = False self.push(flag) def check_mulsig(self): n = self.pop() pk_strs = [self.pop() for _ in range(n)] m = self.pop() sigs = [self.pop() for _ in range(m)] pk_strs = pk_strs[-m:] for i in range(m): 
verifying_key = VerifyingKey.from_bytes(pk_strs[i]) try: flag = verifying_key.verify(sigs[i],self.message) except Exception: flag = False if not flag: falg = False break self.push(flag) def calc_addr(self): pk_str = self.pop() self.push(convert_pubkey_to_addr(pk_str)) def run(self): while (self.pointer < len(self.script)): op = self.script[self.pointer] self.pointer += 1 self.evaluate(op) if not self.result: return False else: return self.peek() if __name__ == "__main__": from datatype import Vin,Vout from ecc import SigningKey,convert_pubkey_to_addr ## k = 12356 ## k1 = 23464 ## sk = SigningKey.from_number(k) ## pk = sk.get_verifying_key() ## ## sk1 = SigningKey.from_number(k1) ## pk1 = sk1.get_verifying_key() ## addr = convert_pubkey_to_addr(pk.to_bytes()) ## addr1 = convert_pubkey_to_addr(pk1.to_bytes()) ## ## m1 = b'hello' ## m2 = b'go away' ## sig = sk.sign(m1) ## sig1 = sk1.sign(m2) ## vin = Vin(None,sig1,pk1.to_bytes()) ## vout = Vout(addr,10) ## ## sig_script = [vin.sig_script[:64],vin.sig_script[64:]] ## pubkey_script = vout.pubkey_script.split(' ') kA = 3453543 kB = 2349334 skA = SigningKey.from_number(kA) skB = SigningKey.from_number(kB) pkA = skA.get_verifying_key() pkB = skB.get_verifying_key() message = b'I love blockchain' sigA = skA.sign(message) sigB = skB.sign(message) Hash = sha256d(pkA.to_bytes()+pkB.to_bytes()) sig_script = [sigA,sigB,2,pkA.to_bytes(),pkB.to_bytes(),2] pubkey_script = ['OP_NDUP','OP_MULHASH',Hash,'OP_EQ',2,'OP_CHECKMULSIG'] script = sig_script + pubkey_script machine = LittleMachine() machine.set_script(script,message) print (machine.run()) ## script = [a,1,2,'OP_DUP','OP_ADD','OP_EQ'] ## machine = LittleMachine() ## machine.set_script(script) ## print(machine.run())
24.837438
76
0.527172
624
5,042
4.086538
0.214744
0.054902
0.025882
0.037255
0.237647
0.165882
0.062353
0.062353
0.028235
0.028235
0
0.018987
0.341928
5,042
202
77
24.960396
0.749548
0.12733
0
0.162791
0
0
0.037489
0
0
0
0
0
0
1
0.155039
false
0
0.031008
0.023256
0.248062
0.007752
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
2e735a5e613348bc55e8ad80f97e09fd450063e0
442
py
Python
ceol/player/migrations/0003_auto_20200722_1728.py
pythify/backend-pithify
ac845bd96769d3cda1e32ea5c141598f0529db19
[ "MIT" ]
null
null
null
ceol/player/migrations/0003_auto_20200722_1728.py
pythify/backend-pithify
ac845bd96769d3cda1e32ea5c141598f0529db19
[ "MIT" ]
null
null
null
ceol/player/migrations/0003_auto_20200722_1728.py
pythify/backend-pithify
ac845bd96769d3cda1e32ea5c141598f0529db19
[ "MIT" ]
null
null
null
# Generated by Django 3.0.8 on 2020-07-22 17:28 from django.db import migrations, models class Migration(migrations.Migration): dependencies = [ ('player', '0002_auto_20200722_1725'), ] operations = [ migrations.AlterField( model_name='album', name='total_songs', field=models.PositiveSmallIntegerField(blank=True, null=True, verbose_name='total tracks'), ), ]
23.263158
103
0.628959
48
442
5.666667
0.8125
0.066176
0
0
0
0
0
0
0
0
0
0.094512
0.257919
442
18
104
24.555556
0.734756
0.10181
0
0
1
0
0.144304
0.058228
0
0
0
0
0
1
0
false
0
0.083333
0
0.333333
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
2e73ac16adb060cc06fc6f0d2d05cbe18736f6a0
5,556
py
Python
bgx/validator-bgx/sawtooth_validator/journal/consensus/consensus_factory.py
sparsov/DGT-Kawartha-demo
edfbc18f2c70e813805ec23c28fbc35bf7866ffc
[ "Apache-2.0" ]
null
null
null
bgx/validator-bgx/sawtooth_validator/journal/consensus/consensus_factory.py
sparsov/DGT-Kawartha-demo
edfbc18f2c70e813805ec23c28fbc35bf7866ffc
[ "Apache-2.0" ]
10
2020-05-12T06:58:15.000Z
2022-02-26T23:59:35.000Z
bgx/validator-bgx/sawtooth_validator/journal/consensus/consensus_factory.py
DGT-Network/DGT-Mississauga
52b5f1f4015db2aa7196e727a25b399de5fbf3c3
[ "Apache-2.0" ]
1
2021-01-12T21:38:01.000Z
2021-01-12T21:38:01.000Z
# Copyright 2017 NTRLab # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ------------------------------------------------------------------------------ import importlib import logging from sawtooth_validator.exceptions import UnknownConsensusModuleError from sawtooth_validator.journal.block_wrapper import NULL_BLOCK_IDENTIFIER from sawtooth_validator.state.settings_view import SettingsView LOGGER = logging.getLogger(__name__) PROXY = '_proxy_' class ConsensusFactory(object): """ConsensusFactory returns consensus modules by short name. """ @staticmethod def get_consensus_module(module_name): """Returns a consensus module by name. Args: module_name (str): The name of the module to load. Returns: module: The consensus module. Raises: UnknownConsensusModuleError: Raised if the given module_name does not correspond to a consensus implementation. """ module_package = module_name if module_name == 'genesis': module_package = ( 'sawtooth_validator.journal.consensus.genesis.' 'genesis_consensus' ) elif module_name == 'devmode': module_package = ( 'sawtooth_validator.journal.consensus.dev_mode.' 'dev_mode_consensus' ) elif module_name == PROXY: module_package = ( 'sawtooth_validator.journal.consensus.proxy.' 
'proxy_consensus' ) elif module_name == 'poet': module_package = 'sawtooth_poet.poet_consensus' elif module_name == 'pbft': module_package = 'pbft.bgx_pbft.consensus' try: return importlib.import_module(module_package) except ImportError: raise UnknownConsensusModuleError( 'Consensus module "{}" does not exist.'.format(module_name)) @staticmethod def try_configured_proxy_consensus(): """Returns the proxy onsensus_module based on the consensus module set by the "sawtooth_settings" transaction family. Args: block_id (str): the block id associated with the current state_view state_view (:obj:`StateView`): the current state view to use for setting values Raises: UnknownConsensusModuleError: Thrown when an invalid consensus module has been configured. """ LOGGER.debug("ConsensusFactory::try_configured_proxy_consensus") try: mod = ConsensusFactory.get_consensus_module(PROXY) except UnknownConsensusModuleError: mod = None return mod @staticmethod def try_configured_consensus_module(block_id, state_view): """Returns the consensus_module based on the consensus module set by the "sawtooth_settings" transaction family. Args: block_id (str): the block id associated with the current state_view state_view (:obj:`StateView`): the current state view to use for setting values Raises: UnknownConsensusModuleError: Thrown when an invalid consensus module has been configured. 
""" settings_view = SettingsView(state_view) default_consensus = 'genesis' if block_id == NULL_BLOCK_IDENTIFIER else 'devmode' consensus_module_name = settings_view.get_setting('bgx.consensus.algorithm', default_value=default_consensus) consensus_version = settings_view.get_setting('bgx.consensus.version', default_value='0.1') LOGGER.debug("ConsensusFactory::try_configured_consensus_module consensus_module_name=%s ver=%s",consensus_module_name,consensus_version) try: mod = ConsensusFactory.get_consensus_module(consensus_module_name) except UnknownConsensusModuleError: mod = None return mod,(consensus_module_name,consensus_version) @staticmethod def get_configured_consensus_module(block_id, state_view): """Returns the consensus_module based on the consensus module set by the "sawtooth_settings" transaction family. Args: block_id (str): the block id associated with the current state_view state_view (:obj:`StateView`): the current state view to use for setting values Raises: UnknownConsensusModuleError: Thrown when an invalid consensus module has been configured. """ settings_view = SettingsView(state_view) default_consensus = 'genesis' if block_id == NULL_BLOCK_IDENTIFIER else 'devmode' consensus_module_name = settings_view.get_setting('bgx.consensus.algorithm', default_value=default_consensus) LOGGER.debug("ConsensusFactory::get_configured_consensus_module consensus_module_name=%s",consensus_module_name) return ConsensusFactory.get_consensus_module(consensus_module_name)
42.090909
145
0.667207
600
5,556
5.96
0.245
0.11745
0.047819
0.031879
0.549217
0.510067
0.4217
0.36689
0.36689
0.36689
0
0.002417
0.2554
5,556
131
146
42.412214
0.861977
0.384989
0
0.322581
0
0
0.19457
0.14415
0
0
0
0
0
1
0.064516
false
0
0.112903
0
0.258065
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
2e7700fe88b3734cb3e81c07b67868f5b2c4220d
19,554
py
Python
bin/ensemble_aws_accounts_eai_handler.py
nvonkorff/grand_central
ef8630bb1e717ff542903fd1e067e428fafafc2f
[ "MIT" ]
null
null
null
bin/ensemble_aws_accounts_eai_handler.py
nvonkorff/grand_central
ef8630bb1e717ff542903fd1e067e428fafafc2f
[ "MIT" ]
null
null
null
bin/ensemble_aws_accounts_eai_handler.py
nvonkorff/grand_central
ef8630bb1e717ff542903fd1e067e428fafafc2f
[ "MIT" ]
null
null
null
import logging import os import sys import uuid import splunk.admin as admin import ensemble_aws_accounts_schema import urllib import hashlib import base_eai_handler import log_helper import boto3 import json if sys.platform == 'win32': import msvcrt # Binary mode is required for persistent mode on Windows. msvcrt.setmode(sys.stdin.fileno(), os.O_BINARY) msvcrt.setmode(sys.stdout.fileno(), os.O_BINARY) msvcrt.setmode(sys.stderr.fileno(), os.O_BINARY) # Setup the handler logger = log_helper.setup(logging.INFO, 'EnsembleAWSAccountsEAIHandler', 'ensemble_aws_accounts_handler.log') class EnsembleAWSAccountsEAIHandler(base_eai_handler.BaseEAIHandler): def setup(self): # Add our supported args for arg in ensemble_aws_accounts_schema.ALL_FIELDS: self.supportedArgs.addOptArg(arg) def handleList(self, confInfo): """ Called when user invokes the "list" action. Arguments confInfo -- The object containing the information about what is being requested. """ logger.info('Ensemble AWS accounts list requested.') # Fetch from ensemble_aws_accounts conf handler conf_handler_path = self.get_conf_handler_path_name('ensemble_aws_accounts', 'nobody') ensemble_aws_accounts_eai_response_payload = self.simple_request_eai(conf_handler_path, 'list', 'GET') # Add link alternate (without mgmt, scheme, host, port) to list response for ensemble_aws_accounts in ensemble_aws_accounts_eai_response_payload['entry']: ensemble_aws_accounts_link_alternate = ensemble_aws_accounts['links']['alternate'].replace('/configs/conf-ensemble_aws_accounts/', '/ensemble_aws_accounts/') if ensemble_aws_accounts['content'].get('cloudformation_stack_id', '') != '': ensemble_aws_accounts['content']['data_collection_deployed'] = '1' # Get AWS Secret Key passwords_conf_payload = self.simple_request_eai(ensemble_aws_accounts['content']['aws_secret_key_link_alternate'], 'list', 'GET') SECRET_KEY = passwords_conf_payload['entry'][0]['content']['clear_password'] try: client = boto3.client('cloudformation', 
aws_access_key_id=ensemble_aws_accounts['content']['aws_access_key'], aws_secret_access_key=SECRET_KEY) response = client.describe_stacks( StackName=ensemble_aws_accounts['name'], ) except Exception, e: ensemble_aws_accounts['content']['data_collection_deployed'] = '0' ensemble_aws_accounts['content']['data_collection_deployment_success'] = '0' # Remove stack_id from the Ensemble AWS Account conf entry ensemble_aws_accounts['content']['cloudformation_stack_id'] = '' continue data_collection_deployment_success = '0' for stack in response['Stacks']: if stack['StackName'] == ensemble_aws_accounts['name']: if stack['StackStatus'] == 'DELETE_IN_PROGRESS': data_collection_deployment_success = '3' ensemble_aws_accounts['content']['data_collection_deployed'] = '2' if stack['StackStatus'] == 'CREATE_IN_PROGRESS': data_collection_deployment_success = '2' if stack['StackStatus'] == 'UPDATE_IN_PROGRESS': data_collection_deployment_success = '2' if stack['StackStatus'] == 'CREATE_COMPLETE': data_collection_deployment_success = '1' if stack['StackStatus'] == 'UPDATE_COMPLETE': data_collection_deployment_success = '1' ensemble_aws_accounts['content']['data_collection_deployment_success'] = data_collection_deployment_success else: ensemble_aws_accounts['content']['data_collection_deployed'] = '0' ensemble_aws_accounts['content']['ensemble_aws_accounts_link_alternate'] = ensemble_aws_accounts_link_alternate ensemble_aws_accounts['content']['ensemble_aws_accounts_name'] = ensemble_aws_accounts['name'] ensemble_aws_accounts['content']['aws_access_key'] = ensemble_aws_accounts['content'].get('aws_access_key', '') ensemble_aws_accounts['content']['cloudformation_stack_id'] = ensemble_aws_accounts['content'].get( 'cloudformation_stack_id', '') ensemble_aws_accounts['content']['tags'] = ensemble_aws_accounts['content'].get('tags', '') self.set_conf_info_from_eai_payload(confInfo, ensemble_aws_accounts_eai_response_payload) def handleCreate(self, confInfo): """ Called when user invokes 
the "create" action. Arguments confInfo -- The object containing the information about what is being requested. """ logger.info('Ensemble AWS account creation requested.') # Validate and extract correct POST params server_params = self.validate_server_schema_params() auth_params = self.validate_auth_schema_params() params = auth_params.copy() params.update(server_params) # Password creation aws_secret_key_link_alternate = self.password_create(params['aws_access_key'], params['aws_secret_key']) # ensemble_aws_accounts.conf creation and response post_args = { 'name': params['name'], 'aws_account_id': params['aws_account_id'], 'aws_access_key': params['aws_access_key'], 'aws_secret_key_link_alternate': aws_secret_key_link_alternate, 'data_collection_deployed': '0', 'data_collection_deployment_success': '0', 'tags': params['tags'] } ensemble_aws_accounts_eai_response_payload = self.simple_request_eai(self.get_conf_handler_path_name('ensemble_aws_accounts'), 'create', 'POST', post_args) # Always populate entry content from request to list handler. ensemble_aws_accounts_rest_path = '/servicesNS/%s/%s/ensemble_aws_accounts/%s' % ( 'nobody', self.appName, urllib.quote_plus(params['name'])) ensemble_aws_accounts_eai_response_payload = self.simple_request_eai(ensemble_aws_accounts_rest_path, 'read', 'GET') self.set_conf_info_from_eai_payload(confInfo, ensemble_aws_accounts_eai_response_payload) def handleEdit(self, confInfo): """ Called when user invokes the 'edit' action. Index modification is not supported through this endpoint. Both the scripted input and the ensemble_aws_accounts.conf stanza will be overwritten on ANY call to this endpoint. Arguments confInfo -- The object containing the information about what is being requested. 
""" logger.info('Ensemble AWS Account edit requested.') name = self.callerArgs.id conf_stanza = urllib.quote_plus(name) params = self.validate_server_schema_params() conf_handler_path = '%s/%s' % (self.get_conf_handler_path_name('ensemble_aws_accounts', 'nobody'), conf_stanza) ensemble_aws_accounts_eai_response_payload = self.simple_request_eai(conf_handler_path, 'list', 'GET') old_aws_access_key = ensemble_aws_accounts_eai_response_payload['entry'][0]['content']['aws_access_key'] old_aws_secret_key_link_alternate = ensemble_aws_accounts_eai_response_payload['entry'][0]['content']['aws_secret_key_link_alternate'] # Create post args - remove name to ensure edit instead of create ensemble_aws_accounts_conf_postargs = { 'aws_access_key': params['aws_access_key'], 'tags': params['tags'], } # Change password if provided in params if old_aws_access_key != params['aws_access_key']: if self.get_param('aws_secret_key'): # New username and password provided auth_params = self.validate_auth_schema_params() params.update(auth_params) # Edit passwords.conf stanza ensemble_aws_accounts_conf_postargs['aws_secret_key_link_alternate'] = self.password_edit(old_aws_secret_key_link_alternate, params['aws_access_key'], params['aws_secret_key']) else: # Can't change username without providing password raise admin.InternalException('AWS Secret Key must be provided on AWS Access Key change.') if (old_aws_access_key == params['aws_access_key'] and self.get_param('aws_secret_key')): # Password update to existing username auth_params = self.validate_auth_schema_params() params.update(auth_params) # Edit passwords.conf stanza ensemble_aws_accounts_conf_postargs['aws_secret_key_link_alternate'] = self.password_edit(old_aws_secret_key_link_alternate, params['aws_access_key'], params['aws_secret_key']) if self.get_param('aws_secret_key'): aws_secret_key_link_alternate = self.get_param('aws_secret_key') else: aws_secret_key_link_alternate = old_aws_secret_key_link_alternate # Get AWS Secret Key 
passwords_conf_payload = self.simple_request_eai(aws_secret_key_link_alternate, 'list', 'GET') SECRET_KEY = passwords_conf_payload['entry'][0]['content']['clear_password'] if params['template_link_alternate'] and params['template_link_alternate'] != '' and params['cloudformation_template_action'] and params['cloudformation_template_action'] == 'apply': # Get CloudFormation template string cloudformation_templates_conf_payload = self.simple_request_eai(params['template_link_alternate'], 'list', 'GET') template_filename = cloudformation_templates_conf_payload['entry'][0]['content']['filename'] with open(os.path.dirname(os.path.abspath(__file__)) + '/cloudformation_templates/' + template_filename) as json_file: json_data = json.dumps(json.load(json_file)) try: client = boto3.client('cloudformation', aws_access_key_id=params['aws_access_key'], aws_secret_access_key=SECRET_KEY) response = client.create_stack( StackName=params['name'], TemplateBody=json_data, Capabilities=[ 'CAPABILITY_IAM' ] ) except Exception, e: logger.error(e) raise admin.InternalException('Error connecting to AWS or deploying CloudFormation template %s' % e) ensemble_aws_accounts_conf_postargs['cloudformation_stack_id'] = response['StackId'] if params['cloudformation_template_action'] and params['cloudformation_template_action'] == 'remove': try: client = boto3.client('cloudformation', aws_access_key_id=params['aws_access_key'], aws_secret_access_key=SECRET_KEY) response = client.delete_stack( StackName=params['name'] ) except Exception, e: logger.error(e) raise admin.InternalException('Error connecting to AWS or deleting CloudFormation template %s' % e) if params['template_link_alternate'] and params['template_link_alternate'] != '' and params[ 'cloudformation_template_action'] and params['cloudformation_template_action'] == 'update': # Get CloudFormation template string cloudformation_templates_conf_payload = self.simple_request_eai(params['template_link_alternate'], 'list', 'GET') 
template_filename = cloudformation_templates_conf_payload['entry'][0]['content']['filename'] with open(os.path.dirname(os.path.abspath(__file__)) + '/cloudformation_templates/' + template_filename) as json_file: json_data = json.dumps(json.load(json_file)) try: client = boto3.client('cloudformation', aws_access_key_id=params['aws_access_key'], aws_secret_access_key=SECRET_KEY) response = client.update_stack( StackName=params['name'], TemplateBody=json_data, Capabilities=[ 'CAPABILITY_IAM' ] ) except Exception, e: logger.error(e) raise admin.InternalException('Error connecting to AWS or deploying CloudFormation template %s' % e) ensemble_aws_accounts_conf_postargs['cloudformation_stack_id'] = response['StackId'] # Edit ensemble_aws_accounts.conf ensemble_aws_accounts_eai_response_payload = self.simple_request_eai(conf_handler_path, 'edit', 'POST', ensemble_aws_accounts_conf_postargs) # Always populate entry content from request to list handler. ensemble_aws_accounts_rest_path = '/servicesNS/%s/%s/ensemble_aws_accounts/%s' % ('nobody', self.appName, conf_stanza) ensemble_aws_accounts_eai_response_payload = self.simple_request_eai(ensemble_aws_accounts_rest_path, 'read', 'GET') self.set_conf_info_from_eai_payload(confInfo, ensemble_aws_accounts_eai_response_payload) def handleRemove(self, confInfo): """ Called when user invokes the 'remove' action. Removes the requested stanza from inputs.conf (scripted input), removes the requested stanza from ensemble_aws_accounts.conf, and removes all related credentials Arguments confInfo -- The object containing the information about what is being requested. 
""" logger.info('Ensemble AWS Account removal requested.') name = self.callerArgs.id conf_stanza = urllib.quote_plus(name) # Grab the link alternate and username from the ensemble_aws_accounts GET response payload before it gets deleted ensemble_aws_accounts_rest_path = '/servicesNS/%s/%s/ensemble_aws_accounts/%s' % ('nobody', self.appName, conf_stanza) ensemble_aws_accounts_eai_response_payload = self.simple_request_eai(ensemble_aws_accounts_rest_path, 'read', 'GET') aws_secret_key_link_alternate = ensemble_aws_accounts_eai_response_payload['entry'][0]['content']['aws_secret_key_link_alternate'] # Delete passwords.conf stanza self.password_delete(aws_secret_key_link_alternate) # Delete ensemble_aws_accounts.conf stanza conf_handler_path = '%s/%s' % (self.get_conf_handler_path_name('ensemble_aws_accounts'), conf_stanza) ensemble_aws_accounts_eai_response_payload = self.simple_request_eai(conf_handler_path, 'remove', 'DELETE') self.set_conf_info_from_eai_payload(confInfo, ensemble_aws_accounts_eai_response_payload) def password_edit(self, password_link_alternate, new_username, password): """ Edits a password entry using the storage/passwords endpoint. This endpoint will first delete the existing entry, then creates a new one. Arguments password_link_alternate -- The link alternate of the password entry password -- The actual password which will be encrypted and stored in passwords.conf """ self.password_delete(password_link_alternate) return self.password_create(new_username, password) def password_delete(self, password_link_alternate): """ Deletes a password entry using the storage/passwords endpoint. Arguments password_link_alternate -- The link alternate of the password entry """ passwords_conf_payload = self.simple_request_eai(password_link_alternate, 'remove', 'DELETE') def hash_len_confirm(self, password, password_after, password_orig_hash, password_after_hash): """ Confirms length of plaintext password matches retrieved decrypted password. 
Also compares the hashes of the initial and retrieved passwords. Arguments password -- The actual password which was encrypted and stored in passwords.conf password_after -- The decrypted password retrieved from passwords.conf password_orig_hash -- The hash of the actual password which was encrypted and stored in passwords.conf password_after_hash -- The hash of the decrypted password retrieved from passwords.conf """ assert len(password_after) == len(password) assert password_orig_hash == password_after_hash def password_create(self, username, password): """ Creates a password entry using the storage/passwords endpoint. This endpoint will validate successful creationof the password by comparing length and hashes of the provided password and the retrieved cleartext password. Password realm will include a unique GUID. Arguments username -- The username associated with the provided password password -- The actual password which will be encrypted and stored in passwords.conf """ m = hashlib.md5() m.update(password) password_orig_hash = m.hexdigest() realm = str(uuid.uuid4().hex) passwords_conf_postargs = { 'realm': realm, 'name': username, 'password': password } passwords_rest_path = '/servicesNS/%s/%s/storage/passwords/' % ('nobody', self.appName) # Create password passwords_conf_payload = self.simple_request_eai(passwords_rest_path, 'create', 'POST', passwords_conf_postargs) password_link_alternate = passwords_conf_payload['entry'][0]['links']['alternate'] # Load password to check hash and length passwords_conf_payload = self.simple_request_eai(password_link_alternate, 'list', 'GET') password_after = passwords_conf_payload['entry'][0]['content']['clear_password'] m = hashlib.md5() m.update(password_after) password_after_hash = m.hexdigest() try: self.hash_len_confirm(password, password_after, password_orig_hash, password_after_hash) except Exception, e: logger.error(e) raise admin.InternalException('Password stored incorrectly %s' % e) return password_link_alternate 
def validate_server_schema_params(self): """ Validates raw request params against the server schema """ params = self.get_params(schema=ensemble_aws_accounts_schema, filter=ensemble_aws_accounts_schema.SERVER_FIELDS) return self.validate_params(ensemble_aws_accounts_schema.server_schema, params) def validate_auth_schema_params(self): """ Validates raw request params against the auth schema """ params = self.get_params(schema=ensemble_aws_accounts_schema, filter=ensemble_aws_accounts_schema.AUTH_FIELDS) return self.validate_params(ensemble_aws_accounts_schema.auth_schema, params) admin.init(EnsembleAWSAccountsEAIHandler, admin.CONTEXT_NONE)
51.730159
270
0.668763
2,204
19,554
5.586207
0.127042
0.075942
0.125
0.038012
0.729289
0.68551
0.645387
0.580734
0.542966
0.493502
0
0.002174
0.247264
19,554
377
271
51.867374
0.834296
0.058914
0
0.348214
0
0
0.189877
0.085509
0
0
0
0
0.008929
0
null
null
0.138393
0.058036
null
null
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
1
0
0
1
0
0
0
0
0
2
2e7726a4553ba265dc7d5e2d5a2ffbce3ef26f43
480
py
Python
tests/invertible_test_case.py
gregunz/invertransforms
3b0621c567d309ee054115cc90cb188808bd63b2
[ "BSD-3-Clause" ]
5
2019-10-14T18:56:08.000Z
2019-10-17T13:50:11.000Z
tests/invertible_test_case.py
gregunz/invertransforms
3b0621c567d309ee054115cc90cb188808bd63b2
[ "BSD-3-Clause" ]
1
2021-04-06T18:05:19.000Z
2021-04-06T18:05:19.000Z
tests/invertible_test_case.py
gregunz/invertransforms
3b0621c567d309ee054115cc90cb188808bd63b2
[ "BSD-3-Clause" ]
null
null
null
import random
import unittest

import torch

import invertransforms as T


class InvertibleTestCase(unittest.TestCase):
    """Shared fixtures for invertible-transform test cases."""

    def setUp(self) -> None:
        # Image dimensions used by the transform tests.
        self.img_size = (256, 320)
        self.h, self.w = self.img_size
        self.crop_size = (64, 128)
        # Random single-channel image, clamped into the valid [0, 1] range.
        self.img_tensor = torch.randn((1,) + self.img_size).clamp(0, 1)
        self.img_pil = T.ToPILImage()(self.img_tensor)
        # Round-trip through PIL so the tensor and PIL image match exactly.
        self.img_tensor = T.ToTensor()(self.img_pil)
        # BUG FIX: random.randint requires integer bounds; 1e9 is a float,
        # which is deprecated since Python 3.10 and raises TypeError on 3.12+.
        self.n = random.randint(0, 10**9)
24
71
0.645833
69
480
4.362319
0.492754
0.186047
0.109635
0
0
0
0
0
0
0
0
0.045946
0.229167
480
19
72
25.263158
0.767568
0
0
0
0
0
0
0
0
0
0
0
0
1
0.076923
false
0
0.307692
0
0.461538
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
1
0
0
0
0
1
2e7810efd3616472cfac0aa367ce42b73363d1b5
4,162
py
Python
nicos_sinq/zebra/setups/monochromator.py
ess-dmsc/nicos
755d61d403ff7123f804c45fc80c7ff4d762993b
[ "CC-BY-3.0", "Apache-2.0", "CC-BY-4.0" ]
1
2021-03-26T10:30:45.000Z
2021-03-26T10:30:45.000Z
nicos_sinq/zebra/setups/monochromator.py
ess-dmsc/nicos
755d61d403ff7123f804c45fc80c7ff4d762993b
[ "CC-BY-3.0", "Apache-2.0", "CC-BY-4.0" ]
91
2020-08-18T09:20:26.000Z
2022-02-01T11:07:14.000Z
nicos_sinq/zebra/setups/monochromator.py
ess-dmsc/nicos
755d61d403ff7123f804c45fc80c7ff4d762993b
[ "CC-BY-3.0", "Apache-2.0", "CC-BY-4.0" ]
3
2020-08-04T18:35:05.000Z
2021-04-16T11:22:08.000Z
# NICOS setup for the ZEBRA monochromator.
# BUG FIX: four device descriptions misspelled 'parallel' as 'paralell'.
description = 'Devices for the ZEBRA monochromator'

# EPICS PV prefixes for the three motor controllers.
mota = 'SQ:ZEBRA:mota:'
motb = 'SQ:ZEBRA:motb:'
motd = 'SQ:ZEBRA:motd:'

devices = dict(
    mtvl = device('nicos_ess.devices.epics.motor.EpicsMotor',
        epicstimeout = 3.0,
        description = 'Lower monochromator vertical translation',
        motorpv = mota + 'MTVL',
        errormsgpv = mota + 'MTVL-MsgTxt',
        precision = 0.5,
    ),
    mtpl = device('nicos_ess.devices.epics.motor.EpicsMotor',
        epicstimeout = 3.0,
        description = 'Lower monochromator parallel translation',
        motorpv = mota + 'MTPL',
        errormsgpv = mota + 'MTPL-MsgTxt',
        precision = 0.5,
    ),
    mgvl = device('nicos_ess.devices.epics.motor.EpicsMotor',
        epicstimeout = 3.0,
        description = 'Lower monochromator vertical goniometer',
        motorpv = mota + 'MGVL',
        errormsgpv = mota + 'MGVL-MsgTxt',
        precision = 0.5,
    ),
    mgpl = device('nicos_ess.devices.epics.motor.EpicsMotor',
        epicstimeout = 3.0,
        description = 'Lower monochromator parallel goniometer',
        motorpv = mota + 'MGPL',
        errormsgpv = mota + 'MGPL-MsgTxt',
        precision = 0.5,
    ),
    moml = device('nicos_ess.devices.epics.motor.EpicsMotor',
        epicstimeout = 3.0,
        description = 'Lower monochromator omega',
        motorpv = mota + 'MOML',
        errormsgpv = mota + 'MOML-MsgTxt',
        precision = 0.5,
    ),
    mtvu = device('nicos_ess.devices.epics.motor.EpicsMotor',
        epicstimeout = 3.0,
        description = 'Upper monochromator vertical translation',
        motorpv = mota + 'MTVU',
        errormsgpv = mota + 'MTVU-MsgTxt',
        precision = 0.5,
    ),
    mtpu = device('nicos_ess.devices.epics.motor.EpicsMotor',
        epicstimeout = 3.0,
        description = 'Upper monochromator parallel translation',
        motorpv = mota + 'MTPU',
        errormsgpv = mota + 'MTPU-MsgTxt',
        precision = 0.5,
    ),
    mgvu = device('nicos_ess.devices.epics.motor.EpicsMotor',
        epicstimeout = 3.0,
        description = 'Upper monochromator vertical goniometer',
        motorpv = mota + 'MGVU',
        errormsgpv = mota + 'MGVU-MsgTxt',
        precision = 0.5,
    ),
    mgpu = device('nicos_ess.devices.epics.motor.EpicsMotor',
        epicstimeout = 3.0,
        description = 'Upper monochromator parallel goniometer',
        motorpv = mota + 'MGPU',
        errormsgpv = mota + 'MGPU-MsgTxt',
        precision = 0.5,
    ),
    momu = device('nicos_ess.devices.epics.motor.EpicsMotor',
        epicstimeout = 3.0,
        description = 'Upper monochromator omega',
        motorpv = mota + 'MOMU',
        errormsgpv = mota + 'MOMU-MsgTxt',
        precision = 0.5,
    ),
    mcvl = device('nicos_ess.devices.epics.motor.EpicsMotor',
        epicstimeout = 3.0,
        description = 'Lower monochromator curvature',
        motorpv = mota + 'MCVL',
        errormsgpv = mota + 'MCVL-MsgTxt',
        precision = 0.5,
    ),
    mcvu = device('nicos_ess.devices.epics.motor.EpicsMotor',
        epicstimeout = 3.0,
        description = 'Upper monochromator curvature',
        motorpv = motb + 'MCVU',
        errormsgpv = motb + 'MCVU-MsgTxt',
        precision = 0.5,
    ),
    mexz = device('nicos_ess.devices.epics.motor.EpicsMotor',
        epicstimeout = 3.0,
        description = 'Monochromator lift',
        motorpv = motb + 'MEXZ',
        errormsgpv = motb + 'MEXZ-MsgTxt',
        precision = 0.5,
    ),
    wavelength = device('nicos_sinq.zebra.devices.zebrawl.ZebraWavelength',
        description = 'Wavelength for ZEBRA',
        unit = 'A-1',
        lift = 'mexz'
    ),
    cex1 = device('nicos_ess.devices.epics.motor.EpicsMotor',
        epicstimeout = 3.0,
        description = 'First collimator drum',
        motorpv = motd + 'CEX1',
        errormsgpv = motd + 'CEX1-MsgTxt',
        precision = 0.5,
    ),
    cex2 = device('nicos_ess.devices.epics.motor.EpicsMotor',
        epicstimeout = 3.0,
        description = 'Second collimator drum',
        motorpv = motd + 'CEX2',
        errormsgpv = motd + 'CEX2-MsgTxt',
        precision = 0.5,
    ),
)
34.97479
75
0.592023
419
4,162
5.842482
0.143198
0.071895
0.085784
0.128676
0.589052
0.518791
0.518791
0.518791
0.518791
0.518791
0
0.022401
0.281355
4,162
118
76
35.271186
0.796055
0
0
0.396552
0
0
0.351273
0.155694
0
0
0
0
0
1
0
false
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
2e79db4019af9551976d2be7470794d639b1bb48
15,886
py
Python
scripts/loading/ontology/psimi.py
dougli1sqrd/SGDBackend-Nex2
2ecb2436db142cf08c6f2dbab6b115a394116632
[ "MIT" ]
5
2015-11-24T23:09:46.000Z
2019-11-06T17:48:13.000Z
scripts/loading/ontology/psimi.py
dougli1sqrd/SGDBackend-Nex2
2ecb2436db142cf08c6f2dbab6b115a394116632
[ "MIT" ]
188
2017-08-28T22:39:03.000Z
2022-03-02T14:53:46.000Z
scripts/loading/ontology/psimi.py
dougli1sqrd/SGDBackend-Nex2
2ecb2436db142cf08c6f2dbab6b115a394116632
[ "MIT" ]
7
2018-05-13T01:58:07.000Z
2021-06-25T19:08:33.000Z
import urllib.request, urllib.parse, urllib.error
import logging
import os
from datetime import datetime
import sys
import importlib
importlib.reload(sys)  # Reload does the trick!
from src.helpers import upload_file
from scripts.loading.database_session import get_session
from scripts.loading.ontology import read_owl
from src.models import Source, Ro, Edam, Dbentity, Filedbentity, \
    Psimod, Psimi, PsimiUrl, PsimiAlias, PsimiRelation

__author__ = 'sweng66'

## Created on March 2018
## This script is used to update PSI-MI ontology in NEX2.

log_file = 'scripts/loading/ontology/logs/psimi.log'
ontology = 'PSIMI'
src = 'PSI'

CREATED_BY = os.environ['DEFAULT_USER']

logging.basicConfig(format='%(message)s')
log = logging.getLogger()
log.setLevel(logging.INFO)

log.info("PSI-MI Ontology Loading Report:\n")


def load_ontology(ontology_file):
    """Load/refresh the PSI-MI ontology from *ontology_file* into NEX2."""
    nex_session = get_session()

    log.info(str(datetime.now()))
    log.info("Getting data from database...")

    # Lookup tables from the current database state.
    source_to_id = dict([(x.display_name, x.source_id) for x in nex_session.query(Source).all()])
    psimiid_to_psimi = dict([(x.psimiid, x) for x in nex_session.query(Psimi).all()])
    term_to_ro_id = dict([(x.display_name, x.ro_id) for x in nex_session.query(Ro).all()])
    roid_to_ro_id = dict([(x.roid, x.ro_id) for x in nex_session.query(Ro).all()])
    edam_to_id = dict([(x.format_name, x.edam_id) for x in nex_session.query(Edam).all()])

    # psimi_id -> list of (display_name, alias_type) currently in the DB.
    psimi_id_to_alias = {}
    for x in nex_session.query(PsimiAlias).all():
        aliases = []
        if x.psimi_id in psimi_id_to_alias:
            aliases = psimi_id_to_alias[x.psimi_id]
        aliases.append((x.display_name, x.alias_type))
        psimi_id_to_alias[x.psimi_id] = aliases

    # child psimi_id -> list of (parent_id, ro_id) currently in the DB.
    psimi_id_to_parent = {}
    for x in nex_session.query(PsimiRelation).all():
        parents = []
        if x.child_id in psimi_id_to_parent:
            parents = psimi_id_to_parent[x.child_id]
        parents.append((x.parent_id, x.ro_id))
        psimi_id_to_parent[x.child_id] = parents

    ####################################
    fw = open(log_file, "w")

    log.info("Reading data from ontology file...")
    data = read_owl(ontology_file, ontology)

    log.info("Updating psimi ontology data in the database...")
    [update_log, to_delete_list] = load_new_data(nex_session, data,
                                                 source_to_id,
                                                 psimiid_to_psimi,
                                                 term_to_ro_id['is a'],
                                                 roid_to_ro_id,
                                                 psimi_id_to_alias,
                                                 psimi_id_to_parent,
                                                 fw)

    # S3 upload deliberately disabled.
    # log.info("Uploading file to S3...")
    # update_database_load_file_to_s3(nex_session, ontology_file, source_to_id, edam_to_id)

    log.info("Writing loading summary...")
    write_summary_and_send_email(fw, update_log, to_delete_list)

    nex_session.close()
    fw.close()

    log.info(str(datetime.now()))
    log.info("Done!\n\n")


def load_new_data(nex_session, data, source_to_id, psimiid_to_psimi, ro_id,
                  roid_to_ro_id, psimi_id_to_alias, psimi_id_to_parent, fw):
    """Insert/update terms from *data* and obsolete terms no longer present.

    Returns [update_log, to_delete] where update_log counts 'updated',
    'added', 'deleted' and to_delete lists (psimiid, display_name) of terms
    missing from the new release.
    """
    active_psimiid = []
    update_log = {}
    for count_name in ['updated', 'added', 'deleted']:
        update_log[count_name] = 0

    # Per-run dedup guards so the "add" and "update" paths below never
    # insert the same row twice.
    relation_just_added = {}
    alias_just_added = {}

    for x in data:
        psimi_id = None
        if "MI:" not in x['id']:
            continue
        if x['id'] in psimiid_to_psimi:  ## in database
            y = psimiid_to_psimi[x['id']]
            psimi_id = y.psimi_id
            if y.is_obsolete is True:
                y.is_obsolete = '0'
                nex_session.add(y)
                nex_session.flush()
                update_log['updated'] = update_log['updated'] + 1
                # NOTE(review): y.is_obsolete was reassigned above, so the
                # "from" value logged here is the new '0', not the old one.
                # Left unchanged: concatenating the old bool would raise.
                fw.write("The is_obsolete for " + x['id'] + " has been updated from " + y.is_obsolete + " to " + 'False' + "\n")

            if x['term'] != y.display_name.strip():  ## update term
                fw.write("The display_name for " + x['id'] + " has been updated from " + y.display_name + " to " + x['term'] + "\n")
                y.display_name = x['term']
                update_log['updated'] = update_log['updated'] + 1

            active_psimiid.append(x['id'])
        else:
            fw.write("NEW entry = " + x['id'] + " " + x['term'] + "\n")
            this_x = Psimi(source_id = source_to_id[src],
                           format_name = x['id'],
                           psimiid = x['id'],
                           display_name = x['term'],
                           description = x['definition'],
                           obj_url = '/psimi/' + x['id'],
                           is_obsolete = '0',
                           created_by = CREATED_BY)
            nex_session.add(this_x)
            nex_session.flush()
            psimi_id = this_x.psimi_id
            update_log['added'] = update_log['added'] + 1

            link_id = x['id'].replace(':', '_')
            insert_url(nex_session, source_to_id['Ontobee'], 'Ontobee', psimi_id,
                       'http://www.ontobee.org/ontology/MI?iri=http://purl.obolibrary.org/obo/' + link_id,
                       fw)
            # BioPortal URL deliberately disabled:
            # insert_url(nex_session, source_to_id['BioPortal'], 'BioPortal', psimi_id,
            #            'http://bioportal.bioontology.org/ontologies/MI/?p=classes&conceptid=http%3A%2F%2Fpurl.obolibrary.org%2Fobo%2F' + link_id,
            #            fw)
            insert_url(nex_session, source_to_id['OLS'], 'OLS', psimi_id,
                       'http://www.ebi.ac.uk/ols/ontologies/mi/terms?iri=http%3A%2F%2Fpurl.obolibrary.org%2Fobo%2F' + link_id,
                       fw)

            ## add RELATIONS
            for parent_psimiid in x['parents']:
                parent = psimiid_to_psimi.get(parent_psimiid)
                if parent is not None:
                    parent_id = parent.psimi_id
                    child_id = psimi_id
                    insert_relation(nex_session, source_to_id[src], parent_id,
                                    child_id, ro_id, relation_just_added, fw)

            for (parent_psimiid, roid) in x['other_parents']:
                parent = psimiid_to_psimi.get(parent_psimiid)
                if parent is not None:
                    parent_id = parent.psimi_id
                    child_id = psimi_id
                    this_ro_id = roid_to_ro_id.get(roid)
                    if this_ro_id is None:
                        log.info("The ROID:" + str(roid) + " is not found in the database")
                        continue
                    insert_relation(nex_session, source_to_id[src], parent_id,
                                    child_id, this_ro_id, relation_just_added, fw)

            ## add ALIASES
            # BUG FIX: the alias type was misspelled 'EAXCT', so this loop
            # never inserted anything; update_aliases() below uses 'EXACT'.
            for (alias, alias_type) in x['aliases']:
                if alias_type != 'EXACT':
                    continue
                insert_alias(nex_session, source_to_id[src], alias, alias_type,
                             psimi_id, alias_just_added, fw)

        ## update RELATIONS
        curr_parents = psimi_id_to_parent.get(psimi_id)
        if curr_parents is None:
            curr_parents = []
        update_relations(nex_session, psimi_id, curr_parents, x['parents'],
                         x['other_parents'], roid_to_ro_id, source_to_id[src],
                         psimiid_to_psimi, ro_id, relation_just_added, fw)

        ## update ALIASES
        update_aliases(nex_session, psimi_id, psimi_id_to_alias.get(psimi_id),
                       x['aliases'], source_to_id[src], psimiid_to_psimi,
                       alias_just_added, fw)

    # Obsolete any database term missing from the new release (NTR-prefixed
    # local terms are never obsoleted).
    to_delete = []
    for psimiid in psimiid_to_psimi:
        if psimiid in active_psimiid:
            continue
        x = psimiid_to_psimi[psimiid]
        if psimiid.startswith('NTR'):
            continue
        to_delete.append((psimiid, x.display_name))
        if x.is_obsolete is False:
            x.is_obsolete = '1'
            nex_session.add(x)
            nex_session.flush()
            update_log['updated'] = update_log['updated'] + 1
            fw.write("The is_obsolete for " + x.psimiid + " has been updated from " + x.is_obsolete + " to " + 'True' + "\n")

    nex_session.commit()
    # nex_session.rollback()

    return [update_log, to_delete]


def update_aliases(nex_session, psimi_id, curr_aliases, new_aliases, source_id,
                   psimiid_to_psimi, alias_just_added, fw):
    """Sync EXACT aliases of a term: add missing ones, delete stale ones."""
    if curr_aliases is None:
        curr_aliases = []
    # `alias_type` local renamed from `type` (was shadowing the builtin).
    for (alias, alias_type) in new_aliases:
        if alias_type != 'EXACT':
            continue
        if (alias, alias_type) not in curr_aliases:
            insert_alias(nex_session, source_id, alias, alias_type, psimi_id,
                         alias_just_added, fw)
    for (alias, alias_type) in curr_aliases:
        if (alias, alias_type) not in new_aliases:
            to_delete = nex_session.query(PsimiAlias).filter_by(
                psimi_id=psimi_id, display_name=alias, alias_type=alias_type).first()
            nex_session.delete(to_delete)
            fw.write("The old alias = " + alias + " has been deleted for psimi_id = " + str(psimi_id) + "\n")


def update_relations(nex_session, child_id, curr_parent_ids, new_parents,
                     other_parents, roid_to_ro_id, source_id, psimiid_to_psimi,
                     ro_id, relation_just_added, fw):
    """Sync parent relations of a term: add missing ones, delete stale ones."""
    new_parent_ids = []
    for parent_psimiid in new_parents:
        parent = psimiid_to_psimi.get(parent_psimiid)
        if parent is not None:
            parent_id = parent.psimi_id
            new_parent_ids.append((parent_id, ro_id))
            if (parent_id, ro_id) not in curr_parent_ids:
                insert_relation(nex_session, source_id, parent_id, child_id,
                                ro_id, relation_just_added, fw)
    for (parent_psimiid, roid) in other_parents:
        parent = psimiid_to_psimi.get(parent_psimiid)
        if parent is not None:
            parent_id = parent.psimi_id
            this_ro_id = roid_to_ro_id.get(roid)
            if this_ro_id is None:
                log.info("The ROID:" + str(roid) + " is not found in the database")
                continue
            new_parent_ids.append((parent_id, this_ro_id))
            if (parent_id, this_ro_id) not in curr_parent_ids:
                insert_relation(nex_session, source_id, parent_id, child_id,
                                this_ro_id, relation_just_added, fw)
    for (parent_id, ro_id) in curr_parent_ids:
        if (parent_id, ro_id) not in new_parent_ids:
            ## remove the old one
            to_delete = nex_session.query(PsimiRelation).filter_by(
                child_id=child_id, parent_id=parent_id, ro_id=ro_id).first()
            nex_session.delete(to_delete)
            fw.write("The old parent: parent_id = " + str(parent_id) + " has been deleted for psimi_id = " + str(child_id) + "\n")


def insert_url(nex_session, source_id, display_name, psimi_id, url, fw, url_type=None):
    """Attach *url* to a PSI-MI term; url_type defaults to display_name."""
    if url_type is None:
        url_type = display_name
    x = PsimiUrl(display_name = display_name,
                 url_type = url_type,
                 source_id = source_id,
                 psimi_id = psimi_id,
                 obj_url = url,
                 created_by = CREATED_BY)
    nex_session.add(x)
    nex_session.flush()
    fw.write("Added new URL: " + url + " for psimi_id = " + str(psimi_id) + "\n")


def insert_alias(nex_session, source_id, display_name, alias_type, psimi_id, alias_just_added, fw):
    """Insert an alias row unless it was already added during this run."""
    if (psimi_id, display_name, alias_type) in alias_just_added:
        return
    alias_just_added[(psimi_id, display_name, alias_type)] = 1
    x = PsimiAlias(display_name = display_name,
                   alias_type = alias_type,
                   source_id = source_id,
                   psimi_id = psimi_id,
                   created_by = CREATED_BY)
    nex_session.add(x)
    nex_session.flush()
    fw.write("Added new ALIAS: " + display_name + " for psimi_id = " + str(psimi_id) + "\n")


def insert_relation(nex_session, source_id, parent_id, child_id, ro_id, relation_just_added, fw):
    """Insert a parent/child relation unless already added during this run."""
    if (parent_id, child_id) in relation_just_added:
        return
    relation_just_added[(parent_id, child_id)] = 1
    x = PsimiRelation(parent_id = parent_id,
                      child_id = child_id,
                      source_id = source_id,
                      ro_id = ro_id,
                      created_by = CREATED_BY)
    nex_session.add(x)
    nex_session.flush()
    fw.write("Added new PARENT: parent_id = " + str(parent_id) + " for psimi_id = " + str(child_id) + "\n")


def update_database_load_file_to_s3(nex_session, ontology_file, source_to_id, edam_to_id):
    """Gzip the ontology file and register/upload it unless already loaded."""
    gzip_file = ontology_file + ".gz"
    import gzip
    import shutil
    with open(ontology_file, 'rb') as f_in, gzip.open(gzip_file, 'wb') as f_out:
        shutil.copyfileobj(f_in, f_out)

    local_file = open(gzip_file, mode='rb')

    import hashlib
    psimi_md5sum = hashlib.md5(ontology_file.encode()).hexdigest()
    psimi_row = nex_session.query(Filedbentity).filter_by(md5sum = psimi_md5sum).one_or_none()

    # Same file already loaded: nothing to do.
    if psimi_row is not None:
        return

    nex_session.query(Dbentity).filter_by(display_name=gzip_file, dbentity_status='Active').update({"dbentity_status": 'Archived'})
    nex_session.commit()

    data_id = edam_to_id.get('EDAM:2353')    ## data:2353    Ontology data
    topic_id = edam_to_id.get('EDAM:0089')   ## topic:0089   Ontology and terminology
    format_id = edam_to_id.get('EDAM:3262')  ## format:3262  OWL/XML

    from sqlalchemy import create_engine
    from src.models import DBSession
    engine = create_engine(os.environ['NEX2_URI'], pool_recycle=3600)
    DBSession.configure(bind=engine)

    upload_file(CREATED_BY, local_file,
                filename=gzip_file,
                file_extension='gz',
                description='PSI-MI Ontology in OWL RDF/XML format',
                display_name=gzip_file,
                data_id=data_id,
                format_id=format_id,
                topic_id=topic_id,
                status='Active',
                is_public='0',
                is_in_spell='0',
                is_in_browser='0',
                file_date=datetime.now(),
                source_id=source_to_id['SGD'],
                md5sum=psimi_md5sum)


def write_summary_and_send_email(fw, update_log, to_delete_list):
    """Write the added/updated counts and the not-in-release terms to fw."""
    summary = "Updated: " + str(update_log['updated']) + "\n"
    summary = summary + "Added: " + str(update_log['added']) + "\n"
    summary_4_email = summary
    if len(to_delete_list) > 0:
        summary = summary + "The following PSI-MI terms are not in the current release:\n"
        for (psimiid, term) in to_delete_list:
            summary = summary + "\t" + psimiid + " " + term + "\n"
    fw.write(summary)
    log.info(summary_4_email)


if __name__ == "__main__":
    url_path = 'http://purl.obolibrary.org/obo/'
    mi_owl_file = 'mi.owl'
    urllib.request.urlretrieve(url_path + mi_owl_file, mi_owl_file)
    load_ontology(mi_owl_file)
39.715
165
0.585295
2,080
15,886
4.153846
0.115865
0.046181
0.017361
0.015625
0.480208
0.41875
0.357407
0.306829
0.267361
0.244907
0
0.006428
0.304671
15,886
399
166
39.814536
0.775756
0.074153
0
0.230496
0
0.007092
0.094981
0.002667
0
0
0
0
0
1
0.031915
false
0
0.056738
0
0.102837
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
2e79f7ea432b2b3d219bca9a079a2d06945e6b96
5,312
py
Python
clac_module/listcalc.py
yourback/OfflineDataConversionAndDrawingSoftware
b50cbcf0fba22e536baaf77464c7aab289afc24f
[ "Apache-2.0" ]
null
null
null
clac_module/listcalc.py
yourback/OfflineDataConversionAndDrawingSoftware
b50cbcf0fba22e536baaf77464c7aab289afc24f
[ "Apache-2.0" ]
null
null
null
clac_module/listcalc.py
yourback/OfflineDataConversionAndDrawingSoftware
b50cbcf0fba22e536baaf77464c7aab289afc24f
[ "Apache-2.0" ]
null
null
null
from data_module.get_source_file import *
import numpy as np


class DataCleaner(object):
    """Parses raw hex-encoded data lines and exposes per-chart data series."""

    def __init__(self):
        # Chart 1 series
        self.data_LD = []
        self.data_I = []
        self.data_LU = []
        self.data_T = []
        # Chart 2 series
        self.data_RD = []
        self.data_RU = []
        # Chart 3 series
        self.data_K_R = []
        self.data_K_L = []
        self.data_J = []
        # Chart 4 series
        self.data_LU_LD = []
        self.data_RD_RU = []
        self.data_Q = []
        # Chart 5 series
        self.data_K = []
        self.data_P = []
        self.data_R = []
        self.data_S = []
        # Read and parse the source file immediately.
        self.read_data()

    def single_line_cleaning(self, one_line):
        """Strip the trailing ' 0D 0A' terminator and parse one raw line."""
        terminator = one_line.index(" 0D 0A")
        payload = one_line[1:terminator]
        return self.__get_datas(payload.split(" "))

    def get_item_data(self, data, i):
        """Combine the two hex bytes at *i*, *i*+1 into a value scaled by 1/100.

        :param data: list of hex-string bytes
        :param i: index of the high byte
        :return: ((hi << 8) + lo) / 100
        """
        return ((int(data[i], 16) << 8) + int(data[i + 1], 16)) / 100

    def get_item_single_data(self, data, i):
        """Parse the single hex-string byte at index *i* as an integer."""
        return int(data[i], 16)

    def __get_datas(self, data):
        """Decode all measurement fields from one parsed byte list."""
        LD = self.get_item_data(data, 0)
        LU = self.get_item_data(data, 2)
        RD = self.get_item_data(data, 4)
        RU = self.get_item_data(data, 6)
        K_R = self.get_item_data(data, 10)
        K_L = self.get_item_data(data, 12)
        I = self.get_item_single_data(data, 8)
        T = self.get_item_single_data(data, 19)
        J = self.get_item_single_data(data, 9)
        # NOTE(review): Q, K and P all read byte 16 with different scaling —
        # looks intentional but worth confirming against the data spec.
        Q = self.get_item_single_data(data, 16) / 50
        K = self.get_item_single_data(data, 16)
        P = self.get_item_single_data(data, 16) / 10
        R = self.get_item_single_data(data, 17)
        S = self.get_item_single_data(data, 18)
        return LD, LU, RD, RU, K_R, K_L, I, T, J, Q, K, P, R, S

    def read_data(self):
        """Parse every source line and append values to the series lists."""
        for raw_line in file_read_lines():
            # values order: LD, LU, RD, RU, K_R, K_L, I, T, J, Q, K, P, R, S
            (ld, lu, rd, ru, k_r, k_l, i_val, t_val, j_val,
             q_val, k_val, p_val, r_val, s_val) = self.single_line_cleaning(raw_line)
            self.data_LD.append(ld)
            self.data_LU.append(lu)
            self.data_RD.append(rd)
            self.data_RU.append(ru)
            self.data_K_R.append(k_r)
            self.data_K_L.append(k_l)
            self.data_I.append(i_val)
            self.data_T.append(t_val)
            self.data_J.append(j_val)
            self.data_Q.append(q_val)
            self.data_K.append(k_val)
            self.data_P.append(p_val)
            self.data_R.append(r_val)
            self.data_S.append(s_val)
            # Derived difference series for chart 4.
            self.data_LU_LD.append(round(lu - ld, 2))
            self.data_RD_RU.append(round(rd - ru, 2))

    def get_chart_data(self, chart_num):
        """Return the dict of data series needed by chart *chart_num* (str)."""
        if chart_num == '1':
            return {'data_LD': self.data_LD,
                    'data_I': self.data_I,
                    'data_LU': self.data_LU,
                    'data_T': self.data_T,
                    }
        elif chart_num == '2':
            return {'data_RD': self.data_RD,
                    'data_I': self.data_I,
                    'data_RU': self.data_RU,
                    'data_T': self.data_T,
                    }
        elif chart_num == '3':
            # NOTE(review): key 'data_RD' holds self.data_K_R here —
            # possibly intended to be 'data_K_R'; preserved as-is.
            return {'data_RD': self.data_K_R,
                    'data_K_L': self.data_K_L,
                    'data_I': self.data_I,
                    'data_T': self.data_T,
                    'data_J': self.data_J,
                    }
        elif chart_num == '4':
            return {'data_LU_LD': self.data_LU_LD,
                    'data_RD_RU': self.data_RD_RU,
                    'data_K_R': self.data_K_R,
                    'data_K_L': self.data_K_L,
                    'data_I': self.data_I,
                    'data_T': self.data_T,
                    'data_J': self.data_J,
                    'data_Q': self.data_Q,
                    }
        elif chart_num == '5':
            return {'data_K': self.data_K,
                    'data_P': self.data_P,
                    'data_R': self.data_R,
                    'data_S': self.data_S,
                    }


if __name__ == '__main__':
    dc = DataCleaner()
    m = dc.get_chart_data("1")
    for key in m:
        print(key + ':' + m[key].__str__())
32.193939
79
0.515625
749
5,312
3.315087
0.128171
0.235199
0.05437
0.061619
0.316553
0.220298
0.147
0.114378
0.070882
0.070882
0
0.022332
0.350904
5,312
164
80
32.390244
0.697796
0.130836
0
0.097087
0
0
0.042748
0
0
0
0
0
0
1
0.067961
false
0
0.019417
0
0.184466
0.009709
0
0
0
null
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
2e7ad10848efe85c68fbc9621bee7802f36c7718
100
py
Python
saleor/request_order/apps.py
AkioSky/FishMart
1d01d7e79812dc7cccb1b26ffc6457af6104d9f2
[ "BSD-3-Clause" ]
null
null
null
saleor/request_order/apps.py
AkioSky/FishMart
1d01d7e79812dc7cccb1b26ffc6457af6104d9f2
[ "BSD-3-Clause" ]
null
null
null
saleor/request_order/apps.py
AkioSky/FishMart
1d01d7e79812dc7cccb1b26ffc6457af6104d9f2
[ "BSD-3-Clause" ]
null
null
null
from django.apps import AppConfig


class RequestOrderConfig(AppConfig):
    """Django application configuration for the request_order app."""

    name = 'request_order'
16.666667
36
0.78
11
100
7
0.909091
0
0
0
0
0
0
0
0
0
0
0
0.15
100
5
37
20
0.905882
0
0
0
0
0
0.13
0
0
0
0
0
0
1
0
false
0
0.333333
0
1
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
1
0
1
0
0
4
2e7b7593dba3a850dbee118f9ad9b5841c88cc75
293
py
Python
py3_cookbook/_1_data_structure/find_common_keys_in_two_dicts.py
yellowb/ml-sample
2f283c39d0d7ed4bafc324632100a17da0088237
[ "MIT" ]
null
null
null
py3_cookbook/_1_data_structure/find_common_keys_in_two_dicts.py
yellowb/ml-sample
2f283c39d0d7ed4bafc324632100a17da0088237
[ "MIT" ]
null
null
null
py3_cookbook/_1_data_structure/find_common_keys_in_two_dicts.py
yellowb/ml-sample
2f283c39d0d7ed4bafc324632100a17da0088237
[ "MIT" ]
null
null
null
""" Sample to find common keys between 2 dictionaries """ d1 = { 'a': 1, 'b': 2, 'c': 3, 'd': 4 } d2 = { 'b': 2, 'c': 3, 'd': 4, 'e': 5 } print(d1.keys() & d2.keys()) # intersection print(d1.keys() | d2.keys()) # union print(d1.keys() - d2.keys()) # diff
14.65
57
0.457338
44
293
3.045455
0.522727
0.156716
0.246269
0.291045
0.470149
0.089552
0
0
0
0
0
0.082524
0.296928
293
19
58
15.421053
0.567961
0.255973
0
0.266667
0
0
0.038278
0
0
0
0
0
0
1
0
false
0
0
0
0
0.2
0
0
0
null
0
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
2
2e7dc647373335b503a34a2d4a35865fb0a8ce24
372
py
Python
testserver/manage.py
akubera/batavia
b56f2a3f54242f81aea3a5a048356a03f0d49494
[ "BSD-3-Clause" ]
1
2018-05-17T08:46:16.000Z
2018-05-17T08:46:16.000Z
testserver/manage.py
akubera/batavia
b56f2a3f54242f81aea3a5a048356a03f0d49494
[ "BSD-3-Clause" ]
1
2021-06-10T23:39:56.000Z
2021-06-10T23:39:56.000Z
testserver/manage.py
akubera/batavia
b56f2a3f54242f81aea3a5a048356a03f0d49494
[ "BSD-3-Clause" ]
null
null
null
#!/usr/bin/env python
"""Django management entry point for the Batavia test server."""
import os
import sys

if __name__ == "__main__":
    # Batavia only supports Python 3; bail out early with a clear message.
    if sys.version_info.major != 3:
        sys.stderr.write('Batavia requires Python 3' + os.linesep)
        sys.exit(1)

    os.environ.setdefault("DJANGO_SETTINGS_MODULE", "settings")

    from django.core.management import execute_from_command_line

    execute_from_command_line(sys.argv)
23.25
66
0.693548
51
372
4.72549
0.647059
0.091286
0.149378
0.182573
0
0
0
0
0
0
0
0.013378
0.196237
372
15
67
24.8
0.792642
0.053763
0
0
0
0
0.179487
0.062678
0
0
0
0
0
1
0
true
0
0.333333
0
0.333333
0
0
0
0
null
0
0
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
0
0
0
3
2e7ddad188be683aa53d4150da78432b51cf923a
120
py
Python
week-6/flask-api-small/scripts/prediction.py
cgsanders/stat418-tools-in-datascience
4a86fd15386f47d0a59422226b13834bae5a1387
[ "MIT" ]
4
2019-04-21T08:49:52.000Z
2019-09-22T14:07:26.000Z
week-6/flask-api-small/scripts/prediction.py
cgsanders/stat418-tools-in-datascience
4a86fd15386f47d0a59422226b13834bae5a1387
[ "MIT" ]
58
2019-04-22T15:44:14.000Z
2019-06-04T19:50:54.000Z
week-6/flask-api-small/scripts/prediction.py
cgsanders/stat418-tools-in-datascience
4a86fd15386f47d0a59422226b13834bae5a1387
[ "MIT" ]
33
2019-04-03T01:48:34.000Z
2021-04-30T06:36:29.000Z
#!/usr/bin/env python3


def predict(dict_values):
    """Return 2 raised to the value stored under key "x".

    :param dict_values: mapping with a numeric (or numeric-string) "x" entry
    :return: 2 ** float(dict_values["x"])
    """
    exponent = float(dict_values["x"])
    return 2 ** exponent
17.142857
32
0.641667
20
120
3.65
0.75
0.273973
0
0
0
0
0
0
0
0
0
0.042105
0.208333
120
6
33
20
0.726316
0.175
0
0
0
0
0.010204
0
0
0
0
0
0
1
0.25
false
0
0
0
0.5
0
1
0
0
null
1
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
1
0
0
0
0
0
0
0
4
2e7e0d9b1faef47626882f56fd831cc608dec063
118
py
Python
src/constants.py
masterbluecompact/boardroom-bot
b903038ee5a9183c81f03e45547642a5f4a757cf
[ "MIT" ]
null
null
null
src/constants.py
masterbluecompact/boardroom-bot
b903038ee5a9183c81f03e45547642a5f4a757cf
[ "MIT" ]
null
null
null
src/constants.py
masterbluecompact/boardroom-bot
b903038ee5a9183c81f03e45547642a5f4a757cf
[ "MIT" ]
null
null
null
"""Deployment constants for the Telegram boardroom bot."""
import os

# SECURITY: a live Telegram bot token was hard-coded here in source control
# and must be considered leaked — revoke it via @BotFather and supply the
# replacement through the TELEGRAM_API_TOKEN environment variable. The old
# literal is kept only as a fallback so existing deployments keep working.
TELEGRAM_API_TOKEN = os.environ.get(
    'TELEGRAM_API_TOKEN', '1978689233:AAGsYEAvbN3ZkrA5a77VTO9yc30VSNZs8lQ')
BOT_USERNAME = os.environ.get('BOT_USERNAME', 'boardroomv1bot')
WEBHOOK_URL = os.environ.get('WEBHOOK_URL', '')
39.333333
69
0.847458
10
118
9.6
1
0
0
0
0
0
0
0
0
0
0
0.172727
0.067797
118
3
70
39.333333
0.7
0
0
0
0
0
0.504202
0.386555
0
0
0
0
0
1
0
false
0
0
0
0
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
2
2e7e9c74fb34d9d539ee2c2a737c83639c165ce7
1,765
py
Python
aito/utils/_file_utils.py
AitoDotAI/aito-python-tools
891d433222b04f4ff8a4eeafbb9268516fd215dc
[ "MIT" ]
6
2019-10-16T02:35:06.000Z
2021-02-03T13:39:43.000Z
aito/utils/_file_utils.py
AitoDotAI/aito-python-tools
891d433222b04f4ff8a4eeafbb9268516fd215dc
[ "MIT" ]
23
2020-03-17T13:16:02.000Z
2021-04-23T15:09:51.000Z
aito/utils/_file_utils.py
AitoDotAI/aito-python-tools
891d433222b04f4ff8a4eeafbb9268516fd215dc
[ "MIT" ]
null
null
null
import gzip
import json
import os
import shutil
from os import PathLike
from pathlib import Path
from typing import Dict, List

import ndjson


def check_file_is_gzipped(file_path: PathLike):
    """Return True iff the path ends with the '.ndjson.gz' double suffix."""
    return Path(file_path).suffixes[-2:] == ['.ndjson', '.gz']


def gzip_file(input_path: PathLike, output_path: PathLike = None, keep=True):
    """Gzip *input_path* into *output_path* (default: sibling '<name>.gz').

    Raises ValueError if the input already has a .gz suffix; removes the
    source file afterwards unless *keep* is true.
    """
    source = Path(input_path)
    if source.name.endswith('.gz'):
        raise ValueError(f'{source} is already gzipped')
    target = Path(output_path) if output_path else source.parent / f"{source.name}.gz"
    with source.open('rb') as f_in, gzip.open(target, 'wb') as f_out:
        shutil.copyfileobj(f_in, f_out)
    if not keep:
        os.unlink(source)


def read_json_gz_file(input_path: PathLike, decoder='utf-8'):
    """Read a gzipped JSON file and return the parsed object."""
    with gzip.open(Path(input_path), 'rb') as in_f:
        raw = in_f.read()
    return json.loads(raw.decode(decoder))


def read_ndjson_gz_file(input_path: PathLike, decoder='utf-8'):
    """Read a gzipped NDJSON file and return a list of parsed records."""
    with gzip.open(Path(input_path), 'rb') as in_f:
        # Iterating the file object yields lines, same as a readline loop.
        return [json.loads(line.decode(decoder)) for line in in_f]


def write_to_ndjson_gz_file(data: List[Dict], output_file: PathLike):
    """Dump *data* as NDJSON and gzip it into *output_file* (.ndjson.gz)."""
    out = Path(output_file)
    if not out.name.endswith(".ndjson.gz"):
        raise ValueError("Output file must end with .ndjson.gz")
    # Write the plain .ndjson next to the target, then gzip it in place.
    plain = out.parent / out.stem
    with plain.open('w') as f:
        ndjson.dump(data, f)
    gzip_file(plain, out, keep=False)
30.964912
99
0.689518
268
1,765
4.309701
0.25
0.132468
0.033766
0.054545
0.164502
0.145455
0.145455
0.145455
0.145455
0.09697
0
0.00212
0.1983
1,765
56
100
31.517857
0.814134
0
0
0.155556
0
0
0.073088
0
0
0
0
0
0
1
0.111111
false
0
0.177778
0
0.377778
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
2e80dd7978a3db15732cccc6a8f417efe3965c4c
946
py
Python
vol4/156.py
EdisonAlgorithms/ProjectEuler
95025ede2c92dbd3ed2dccc0f8a97e9a3db95ef0
[ "MIT" ]
null
null
null
vol4/156.py
EdisonAlgorithms/ProjectEuler
95025ede2c92dbd3ed2dccc0f8a97e9a3db95ef0
[ "MIT" ]
null
null
null
vol4/156.py
EdisonAlgorithms/ProjectEuler
95025ede2c92dbd3ed2dccc0f8a97e9a3db95ef0
[ "MIT" ]
null
null
null
# Ported from Python 2 to Python 3: floor division (// instead of /),
# built-in zip instead of itertools.izip, print() as a function, and
# time.perf_counter() in place of time.clock() (removed in Python 3.8).
import itertools
import time


def digits(n):
    """Yield the decimal digits of n, least significant first (nothing for 0)."""
    while n:
        yield n % 10
        n //= 10


def pows(b):
    """Yield the powers 1, b, b**2, ... indefinitely.

    Generalization: the original ignored *b* and always multiplied by 10;
    it was only ever called as pows(10), so behaviour is unchanged.
    """
    x = 1
    while True:
        yield x
        x *= b


def f(n, d):
    """Count how many times digit *d* appears in the numbers 1..n."""

    def g(d0, m):
        # Contribution of the position with place value m whose digit is d0:
        # full cycles of d below this position, plus a partial cycle when
        # the current digit equals or exceeds d.
        if d0 < d:
            return n // (m * 10) * m
        elif d0 == d:
            return n // (m * 10) * m + n % m + 1
        else:
            return (n // (m * 10) + 1) * m

    return sum(itertools.starmap(g, zip(digits(n), pows(10))))


def solve(L, d):
    """Sum all n with f(n, d) == n, scanning until f overshoots n by >= L."""
    n = 1
    ret = 0
    while True:
        m = f(n, d)
        if m == n:
            ret += n
            n += 1
        elif m < n:
            # Jump ahead proportionally to the gap to skip hopeless ranges.
            n += max(1, (n - m) // (sum(1 for _ in digits(n)) + 1))
        else:
            if m - n >= L:
                break
            n += m - n  # jump straight to m; no fixed point lies in between

    return ret


if __name__ == "__main__":
    L = 10 ** 100
    t0 = time.perf_counter()
    print(sum(solve(L, d) for d in range(1, 10)))
    t1 = time.perf_counter()
    print('time = ', t1 - t0)
20.12766
73
0.403805
144
946
2.590278
0.291667
0.032172
0.064343
0.080429
0.075067
0.075067
0.075067
0
0
0
0
0.073359
0.452431
946
47
74
20.12766
0.646718
0
0
0.097561
0
0
0.015839
0
0
0
0
0
0
0
null
null
0
0.04878
null
null
0.04878
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
1
0
0
0
0
0
0
0
0
1
2e82064a46395a03f52d064607c7f8ac759527bc
5,770
py
Python
codelibproj/codelibapp/migrations/0001_initial.py
ahegashira/CodeLibrary
4dca1eb8ec0c274e8dfb1db62a97920703df3240
[ "Apache-2.0" ]
null
null
null
codelibproj/codelibapp/migrations/0001_initial.py
ahegashira/CodeLibrary
4dca1eb8ec0c274e8dfb1db62a97920703df3240
[ "Apache-2.0" ]
null
null
null
codelibproj/codelibapp/migrations/0001_initial.py
ahegashira/CodeLibrary
4dca1eb8ec0c274e8dfb1db62a97920703df3240
[ "Apache-2.0" ]
null
null
null
# Generated by Django 2.2 on 2019-06-04 18:37
# Initial schema for the codelibapp application: creates the ResourceType
# lookup table plus the Website, Meetup, Developer, Book and Blog resource
# tables, each linked to a ResourceType and owned by a user
# (settings.AUTH_USER_MODEL). All foreign keys use DO_NOTHING on delete.

from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion


class Migration(migrations.Migration):

    # First migration of this app; may be applied on an empty database.
    initial = True

    dependencies = [
        # The user model is swappable, hence the indirect dependency.
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
    ]

    operations = [
        migrations.CreateModel(
            name='ResourceType',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('resource_type', models.CharField(max_length=255)),
                ('resource_description', models.CharField(blank=True, max_length=255, null=True)),
            ],
            options={
                # NOTE(review): plural name 'resources' while the table is
                # 'resource' — looks intentional but worth confirming.
                'verbose_name_plural': 'resources',
                'db_table': 'resource',
            },
        ),
        migrations.CreateModel(
            name='Website',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('website_title', models.CharField(max_length=255)),
                ('website_url', models.URLField()),
                ('website_description', models.TextField()),
                ('resource_type', models.ForeignKey(on_delete=django.db.models.deletion.DO_NOTHING, to='codelibapp.ResourceType')),
                ('user', models.ForeignKey(on_delete=django.db.models.deletion.DO_NOTHING, to=settings.AUTH_USER_MODEL)),
            ],
            options={
                'verbose_name_plural': 'websites',
                'db_table': 'website',
            },
        ),
        migrations.CreateModel(
            name='Meetup',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('meetup_title', models.CharField(max_length=255)),
                ('meetup_url', models.URLField()),
                ('meetup_city', models.CharField(max_length=100)),
                # Two-letter state/region abbreviation.
                ('meetup_state', models.CharField(max_length=2)),
                ('meetup_description', models.TextField()),
                ('resource_type', models.ForeignKey(on_delete=django.db.models.deletion.DO_NOTHING, to='codelibapp.ResourceType')),
                ('user', models.ForeignKey(on_delete=django.db.models.deletion.DO_NOTHING, to=settings.AUTH_USER_MODEL)),
            ],
            options={
                'verbose_name_plural': 'meetups',
                'db_table': 'meetup',
            },
        ),
        migrations.CreateModel(
            name='Developer',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('dev_first_name', models.CharField(max_length=100)),
                ('dev_last_name', models.CharField(max_length=100)),
                ('dev_twitter', models.CharField(max_length=51)),
                ('dev_specialty', models.TextField()),
                ('resource_type', models.ForeignKey(on_delete=django.db.models.deletion.DO_NOTHING, to='codelibapp.ResourceType')),
                ('user', models.ForeignKey(on_delete=django.db.models.deletion.DO_NOTHING, to=settings.AUTH_USER_MODEL)),
            ],
            options={
                'verbose_name_plural': 'developers',
                'db_table': 'developer',
            },
        ),
        migrations.CreateModel(
            name='Book',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('book_title', models.CharField(max_length=255)),
                ('book_author', models.CharField(max_length=255)),
                ('book_publisher', models.CharField(max_length=255)),
                # NOTE(review): verbose_name is normally a string; the literal
                # 4 looks accidental. Left untouched — editing an applied
                # migration rewrites history; fix in a follow-up migration.
                ('book_pages', models.IntegerField(blank=True, null=True, verbose_name=4)),
                ('book_isbn10', models.CharField(blank=True, max_length=10, null=True)),
                # max_length=14 allows hyphenated ISBN-13 values.
                ('book_isbn13', models.CharField(blank=True, max_length=14, null=True)),
                ('book_pub_date', models.DateField(blank=True, null=True)),
                ('book_description', models.TextField()),
                ('resource_type', models.ForeignKey(on_delete=django.db.models.deletion.DO_NOTHING, to='codelibapp.ResourceType')),
                ('user', models.ForeignKey(on_delete=django.db.models.deletion.DO_NOTHING, to=settings.AUTH_USER_MODEL)),
            ],
            options={
                'verbose_name_plural': 'books',
                'db_table': 'book',
            },
        ),
        migrations.CreateModel(
            name='Blog',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('blog_title', models.CharField(max_length=255)),
                ('blog_author_first', models.CharField(blank=True, max_length=100, null=True)),
                ('blog_author_last', models.CharField(blank=True, max_length=100, null=True)),
                ('blog_url', models.CharField(max_length=255)),
                ('blog_postdate', models.DateField()),
                ('blog_description', models.TextField()),
                ('resource_type', models.ForeignKey(on_delete=django.db.models.deletion.DO_NOTHING, to='codelibapp.ResourceType')),
                ('user', models.ForeignKey(on_delete=django.db.models.deletion.DO_NOTHING, to=settings.AUTH_USER_MODEL)),
            ],
            options={
                'verbose_name_plural': 'blogs',
                'db_table': 'blog',
            },
        ),
    ]
49.741379
132
0.564991
561
5,770
5.586453
0.172906
0.086152
0.074665
0.099553
0.677728
0.660498
0.555839
0.534142
0.534142
0.534142
0
0.016828
0.299653
5,770
115
133
50.173913
0.758723
0.007452
0
0.425926
1
0
0.15615
0.020499
0
0
0
0
0
1
0
false
0
0.027778
0
0.064815
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
2e84df5100eb34e590dde3692727724451d660e6
186
py
Python
congregation/codegen/python/libs/external/__init__.py
CCD-HRI/congregation
a552856b03a64a4295792184107c4e529ca3f4ae
[ "MIT" ]
3
2020-10-05T16:30:15.000Z
2021-01-22T13:38:02.000Z
congregation/codegen/python/libs/external/__init__.py
multiparty/congregation
a552856b03a64a4295792184107c4e529ca3f4ae
[ "MIT" ]
null
null
null
congregation/codegen/python/libs/external/__init__.py
multiparty/congregation
a552856b03a64a4295792184107c4e529ca3f4ae
[ "MIT" ]
1
2021-08-13T07:28:30.000Z
2021-08-13T07:28:30.000Z
# Package initializer that flattens the external-library helper submodules
# (unary, binary, n-ary operators) into a single namespace. The star imports
# are intentional: this package exists purely to re-export those names.
from congregation.codegen.python.libs.external.unary import *
from congregation.codegen.python.libs.external.binary import *
from congregation.codegen.python.libs.external.nary import *
46.5
62
0.83871
24
186
6.5
0.416667
0.307692
0.442308
0.557692
0.865385
0.865385
0.602564
0
0
0
0
0
0.064516
186
3
63
62
0.896552
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
1
0
0
null
1
1
1
1
1
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
0
0
0
9
2e894c57c3b59c59676e670168876de12735924c
9,107
py
Python
libs/configs/DOTA1.0/dota_train/cfgs_res50_dotatrain_dcl_v7.py
Thinklab-SJTU/DCL_RetinaNet_Tensorflow
1d14c9800c3eb1975e8832978f7a263783d171ec
[ "Apache-2.0" ]
36
2020-11-19T07:23:42.000Z
2022-03-30T03:35:57.000Z
libs/configs/DOTA1.0/dota_train/cfgs_res50_dotatrain_dcl_v7.py
Thinklab-SJTU/DCL_RetinaNet_Tensorflow
1d14c9800c3eb1975e8832978f7a263783d171ec
[ "Apache-2.0" ]
4
2021-01-30T09:49:10.000Z
2021-12-05T12:49:11.000Z
libs/configs/DOTA1.0/dota_train/cfgs_res50_dotatrain_dcl_v7.py
Thinklab-SJTU/DCL_RetinaNet_Tensorflow
1d14c9800c3eb1975e8832978f7a263783d171ec
[ "Apache-2.0" ]
6
2020-11-23T07:54:47.000Z
2021-07-09T07:20:15.000Z
# -*- coding: utf-8 -*- from __future__ import division, print_function, absolute_import import os import tensorflow as tf import math """ BCL + OMEGA = 180 / 8. {'0.7': {'tennis-court': 0.9030146006529745, 'ship': 0.5247147197113373, 'basketball-court': 0.5197288166873728, 'small-vehicle': 0.3876947548806594, 'harbor': 0.11656494591608674, 'plane': 0.7970783470996496, 'soccer-ball-field': 0.5481252919561383, 'ground-track-field': 0.49822883370115856, 'roundabout': 0.4251287737133813, 'baseball-diamond': 0.4246128098451668, 'large-vehicle': 0.2103852912661382, 'helicopter': 0.26122090469916553, 'bridge': 0.09114583333333333, 'swimming-pool': 0.14757313945494202, 'storage-tank': 0.6677333132875084, 'mAP': 0.4348633584136675}, '0.9': {'tennis-court': 0.19764274620067623, 'ship': 0.004702194357366771, 'basketball-court': 0.09090909090909091, 'small-vehicle': 0.0303030303030303, 'harbor': 0.0016175994823681658, 'plane': 0.09577370534335426, 'soccer-ball-field': 0.045454545454545456, 'ground-track-field': 0.004784688995215311, 'roundabout': 0.09090909090909091, 'baseball-diamond': 0.09090909090909091, 'large-vehicle': 0.0036363636363636364, 'helicopter': 0.0, 'bridge': 0.0, 'swimming-pool': 0.001652892561983471, 'storage-tank': 0.047402781720124895, 'mAP': 0.04704652138548674}, '0.85': {'tennis-court': 0.5175145387959259, 'ship': 0.04181083824704637, 'basketball-court': 0.16507177033492823, 'small-vehicle': 0.0606060606060606, 'harbor': 0.004132231404958678, 'plane': 0.31478501464749403, 'soccer-ball-field': 0.19507575757575757, 'ground-track-field': 0.007974481658692184, 'roundabout': 0.12648221343873517, 'baseball-diamond': 0.10730253353204174, 'large-vehicle': 0.005236915550816896, 'helicopter': 0.0606060606060606, 'bridge': 0.0303030303030303, 'swimming-pool': 0.003305785123966942, 'storage-tank': 0.2216713262889979, 'mAP': 0.1241252372076342}, '0.95': {'tennis-court': 0.011019283746556474, 'ship': 0.0034965034965034965, 'basketball-court': 0.0, 'small-vehicle': 
0.00033921302578018993, 'harbor': 0.0, 'plane': 0.0303030303030303, 'soccer-ball-field': 0.004329004329004329, 'ground-track-field': 0.0, 'roundabout': 0.0101010101010101, 'baseball-diamond': 0.0, 'large-vehicle': 0.00016528925619834712, 'helicopter': 0.0, 'bridge': 0.0, 'swimming-pool': 0.0, 'storage-tank': 0.004914004914004914, 'mAP': 0.004311155944805877}, '0.75': {'tennis-court': 0.814202005991537, 'ship': 0.36943182805314534, 'basketball-court': 0.45146913919982956, 'small-vehicle': 0.2500155419128262, 'harbor': 0.04276380829572319, 'plane': 0.7579878981894648, 'soccer-ball-field': 0.4295376606872696, 'ground-track-field': 0.38142101120570016, 'roundabout': 0.33333075942849677, 'baseball-diamond': 0.32189281750059, 'large-vehicle': 0.08109584612393884, 'helicopter': 0.10013175230566534, 'bridge': 0.03636363636363637, 'swimming-pool': 0.04511019283746556, 'storage-tank': 0.5556670810786699, 'mAP': 0.3313613986115972}, '0.6': {'tennis-court': 0.9078675692919017, 'ship': 0.7428748965130202, 'basketball-court': 0.5525816701862102, 'small-vehicle': 0.5661458809978344, 'harbor': 0.31468024317286736, 'plane': 0.8927954483248337, 'soccer-ball-field': 0.7026712063326276, 'ground-track-field': 0.5952492478039144, 'roundabout': 0.5862217256587403, 'baseball-diamond': 0.6337310374828784, 'large-vehicle': 0.473337107335067, 'helicopter': 0.4559992150034522, 'bridge': 0.24367445113583264, 'swimming-pool': 0.3895201094294005, 'storage-tank': 0.780240020845799, 'mAP': 0.5891726553009586}, '0.65': {'tennis-court': 0.9063366415272575, 'ship': 0.6452205305336104, 'basketball-court': 0.5419974943230758, 'small-vehicle': 0.5169961021709822, 'harbor': 0.22418261562998404, 'plane': 0.8852270920046745, 'soccer-ball-field': 0.6228637775932758, 'ground-track-field': 0.5591851331213543, 'roundabout': 0.5511841761637736, 'baseball-diamond': 0.580333891442914, 'large-vehicle': 0.3714621290611434, 'helicopter': 0.38442656608097786, 'bridge': 0.17609532766667257, 'swimming-pool': 
0.2673287170682332, 'storage-tank': 0.7642816542612352, 'mAP': 0.5331414565766109}, '0.5': {'tennis-court': 0.9088195386702851, 'ship': 0.8224437807168951, 'basketball-court': 0.5830775602074171, 'small-vehicle': 0.6169954809326167, 'harbor': 0.5258339843237152, 'plane': 0.8967687126422501, 'soccer-ball-field': 0.7362705406914213, 'ground-track-field': 0.6498421987512867, 'roundabout': 0.6566326127028347, 'baseball-diamond': 0.6993401680187941, 'large-vehicle': 0.6045608802509415, 'helicopter': 0.5212808419504471, 'bridge': 0.36652945756438354, 'swimming-pool': 0.5164216645404407, 'storage-tank': 0.820030826549724, 'mAP': 0.6616565499008968}, '0.8': {'tennis-court': 0.7816894723179306, 'ship': 0.1451783316171541, 'basketball-court': 0.3190681777298075, 'small-vehicle': 0.10194653796304984, 'harbor': 0.013468013468013467, 'plane': 0.5427055255026874, 'soccer-ball-field': 0.3415451418500199, 'ground-track-field': 0.13296378418329638, 'roundabout': 0.24207752583038625, 'baseball-diamond': 0.15874047455208096, 'large-vehicle': 0.02730096965512913, 'helicopter': 0.07382920110192837, 'bridge': 0.0303030303030303, 'swimming-pool': 0.022727272727272728, 'storage-tank': 0.3935350148663551, 'mAP': 0.22180523157787616}, '0.55': {'tennis-court': 0.9088195386702851, 'ship': 0.7579876783545256, 'basketball-court': 0.5756532396263904, 'small-vehicle': 0.6038512968643537, 'harbor': 0.41351532033351385, 'plane': 0.8953579426876774, 'soccer-ball-field': 0.7172833446523743, 'ground-track-field': 0.6291038290639339, 'roundabout': 0.6120712939504148, 'baseball-diamond': 0.6832929342136569, 'large-vehicle': 0.540009629017207, 'helicopter': 0.5193070813559969, 'bridge': 0.31825260872940675, 'swimming-pool': 0.4790914021824345, 'storage-tank': 0.78480241358501, 'mAP': 0.6292266368858123}, 'mmAP': 0.3576710201805347} """ # ------------------------------------------------ VERSION = 'RetinaNet_DOTA_DCL_B_2x_20200920' NET_NAME = 'resnet50_v1d' # 'MobilenetV2' ADD_BOX_IN_TENSORBOARD = True # 
# ---------------------------------------- System_config
# Paths, GPU layout and checkpoint cadence for the training run.
ROOT_PATH = os.path.abspath('../')
print(20*"++--")
print(ROOT_PATH)
GPU_GROUP = "0,1,2"
NUM_GPU = len(GPU_GROUP.strip().split(','))
SHOW_TRAIN_INFO_INTE = 20
SMRY_ITER = 2000
# Checkpoint every 2 epochs (20673 iterations per epoch at batch size 1).
SAVE_WEIGHTS_INTE = 20673 * 2
SUMMARY_PATH = ROOT_PATH + '/output/summary'
TEST_SAVE_PATH = ROOT_PATH + '/tools/test_result'

# Resolve the pretrained-backbone checkpoint name from NET_NAME
# (defined above in this config file).
if NET_NAME.startswith("resnet"):
    weights_name = NET_NAME
elif NET_NAME.startswith("MobilenetV2"):
    weights_name = "mobilenet/mobilenet_v2_1.0_224"
else:
    raise Exception('net name must in [resnet_v1_101, resnet_v1_50, MobilenetV2]')

PRETRAINED_CKPT = ROOT_PATH + '/data/pretrained_weights/' + weights_name + '.ckpt'
TRAINED_CKPT = os.path.join(ROOT_PATH, 'output/trained_weights')
EVALUATE_DIR = ROOT_PATH + '/output/evaluate_result_pickle/'

# ------------------------------------------ Train config
RESTORE_FROM_RPN = False
FIXED_BLOCKS = 1  # allow 0~3
FREEZE_BLOCKS = [True, False, False, False, False]  # for gluoncv backbone
USE_07_METRIC = True

MUTILPY_BIAS_GRADIENT = 2.0  # if None, will not multipy
GRADIENT_CLIPPING_BY_NORM = 10.0  # if None, will not clip

# Loss weights for classification, box regression and the angle head.
CLS_WEIGHT = 1.0
REG_WEIGHT = 1.0
ANGLE_WEIGHT = 0.5
REG_LOSS_MODE = None
ALPHA = 1.0
BETA = 1.0

BATCH_SIZE = 1
EPSILON = 1e-5
MOMENTUM = 0.9
LR = 5e-4
# LR decays at epochs 12/16/20 (expressed in checkpoint intervals).
DECAY_STEP = [SAVE_WEIGHTS_INTE*12, SAVE_WEIGHTS_INTE*16, SAVE_WEIGHTS_INTE*20]
MAX_ITERATION = SAVE_WEIGHTS_INTE*20
WARM_SETP = int(1.0 / 4.0 * SAVE_WEIGHTS_INTE)

# -------------------------------------------- Data_preprocess_config
DATASET_NAME = 'DOTATrain'  # 'pascal', 'coco'
PIXEL_MEAN = [123.68, 116.779, 103.939]  # R, G, B. In tf, channel is RGB. In openCV, channel is BGR
PIXEL_MEAN_ = [0.485, 0.456, 0.406]
PIXEL_STD = [0.229, 0.224, 0.225]  # R, G, B. In tf, channel is RGB. In openCV, channel is BGR
IMG_SHORT_SIDE_LEN = 800
IMG_MAX_LENGTH = 800
CLASS_NUM = 15
# Angular bin width for the dense coding label (matches the docstring header).
OMEGA = 180 / 8.
ANGLE_MODE = 0

IMG_ROTATE = False
RGB2GRAY = False
VERTICAL_FLIP = False
HORIZONTAL_FLIP = True
IMAGE_PYRAMID = False

# --------------------------------------------- Network_config
SUBNETS_WEIGHTS_INITIALIZER = tf.random_normal_initializer(mean=0.0, stddev=0.01, seed=None)
SUBNETS_BIAS_INITIALIZER = tf.constant_initializer(value=0.0)
PROBABILITY = 0.01
# Focal-loss style prior: bias the final conv so initial foreground
# probability is PROBABILITY.
FINAL_CONV_BIAS_INITIALIZER = tf.constant_initializer(value=-math.log((1.0 - PROBABILITY) / PROBABILITY))
WEIGHT_DECAY = 1e-4
USE_GN = False
FPN_CHANNEL = 256

# ---------------------------------------------Anchor config
LEVEL = ['P3', 'P4', 'P5', 'P6', 'P7']
BASE_ANCHOR_SIZE_LIST = [32, 64, 128, 256, 512]
ANCHOR_STRIDE = [8, 16, 32, 64, 128]
ANCHOR_SCALES = [2 ** 0, 2 ** (1.0 / 3.0), 2 ** (2.0 / 3.0)]
ANCHOR_RATIOS = [1, 1 / 2, 2., 1 / 3., 3., 5., 1 / 5.]
ANCHOR_ANGLES = [-90, -75, -60, -45, -30, -15]
ANCHOR_SCALE_FACTORS = None
USE_CENTER_OFFSET = True
METHOD = 'H'
USE_ANGLE_COND = False
ANGLE_RANGE = 180  # 90 or 180

# --------------------------------------------RPN config
SHARE_NET = True
USE_P5 = True
IOU_POSITIVE_THRESHOLD = 0.5
IOU_NEGATIVE_THRESHOLD = 0.4
NMS = True
NMS_IOU_THRESHOLD = 0.1
MAXIMUM_DETECTIONS = 100
FILTERED_SCORE = 0.05
VIS_SCORE = 0.4
70.596899
581
0.726694
1,136
9,107
5.698063
0.367077
0.018539
0.018539
0.024718
0.060559
0.056234
0.022246
0.022246
0.022246
0.01205
0
0.355285
0.087954
9,107
128
582
71.148438
0.424031
0.072582
0
0
0
0
0.108846
0.050965
0
0
0
0
0
1
0
false
0
0.046512
0
0.046512
0.034884
0
0
0
null
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
1
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
2
2e8a31039b719e32fd9540d0b31a493f407bf660
3,081
py
Python
src/korth_spirit/configuration/input_configuration.py
Korth-Spirit/Korth-Spirit
1bdaf6f46a4665f4d2a312e61e57d853b9dd5d84
[ "MIT" ]
null
null
null
src/korth_spirit/configuration/input_configuration.py
Korth-Spirit/Korth-Spirit
1bdaf6f46a4665f4d2a312e61e57d853b9dd5d84
[ "MIT" ]
null
null
null
src/korth_spirit/configuration/input_configuration.py
Korth-Spirit/Korth-Spirit
1bdaf6f46a4665f4d2a312e61e57d853b9dd5d84
[ "MIT" ]
null
null
null
# Copyright (c) 2021-2022 Johnathan P. Irvin
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
from functools import cache  # kept for import compatibility; see class note


class InputConfiguration:
    """Interactive configuration source that prompts the user on first use.

    Each getter asks once and memoizes the answer, so repeated calls return
    the same value without re-prompting.

    FIX: the original decorated instance methods with functools.cache, which
    keys the cache on ``self`` and keeps every instance alive for the process
    lifetime (ruff B019). Per-instance memo attributes give the identical
    ask-once behaviour without the leak, and the method interface is
    unchanged for callers.
    """

    def get_bot_name(self) -> str:
        """Return the name of the bot, prompting on first call."""
        if not hasattr(self, "_bot_name"):
            self._bot_name = input("Bot name: ")
        return self._bot_name

    def get_citizen_number(self) -> int:
        """Return the citizen number of the bot's owner.

        Re-prompts until a numeric value is entered.
        """
        if not hasattr(self, "_citizen_number"):
            while True:
                raw = input("Citizen number: ")
                if raw.isnumeric():
                    self._citizen_number = int(raw)
                    break
                print("Invalid citizen number.")
        return self._citizen_number

    def get_password(self) -> str:
        """Return the privilege password of the bot's owner."""
        if not hasattr(self, "_password"):
            self._password = input("Password: ")
        return self._password

    def get_world_name(self) -> str:
        """Return the name of the world the bot will enter."""
        if not hasattr(self, "_world_name"):
            self._world_name = input("World name: ")
        return self._world_name

    def get_world_coordinates(self) -> tuple:
        """Return the (x, y, z) coordinates where the bot will enter.

        Re-prompts until all three comma-separated values are numeric.
        """
        if not hasattr(self, "_world_coordinates"):
            while True:
                x, y, z = input(
                    "World coordinates (x, y, z): "
                ).replace(" ", "").split(",")
                if x.isnumeric() and y.isnumeric() and z.isnumeric():
                    self._world_coordinates = (int(x), int(y), int(z))
                    break
                print("Invalid coordinates.")
        return self._world_coordinates

    def get_plugin_path(self) -> str:
        """Return the filesystem path where the plugins are stored."""
        if not hasattr(self, "_plugin_path"):
            self._plugin_path = input("Plugin path: ")
        return self._plugin_path
31.762887
72
0.612788
395
3,081
4.744304
0.351899
0.040021
0.035219
0.036286
0.228922
0.215582
0.199039
0.158485
0.11206
0
0
0.003752
0.308017
3,081
96
73
32.09375
0.875235
0.55469
0
0.25
0
0
0.120751
0
0
0
0
0
0
1
0.1875
false
0.0625
0.03125
0
0.4375
0.0625
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
1
0
0
0
0
0
1
2e8a599c94ab88b6f514655190020c0bde169a1f
586
py
Python
data/data.py
owrior/snakeMach
1af8ca51badd3e23201ef5cc873e9179ee01c058
[ "MIT" ]
null
null
null
data/data.py
owrior/snakeMach
1af8ca51badd3e23201ef5cc873e9179ee01c058
[ "MIT" ]
null
null
null
data/data.py
owrior/snakeMach
1af8ca51badd3e23201ef5cc873e9179ee01c058
[ "MIT" ]
null
null
null
import numpy as np
from sklearn.datasets import make_blobs
from sklearn.preprocessing import normalize


class TestData:
    """Generator of synthetic two-cluster data for classifier experiments."""

    def __init__(self, dimensions, points) -> None:
        # Number of features per sample and number of samples to generate.
        self.dimensions = dimensions
        self.points = points

    def linearly_separable(self) -> np.array:
        """Return (x, y): two Gaussian blobs with features min-max scaled to [0, 1].

        BUG FIX: the original wrote
            x[d] = x[d] - np.min(x[d]) / (np.max(x[d]) - np.min(x[d]))
        which (a) divides before subtracting because `/` binds tighter than
        `-`, and (b) indexes ROWS `x[d]` while looping over the feature
        count. Min-max normalization per feature column is clearly intended.
        """
        x, y = make_blobs(
            n_samples=self.points,
            centers=2,
            n_features=self.dimensions,
            center_box=(0, 1),
        )
        for d in range(self.dimensions):
            col = x[:, d]
            x[:, d] = (col - np.min(col)) / (np.max(col) - np.min(col))
        return x, y
25.478261
70
0.583618
78
586
4.25641
0.512821
0.03012
0.036145
0.042169
0.054217
0.054217
0
0
0
0
0
0.007335
0.302048
586
22
71
26.636364
0.804401
0
0
0
0
0
0
0
0
0
0
0
0
1
0.117647
false
0
0.176471
0
0.411765
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
2e8d37dec89478528db6577f4a7c15427ede6234
7,013
py
Python
sahara/tests/unit/plugins/mapr/utils/test_func_utils.py
citrix-openstack-build/sahara
17e4f4dac5bb321ef4d5a55664cca0857127d7e6
[ "Apache-2.0" ]
null
null
null
sahara/tests/unit/plugins/mapr/utils/test_func_utils.py
citrix-openstack-build/sahara
17e4f4dac5bb321ef4d5a55664cca0857127d7e6
[ "Apache-2.0" ]
null
null
null
sahara/tests/unit/plugins/mapr/utils/test_func_utils.py
citrix-openstack-build/sahara
17e4f4dac5bb321ef4d5a55664cca0857127d7e6
[ "Apache-2.0" ]
null
null
null
# Copyright (c) 2014, MapR Technologies
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import sahara.plugins.mapr.util.func_utils as fu
import sahara.tests.unit.base as b


class PredicatesTest(b.SaharaTestCase):
    """Tests for the predicate factories/combinators in func_utils.

    Each fu.*_predicate factory returns a callable that is applied to a
    record (a dict, or None for the constant predicates) and yields a bool.
    """

    def test_true_predicate(self):
        self.assertTrue(fu.true_predicate(None))

    def test_false_predicate(self):
        self.assertFalse(fu.false_predicate(None))

    def test_not_predicate(self):
        self.assertFalse(fu.not_predicate(fu.true_predicate)(None))
        self.assertTrue(fu.not_predicate(fu.false_predicate)(None))

    def test_and_predicate(self):
        true_p = fu.true_predicate
        false_p = fu.false_predicate
        and_p = fu.and_predicate
        # Full truth table for AND of two predicates.
        self.assertTrue(and_p(true_p, true_p)(None))
        self.assertFalse(and_p(false_p, true_p)(None))
        self.assertFalse(and_p(true_p, false_p)(None))
        self.assertFalse(and_p(false_p, false_p)(None))

    def test_or_predicate(self):
        true_p = fu.true_predicate
        false_p = fu.false_predicate
        or_p = fu.or_predicate
        # Full truth table for OR of two predicates.
        self.assertTrue(or_p(true_p, true_p)(None))
        self.assertTrue(or_p(false_p, true_p)(None))
        self.assertTrue(or_p(true_p, false_p)(None))
        self.assertFalse(or_p(false_p, false_p)(None))

    def test_field_equals_predicate(self):
        field_equals_p = fu.field_equals_predicate
        arg = {'a': 'a', 'b': 'b'}
        self.assertTrue(field_equals_p('a', 'a')(arg))
        self.assertFalse(field_equals_p('b', 'a')(arg))

    def test_like_predicate(self):
        # like_p(template)(record) matches when every template field equals
        # the record's value; the optional second argument appears to be a
        # list of fields exempt from matching — verify against func_utils.
        like_p = fu.like_predicate
        arg = {'a': 'a', 'b': 'b', 'c': 'c'}
        self.assertTrue(like_p({'a': 'a', 'b': 'b', 'c': 'c'})(arg))
        self.assertTrue(like_p({'a': 'a', 'b': 'b'})(arg))
        self.assertTrue(like_p({'a': 'a'})(arg))
        self.assertTrue(like_p({'a': 'a'}, ['a'])(arg))
        self.assertTrue(like_p({})(arg))
        self.assertTrue(like_p({'a': 'a', 'b': 'b', 'c': 'a'}, ['c'])(arg))
        self.assertFalse(like_p({'a': 'a', 'b': 'b', 'c': 'a'})(arg))
        self.assertFalse(like_p({'a': 'a', 'c': 'a'})(arg))
        self.assertFalse(like_p({'c': 'a'}, ['a'])(arg))

    def test_in_predicate(self):
        in_p = fu.in_predicate
        arg = {'a': 'a', 'b': 'b'}
        self.assertTrue(in_p('a', ['a', 'b'])(arg))
        self.assertFalse(in_p('a', ['c', 'b'])(arg))
        self.assertFalse(in_p('a', [])(arg))


class FunctionsTest(b.SaharaTestCase):
    """Tests for the dict-transformer factories in func_utils.

    Each fu.*_function factory returns a callable applied to a record;
    transformers that modify the record return a NEW dict (hence the
    assertIsNot checks against the input argument).
    """

    def test_copy_function(self):
        copy_f = fu.copy_function
        arg = {'a': 'a'}
        actual = copy_f()(arg)
        expected = {'a': 'a'}
        self.assertEqual(expected, actual)
        self.assertIsNot(actual, arg)

    def test_append_field_function(self):
        append_field_f = fu.append_field_function
        arg = {'a': 'a'}
        actual = append_field_f('b', 'b')(arg)
        expected = {'a': 'a', 'b': 'b'}
        self.assertEqual(expected, actual)
        self.assertIsNot(actual, arg)

    def test_append_fields_function(self):
        append_fields_f = fu.append_fields_function
        arg = {'a': 'a'}
        # Appending several fields.
        actual = append_fields_f({'b': 'b', 'c': 'c'})(arg)
        expected = {'a': 'a', 'b': 'b', 'c': 'c'}
        self.assertEqual(expected, actual)
        self.assertIsNot(actual, arg)
        # Appending a single field.
        actual = append_fields_f({'b': 'b'})(arg)
        expected = {'a': 'a', 'b': 'b'}
        self.assertEqual(expected, actual)
        self.assertIsNot(actual, arg)
        # Appending nothing still returns a copy.
        actual = append_fields_f({})(arg)
        expected = {'a': 'a'}
        self.assertEqual(expected, actual)
        self.assertIsNot(actual, arg)

    def test_get_values_pair_function(self):
        get_values_pair_f = fu.get_values_pair_function
        arg = {'a': 'a', 'b': 'b'}
        actual = get_values_pair_f('a', 'b')(arg)
        expected = ('a', 'b')
        self.assertEqual(expected, actual)

    def test_get_field_function(self):
        get_field_f = fu.get_field_function
        arg = {'a': 'a', 'b': 'b'}
        actual = get_field_f('a')(arg)
        expected = ('a', 'a')
        self.assertEqual(expected, actual)

    def test_get_fields_function(self):
        get_fields_f = fu.get_fields_function
        arg = {'a': 'a', 'b': 'b'}
        actual = get_fields_f(['a', 'b'])(arg)
        expected = [('a', 'a'), ('b', 'b')]
        self.assertEqual(expected, actual)
        actual = get_fields_f(['a'])(arg)
        expected = [('a', 'a')]
        self.assertEqual(expected, actual)

    def test_extract_fields_function(self):
        extract_fields_f = fu.extract_fields_function
        arg = {'a': 'a', 'b': 'b'}
        actual = extract_fields_f(['a', 'b'])(arg)
        expected = {'a': 'a', 'b': 'b'}
        self.assertEqual(expected, actual)
        actual = extract_fields_f(['a'])(arg)
        expected = {'a': 'a'}
        self.assertEqual(expected, actual)

    def test_get_value_function(self):
        get_value_f = fu.get_value_function
        arg = {'a': 'a', 'b': 'b'}
        actual = get_value_f('a')(arg)
        expected = 'a'
        self.assertEqual(expected, actual)

    def test_set_default_value_function(self):
        set_default_value_f = fu.set_default_value_function
        arg = {'a': 'a'}
        # Missing key gets the default.
        actual = set_default_value_f('b', 'b')(arg)
        expected = {'a': 'a', 'b': 'b'}
        self.assertEqual(expected, actual)
        self.assertIsNot(actual, arg)
        # Existing key is NOT overwritten.
        actual = set_default_value_f('a', 'b')(arg)
        expected = {'a': 'a'}
        self.assertEqual(expected, actual)
        self.assertIsNot(actual, arg)

    def test_set_default_values_function(self):
        set_default_values_f = fu.set_default_values_function
        arg = {'a': 'a'}
        # Existing keys keep their value; missing keys get defaults.
        actual = set_default_values_f({'a': 'b', 'c': 'c'})(arg)
        expected = {'a': 'a', 'c': 'c'}
        self.assertEqual(expected, actual)
        self.assertIsNot(actual, arg)
        actual = set_default_values_f({'b': 'b'})(arg)
        expected = {'a': 'a', 'b': 'b'}
        self.assertEqual(expected, actual)
        self.assertIsNot(actual, arg)
        actual = set_default_values_f({})(arg)
        expected = {'a': 'a'}
        self.assertEqual(expected, actual)
        self.assertIsNot(actual, arg)

    def test_values_pair_to_dict_function(self):
        values_pair_to_dict_f = fu.values_pair_to_dict_function
        arg = ('a', 'b')
        actual = values_pair_to_dict_f('a', 'b')(arg)
        expected = {'a': 'a', 'b': 'b'}
        self.assertEqual(expected, actual)
34.546798
76
0.592756
950
7,013
4.148421
0.114737
0.020299
0.015986
0.020299
0.642223
0.580817
0.541233
0.445572
0.336717
0.317178
0
0.001502
0.240696
7,013
202
77
34.717822
0.738592
0.081135
0
0.37415
0
0
0.027527
0
0
0
0
0
0.367347
1
0.129252
false
0
0.013605
0
0.156463
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
2e8e0133f4cc67b5978f48ca2c017cf170b948d8
609
py
Python
astronat/phot/MegaCam_PanSTARRS/MCgen2_PS1/Mixed_MegaCamGen2_PS1.py
nstarman/astronat
9e1f41c6de1ca6adbd2bf99414a4c9b61838abf6
[ "BSD-3-Clause" ]
1
2020-11-20T18:25:26.000Z
2020-11-20T18:25:26.000Z
astronat/phot/MegaCam_PanSTARRS/MCgen2_PS1/Mixed_MegaCamGen2_PS1.py
nstarman/astronat
9e1f41c6de1ca6adbd2bf99414a4c9b61838abf6
[ "BSD-3-Clause" ]
3
2020-09-09T06:10:20.000Z
2020-09-16T05:56:10.000Z
astronat/phot/MegaCam_PanSTARRS/MCgen2_PS1/Mixed_MegaCamGen2_PS1.py
nstarman/astronat
9e1f41c6de1ca6adbd2bf99414a4c9b61838abf6
[ "BSD-3-Clause" ]
null
null
null
# -*- coding: utf-8 -*- """Mega-Cam gen2 band and PanSTARRS 1 band Mixed Functions.""" __author__ = "Nathaniel Starkman" __copyright__ = "Copyright 2018, " __credits__ = [ "http://www.cadc-ccda.hia-iha.nrc-cnrc.gc.ca/en/megapipe/docs/filt.html" ] __all__ = [] ############################################################################# # IMPORTS ############################################################################# # CODE ############################################################################# ############################################################################# # END
23.423077
77
0.320197
39
609
4.589744
0.948718
0
0
0
0
0
0
0
0
0
0
0.012635
0.090312
609
25
78
24.36
0.310469
0.157635
0
0
0
0.166667
0.530612
0
0
0
0
0
0
1
0
false
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
2e8e056676584d6baf6b9485f76064371025e1cf
8,405
py
Python
docs/rips/tests/test_cases.py
OPM/ResInsight-UserDocumentation
2af2c3a5ef297c0061d842944360a83bf8e49c36
[ "MIT" ]
1
2020-04-25T21:24:45.000Z
2020-04-25T21:24:45.000Z
docs/rips/tests/test_cases.py
OPM/ResInsight-UserDocumentation
2af2c3a5ef297c0061d842944360a83bf8e49c36
[ "MIT" ]
7
2020-02-11T07:42:10.000Z
2020-09-28T17:18:01.000Z
docs/rips/tests/test_cases.py
OPM/ResInsight-UserDocumentation
2af2c3a5ef297c0061d842944360a83bf8e49c36
[ "MIT" ]
2
2020-04-02T09:33:45.000Z
2020-04-09T19:44:53.000Z
import sys import os import math import pytest import grpc import tempfile sys.path.insert(1, os.path.join(sys.path[0], "../../")) import rips import dataroot def test_Launch(rips_instance, initialize_test): assert rips_instance is not None def test_EmptyProject(rips_instance, initialize_test): cases = rips_instance.project.cases() assert len(cases) is 0 def test_OneCase(rips_instance, initialize_test): case = rips_instance.project.load_case( dataroot.PATH + "/TEST10K_FLT_LGR_NNC/TEST10K_FLT_LGR_NNC.EGRID" ) assert case.name == "TEST10K_FLT_LGR_NNC" assert case.id == 0 cases = rips_instance.project.cases() assert len(cases) is 1 def test_BoundingBox(rips_instance, initialize_test): case = rips_instance.project.load_case( dataroot.PATH + "/TEST10K_FLT_LGR_NNC/TEST10K_FLT_LGR_NNC.EGRID" ) assert case.name == "TEST10K_FLT_LGR_NNC" boundingbox = case.reservoir_boundingbox() assert math.isclose(3382.90, boundingbox.min_x, abs_tol=1.0e-1) assert math.isclose(5850.48, boundingbox.max_x, abs_tol=1.0e-1) assert math.isclose(4157.45, boundingbox.min_y, abs_tol=1.0e-1) assert math.isclose(7354.93, boundingbox.max_y, abs_tol=1.0e-1) assert math.isclose(-4252.61, boundingbox.min_z, abs_tol=1.0e-1) assert math.isclose(-4103.60, boundingbox.max_z, abs_tol=1.0e-1) min_depth, max_depth = case.reservoir_depth_range() assert math.isclose(4103.60, min_depth, abs_tol=1.0e-1) assert math.isclose(4252.61, max_depth, abs_tol=1.0e-1) def test_MultipleCases(rips_instance, initialize_test): case_paths = [] case_paths.append(dataroot.PATH + "/TEST10K_FLT_LGR_NNC/TEST10K_FLT_LGR_NNC.EGRID") case_paths.append(dataroot.PATH + "/TEST10K_FLT_LGR_NNC/TEST10K_FLT_LGR_NNC.EGRID") case_paths.append(dataroot.PATH + "/TEST10K_FLT_LGR_NNC/TEST10K_FLT_LGR_NNC.EGRID") case_names = [] for case_path in case_paths: case_name = os.path.splitext(os.path.basename(case_path))[0] case_names.append(case_name) rips_instance.project.load_case(path=case_path) cases = rips_instance.project.cases() assert len(cases) == 
len(case_names) for i, case_name in enumerate(case_names): assert case_name == cases[i].name def get_cell_index_with_ijk(cell_info, i, j, k): for (idx, cell) in enumerate(cell_info): if cell.local_ijk.i == i and cell.local_ijk.j == j and cell.local_ijk.k == k: return idx return -1 def check_corner(actual, expected): assert math.isclose(actual.x, expected[0], abs_tol=0.1) assert math.isclose(actual.y, expected[1], abs_tol=0.1) assert math.isclose(actual.z, expected[2], abs_tol=0.1) def test_10k(rips_instance, initialize_test): case_path = dataroot.PATH + "/TEST10K_FLT_LGR_NNC/TEST10K_FLT_LGR_NNC.EGRID" case = rips_instance.project.load_case(path=case_path) assert len(case.grids()) == 2 cell_count_info = case.cell_count() assert cell_count_info.active_cell_count == 11125 assert cell_count_info.reservoir_cell_count == 316224 time_steps = case.time_steps() assert len(time_steps) == 9 days_since_start = case.days_since_start() assert len(days_since_start) == 9 cell_info = case.cell_info_for_active_cells() assert len(cell_info) == cell_count_info.active_cell_count # Check an active cell (found in resinsight ui) cell_index = get_cell_index_with_ijk(cell_info, 23, 44, 19) assert cell_index != -1 cell_centers = case.active_cell_centers() assert len(cell_centers) == cell_count_info.active_cell_count # Check the cell center for the specific cell assert math.isclose(3627.17, cell_centers[cell_index].x, abs_tol=0.1) assert math.isclose(5209.75, cell_centers[cell_index].y, abs_tol=0.1) assert math.isclose(4179.6, cell_centers[cell_index].z, abs_tol=0.1) cell_corners = case.active_cell_corners() assert len(cell_corners) == cell_count_info.active_cell_count # Expected values from ResInsight UI expected_corners = [ [3565.22, 5179.02, 4177.18], [3655.67, 5145.34, 4176.63], [3690.07, 5240.69, 4180.02], [3599.87, 5275.16, 4179.32], [3564.13, 5178.61, 4179.75], [3654.78, 5144.79, 4179.23], [3688.99, 5239.88, 4182.7], [3598.62, 5274.48, 4181.96], ] 
check_corner(cell_corners[cell_index].c0, expected_corners[0]) check_corner(cell_corners[cell_index].c1, expected_corners[1]) check_corner(cell_corners[cell_index].c2, expected_corners[2]) check_corner(cell_corners[cell_index].c3, expected_corners[3]) check_corner(cell_corners[cell_index].c4, expected_corners[4]) check_corner(cell_corners[cell_index].c5, expected_corners[5]) check_corner(cell_corners[cell_index].c6, expected_corners[6]) check_corner(cell_corners[cell_index].c7, expected_corners[7]) # No coarsening info for this case coarsening_info = case.coarsening_info() assert len(coarsening_info) == 0 def test_PdmObject(rips_instance, initialize_test): case_path = dataroot.PATH + "/TEST10K_FLT_LGR_NNC/TEST10K_FLT_LGR_NNC.EGRID" case = rips_instance.project.load_case(path=case_path) assert case.id == 0 assert case.address() is not 0 assert case.__class__.__name__ == "EclipseCase" @pytest.mark.skipif( sys.platform.startswith("linux"), reason="Brugge is currently exceptionally slow on Linux", ) def test_brugge_0010(rips_instance, initialize_test): case_path = dataroot.PATH + "/Case_with_10_timesteps/Real10/BRUGGE_0010.EGRID" case = rips_instance.project.load_case(path=case_path) assert len(case.grids()) == 1 cellCountInfo = case.cell_count() assert cellCountInfo.active_cell_count == 43374 assert cellCountInfo.reservoir_cell_count == 60048 time_steps = case.time_steps() assert len(time_steps) == 11 days_since_start = case.days_since_start() assert len(days_since_start) == 11 @pytest.mark.skipif( sys.platform.startswith("linux"), reason="Brugge is currently exceptionally slow on Linux", ) def test_replaceCase(rips_instance, initialize_test): project = rips_instance.project.open( dataroot.PATH + "/TEST10K_FLT_LGR_NNC/10KWithWellLog.rsp" ) case_path = dataroot.PATH + "/Case_with_10_timesteps/Real0/BRUGGE_0000.EGRID" case = project.case(case_id=0) assert case is not None assert case.name == "TEST10K_FLT_LGR_NNC" assert case.id == 0 cases = rips_instance.project.cases() 
assert len(cases) is 1 case.replace(new_grid_file=case_path) # Check that the case object has been changed assert case.name == "BRUGGE_0000" assert case.id == 0 cases = rips_instance.project.cases() assert len(cases) is 1 # Check that retrieving the case object again will yield the changed object case = project.case(case_id=0) assert case.name == "BRUGGE_0000" assert case.id == 0 def test_loadNonExistingCase(rips_instance, initialize_test): case_path = "Nonsense/Nonsense/Nonsense" with pytest.raises(grpc.RpcError): assert rips_instance.project.load_case(case_path) @pytest.mark.skipif( sys.platform.startswith("linux"), reason="Brugge is currently exceptionally slow on Linux", ) def test_exportFlowCharacteristics(rips_instance, initialize_test): case_path = dataroot.PATH + "/Case_with_10_timesteps/Real0/BRUGGE_0000.EGRID" case = rips_instance.project.load_case(case_path) with tempfile.TemporaryDirectory(prefix="rips") as tmpdirname: print("Temporary folder: ", tmpdirname) file_name = tmpdirname + "/exportFlowChar.txt" case.export_flow_characteristics( time_steps=8, producers=[], injectors="I01", file_name=file_name ) def test_selected_cells(rips_instance, initialize_test): case = rips_instance.project.load_case( dataroot.PATH + "/TEST10K_FLT_LGR_NNC/TEST10K_FLT_LGR_NNC.EGRID" ) assert case.name == "TEST10K_FLT_LGR_NNC" selected_cells = case.selected_cells() assert len(selected_cells) == 0 time_step_info = case.time_steps() for (tidx, timestep) in enumerate(time_step_info): # Try to read for SOIL the time step (will be empty since nothing is selected) soil_results = case.selected_cell_property("DYNAMIC_NATIVE", "SOIL", tidx) assert len(soil_results) == 0
37.690583
87
0.723974
1,241
8,405
4.625302
0.205479
0.058537
0.047561
0.058537
0.529617
0.516376
0.447038
0.405226
0.366725
0.319512
0
0.057278
0.167043
8,405
222
88
37.86036
0.762605
0.04188
0
0.287356
0
0
0.112865
0.071473
0
0
0
0
0.304598
1
0.08046
false
0
0.045977
0
0.137931
0.005747
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
2e8e45f33735173ba981879373ba6646052dcfb9
104
py
Python
fantasyanalyzer/main/main.py
dmowen2/FantasyAnalyzer
b113615ac711560718c595eeef3e771a2a4e2a69
[ "MIT" ]
null
null
null
fantasyanalyzer/main/main.py
dmowen2/FantasyAnalyzer
b113615ac711560718c595eeef3e771a2a4e2a69
[ "MIT" ]
null
null
null
fantasyanalyzer/main/main.py
dmowen2/FantasyAnalyzer
b113615ac711560718c595eeef3e771a2a4e2a69
[ "MIT" ]
null
null
null
#basic file to run the program import mainApp startprogram = mainApp.commandLine() startprogram.run()
14.857143
36
0.788462
13
104
6.307692
0.769231
0
0
0
0
0
0
0
0
0
0
0
0.134615
104
6
37
17.333333
0.911111
0.278846
0
0
0
0
0
0
0
0
0
0
0
1
0
false
0
0.333333
0
0.333333
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
1
0
0
0
0
3
2e8e8e12d320aec8142afc42dd8fc9a16e93586d
6,103
py
Python
iamim_gt_planner.py
cernodile/py-deltaparser
a1767e0429d8bf4e12364c4dcce41e7433065049
[ "MIT" ]
2
2021-04-21T22:05:50.000Z
2021-08-14T21:45:03.000Z
iamim_gt_planner.py
cernodile/py-deltaparser
a1767e0429d8bf4e12364c4dcce41e7433065049
[ "MIT" ]
1
2021-06-11T14:18:23.000Z
2021-06-11T15:16:02.000Z
iamim_gt_planner.py
cernodile/py-deltaparser
a1767e0429d8bf4e12364c4dcce41e7433065049
[ "MIT" ]
null
null
null
#!/usr/bin/python """ @cernodile/py-deltaparser A Python script to convert Growtopia's items.dat to human-readable indexable format. File: iamim_gt_planner.py Purpose: To generate data file for Iamim's GT Planner License: See LICENSE.txt in project root directory. """ import csv import item_parser def filter(item): """Filters out items that you should not be able to use in a world planner.""" if item.ID % 2 == 1: return False if item.Type == 0 or item.Type == 1 or item.Type == 4 or item.Type == 8 or item.Type == 19 \ or item.Type == 20 or item.Type == 37 or item.Type == 44 or item.Type == 57 or item.Type == 64 or item.Type == 107 \ or item.Type == 112 or item.Type == 120 or item.Type == 129: return False # Any bedrock-type that is within startopia if item.Type == 15 and item.ID > 6000 and item.ID <= 6742: return False if "null_item" in item.Name: return False if "Guild Flag" in item.Name: return False # Blank, UPDATE_STORE, Valhowla Treasure if item.ID == 0 or item.ID == 244 or item.ID == 4368: return False return True def get_item_type(Type): if Type == 18 or Type == 22 or Type == 23 or Type == 28: return "Background" return "Foreground" def get_informational_type(item): if item.Type == 7: return "Bouncy" elif item.Type == 2 or item.Type == 13 or item.Type == 26: return "Door" elif item.Type == 3: return "Lock" elif item.Type == 6 or item.Type == 45 or item.Type == 93: return "Death" elif item.Type == 9: return "Entrance" elif item.Type == 10: return "Sign" elif item.Type == 12 or item.Type == 31 or item.Type == 32 or item.Type == 122: return "Togglable Block" elif item.Type == 14 or item.CollisionType == 2: return "Platform" elif item.Type == 16 or item.Type == 25 or item.Type == 126 or item.Type == 136 or item.ID == 5238: return "Pain" elif item.Type == 27: return "Checkpoint" elif item.Type == 28: return "Music Note" elif item.Type == 41 or item.Type == 81 or item.Type == 89 or item.Type == 134: return "Weather Machine" elif item.Type == 60: return "Wind" elif 
item.Type == 69 or item.Type == 70 or item.Type == 71 or item.Type == 79: return "Steam" elif item.Type == 113: return "Bots" else: return get_item_type(item.Type) def get_special_data(item): name = item.FileName.replace(".rttex", "") x = item.TexX y = item.TexY if (item.ID >= 3258 and item.ID <= 3268) or item.ID == 3280 or item.ID == 3282 or item.ID == 3412 or item.ID == 3414 \ or (item.ID >= 3752 and item.ID <= 3756) or item.ID == 3766 or item.ID == 3768: name = "Steam_items" x = 0 y = 0 if item.ID == 3258 or item.ID == 3268 or item.ID == 3412 or item.ID == 3756: x = 8 if item.ID == 3262 or item.ID == 3280 or item.ID == 3766 or item.ID == 3768: x = 16 if item.ID == 3264 or item.ID == 3282 or item.ID == 3752: x = 24 if item.ID >= 3266 and item.ID <= 3282: y = 6 if item.ID == 3412 or item.ID == 3414 or item.ID == 3752 or item.ID == 3766: y = 12 if item.ID == 3754 or item.ID == 3756 or item.ID == 3768: y = 18 if item.ID == 620 or item.ID == 3592: name = "pipes" x = 0 y = 0 if item.ID == 620 else 2 if item.ID >= 2242 and item.ID <= 2250: name = "crystals" y = 1 if item.ID == 2250 else 0 x = (item.ID - 2242) // 2 if item.ID != 2250 else 0 if item.ID >= 4382 and item.ID <= 4398: name = "bunting" x = y = 0 if item.ID == 4384: x = 4 elif item.ID == 4386: y = 1 elif item.ID == 4388: x = 4 y = 1 elif item.ID == 4390: y = 2 elif item.ID == 4392: x = 4 y = 2 elif item.ID == 4394: x = 6 y = 2 elif item.ID == 4396: y = 3 elif item.ID == 4398: x = 2 y = 3 if item.ID == 10254: x = y = 0 name = "Dining" return (name, x, y) def get_storage_type(item): if (item.ID >= 3258 and item.ID <= 3268) or item.ID == 3280 or item.ID == 3282 or item.ID == 3412 or item.ID == 3414 \ or (item.ID >= 3752 and item.ID <= 3756) or item.ID == 3766 or item.ID == 3768: return 2 return item.StorageType def write_iamim_gt_planner(items): with open("iamim_gt_planner.csv", "w") as csvfile: writer = csv.writer(csvfile, delimiter="|") writer.writerow([1, "Water", "Water", 2, "Water", "Water", 0, 0, 0]) for 
item in items: item = items[item] if filter(item): data = get_special_data(item) writer.writerow([item.ID, item.Name, get_item_type(item.Type), get_storage_type(item), get_informational_type(item), data[0], data[1], data[2], 1 if item.Properties & 0x01 or item.ID == 4700 else 0]) # actual 4660 is a confetti cannon, 5604 a goldfish bowler hat. if item.ID == 4658: writer.writerow([4660, "Detonated Uranium Block", "Foreground", 1, "Foreground", "tiles_page10", 3, 1, 0]) elif item.ID == 5602: writer.writerow([5604, "Drilled Ice Crust Block", "Foreground", 1, "Foreground", "tiles_page10", 1, 1, 0]) elif item.ID == 7866: writer.writerow([7865, "Topiary Hedge (Swirly)", "Foreground", 1, "Foreground", "tiles_page13", 28, 11, 1]) writer.writerow([7866, "Topiary Hedge (Bird)", "Foreground", 1, "Foreground", "tiles_page13", 26, 11, 1]) writer.writerow([7867, "Topiary Hedge (Circle)", "Foreground", 1, "Foreground", "tiles_page13", 27, 11, 1]) elif item.ID == 9030: writer.writerow([9032, "Spooky Bunting (pumpkin)", "Foreground", 1, "Foreground", "tiles_page14", 18, 10, 1]) writer.writerow([9034, "Spooky Bunting (ghost)", "Foreground", 1, "Foreground", "tiles_page14", 19, 10, 1]) writer.writerow([9036, "Spooky Bunting (bats)", "Foreground", 1, "Foreground", "tiles_page14", 21, 10, 1]) elif item.ID == 9198: writer.writerow([9199, "Ice Sculptures (Flower)", "Foreground", 1, "Foreground", "tiles_page14", 9, 12, 0]) elif item.ID == 9200: writer.writerow([9201, "Ice Sculptures (Teddy Bear)", "Foreground", 1, "Foreground", "tiles_page14", 10, 12, 0]) elif item.ID == 9202: writer.writerow([9203, "Ice Sculptures (Star)", "Foreground", 1, "Foreground", "tiles_page14", 11, 12, 0]) if __name__ == "__main__": items = item_parser.parse("items.dat") write_iamim_gt_planner(items)
35.690058
203
0.636081
1,014
6,103
3.77712
0.252465
0.117493
0.066841
0.074674
0.274151
0.134204
0.098695
0.078851
0.072585
0.072585
0
0.118574
0.209569
6,103
170
204
35.9
0.675373
0.077994
0
0.164384
1
0
0.150953
0
0
0
0.000713
0
0
1
0.041096
false
0
0.013699
0
0.246575
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
2e8f0b81bbd9561984ac38ea8dfc729ccec17dd2
236
py
Python
organizerui/model.py
angusmacdonald/gopro-organizer
6fc77fab231d016186b4bb4df5e24ecf039b72d9
[ "MIT" ]
6
2016-04-23T23:44:10.000Z
2020-10-30T07:35:19.000Z
organizerui/model.py
angusmacdonald/gopro-organizer
6fc77fab231d016186b4bb4df5e24ecf039b72d9
[ "MIT" ]
null
null
null
organizerui/model.py
angusmacdonald/gopro-organizer
6fc77fab231d016186b4bb4df5e24ecf039b72d9
[ "MIT" ]
null
null
null
import sys sys.path.append('') from organizercore import organizer class OrganizerModel: def start_processing(self, input_dir, output_dir, settings): organizer.Organizer(settings).process_gopro_dir(input_dir, output_dir)
23.6
78
0.783898
30
236
5.933333
0.633333
0.089888
0.157303
0.191011
0
0
0
0
0
0
0
0
0.131356
236
9
79
26.222222
0.868293
0
0
0
0
0
0
0
0
0
0
0
0
1
0.166667
false
0
0.333333
0
0.666667
0
0
0
0
null
0
0
1
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
1
0
1
0
0
4
2e903b0c067785052b3d3529823e93eb770c9d80
2,959
py
Python
tests/test_to_human.py
AleCandido/human_dates
56bb10587b69e84b27a27117b2ecb3b1df09a028
[ "MIT" ]
1
2020-05-11T12:47:23.000Z
2020-05-11T12:47:23.000Z
tests/test_to_human.py
AleCandido/human_dates
56bb10587b69e84b27a27117b2ecb3b1df09a028
[ "MIT" ]
9
2020-04-30T13:43:30.000Z
2020-10-19T15:32:54.000Z
tests/test_to_human.py
AleCandido/human_dates
56bb10587b69e84b27a27117b2ecb3b1df09a028
[ "MIT" ]
null
null
null
import datetime as dt import pytest import human_dates class TestTimeAgoInWords: """ test time_ago_in_words function """ @pytest.fixture(autouse=True) def _import_templates(self, templates): """ import templates from conftest local plugin """ self.templates = templates @pytest.fixture(autouse=True) def _run_time_ago_comparison(self): """ autoexecute after the specific test has been defined, running the actual comparison """ yield for date, expected in zip(self.dates, self.expected): result = human_dates.time_ago_in_words(dt.datetime.now() + date) assert expected == result def test_time_years(self): self.dates = [-dt.timedelta(days=366 * 4), dt.timedelta(days=366 * 4)] self.expected = [ self.templates.past % "4 years", self.templates.future % "4 years", ] def test_time_months(self): self.dates = [-dt.timedelta(days=31 * 3), dt.timedelta(days=31 * 3)] self.expected = [ self.templates.past % "3 months", self.templates.future % "3 months", ] def test_time_weeks(self): self.dates = [-dt.timedelta(days=7 * 3 + 1), dt.timedelta(days=7 * 3 + 1)] self.expected = [ self.templates.past % "3 weeks", self.templates.future % "3 weeks", ] def test_time_days(self): self.dates = [-dt.timedelta(days=5.1), dt.timedelta(days=5.1)] self.expected = [ self.templates.past % "5 days", self.templates.future % "5 days", ] def test_time_one_day(self): self.dates = [-dt.timedelta(hours=24.1), dt.timedelta(hours=24.5)] self.expected = ["yesterday", "tomorrow"] def test_time_hours(self): self.dates = [ -dt.timedelta(hours=17.1), dt.timedelta(hours=5.1), -dt.timedelta(minutes=75), ] self.expected = [ self.templates.past % "17 hours", self.templates.future % "5 hours", self.templates.past % "an hour", ] def test_time_minutes(self): self.dates = [ -dt.timedelta(minutes=41.3), dt.timedelta(minutes=26.3), dt.timedelta(seconds=67), ] self.expected = [ self.templates.past % "41 minutes", self.templates.future % "26 minutes", self.templates.future % "a minute", ] def test_time_seconds(self): self.dates 
= [-dt.timedelta(seconds=19.3), dt.timedelta(seconds=45.8)] self.expected = [ self.templates.past % "19 seconds", self.templates.future % "45 seconds", ] def test_time_now(self): self.dates = [-dt.timedelta(seconds=3.7), dt.timedelta(seconds=8.1)] self.expected = ["just now"] * 2
30.505155
82
0.559649
348
2,959
4.663793
0.229885
0.135551
0.060998
0.083179
0.36907
0.211337
0
0
0
0
0
0.036783
0.310916
2,959
96
83
30.822917
0.759196
0.053734
0
0.15493
0
0
0.055637
0
0
0
0
0
0.014085
1
0.15493
false
0
0.056338
0
0.225352
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
2e9074bb1cba53266c6974290d0d2403b768e6ea
3,124
py
Python
network_anomaly/code/Basic_features/test.py
kidrabit/Data-Visualization-Lab-RND
baa19ee4e9f3422a052794e50791495632290b36
[ "Apache-2.0" ]
1
2022-01-18T01:53:34.000Z
2022-01-18T01:53:34.000Z
network_anomaly/code/Basic_features/test.py
kidrabit/Data-Visualization-Lab-RND
baa19ee4e9f3422a052794e50791495632290b36
[ "Apache-2.0" ]
null
null
null
network_anomaly/code/Basic_features/test.py
kidrabit/Data-Visualization-Lab-RND
baa19ee4e9f3422a052794e50791495632290b36
[ "Apache-2.0" ]
null
null
null
# -*- coding: utf-8 -*- import pandas as pd import csv import collections from pandas import DataFrame as df def test(data): print(data['TIME']) return 100 # data['TIME'] = data['TIME'].astype('float') # data['TIME'] = data['TIME'].astype('int') # # # IP, PORT, IP_PORT 데이터셋 생성 -> new_data # new_data = data['dstip'] # new_data = pd.DataFrame(new_data) # IP = [] # PORT = [] # dst = [] # land=[] # # dst = data['dstip'].values.tolist() # src = data['srcip'].values.tolist() # # # for i in range(len(new_data)): # IP.append(new_data.iloc[i][0].split(':')[0]) # PORT.append(new_data.iloc[i][0].split(':')[1]) # # for i in range(len(new_data)): # if src==dst: # land.append(1) # else: # land.append(0) # # # IP = pd.DataFrame(IP, columns=['IP']) # PORT = pd.DataFrame(PORT, columns=['PORT']) # IP_PORT = pd.DataFrame(dst, columns=['IP_PORT']) # LAND=pd.DataFrame(land,columns=['LAND']) # # # # new_data = pd.concat([data['TIME'], IP], axis=1) # new_data = pd.concat([new_data, PORT], axis=1) # new_data = pd.concat([new_data, IP_PORT], axis=1) # new_data = pd.concat([new_data,LAND],axis=1) # # # # # timestamp에 각 초에 따른 데이터를 넣어줌 # timestamp_IP_PORT = [] # for i in range((max(new_data['TIME']))+1): # line = [] # timestamp_IP_PORT.append(line) # # for j in range(len(new_data['TIME'])): # timestamp_IP_PORT[new_data['TIME'].iloc[j]].append(new_data['IP_PORT'].iloc[j]) # # # timestamp를 이용해서 counter에 각 초당 IP&PORT 개수를 저장함 # counter_IP_PORT = [] # for k in range(len(timestamp_IP_PORT)): # counter_IP_PORT.append(collections.Counter(timestamp_IP_PORT[k])) # # timestamp_IP = [] # # # 초단위로 바꾼 값 중의 최대값 크기만큼의 (timestamp)리스트를 만듦 # for i in range((max(new_data['TIME']))+1): # line = [] # timestamp_IP.append(line) # # # # # (timestamp)안에 각 초단위에 해당하는 dstip를 리스트형태로 넣음 # # 아래 코드 실행 후 timestamp[0:2] 로 출력하면 0초,1초에 대한 dstip 출력 # for j in range(len(new_data['TIME'])): # # timestamp_IP[new_data['TIME'].iloc[j]].append(new_data['IP'].iloc[j]) # # f.write(str(timestamp_IP)) 테스트 # # # # timestamp를 이용해서 counter에 각 
초당 IP&PORT 개수를 저장함 # counter_IP = [] # for k in range(len(timestamp_IP)): # counter_IP.append(collections.Counter(timestamp_IP[k])) # # timestamp_PORT = [] # # 초단위로 바꾼 값 중의 최대값 크기만큼의 (timestamp)리스트를 만듦 # for i in range((max(new_data['TIME']))+1): # line = [] # timestamp_PORT.append(line) # # # (timestamp)안에 각 초단위에 해당하는 dstip를 리스트형태로 넣음 # # 아래 코드 실행 후 timestamp[0:2] 로 출력하면 0초,1초에 대한 dstip 출력 # for j in range(len(new_data['TIME'])): # # print(data['time'].iloc[j]) # timestamp_PORT[new_data['TIME'].iloc[j]].append(new_data['PORT'].iloc[j]) # # counter_PORT = [] # for k in range(len(timestamp_PORT)): # counter_PORT.append(collections.Counter(timestamp_PORT[k])) # # return land if __name__ == '__main__': pass
28.925926
88
0.572983
447
3,124
3.850112
0.221477
0.10982
0.057525
0.031958
0.61011
0.520046
0.520046
0.420686
0.404997
0.311447
0
0.010647
0.248399
3,124
108
89
28.925926
0.722317
0.876761
0
0
0
0
0.043011
0
0
0
0
0
0
1
0.111111
false
0.111111
0.444444
0
0.666667
0.111111
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
null
0
0
0
0
0
0
0
1
1
0
0
0
0
3
5ce8437b0fec0991f67a078f2016c53fe445b831
24,331
py
Python
security_monkey/auditor.py
bungoume/security_monkey
90c02638a315c78535869ab71a8859d17e011a6a
[ "Apache-2.0" ]
null
null
null
security_monkey/auditor.py
bungoume/security_monkey
90c02638a315c78535869ab71a8859d17e011a6a
[ "Apache-2.0" ]
null
null
null
security_monkey/auditor.py
bungoume/security_monkey
90c02638a315c78535869ab71a8859d17e011a6a
[ "Apache-2.0" ]
null
null
null
# Copyright 2014 Netflix, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ .. module: security_monkey.auditor :platform: Unix :synopsis: This class is subclassed to add audit rules. .. version:: $$VERSION$$ .. moduleauthor:: Patrick Kelley <pkelley@netflix.com> """ import datastore from security_monkey import app, db from security_monkey.watcher import ChangeItem from security_monkey.common.jinja import get_jinja_env from security_monkey.datastore import User, AuditorSettings, Item, ItemAudit, Technology, Account, ItemAuditScore, AccountPatternAuditScore from security_monkey.common.utils import send_email from security_monkey.account_manager import get_account_by_name from security_monkey.alerters.custom_alerter import report_auditor_changes from sqlalchemy import and_ from collections import defaultdict auditor_registry = defaultdict(list) class AuditorType(type): def __init__(cls, name, bases, attrs): super(AuditorType, cls).__init__(name, bases, attrs) if cls.__name__ != 'Auditor' and cls.index: # Only want to register auditors explicitly loaded by find_modules if not '.' 
in cls.__module__: found = False for auditor in auditor_registry[cls.index]: if auditor.__module__ == cls.__module__ and auditor.__name__ == cls.__name__: found = True break if not found: app.logger.debug("Registering auditor {} {}.{}".format(cls.index, cls.__module__, cls.__name__)) auditor_registry[cls.index].append(cls) class Auditor(object): """ This class (and subclasses really) run a number of rules against the configurations and look for any violations. These violations are saved with the object and a report is made available via the Web UI and through email. """ index = None # Should be overridden i_am_singular = None # Should be overridden i_am_plural = None # Should be overridden __metaclass__ = AuditorType support_auditor_indexes = [] support_watcher_indexes = [] def __init__(self, accounts=None, debug=False): self.datastore = datastore.Datastore() self.accounts = accounts self.debug = debug self.items = [] self.team_emails = app.config.get('SECURITY_TEAM_EMAIL', []) self.emails = [] self.current_support_items = {} self.override_scores = None self.current_method_name = None if type(self.team_emails) in (str, unicode): self.emails.append(self.team_emails) elif type(self.team_emails) in (list, tuple): self.emails.extend(self.team_emails) else: app.logger.info("Auditor: SECURITY_TEAM_EMAIL contains an invalid type") for account in self.accounts: users = User.query.filter(User.daily_audit_email==True).filter(User.accounts.any(name=account)).all() self.emails.extend([user.email for user in users]) def add_issue(self, score, issue, item, notes=None): """ Adds a new issue to an item, if not already reported. 
:return: The new issue """ if notes and len(notes) > 1024: notes = notes[0:1024] if not self.override_scores: query = ItemAuditScore.query.filter(ItemAuditScore.technology == self.index) self.override_scores = query.all() # Check for override scores to apply score = self._check_for_override_score(score, item.account) for existing_issue in item.audit_issues: if existing_issue.issue == issue: if existing_issue.notes == notes: if existing_issue.score == score: app.logger.debug( "Not adding issue because it was already found:{}/{}/{}/{}\n\t{} -- {}" .format(item.index, item.region, item.account, item.name, issue, notes)) return existing_issue app.logger.debug("Adding issue: {}/{}/{}/{}\n\t{} -- {}" .format(item.index, item.region, item.account, item.name, issue, notes)) new_issue = datastore.ItemAudit(score=score, issue=issue, notes=notes, justified=False, justified_user_id=None, justified_date=None, justification=None) item.audit_issues.append(new_issue) return new_issue def prep_for_audit(self): """ To be overridden by child classes who need a way to prepare for the next run. """ pass def audit_these_objects(self, items): """ Only inspect the given items. 
""" app.logger.debug("Asked to audit {} Objects".format(len(items))) self.prep_for_audit() self.current_support_items = {} query = ItemAuditScore.query.filter(ItemAuditScore.technology == self.index) self.override_scores = query.all() methods = [getattr(self, method_name) for method_name in dir(self) if method_name.find("check_") == 0] app.logger.debug("methods: {}".format(methods)) for item in items: for method in methods: self.current_method_name = method.func_name # If the check function is disabled by an entry on Settings/Audit Issue Scores # the function will not be run and any previous issues will be cleared if not self._is_current_method_disabled(): method(item) self.items = items self.override_scores = None def _is_current_method_disabled(self): """ Determines whether this method has been marked as disabled based on Audit Issue Scores settings. """ for override_score in self.override_scores: if override_score.method == self.current_method_name + ' (' + self.__class__.__name__ + ')': return override_score.disabled return False def audit_all_objects(self): """ Read all items from the database and inspect them all. """ self.items = self.read_previous_items() self.audit_these_objects(self.items) def read_previous_items(self): """ Pulls the last-recorded configuration from the database. :return: List of all items for the given technology and the given account. """ prev_list = [] for account in self.accounts: prev = self.datastore.get_all_ctype_filtered(tech=self.index, account=account, include_inactive=False) # Returns a map of {Item: ItemRevision} for item in prev: item_revision = prev[item] new_item = ChangeItem(index=self.index, region=item.region, account=item.account.name, name=item.name, arn=item.arn, new_config=item_revision.config) new_item.audit_issues = [] new_item.db_item = item prev_list.append(new_item) return prev_list def read_previous_items_for_account(self, index, account): """ Pulls the last-recorded configuration from the database. 
:return: List of all items for the given technology and the given account. """ prev_list = [] prev = self.datastore.get_all_ctype_filtered(tech=index, account=account, include_inactive=False) # Returns a map of {Item: ItemRevision} for item in prev: item_revision = prev[item] new_item = ChangeItem(index=self.index, region=item.region, account=item.account.name, name=item.name, arn=item.arn, new_config=item_revision.config) new_item.audit_issues = [] new_item.db_item = item prev_list.append(new_item) return prev_list def save_issues(self): """ Save all new issues. Delete all fixed issues. """ app.logger.debug("\n\nSaving Issues.") # Work around for issue where previous get's may cause commit to fail db.session.rollback() for item in self.items: changes = False loaded = False if not hasattr(item, 'db_item'): loaded = True item.db_item = self.datastore._get_item(item.index, item.region, item.account, item.name) existing_issues = list(item.db_item.issues) new_issues = item.audit_issues for issue in item.db_item.issues: if not issue.auditor_setting: self._set_auditor_setting_for_issue(issue) # Add new issues old_scored = ["{} -- {} -- {} -- {} -- {}".format( old_issue.auditor_setting.auditor_class, old_issue.issue, old_issue.notes, old_issue.score, self._item_list_string(old_issue)) for old_issue in existing_issues] for new_issue in new_issues: nk = "{} -- {} -- {} -- {} -- {}".format(self.__class__.__name__, new_issue.issue, new_issue.notes, new_issue.score, self._item_list_string(new_issue)) if nk not in old_scored: changes = True app.logger.debug("Saving NEW issue {}".format(nk)) item.found_new_issue = True item.confirmed_new_issues.append(new_issue) item.db_item.issues.append(new_issue) else: for issue in existing_issues: if issue.issue == new_issue.issue and issue.notes == new_issue.notes and issue.score == new_issue.score: item.confirmed_existing_issues.append(issue) break key = "{}/{}/{}/{}".format(item.index, item.region, item.account, item.name) 
app.logger.debug("Issue was previously found. Not overwriting.\n\t{}\n\t{}".format(key, nk)) # Delete old issues new_scored = ["{} -- {} -- {} -- {}".format(new_issue.issue, new_issue.notes, new_issue.score, self._item_list_string(new_issue)) for new_issue in new_issues] for old_issue in existing_issues: ok = "{} -- {} -- {} -- {}".format(old_issue.issue, old_issue.notes, old_issue.score, self._item_list_string(old_issue)) old_issue_class = old_issue.auditor_setting.auditor_class if old_issue_class is None or (old_issue_class == self.__class__.__name__ and ok not in new_scored): changes = True app.logger.debug("Deleting FIXED or REPLACED issue {}".format(ok)) item.confirmed_fixed_issues.append(old_issue) item.db_item.issues.remove(old_issue) if changes: db.session.add(item.db_item) else: if loaded: db.session.expunge(item.db_item) db.session.commit() self._create_auditor_settings() report_auditor_changes(self) def email_report(self, report): """ Given a report, send an email using SES. """ if not report: app.logger.info("No Audit issues. Not sending audit email.") return subject = "Security Monkey {} Auditor Report".format(self.i_am_singular) send_email(subject=subject, recipients=self.emails, html=report) def create_report(self): """ Using a Jinja template (jinja_audit_email.html), create a report that can be emailed. :return: HTML - The output of the rendered template. """ jenv = get_jinja_env() template = jenv.get_template('jinja_audit_email.html') # This template expects a list of items that have been sorted by total score in # descending order. 
for item in self.items: item.totalscore = 0 for issue in item.db_item.issues: item.totalscore = item.totalscore + issue.score sorted_list = sorted(self.items, key=lambda item: item.totalscore) sorted_list.reverse() report_list = [] for item in sorted_list: if item.totalscore > 0: report_list.append(item) else: break if len(report_list) > 0: return template.render({'items': report_list}) else: return False def applies_to_account(self, account): """ Placeholder for custom auditors which may only want to run against certain types of accounts """ return True def _create_auditor_settings(self): """ Checks to see if an AuditorSettings entry exists for each issue. If it does not, one will be created with disabled set to false. """ app.logger.debug("Creating/Assigning Auditor Settings in account {} and tech {}".format(self.accounts, self.index)) query = ItemAudit.query query = query.join((Item, Item.id == ItemAudit.item_id)) query = query.join((Technology, Technology.id == Item.tech_id)) query = query.filter(Technology.name == self.index) issues = query.filter(ItemAudit.auditor_setting_id == None).all() for issue in issues: self._set_auditor_setting_for_issue(issue) db.session.commit() app.logger.debug("Done Creating/Assigning Auditor Settings in account {} and tech {}".format(self.accounts, self.index)) def _set_auditor_setting_for_issue(self, issue): auditor_setting = AuditorSettings.query.filter( and_( AuditorSettings.tech_id == issue.item.tech_id, AuditorSettings.account_id == issue.item.account_id, AuditorSettings.issue_text == issue.issue, AuditorSettings.auditor_class == self.__class__.__name__ ) ).first() if auditor_setting: auditor_setting.issues.append(issue) db.session.add(auditor_setting) return auditor_setting auditor_setting = AuditorSettings( tech_id=issue.item.tech_id, account_id=issue.item.account_id, disabled=False, issue_text=issue.issue, auditor_class=self.__class__.__name__ ) auditor_setting.issues.append(issue) db.session.add(auditor_setting) 
db.session.commit() db.session.refresh(auditor_setting) app.logger.debug("Created AuditorSetting: {} - {} - {}".format( issue.issue, self.index, issue.item.account.name)) return auditor_setting def _check_cross_account(self, src_account_number, dest_item, location): account = Account.query.filter(Account.identifier == src_account_number).first() account_name = None if account is not None: account_name = account.name src = account_name or src_account_number dst = dest_item.account if src == dst: return None notes = "SRC [{}] DST [{}]. Location: {}".format(src, dst, location) if not account_name: tag = "Unknown Cross Account Access" self.add_issue(10, tag, dest_item, notes=notes) elif account_name != dest_item.account and not account.third_party: tag = "Friendly Cross Account Access" self.add_issue(0, tag, dest_item, notes=notes) elif account_name != dest_item.account and account.third_party: tag = "Friendly Third Party Cross Account Access" self.add_issue(0, tag, dest_item, notes=notes) def _check_cross_account_root(self, source_item, dest_arn, actions): if not actions: return None account = Account.query.filter(Account.name == source_item.account).first() source_item_account_number = account.identifier if source_item_account_number == dest_arn.account_number: return None tag = "Cross-Account Root IAM" notes = "ALL IAM Roles/users/groups in account {} can perform the following actions:\n"\ .format(dest_arn.account_number) notes += "{}".format(actions) self.add_issue(6, tag, source_item, notes=notes) def get_auditor_support_items(self, auditor_index, account): for index in self.support_auditor_indexes: if index == auditor_index: audited_items = self.current_support_items.get(account + auditor_index) if audited_items is None: audited_items = self.read_previous_items_for_account(auditor_index, account) if not audited_items: app.logger.info("{} Could not load audited items for {}/{}".format(self.index, auditor_index, account)) 
self.current_support_items[account+auditor_index] = [] else: self.current_support_items[account+auditor_index] = audited_items return audited_items raise Exception("Auditor {} is not configured as an audit support auditor for {}".format(auditor_index, self.index)) def get_watcher_support_items(self, watcher_index, account): for index in self.support_watcher_indexes: if index == watcher_index: items = self.current_support_items.get(account + watcher_index) if items is None: items = self.read_previous_items_for_account(watcher_index, account) # Only the item contents should be used for watcher support # config. This prevents potentially stale issues from being # used by the auditor for item in items: item.db_item.issues = [] if not items: app.logger.info("{} Could not load support items for {}/{}".format(self.index, watcher_index, account)) self.current_support_items[account+watcher_index] = [] else: self.current_support_items[account+watcher_index] = items return items raise Exception("Watcher {} is not configured as a data support watcher for {}".format(watcher_index, self.index)) def link_to_support_item_issues(self, item, sub_item, sub_issue_message=None, issue_message=None, issue=None, score=None): """ Creates a new issue that is linked to an issue in a support auditor """ matching_issues = [] for sub_issue in sub_item.issues: if not sub_issue_message or sub_issue.issue == sub_issue_message: matching_issues.append(sub_issue) if len(matching_issues) > 0: for matching_issue in matching_issues: if issue is None: if issue_message is None: if sub_issue_message is not None: issue_message = sub_issue_message else: issue_message = "UNDEFINED" if score is not None: issue = self.add_issue(score, issue_message, item) else: issue = self.add_issue(matching_issue.score, issue_message, item) else: if score is not None: issue.score = score else: issue.score = issue.score + matching_issue.score issue.sub_items.append(sub_item) return issue def link_to_support_item(self, score, 
issue_message, item, sub_item, issue=None): """ Creates a new issue that is linked a support watcher item """ if issue is None: issue = self.add_issue(score, issue_message, item) issue.sub_items.append(sub_item) return issue def _item_list_string(self, issue): """ Use by save_issue to generate a unique id for an item """ item_ids = [] for sub_item in issue.sub_items: item_ids.append(sub_item.id) item_ids.sort() return str(item_ids) def _check_for_override_score(self, score, account): """ Return an override to the hard coded score for an issue being added. This could either be a general override score for this check method or one that is specific to a particular field in the account. :param score: the hard coded score which will be returned back if there is no applicable override :param account: The account name, used to look up the value of any pattern based overrides :return: """ for override_score in self.override_scores: # Look for an oberride entry that applies to if override_score.method == self.current_method_name + ' (' + self.__class__.__name__ + ')': # Check for account pattern override where a field in the account matches # one configured in Settings/Audit Issue Scores account = get_account_by_name(account) for account_pattern_score in override_score.account_pattern_scores: if getattr(account, account_pattern_score.account_field, None): # Standard account field, such as identifier or notes account_pattern_value = getattr(account, account_pattern_score.account_field) else: # If there is no attribute, this is an account custom field account_pattern_value = account.getCustom(account_pattern_score.account_field) if account_pattern_value is not None: # Override the score based on the matching pattern if account_pattern_value == account_pattern_score.account_pattern: app.logger.debug("Overriding score based on config {}:{} {}/{}".format(self.index, self.current_method_name + '(' + self.__class__.__name__ + ')', score, account_pattern_score.score)) score = 
account_pattern_score.score break else: # No specific override pattern fund. use the generic override score app.logger.debug("Overriding score based on config {}:{} {}/{}".format(self.index, self.current_method_name + '(' + self.__class__.__name__ + ')', score, override_score.score)) score = override_score.score return score
43.063717
211
0.586371
2,781
24,331
4.910464
0.147429
0.01406
0.014353
0.013474
0.332088
0.277827
0.244508
0.183875
0.170548
0.155316
0
0.001662
0.332498
24,331
564
212
43.140071
0.839172
0.15433
0
0.276139
0
0
0.065035
0.003447
0
0
0
0
0
1
0.061662
false
0.002681
0.02681
0
0.16622
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
5ce9c84ffdd6672839d34427b02aa2894b9eec7a
16,473
py
Python
rst2reveal/Parser.py
rartino/rst2reveal
c31a0939275f26219aaa19ce4e55c3c08491aac8
[ "MIT" ]
null
null
null
rst2reveal/Parser.py
rartino/rst2reveal
c31a0939275f26219aaa19ce4e55c3c08491aac8
[ "MIT" ]
null
null
null
rst2reveal/Parser.py
rartino/rst2reveal
c31a0939275f26219aaa19ce4e55c3c08491aac8
[ "MIT" ]
null
null
null
try: import locale locale.setlocale(locale.LC_ALL, '') except: pass import os, sys, codecs import docutils.core from .RevealTranslator import RST2RevealTranslator, RST2RevealWriter # Import custom directives from .TwoColumnsDirective import * from .PygmentsDirective import * from .VideoDirective import * from .PlotDirective import * from .SmallRole import * from .VspaceRole import * from .ClassDirective import * from .ClearDirective import * from .TemplateDirective import * class Parser: """Class converting a stand-alone reST file into a Reveal.js-powered HTML5 file, using the provided options.""" def __init__(self, input_file, output_file='', theme='default', transition = 'default', stylesheet='', mathjax_path='', pygments_style='', vertical_center=False, horizontal_center=False, title_center=False, footer=False, page_number=False, controls=False, firstslide_template='', footer_template='', init_html=False, reveal_root='reveal'): """ Constructor of the Parser class. ``create_slides()`` must then be called to actually produce the presentation. Arguments: * input_file : name of the reST file to be processed (obligatory). * output_file: name of the HTML file to be generated (default: same as input_file, but with a .html extension). * theme: the name of the theme to be used ({**default**, beige, night}). * transition: the transition between slides ({**default**, cube, page, concave, zoom, linear, fade, none}). * stylesheet: a custom CSS file which extends or replaces the used theme. * mathjax_path: URL or path to the MathJax library (default: http://cdn.mathjax.org/mathjax/latest/MathJax.js). * pygments_style: the style to be used for syntax color-highlighting using Pygments. The list depends on your Pygments version, type:: from pygments.styles import STYLE_MAP print STYLE_MAP.keys() * vertical_center: boolean stating if the slide content should be vertically centered (default: False). 
* horizontal_center: boolean stating if the slide content should be horizontally centered (default: False). * title_center: boolean stating if the title of each slide should be horizontally centered (default: False). * footer: boolean stating if the footer line should be displayed (default: False). * page_number: boolean stating if the slide number should be displayed (default: False). * controls: boolean stating if the control arrows should be displayed (default: False). * firstslide_template: template string defining how the first slide will be rendered in HTML. * footer_template: template string defining how the footer will be rendered in HTML. The ``firstslide_template`` and ``footer_template`` can use the following substitution variables: * %(title)s : will be replaced by the title of the presentation. * %(subtitle)s : subtitle of the presentation (either a level-2 header or the :subtitle: field, if any). * %(author)s : :author: field (if any). * %(institution)s : :institution: field (if any). * %(email)s : :email: field (if any). * %(date)s : :date: field (if any). * %(is_author)s : the '.' character if the :author: field is defined, '' otherwise. * %(is_subtitle)s : the '-' character if the subtitle is defined, '' otherwise. * %(is_institution)s : the '-' character if the :institution: field is defined, '' otherwise. You can also use your own fields in the templates. 
""" # Input/Output files self.input_file = input_file self.output_file = output_file # Style self.theme = theme self.stylesheet = stylesheet self.transition = transition self.vertical_center=vertical_center self.horizontal_center = horizontal_center self.title_center = title_center self.write_footer=footer self.page_number=page_number self.controls=controls # MathJax if mathjax_path =='': self.mathjax_path = 'http://cdn.mathjax.org/mathjax/latest/MathJax.js' else: self.mathjax_path = mathjax_path # Pygments self.pygments_style = pygments_style # Template for the first slide self.firstslide_template = firstslide_template # Temnplate for the footer self.footer_template = footer_template # Initalization html for reveal.js self.init_html = init_html # Root path to reaveal self.reveal_root = reveal_root def create_slides(self): """Creates the HTML5 presentation based on the arguments given to the constructor.""" # Copy the reveal library in the current directory self._copy_reveal() # Create the writer and retrieve the parts self.html_writer = RST2RevealWriter() self.html_writer.translator_class = RST2RevealTranslator with codecs.open(self.input_file, 'r', 'utf8') as infile: self.parts = docutils.core.publish_parts(source=infile.read(), writer=self.html_writer) # Produce the html file self._produce_output() def _copy_reveal(self): curr_dir = os.path.dirname(os.path.realpath(self.output_file)) cwd = os.getcwd() # Copy the reveal subfolder #if not os.path.isdir(curr_dir+'/reveal'): # sources_dir = os.path.abspath(os.path.dirname(__file__)+'/reveal') # import shutil # shutil.copytree(sources_dir, curr_dir+'/reveal') # Copy the rst2reveal.css if not os.path.exists(curr_dir+'/rst2reveal.css'): source_file = os.path.abspath(os.path.dirname(__file__)+'/reveal/css/rst2reveal.css') import shutil shutil.copyfile(source_file, curr_dir+'/rst2reveal.css') # Generate the Pygments CSS file self.is_pygments = False if not self.pygments_style == '': # Check if Pygments is installed try: 
import pygments self.is_pygments = True except: print('Warning: Pygments is not installed, the code will not be highlighted.') print('You should install it with `pip install pygments`') return os.chdir(curr_dir) import subprocess, shutil os.system("pygmentize -S "+self.pygments_style+" -f html -O bg=light > pygments.css") # Fix the bug where the literal color goes to math blocks... with codecs.open('pygments.css', 'r', 'utf8') as infile: with codecs.open('pygments.css.tmp', 'w', 'utf8') as outfile: for aline in infile: outfile.write('.highlight '+aline) shutil.move('pygments.css.tmp', 'pygments.css') os.chdir(cwd) def _produce_output(self): self.title = self.parts['title'] self._analyse_metainfo() header = self._generate_header() body = self._generate_body() footer = self._generate_footer() document_content = header + body + footer with codecs.open(self.output_file, 'w', 'utf8') as wfile: wfile.write(document_content) def _generate_body(self): body = """ <body> <div class="static-content"></div> <div class="reveal"> <div class="slides"> %(titleslide)s %(body)s </div> </div> """ % {'body': self.parts['body'], 'titleslide' : self.titleslide} return body def _analyse_metainfo(self): def clean(text): import re if len(re.findall(r'<paragraph>', text)) > 0: text = re.findall(r'<paragraph>(.+)</paragraph>', text)[0] if len(re.findall(r'<author>', text)) > 0: text = re.findall(r'<author>(.+)</author>', text)[0] if len(re.findall(r'<date>', text)) > 0: text = re.findall(r'<date>(.+)</date>', text)[0] if len(re.findall(r'<reference', text)) > 0: text = re.findall(r'<reference refuri="mailto:(.+)">', text)[0] return text self.meta_info ={'author': ''} texts=self.parts['metadata'].split('\n') for t in texts: if not t == '': name=t.split('=')[0] content=t.replace(name+'=', '') content=clean(content) self.meta_info[name]= content self._generate_titleslide() def _generate_titleslide(self): if self.parts['title'] != '': # A title has been given self.meta_info['title'] = 
self.parts['title'] elif not 'title' in self.meta_info.keys(): self.meta_info['title'] = '' if self.parts['subtitle'] != '': # defined with a underlined text instead of :subtitle: self.meta_info['subtitle'] = self.parts['subtitle'] elif not 'subtitle' in self.meta_info.keys(): self.meta_info['subtitle'] = '' if not 'email' in self.meta_info.keys(): self.meta_info['email'] = '' if not 'institution' in self.meta_info.keys(): self.meta_info['institution'] = '' if not 'date' in self.meta_info.keys(): self.meta_info['date'] = '' # Separators self.meta_info['is_institution'] = '-' if self.meta_info['institution'] != '' else '' self.meta_info['is_author'] = '.' if self.meta_info['author'] != '' else '' self.meta_info['is_subtitle'] = '.' if self.meta_info['subtitle'] != '' else '' if self.firstslide_template == "": self.firstslide_template = """ <section class="titleslide"> <h1>%(title)s</h1> <h3>%(subtitle)s</h3> <br> <p><a href="mailto:%(email)s">%(author)s</a> %(is_institution)s %(institution)s</p> <p><small>%(email)s</small></p> <p>%(date)s</p> </section> """ self.titleslide=self.firstslide_template % self.meta_info if self.footer_template=="": self.footer_template = """<b>%(title)s %(is_subtitle)s %(subtitle)s.</b> %(author)s%(is_institution)s %(institution)s. 
%(date)s""" if self.write_footer: self.footer_html = """<footer id=\"footer\">""" + self.footer_template % self.meta_info + """<b id=\"slide_number\" style=\"padding: 1em;\"></b></footer>""" elif self.page_number: self.footer_html = """<footer><b id=\"slide_number\"></b></footer>""" else: self.footer_html = "" def _generate_header(self): header="""<!doctype html> <html lang="en"> <head> <meta charset="utf-8"> <title>%(title)s</title> <meta name="description" content="%(title)s"> %(meta)s <meta name="apple-mobile-web-app-capable" content="yes" /> <meta name="apple-mobile-web-app-status-bar-style" content="black-translucent" /> <meta name="viewport" content="width=device-width, initial-scale=1.0, maximum-scale=5.0, user-scalable=no"> <link rel="stylesheet" href="%(reveal_root)s/css/reveal.css"> %(pygments)s <link rel="stylesheet" href="rst2reveal.css"> <!--link rel="stylesheet" href="%(reveal_root)s/css/theme/default.css" id="theme"--> <link rel="stylesheet" href="%(reveal_root)s/css/theme/%(theme)s.css" id="theme"> <link rel="stylesheet" href="%(reveal_root)s/css/print/pdf.css" type="text/css" media="print"> <script type="text/javascript" src="%(mathjax_path)s?config=TeX-AMS-MML_HTMLorMML"></script> <!-- Extra styles --> <style> .reveal section { text-align: %(horizontal_center)s; } .reveal h2{ text-align: %(title_center)s; } </style> %(custom_stylesheet)s <!--[if lt IE 9]> <script src="%(reveal_root)s/lib/js/html5shiv.js"></script> <![endif]--> </head> """%{'title': self.title, 'meta' : self.parts['meta'], 'theme': self.theme, 'reveal_root' : self.reveal_root, 'pygments': '<link rel="stylesheet" href="pygments.css">' if self.is_pygments else '', 'mathjax_path': self.mathjax_path, 'horizontal_center': 'center' if self.horizontal_center else 'left', 'title_center': 'center' if self.title_center else 'left', 'custom_stylesheet' : '<link rel="stylesheet" href="%s">'%self.stylesheet if not self.stylesheet is '' else ''} return header def _generate_footer(self): if 
self.page_number: script_page_number = """ <script> // Fires each time a new slide is activated Reveal.addEventListener( 'slidechanged', function( event ) { if(event.indexh > 0) { if(event.indexv > 0) { val = event.indexh + ' - ' + event.indexv document.getElementById('slide_number').innerHTML = val; } else{ document.getElementById('slide_number').innerHTML = event.indexh; } } else { document.getElementById('slide_number').innerHTML = ''; } } ); </script>""" else: script_page_number = "" if self.init_html: footer = self.init_html else: footer=""" <script src="%(reveal_root)s/lib/js/head.min.js"></script> <script src="%(reveal_root)s/js/reveal.min.js"></script> <script> // Full list of configuration options available here: // https://github.com/hakimel/reveal.js#configuration Reveal.initialize({ controls: %(controls)s, progress: false, history: true, overview: true, keyboard: true, loop: false, touch: true, rtl: false, center: %(vertical_center)s, mouseWheel: true, fragments: true, rollingLinks: false, transition: '%(transition)s' }); </script>""" footer+=""" %(script_page_number)s %(footer)s </body> </html>""" footer = footer % {'transition' : self.transition, 'footer' : self.footer_html, 'mathjax_path': self.mathjax_path, 'reveal_root' : self.reveal_root, 'script_page_number' : script_page_number, 'vertical_center' : 'true' if self.vertical_center else 'false', 'controls': 'true' if self.controls else 'false'} return footer if __name__ == '__main__': # Create the object parser = Parser(input_file='index.rst') # Create the slides parser.create_slides()
40.375
169
0.541249
1,755
16,473
4.954416
0.216524
0.020242
0.030362
0.016906
0.190339
0.118689
0.077056
0.064405
0.025877
0.010351
0
0.00368
0.340193
16,473
407
170
40.474201
0.796301
0.215868
0
0.084615
0
0.030769
0.402674
0.087863
0
0
0
0
0
1
0.038462
false
0.003846
0.065385
0
0.126923
0.011538
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
5cea000fbf7f6010b16ca5c88addb7c29ae330a8
25
py
Python
src/__init__.py
dmitrijbozhkov/cloudcourseproject
3e62a5fafef418c1c058587abc5615b03fc2325a
[ "Apache-2.0" ]
null
null
null
src/__init__.py
dmitrijbozhkov/cloudcourseproject
3e62a5fafef418c1c058587abc5615b03fc2325a
[ "Apache-2.0" ]
7
2021-02-08T20:41:23.000Z
2022-03-12T00:21:37.000Z
src/__init__.py
dmitrijbozhkov/cloudcourseproject
3e62a5fafef418c1c058587abc5615b03fc2325a
[ "Apache-2.0" ]
null
null
null
""" Application files """
25
25
0.64
2
25
8
1
0
0
0
0
0
0
0
0
0
0
0
0.12
25
1
25
25
0.727273
0.68
0
null
0
null
0
0
null
0
0
0
null
1
null
true
0
0
null
null
null
1
1
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
0
0
0
0
0
4
5cecc676be7c87ec7a461eb8220ca39b7ad15690
235
py
Python
PyMOTW/source/webbrowser/webbrowser_get.py
axetang/AxePython
3b517fa3123ce2e939680ad1ae14f7e602d446a6
[ "Apache-2.0" ]
1
2019-01-04T05:47:50.000Z
2019-01-04T05:47:50.000Z
PyMOTW/source/webbrowser/webbrowser_get.py
axetang/AxePython
3b517fa3123ce2e939680ad1ae14f7e602d446a6
[ "Apache-2.0" ]
1
2020-07-18T03:52:03.000Z
2020-07-18T04:18:01.000Z
PyMOTW/source/webbrowser/webbrowser_get.py
axetang/AxePython
3b517fa3123ce2e939680ad1ae14f7e602d446a6
[ "Apache-2.0" ]
2
2021-03-06T04:28:32.000Z
2021-03-06T04:59:17.000Z
#!/usr/bin/env python3 # encoding: utf-8 # # Copyright (c) 2008 Doug Hellmann All rights reserved. # """ """ #end_pymotw_header import webbrowser b = webbrowser.get('lynx') b.open('https://docs.python.org/3/library/webbrowser.html')
16.785714
59
0.710638
34
235
4.852941
0.911765
0
0
0
0
0
0
0
0
0
0
0.033654
0.114894
235
13
60
18.076923
0.759615
0.459574
0
0
0
0
0.464912
0
0
0
0
0
0
1
0
false
0
0.333333
0
0.333333
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
1
0
0
0
0
1
5ced01ac9ef8ae3d7716b3b971d1835e202f6024
385
py
Python
notebooks/solutions/01-ex04-solutions.py
tramposh-olo/ml-workshop-intermediate-2-of-2
a9d81d760f875ff67796190f95818a0f4f0f5d6b
[ "MIT" ]
null
null
null
notebooks/solutions/01-ex04-solutions.py
tramposh-olo/ml-workshop-intermediate-2-of-2
a9d81d760f875ff67796190f95818a0f4f0f5d6b
[ "MIT" ]
null
null
null
notebooks/solutions/01-ex04-solutions.py
tramposh-olo/ml-workshop-intermediate-2-of-2
a9d81d760f875ff67796190f95818a0f4f0f5d6b
[ "MIT" ]
null
null
null
from sklearn.ensemble import RandomForestRegressor from sklearn.metrics import r2_score from sklearn.metrics import mean_squared_error from sklearn.metrics import mean_absolute_error rf = RandomForestRegressor(random_state=42) rf.fit(X_train, y_train) rf_pred = rf.predict(X_test) r2_score(y_test, rf_pred) mean_squared_error(y_test, rf_pred) mean_absolute_error(y_test, rf_pred)
24.0625
50
0.844156
62
385
4.903226
0.370968
0.144737
0.177632
0.236842
0.351974
0
0
0
0
0
0
0.011429
0.090909
385
15
51
25.666667
0.857143
0
0
0
0
0
0
0
0
0
0
0
0
1
0
false
0
0.4
0
0.4
0
0
0
0
null
0
0
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
1
0
0
0
0
2
5cedb3c2eba19bceedfcaed437eaaaba8aec1a1e
400
py
Python
app/api/atleta/migrations/0008_atleta_senha.py
gahhhenrikk/gerenciador-de-equipes
1418a9ebae6e9b636b4597af9596206aa6cf75c2
[ "MIT" ]
1
2020-08-13T20:59:33.000Z
2020-08-13T20:59:33.000Z
app/api/atleta/migrations/0008_atleta_senha.py
AlbericoD/gerenciador-de-equipes
e6e7d084e5980c4ef05a46e0bfa4b70b13fcca4e
[ "MIT" ]
19
2019-09-03T22:49:45.000Z
2022-02-26T20:06:12.000Z
app/api/atleta/migrations/0008_atleta_senha.py
gahhhenrikk/gerenciador-de-equipes
1418a9ebae6e9b636b4597af9596206aa6cf75c2
[ "MIT" ]
2
2019-09-03T20:16:34.000Z
2019-09-09T12:35:14.000Z
# Generated by Django 2.2.7 on 2019-11-11 23:04 from django.db import migrations, models class Migration(migrations.Migration): dependencies = [ ('atleta', '0007_remove_atleta_treinador'), ] operations = [ migrations.AddField( model_name='atleta', name='senha', field=models.CharField(default='', max_length=255), ), ]
21.052632
63
0.6
43
400
5.465116
0.767442
0
0
0
0
0
0
0
0
0
0
0.076389
0.28
400
18
64
22.222222
0.739583
0.1125
0
0
1
0
0.127479
0.07932
0
0
0
0
0
1
0
false
0
0.083333
0
0.333333
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
5cee76e061dea1eed3dca0e1ad8fc1077c7c5235
214
py
Python
rameniaapp/forms/create_list.py
awlane/ramenia
6bf8e75a1f279ac584daa4ee19927ffccaa67551
[ "MIT" ]
null
null
null
rameniaapp/forms/create_list.py
awlane/ramenia
6bf8e75a1f279ac584daa4ee19927ffccaa67551
[ "MIT" ]
null
null
null
rameniaapp/forms/create_list.py
awlane/ramenia
6bf8e75a1f279ac584daa4ee19927ffccaa67551
[ "MIT" ]
null
null
null
from django import forms from django.forms.widgets import TextInput class ListCreateForm(forms.Form): list_name = forms.CharField(label="Name", max_length=60, widget=TextInput(attrs={'class':'form-control'}))
35.666667
110
0.775701
29
214
5.655172
0.655172
0.121951
0
0
0
0
0
0
0
0
0
0.010309
0.093458
214
5
111
42.8
0.835052
0
0
0
0
0
0.098131
0
0
0
0
0
0
1
0
false
0
0.5
0
1
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
1
0
1
0
0
4
5cee8d9dc6ecef33153612d1cf40e03aa8fb60af
10,656
py
Python
toqnets/nn/nltl/functional.py
C-SUNSHINE/TOQ-Nets-PyTorch-Release
05e06bf633fb3c6b610dda9a5126ecd7af1db02f
[ "MIT" ]
6
2021-08-24T21:46:01.000Z
2022-03-09T14:34:05.000Z
toqnets/nn/nltl/functional.py
vacancy/TOQ-Nets-PyTorch-Release
53a712be28e2ecf8d2e04a9f71a2d7e8db5430e1
[ "MIT" ]
null
null
null
toqnets/nn/nltl/functional.py
vacancy/TOQ-Nets-PyTorch-Release
53a712be28e2ecf8d2e04a9f71a2d7e8db5430e1
[ "MIT" ]
2
2021-08-23T03:06:20.000Z
2021-09-30T14:17:14.000Z
#! /usr/bin/env python3 # -*- coding: utf-8 -*- # File : functional.py # Author : Jiayuan Mao # Email : maojiayuan@gmail.com # Date : 04/15/2020 # # This file is part of TOQ-Nets-PyTorch. # Distributed under terms of the MIT license. from typing import List import jactorch import torch from jacinle.utils.enum import JacEnum __all__ = [ 'TemporalPoolingImplementation', 'TemporalPoolingReduction', 'backward_pooling_1d1d', 'temporal_pooling_1d', 'temporal_pooling_2d', 'interval_pooling', 'matrix_from_diags', 'matrix_remove_diag' ] class TemporalPoolingImplementation(JacEnum): BROADCAST = 'broadcast' FORLOOP = 'forloop' class TemporalPoolingReduction(JacEnum): MAX = 'max' MIN = 'min' SOFTMAX = 'softmax' SOFTMIN = 'softmin' def masked_min(input, mask, dim, inf=1e9): mask = mask.type(input.dtype) input = input * mask + inf * (1 - mask) return input.min(dim)[0] def masked_max(input, mask, dim, inf=1e9): mask = mask.type(input.dtype) input = input * mask + inf * (mask - 1) return input.max(dim)[0] def backward_pooling_1d1d(input, implementation='forloop', reduction='max'): """ :param input: [batch, nr_frames, nr_frames, hidden_dim] """ implementation = TemporalPoolingImplementation.from_string(implementation) nr_frames = input.size(1) if implementation == TemporalPoolingImplementation.BROADCAST: indices = torch.arange(nr_frames, device=input.device) indices_i, indices_j = jactorch.meshgrid(indices, dim=0) mask = indices_i <= indices_j mask = jactorch.add_dim_as_except(mask, input, 1, 2) if reduction == 'max': return masked_max(input, mask, dim=2) elif reduction == 'min': return masked_min(input, mask, dim=2) else: raise ValueError() elif implementation == TemporalPoolingImplementation.FORLOOP: all_tensors = list() for i in range(nr_frames): if reduction == 'max': all_tensors.append(input[:, i, i:].max(dim=1)[0]) elif reduction == 'min': all_tensors.append(input[:, i, i:].min(dim=1)[0]) else: raise ValueError() return torch.stack(all_tensors, dim=1) else: raise 
ValueError('Unknown temporal pooling implementation: {}.'.format(implementation)) def temporal_pooling_1d(input, implementation='forloop'): implementation = TemporalPoolingImplementation.from_string(implementation) nr_frames = input.size(1) if implementation is TemporalPoolingImplementation.BROADCAST: indices = torch.arange(nr_frames, device=input.device) indices_i, indices_j = jactorch.meshgrid(indices, dim=0) input = jactorch.add_dim(input, 1, nr_frames) mask = indices_i <= indices_j mask = jactorch.add_dim_as_except(mask, input, 1, 2) return torch.cat((masked_min(input, mask, dim=2), masked_max(input, mask, dim=2)), dim=-1) elif implementation is TemporalPoolingImplementation.FORLOOP: all_tensors = list() for i in range(nr_frames): all_tensors.append(torch.cat((input[:, i:].min(dim=1)[0], input[:, i:].max(dim=1)[0]), dim=-1)) return torch.stack(all_tensors, dim=1) else: raise ValueError('Unknown temporal pooling implementation: {}.'.format(implementation)) def temporal_pooling_2d(input, implementation='forloop'): implementation = TemporalPoolingImplementation.from_string(implementation) nr_frames = input.size(1) indices = torch.arange(nr_frames, device=input.device) if implementation is TemporalPoolingImplementation.BROADCAST: indices_i, indices_j, indices_k = ( jactorch.add_dim(jactorch.add_dim(indices, 1, nr_frames), 2, nr_frames), jactorch.add_dim(jactorch.add_dim(indices, 0, nr_frames), 1, nr_frames), jactorch.add_dim(jactorch.add_dim(indices, 0, nr_frames), 2, nr_frames) ) input = jactorch.add_dim(input, 0, nr_frames) # input[batch, i, k, j] = input[batch, k, j] mask = indices_i <= indices_k <= indices_j mask = jactorch.add_dim_as_except(mask, input, 1, 2, 3) return torch.cat(( masked_min(input, mask, dim=2), masked_max(input, mask, dim=2) ), dim=-1) elif implementation is TemporalPoolingImplementation.FORLOOP: all_tensors = list() for i in range(nr_frames): mask = indices >= i mask = jactorch.add_dim_as_except(mask, input, 1) all_tensors.append(torch.cat(( 
masked_min(input, mask, dim=1), masked_max(input, mask, dim=1) ), dim=-1)) return torch.stack(all_tensors, dim=1) else: raise ValueError('Unknown temporal pooling implementation: {}.'.format(implementation)) def interval_pooling(input, implementation='forloop', reduction='max', beta=None): """ Args: input (torch.Tensor): 3D tensor of [batch_size, nr_frames, hidden_dim] implementation (Union[TemporalPoolingImplementation, str]): the implementation. Currently only support FORLOOP. reduction (Union[TemporalPoolingReduction, str]): reduction method. Either MAX or MIN. Return: output (torch.Tensor): 4D tensor of [batch_size, nr_frames, nr_frames, hidden_dim], where ``` output[:, i, j, :] = min output[:, k, :] where i <= k <= j ``` the k is cyclic-indexed. """ implementation = TemporalPoolingImplementation.from_string(implementation) reduction = TemporalPoolingReduction.from_string(reduction) batch_size, nr_frames = input.size()[:2] if implementation is TemporalPoolingImplementation.FORLOOP: if reduction is TemporalPoolingReduction.MAX or reduction is TemporalPoolingReduction.MIN: input_doubled = torch.cat((input, input), dim=1) # repeat the input at dim=1. output_tensors = list() output_tensors.append(input) for length in range(2, nr_frames + 1): last_tensor = output_tensors[-1] last_elems = input_doubled[:, length - 1:length - 1 + nr_frames] if reduction is TemporalPoolingReduction.MAX: this_tensor = torch.max(last_tensor, last_elems) elif reduction is TemporalPoolingReduction.MIN: this_tensor = torch.min(last_tensor, last_elems) else: raise ValueError('Wrong value {}.'.format(reduction)) output_tensors.append(this_tensor) return matrix_from_diags(output_tensors, dim=1, triu=True) else: from math import exp scale = exp(beta) input_doubled = torch.cat((input, input), dim=1) # repeat the input at dim=1. 
output_tensors = list() if reduction is TemporalPoolingReduction.SOFTMIN: scale = -scale else: assert reduction is TemporalPoolingReduction.SOFTMAX input_arg = torch.exp(input / scale) output_tensors.append((input * input_arg, input_arg)) for length in range(2, nr_frames + 1): last_tensor, last_argsum = output_tensors[-1] last_elems = input_doubled[:, length - 1:length - 1 + nr_frames] last_elems_arg = torch.exp(last_elems / scale) output_tensors.append(( last_tensor + last_elems * last_elems_arg, last_argsum + last_elems_arg )) output2 = matrix_from_diags([x[0] / x[1] for x in output_tensors], dim=1, triu=True) # Test: # X, Y = torch.meshgrid(torch.arange(length), torch.arange(length)) # upper = (X < Y).float().view(1, length, length, 1).to(output.device) # print((((output - output2) ** 2) * upper).sum()) # exit() return output2 else: raise NotImplementedError('Unknown interval pooling implementation: {}.'.format(implementation)) def matrix_from_diags(diags: List[torch.Tensor], dim: int = 1, triu: bool = False): """ Construct an N by N matrix from N diags of the matrix. Args: diags (List[torch.Tensor]): N length-N vectors regarding the 1st, 2nd, ... diags of the output matrix. They can also be same-dimensional tensors, where the matrix will be created at the dim and dim+1 axes. dim (int): the matrix will be created at dim and dim+1. triu (bool): use only the upper triangle of the matrix. Return: output: torch.Tensor """ if dim < 0: dim += diags[0].dim() size = diags[0].size() diags.append(torch.zeros_like(diags[0])) output = torch.cat(diags, dim=dim) # [..., (f+1)*f, ...] output = output.reshape(size[:dim] + (size[dim] + 1, size[dim]) + size[dim + 1:]) output = output.transpose(dim, dim + 1) output = output.reshape( size[:dim] + (size[dim] + 1, size[dim]) + size[dim + 1:]) # use to reshape for auto-contiguous. 
if triu: return output.narrow(dim, 0, size[dim]) output = torch.cat(( output.narrow(dim, 0, 1), matrix_remove_diag(output.narrow(dim, 1, size[dim]), dim=dim, move_up=True) ), dim=dim) return output def matrix_remove_diag(matrix: torch.Tensor, dim: int = 1, move_up: bool = False): """ Remove the first diag of the input matrix. The result is an N x (N-1) matrix. Args: matrix (torch.Tensor): the input matrix. It can be a tensor where the dim and dim+1 axes form a matrix. dim (int): the matrix is at dim and dim+1. move_up (bool): if True, the output matrix will be of shape (N-1) x N. In the move_left (default, move_up=False) mode, the left triangle will stay in its position and the upper triangle will move 1 element left. While in the move_up mode, the upper triangle will stay in its position, and the left triangle will move 1 element up. """ if dim < 0: dim += matrix.size() if move_up: matrix = matrix.transpose(dim, dim + 1) size = matrix.size() n = size[dim] matrix = matrix.reshape(size[:dim] + (n * n,) + size[dim + 2:]) matrix = matrix.narrow(dim, 1, n * n - 1) matrix = matrix.reshape(size[:dim] + (n - 1, n + 1) + size[dim + 2:]) matrix = matrix.narrow(dim + 1, 0, n) matrix = matrix.reshape(size[:dim] + (n, n - 1) + size[dim + 2:]) if move_up: matrix = matrix.transpose(dim, dim + 1) return matrix
41.142857
152
0.62866
1,351
10,656
4.831236
0.151739
0.020224
0.025739
0.01195
0.54175
0.448905
0.386701
0.368316
0.341811
0.325264
0
0.017163
0.256381
10,656
258
153
41.302326
0.806537
0.201858
0
0.366279
0
0
0.052291
0.008875
0
0
0
0
0.005814
1
0.046512
false
0
0.02907
0
0.203488
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
5cef3a3627a98fccd21ed376ae079a30c30f5f8d
194
py
Python
curso em video/python/mundo 3/aula 21/aula 21.py
KenzoDezotti/cursoemvideo
6eba03e67192f7384092192ed2cc1a8e59efd9b9
[ "MIT" ]
null
null
null
curso em video/python/mundo 3/aula 21/aula 21.py
KenzoDezotti/cursoemvideo
6eba03e67192f7384092192ed2cc1a8e59efd9b9
[ "MIT" ]
null
null
null
curso em video/python/mundo 3/aula 21/aula 21.py
KenzoDezotti/cursoemvideo
6eba03e67192f7384092192ed2cc1a8e59efd9b9
[ "MIT" ]
null
null
null
def teste(b): global a b += 4 a=8 c=2 print(a) print(b) print(c) #------------------------------------ #funcionamento do comando global a=5 print(a) teste(6) print(a)
13.857143
37
0.453608
29
194
3.034483
0.517241
0.204545
0
0
0
0
0
0
0
0
0
0.034247
0.247423
194
14
38
13.857143
0.568493
0.345361
0
0.25
0
0
0
0
0
0
0
0
0
1
0.083333
false
0
0
0
0.083333
0.416667
1
0
0
null
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
1
0
3
5cf060a195dbf7d7e608526fbe61c86808f684c4
598
py
Python
etcdb/execute/dml/use.py
box/etcdb
0f27846a0ca13efff9750b97a38939f66172debc
[ "Apache-2.0" ]
12
2016-10-25T18:03:49.000Z
2019-06-27T13:20:22.000Z
etcdb/execute/dml/use.py
box/etcdb
0f27846a0ca13efff9750b97a38939f66172debc
[ "Apache-2.0" ]
30
2016-10-20T23:27:09.000Z
2018-12-06T17:23:59.000Z
etcdb/execute/dml/use.py
box/etcdb
0f27846a0ca13efff9750b97a38939f66172debc
[ "Apache-2.0" ]
4
2016-10-20T23:24:48.000Z
2022-03-01T09:59:29.000Z
"""Implement USE query.""" from pyetcd import EtcdKeyNotFound from etcdb import OperationalError def use_database(etcd_client, tree): """ Return database name if it exists or raise exception. :param etcd_client: etcd client :type etcd_client: pyetcd.client.Client :param tree: Parsing tree. :type tree: SQLTree :return: Database name :raise OperationalError: if database doesn't exist. """ try: etcd_client.read('/%s' % tree.db) return tree.db except EtcdKeyNotFound: raise OperationalError("Unknown database '%s'" % tree.db)
26
65
0.682274
73
598
5.520548
0.479452
0.124069
0.08933
0
0
0
0
0
0
0
0
0
0.22408
598
22
66
27.181818
0.868534
0.449833
0
0
0
0
0.084507
0
0
0
0
0
0
1
0.125
false
0
0.25
0
0.5
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
5cf0ea4689aa7bc6979159d92505dd9ac4c6f33a
598
py
Python
main.py
dminglv/covid19
4753f1574c9035c5780c6669e5a9bd3812a4bc10
[ "MIT" ]
null
null
null
main.py
dminglv/covid19
4753f1574c9035c5780c6669e5a9bd3812a4bc10
[ "MIT" ]
null
null
null
main.py
dminglv/covid19
4753f1574c9035c5780c6669e5a9bd3812a4bc10
[ "MIT" ]
null
null
null
from libs.apis import getCountryInfo, getCountries, getCountriesNames from libs.charts import visualize def main(): arr = [] number = 10 # Get top 10 countries countries = getCountries(number) countries_names = getCountriesNames(number) for i in range(len(countries)): country = countries[i] country_names = countries_names[i] country_info = getCountryInfo(country) d = { 'country': country_names, 'info': country_info } arr.append(d) visualize(arr) if __name__ == "__main__": main()
19.290323
69
0.623746
62
598
5.790323
0.467742
0.044568
0
0
0
0
0
0
0
0
0
0.009368
0.285953
598
30
70
19.933333
0.831382
0.033445
0
0
0
0
0.032986
0
0
0
0
0
0
1
0.052632
false
0
0.105263
0
0.157895
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
5cf19f197631f63c8d9513d8b9abb97666915389
1,230
py
Python
authkit/authenticate/sso/__init__.py
bobrock/AuthKit
ba82501d9dff699be9eef33266aecd03d016cec2
[ "MIT" ]
null
null
null
authkit/authenticate/sso/__init__.py
bobrock/AuthKit
ba82501d9dff699be9eef33266aecd03d016cec2
[ "MIT" ]
null
null
null
authkit/authenticate/sso/__init__.py
bobrock/AuthKit
ba82501d9dff699be9eef33266aecd03d016cec2
[ "MIT" ]
1
2020-06-24T19:20:13.000Z
2020-06-24T19:20:13.000Z
"""Authenticate Single Sign-On Middleware ============== Single-Sign On ============== About SSO --------- Single sign on is a session/user authentication process that allows a user to provide his or her credentials once in order to access multiple applications. The single sign on authenticates the user to access all the applications he or she has been authorized to access. It eliminates future authenticaton requests when the user switches applications during that particular session. .. admonition :: sources # http://searchsecurity.techtarget.com/sDefinition/0,,sid14_gci340859,00.html # http://en.wikipedia.org/wiki/Single_sign-on AuthKit Implementations ----------------------- The SSO sub-package of Authenticate implements various SSO schemes for several University SSO systems as well as OpenID. In the future, additional SSO schemes like LID may also be supported. These systems sub-class the ``RedirectingAuthMiddleware`` from the api package as they all utilize a similar scheme of authentcation via redirection with back-end verification. .. note:: All University SSO work developed by Ben Bangert has been sponsered by Prometheus Research, LLC and contributed under the BSD license. """
34.166667
81
0.756911
170
1,230
5.464706
0.664706
0.053821
0.064586
0
0
0
0
0
0
0
0
0.010526
0.150407
1,230
35
82
35.142857
0.878469
0.993496
0
null
0
null
0
0
null
0
0
0
null
1
null
true
0
0
null
null
null
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
0
0
0
0
0
2
5cf285d186a6317622d28fa8ce936054a9456a47
4,158
py
Python
app/server.py
DavidRalph/search-mendeley
64cb3aa353d4a5571db0fb46a5b46b928af1c6b0
[ "Apache-2.0" ]
2
2020-05-15T02:06:46.000Z
2020-05-15T02:14:52.000Z
app/server.py
DavidRalph/search-mendeley
64cb3aa353d4a5571db0fb46a5b46b928af1c6b0
[ "Apache-2.0" ]
1
2018-05-16T12:55:14.000Z
2018-05-18T14:29:14.000Z
app/server.py
DavidRalph/search-mendeley
64cb3aa353d4a5571db0fb46a5b46b928af1c6b0
[ "Apache-2.0" ]
1
2020-05-15T02:14:55.000Z
2020-05-15T02:14:55.000Z
from flask import Flask, redirect, render_template, request, session import yaml from mendeley import Mendeley from mendeley.session import MendeleySession with open('config.yml') as f: config = yaml.load(f) REDIRECT_URI = 'http://localhost:5000/oauth' app = Flask(__name__) app.debug = True app.secret_key = config['clientSecret'] mendeley = Mendeley(config['clientId'], config['clientSecret'], REDIRECT_URI) @app.route('/') def login(): # TODO Check for token expiry # if 'token' in session: # return redirect('/library') auth = mendeley.start_authorization_code_flow() session['state'] = auth.state return redirect(auth.get_login_url()) @app.route('/oauth') def auth_return(): auth = mendeley.start_authorization_code_flow(state=session['state']) mendeley_session = auth.authenticate(request.url) session.clear() session['token'] = mendeley_session.token return redirect('/library') @app.route('/library') def list_documents(): if 'token' not in session: return redirect('/') query = request.args.get('query') or '' titleQuery = request.args.get('titleQuery') or '' authorQuery = request.args.get('authorQuery') or '' sourceQuery = request.args.get('sourceQuery') or '' abstractQuery = request.args.get('abstractQuery') or '' noteQuery = request.args.get('noteQuery') or '' advancedSearch = request.args.get('advancedSearch') mendeley_session = get_session_from_cookies() docs = [] # Get iterator for user's document library if advancedSearch and (titleQuery or authorQuery or sourceQuery or abstractQuery): docsIter = mendeley_session.documents.advanced_search( title=titleQuery, author=authorQuery, source=sourceQuery, abstract=abstractQuery, view='client').iter() elif query: docsIter = mendeley_session.documents.search( query, view='client').iter() else: docsIter = mendeley_session.documents.iter(view='client') # Accumulate all the documents for doc in docsIter: docs.append(doc) # Apply filter for annotations if noteQuery: nq = noteQuery.lower() noteDocIDs = set() # Find the IDs of all 
documents with at least one matching annotation for note in mendeley_session.annotations.iter(): if (note.text): text = note.text.lower() if (text.find(nq) > -1): noteDocIDs.add(note.document().id) # Filter the document list docs = [doc for doc in docs if doc.id in noteDocIDs] # Render results return render_template( 'library.html', docs=docs, query=query, titleQuery=titleQuery, authorQuery=authorQuery, sourceQuery=sourceQuery, abstractQuery=abstractQuery, noteQuery=noteQuery, advancedSearch=advancedSearch) @app.route('/document') def get_document(): if 'token' not in session: return redirect('/') mendeley_session = get_session_from_cookies() document_id = request.args.get('document_id') doc = mendeley_session.documents.get(document_id) return render_template('details.html', doc=doc) @app.route('/detailsLookup') def details_lookup(): if 'token' not in session: return redirect('/') mendeley_session = get_session_from_cookies() doi = request.args.get('doi') doc = mendeley_session.catalog.by_identifier(doi=doi) return render_template('details.html', doc=doc) @app.route('/download') def download(): if 'token' not in session: return redirect('/') mendeley_session = get_session_from_cookies() document_id = request.args.get('document_id') doc = mendeley_session.documents.get(document_id) doc_file = doc.files.list().items[0] return redirect(doc_file.download_url) @app.route('/logout') def logout(): session.pop('token', None) return redirect('/') def get_session_from_cookies(): return MendeleySession(mendeley, session['token']) if __name__ == '__main__': app.run()
26.316456
86
0.664262
478
4,158
5.631799
0.267782
0.083581
0.052006
0.042719
0.219168
0.219168
0.177563
0.165305
0.165305
0.131872
0
0.001847
0.218615
4,158
157
87
26.484076
0.826716
0.069264
0
0.186275
0
0
0.088342
0
0
0
0
0.006369
0
1
0.078431
false
0
0.039216
0.009804
0.235294
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
5cf2cde51e543e36e937cb094f4bc31af4b0de5a
3,429
py
Python
lib/googlecloudsdk/command_lib/container/gkemulticloud/endpoint_util.py
google-cloud-sdk-unofficial/google-cloud-sdk
2a48a04df14be46c8745050f98768e30474a1aac
[ "Apache-2.0" ]
2
2019-11-10T09:17:07.000Z
2019-12-18T13:44:08.000Z
lib/googlecloudsdk/command_lib/container/gkemulticloud/endpoint_util.py
google-cloud-sdk-unofficial/google-cloud-sdk
2a48a04df14be46c8745050f98768e30474a1aac
[ "Apache-2.0" ]
null
null
null
lib/googlecloudsdk/command_lib/container/gkemulticloud/endpoint_util.py
google-cloud-sdk-unofficial/google-cloud-sdk
2a48a04df14be46c8745050f98768e30474a1aac
[ "Apache-2.0" ]
1
2020-07-25T01:40:19.000Z
2020-07-25T01:40:19.000Z
# -*- coding: utf-8 -*- # # Copyright 2021 Google LLC. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Utilities for operating on different endpoints.""" from __future__ import absolute_import from __future__ import division from __future__ import unicode_literals import contextlib from apitools.base.py import exceptions as apitools_exceptions from googlecloudsdk.api_lib.container.gkemulticloud import util as api_util from googlecloudsdk.api_lib.util import apis from googlecloudsdk.calliope import base from googlecloudsdk.calliope import exceptions from googlecloudsdk.core import log from googlecloudsdk.core import properties from six.moves.urllib import parse _VALID_LOCATIONS = frozenset([ 'asia-southeast1', 'europe-west1', 'us-east4', 'us-west1', ]) def _ValidateLocation(location): if location not in _VALID_LOCATIONS: locations = list(_VALID_LOCATIONS) locations.sort() raise exceptions.InvalidArgumentException( '--location', '{bad_location} is not a valid location. Allowed values: [{location_list}].' 
.format( bad_location=location, location_list=', '.join('\'{}\''.format(r) for r in locations))) def _AppendLocation(endpoint, location): scheme, netloc, path, params, query, fragment = parse.urlparse(endpoint) netloc = '{}-{}'.format(location, netloc) return parse.urlunparse((scheme, netloc, path, params, query, fragment)) @contextlib.contextmanager def GkemulticloudEndpointOverride(location, track=base.ReleaseTrack.GA): """Context manager to override the GKE Multi-cloud endpoint temporarily. Args: location: str, location to use for GKE Multi-cloud. track: calliope_base.ReleaseTrack, Release track of the endpoint. Yields: None. """ original_ep = properties.VALUES.api_endpoint_overrides.gkemulticloud.Get() try: if not original_ep: if not location: raise ValueError('A location must be specified.') _ValidateLocation(location) regional_ep = _GetEffectiveEndpoint(location, track=track) properties.VALUES.api_endpoint_overrides.gkemulticloud.Set(regional_ep) # TODO(b/203617640): Remove handling of this exception once API has gone GA. yield except apitools_exceptions.HttpNotFoundError as e: if 'Method not found' in e.content: log.warning( 'This project may not have been added to the allow list for the Anthos Multi-Cloud API, please reach out to your GCP account team to resolve this' ) raise finally: if not original_ep: properties.VALUES.api_endpoint_overrides.gkemulticloud.Set(original_ep) def _GetEffectiveEndpoint(location, track=base.ReleaseTrack.GA): """Returns regional GKE Multi-cloud Endpoint.""" endpoint = apis.GetEffectiveApiEndpoint( api_util.MODULE_NAME, api_util.GetApiVersionForTrack(track)) return _AppendLocation(endpoint, location)
34.989796
156
0.748031
432
3,429
5.819444
0.460648
0.023866
0.019093
0.03222
0.121321
0.096659
0.068815
0.046937
0
0
0
0.007714
0.168271
3,429
97
157
35.350515
0.873773
0.284048
0
0.035714
0
0.017857
0.134384
0
0
0
0
0.010309
0
1
0.071429
false
0
0.214286
0
0.321429
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
1
0
0
0
0
0
0
0
0
0
0
1
5cf2e08da44d6148a770dc0050be540bbf3f5a61
3,025
py
Python
util/mathUtil.py
herougan/TradeHunter
1270a1d9807d1f2107db6bc78b98b584431840cc
[ "MIT" ]
null
null
null
util/mathUtil.py
herougan/TradeHunter
1270a1d9807d1f2107db6bc78b98b584431840cc
[ "MIT" ]
null
null
null
util/mathUtil.py
herougan/TradeHunter
1270a1d9807d1f2107db6bc78b98b584431840cc
[ "MIT" ]
1
2022-02-09T08:45:05.000Z
2022-02-09T08:45:05.000Z
from math import floor import talib from util.dataRetrievalUtil import try_stdev from util.langUtil import try_mean, try_int def quartile_out(quartile, data): """Takes out extremities""" pass def moving_average(period, data): avg = [] if len(data) < period: return avg for i in range(period - 1, len(data)): avg.append(try_mean(data[i - period + 1:i])) return avg def moving_stddev(period, data): avg = [] if len(data) < period: return avg for i in range(period - 1, len(data)): avg.append(try_stdev(data[i - period + 1:i])) return avg def adjusted_dev(period, data, order=1): # Does not work! above, below = data, data stdev_data = talib.STDDEV(data, period) for i, row in above.iterrows(): above.iloc[i].data += stdev_data.iloc[i].data * order for u, row in below.iterrows(): above.iloc[i].data -= stdev_data.iloc[i].data * order return above, below def index_arr_to_date(date_index, index): """Given an index, return date from date_index.""" if index < 0 or index > len(date_index): return 0 return date_index.iloc[index] def date_to_index_arr(index, dates_index, dates): """Given an index that corresponds to a date_array, find the relative index of input date.""" try: _dates = [] for date in dates: _dates.append(index[list(dates_index).index(date)]) return _dates except: print('Error! Date cannot be found. Continuing with 0.') return [0 for date in dates] def is_integer(x): y = try_int(x) if not y or y - x != 0: return False return True def get_scale_colour(col1, col2, val): """Takes in two colours and the val (between 1 and 0) to decide the colour value in the continuum from col1 to col2. 
col1 and col2 must be named colours.""" pass def to_candlestick(ticker_data, interval: str, inc=False): pass def get_scale_grey(val): hexa = 15*16+15 * val first_digit = hexa//16 second_digit = hexa - first_digit * 16 hexa = F'{to_single_hex(first_digit)}{to_single_hex(second_digit)}' return F'#{hexa}{hexa}{hexa}' def get_inverse_single_hex(val): val = try_int(val) _val = val % 16 _val = 16 - _val if _val < 10: return str(_val) elif 10 <= _val < 11: return 'A' elif 11 <= _val < 12: return 'B' elif 12 <= _val < 13: return 'C' elif 13 <= _val < 14: return 'D' elif 14 <= _val < 15: return 'E' elif 15 <= _val < 16: return 'F' return None def to_single_hex(val): val = try_int(val) _val = val % 16 if _val < 10: return str(_val) elif 10 <= _val < 11: return 'A' elif 11 <= _val < 12: return 'B' elif 12 <= _val < 13: return 'C' elif 13 <= _val < 14: return 'D' elif 14 <= _val < 15: return 'E' elif 15 <= _val < 16: return 'F' return None
23.632813
97
0.597686
454
3,025
3.823789
0.264317
0.020737
0.020737
0.017281
0.360599
0.360599
0.360599
0.360599
0.331797
0.331797
0
0.040056
0.290248
3,025
128
98
23.632813
0.768514
0.106116
0
0.505376
0
0
0.050448
0.0213
0
0
0
0
0
1
0.129032
false
0.032258
0.043011
0
0.473118
0.010753
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
5cf5f3ec8ad78eb84a1e2c101567b1b3b4dc3a79
5,057
py
Python
09-gui/terremoto_antiguo.py
Agc96/matplotlib-examples
bc2db2d14c1822b05f99356ebf538ebcd14f262a
[ "MIT" ]
null
null
null
09-gui/terremoto_antiguo.py
Agc96/matplotlib-examples
bc2db2d14c1822b05f99356ebf538ebcd14f262a
[ "MIT" ]
null
null
null
09-gui/terremoto_antiguo.py
Agc96/matplotlib-examples
bc2db2d14c1822b05f99356ebf538ebcd14f262a
[ "MIT" ]
null
null
null
# -*- coding: utf-8 -*- """ Created on Fri Oct 25 18:38:21 2019 @author: Agutierrez """ # -*- coding: utf-8 -*- """ Interfaz gráfica para el movimiento armónico de un edificio, de forma similar a un terremoto. """ import numpy as np import tkinter as tk from matplotlib.animation import FuncAnimation from matplotlib.backends.backend_tkagg import FigureCanvasTkAgg from matplotlib.figure import Figure from tkinter.messagebox import showerror # Inicializar la ventana window = tk.Tk() window.title("Movimiento armónico de un edificio") window.geometry("800x600") # Inicializar el frame de ingreso de datos frame = tk.Frame(window) frame.pack(side=tk.LEFT) # Declarar los valores por defecto base = 0.75 altura = 5.71 masa = 164200 radio = 5.76 amplitud = 10 periodo = 2 # Función auxiliar para generar datos de entrada def generar_dato_entrada(frame, text, index, default=None): variable = tk.DoubleVar(value=default) # Configurar etiqueta para los datos label = tk.Label(frame, text=text) label.grid(row=index, column=0, padx=5, pady=5) # Configurar entrada para los datos entry = tk.Entry(frame, textvariable=variable, justify="right") entry.grid(row=index, column=1, padx=5, pady=5) return variable # Inicializar datos de entrada base_var = generar_dato_entrada(frame, "Semi-base (m):", 0, base) altura_var = generar_dato_entrada(frame, "Semi-altura (m):", 1, altura) masa_var = generar_dato_entrada(frame, "Masa (kg):", 2, masa) radio_var = generar_dato_entrada(frame, "Radio (m):" , 3, radio) amplitud_var = generar_dato_entrada(frame, "Amplitud (m):", 4, amplitud) periodo_var = generar_dato_entrada(frame, "Periodo (s):", 5, periodo) def calcular_posicion(tiempo, masa, amplitud, elastica, viscosidad): """ Simula la posición de un movimiento armónico amortiguado con los datos del edificio. 
""" parte1 = -viscosidad/(2*masa) # Constante decreciente de amplitud parte2 = np.sqrt(elastica/masa - parte1**2) # Velocidad angular return amplitud * np.exp(parte1*tiempo) * np.cos(parte2*tiempo) # Generar gráfico principal principal_fig = Figure(figsize=(5, 2)) principal_ax = principal_fig.gca(xlim=(-100, 100), ylim=(0, 10)) principal_ax.grid(True) principal_canvas = FigureCanvasTkAgg(principal_fig, master=window) principal_canvas.draw() principal_canvas.get_tk_widget().grid(row=0, column=1) def calcular_aceleracion(tiempo, masa, amplitud, elastica, viscosidad): """ Simula la segunda derivada de la posición (es decir, la aceleración) de un movimiento armónico amortiguado con los datos del edificio. """ parte1 = -viscosidad/(2*masa) # Constante decreciente de amplitud parte2 = np.sqrt(elastica/masa - parte1**2) # Velocidad angular parte3 = (parte1**2 - parte2**2)*np.cos(parte2*tiempo) parte4 = (2*parte1*parte2)*np.sin(parte2*tiempo) return amplitud * np.exp(parte1*tiempo) * (parte3 - parte4) def obtener_valor(variable, mensaje_error): try: return variable.get() except Exception as ex: raise AssertionError(mensaje_error) from ex def iniciar_simulacion(): try: base = obtener_valor(base_var, "La semibase no es válida.") altura = obtener_valor(altura_var, "La semialtura no es válida.") masa = obtener_valor(masa_var, "La masa no es válida.") radio = obtener_valor(radio_var, "El radio no es válido.") amplitud = obtener_valor(amplitud_var, "La amplitud no es válida.") elastica = obtener_valor(elastica_var, "La const. elástica no es válida.") viscosidad = obtener_valor(viscosidad_var, "El coef. viscosidad no es válido.") # Calcular el ángulo entre la base y la altura assert altura != 0, "La altura no puede ser 0." alfa = np.arctan(base/altura) # Verificar que es un movimiento amortiguado msg = ("Los datos para el movimiento amortiguado no son correctos. 
" "Debe cumplirse que b^2 < 4*k*m, donde:\n" "- b es el coeficiente de viscosidad\n" "- k es la constante elástica\n" "- m es la masa del edificio.") assert viscosidad**2 < 4*elastica*masa, msg # Mostrar los gráficos frames = np.linspace(0, 100, 1001) posiciones = calcular_posicion(frames, masa, amplitud, elastica, viscosidad) principal_ax.plot(frames, posiciones, '-o') print(posiciones) except Exception as ex: showerror("Error", str(ex)) def detener_simulacion(): pass # Inicializar botones btn_start = tk.Button(frame, text="Iniciar", command=iniciar_simulacion) btn_start.grid(row=7, column=0) btn_stop = tk.Button(frame, text="Detener", command=detener_simulacion) btn_stop.grid(row=7, column=1) """ # Mostrar los gráficos frames = np.linspace(0, 100, 1001) posiciones = calcular_posicion(frames, masa, amplitud, elastica, viscosidad) principal_ax.plot(frames, posiciones, '-o') """ # Interactuar con la ventana window.mainloop()
36.381295
87
0.693692
683
5,057
5.045388
0.313324
0.027858
0.036564
0.046721
0.276262
0.228671
0.193268
0.167731
0.167731
0.167731
0
0.028304
0.196559
5,057
138
88
36.644928
0.819838
0.167886
0
0.097561
0
0
0.142708
0
0
0
0
0
0.036585
1
0.073171
false
0.012195
0.073171
0
0.195122
0.012195
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
5cf67afad445851293cf259134cd16fdc9dcfa88
1,914
py
Python
app/tests/test_questions.py
Gichia/questioner-v2
b93ffdc521e364c191b770bf1bcb93964e7fa1f3
[ "MIT" ]
null
null
null
app/tests/test_questions.py
Gichia/questioner-v2
b93ffdc521e364c191b770bf1bcb93964e7fa1f3
[ "MIT" ]
6
2019-01-22T17:35:28.000Z
2022-01-13T01:01:48.000Z
app/tests/test_questions.py
Gichia/questioner-v2
b93ffdc521e364c191b770bf1bcb93964e7fa1f3
[ "MIT" ]
null
null
null
"""File to test all meetup endpoints""" import os import psycopg2 as pg2 import json from app.tests.basetest import BaseTest data = { "title": "Test Title", "body": "body" } comment = { "comment": "Comment 1" } class TestQuestions(BaseTest): """ Class to test all user endpoints """ def test_post_question(self): """Method to test post meetup endpoint""" url = "http://localhost:5000/api/questions/1" response = self.post(url, data) result = json.loads(response.data.decode("UTF-8")) self.assertEqual(result["status"], 201) self.assertEqual(result["message"], "Succesfully added!") def test_get_questions(self): """Test all meetups questions""" url = "http://localhost:5000/api/questions/8" response = self.get_items(url) result = json.loads(response.data.decode("UTF-8")) self.assertEqual(result["status"], 200) def test_meetup_not_found(self): """Test correct response for question not found""" url = "http://localhost:5000/api/questions/0" response = self.post(url, data) result = json.loads(response.data.decode("UTF-8")) self.assertEqual(result["message"], "Meetup not found!") def test_bad_question_url(self): """Test correct response for wrong question url endpoint""" url = "http://localhost:5000/api/question/0" response = self.post(url, data) result = json.loads(response.data.decode("UTF-8")) self.assertEqual(result["message"], "Resource not found!") def test_comment_question(self): """Method to test comment question endpoint""" url = "http://localhost:5000/api/comments/1" response = self.post(url, comment) result = json.loads(response.data.decode("UTF-8")) self.assertEqual(result["status"], 201) self.delete_comment("Comment 1")
28.147059
67
0.636886
237
1,914
5.084388
0.261603
0.074689
0.104564
0.082988
0.575934
0.475519
0.337759
0.337759
0.337759
0.337759
0
0.028782
0.219436
1,914
67
68
28.567164
0.777778
0.141066
0
0.25641
0
0
0.217175
0
0
0
0
0
0.153846
1
0.128205
false
0
0.102564
0
0.25641
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
5cf7844c0843b1293636cf8069df2f14c752925e
392
py
Python
coinbase_commerce/aio/api_resources/base/create_api_resource.py
nkoshell/coinbase-commerce-python
94dc57951ac897ffbc7861dc909f413028d6a0b9
[ "Apache-2.0" ]
null
null
null
coinbase_commerce/aio/api_resources/base/create_api_resource.py
nkoshell/coinbase-commerce-python
94dc57951ac897ffbc7861dc909f413028d6a0b9
[ "Apache-2.0" ]
null
null
null
coinbase_commerce/aio/api_resources/base/create_api_resource.py
nkoshell/coinbase-commerce-python
94dc57951ac897ffbc7861dc909f413028d6a0b9
[ "Apache-2.0" ]
null
null
null
from coinbase_commerce import util from . import APIResource __all__ = ( 'CreateAPIResource', ) class CreateAPIResource(APIResource): """ Create operations mixin """ @classmethod async def create(cls, **params): response = await cls._api_client.post(cls.RESOURCE_PATH, data=params) return util.convert_to_api_object(response, cls._api_client, cls)
21.777778
77
0.706633
44
392
6
0.659091
0.045455
0.090909
0
0
0
0
0
0
0
0
0
0.19898
392
17
78
23.058824
0.840764
0.058673
0
0
0
0
0.048159
0
0
0
0
0
0
1
0
false
0
0.2
0
0.4
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
5cf7956415ed6ed6ed0b04f303733e9e61d8fd09
3,139
py
Python
src/torspray/modules/node.py
gergelykalman/torspray
a84c903b71da8c87e9e22d9bce123aaf6797abe8
[ "MIT" ]
11
2022-02-27T22:19:52.000Z
2022-03-18T19:57:06.000Z
src/torspray/modules/node.py
gergelykalman/torspray
a84c903b71da8c87e9e22d9bce123aaf6797abe8
[ "MIT" ]
null
null
null
src/torspray/modules/node.py
gergelykalman/torspray
a84c903b71da8c87e9e22d9bce123aaf6797abe8
[ "MIT" ]
null
null
null
import paramiko

from .config import CONFIG


class NodeAuthException(Exception):
    """Raised when SSH authentication to a node fails."""
    pass


class NodeTimeoutException(Exception):
    """Raised when a node cannot be reached before the connection times out."""
    pass


# TODO:
# - connection management should be improved, particularly unused connections
# - what happens if connection is lost? this should be handled gracefully
class Node:
    """A remote host reachable over SSH/SFTP via paramiko.

    Connections are opened lazily: the SSH (and SFTP) channels are only
    established the first time an operation needs them.
    """

    def __init__(self, name, addr, hostkeys, privpath, user=CONFIG.USER):
        # name: human-readable label; addr: host to connect to
        # hostkeys: path to a known-hosts file; privpath: private key file
        self.name = name
        self.addr = addr
        self.__hostkeys = hostkeys
        self.__privpath = privpath
        self.__user = user
        # Lazily-initialised connection state (see __connect_ssh/__connect_sftp).
        self.__pkey = None
        self.__conn = None
        self.__sftp = None

    def __repr__(self):
        return "{} ({})".format(self.name, self.addr)

    def __load_private_key(self):
        # Load the RSA key only once; assumes an RSA key file — other key
        # types would need a different paramiko loader (TODO confirm).
        if self.__pkey is None:
            self.__pkey = paramiko.RSAKey.from_private_key_file(self.__privpath)

    def __connect_ssh(self, ignore_missing=False):
        # Idempotent: reuse an already-open connection.
        if self.__conn is not None:
            return

        client = paramiko.SSHClient()
        client.load_host_keys(self.__hostkeys)

        # load private key
        self.__load_private_key()

        # should we add missing keys?
        if ignore_missing:
            policy = paramiko.AutoAddPolicy()
            client.set_missing_host_key_policy(policy)

        client.connect(self.addr, username=self.__user, pkey=self.__pkey, timeout=CONFIG.TIMEOUT)
        self.__conn = client

    def __connect_sftp(self):
        # SFTP rides on the SSH transport, so ensure SSH is up first.
        self.__connect_ssh()
        self.__sftp = self.__conn.open_sftp()

    def connect(self, ignore_missing=False):
        """Open the SSH connection, translating paramiko errors into node errors."""
        try:
            self.__connect_ssh(ignore_missing)
        except paramiko.ssh_exception.PasswordRequiredException:
            raise NodeAuthException("Could not authenticate to the server, either the key is bad or the password")
        except paramiko.ssh_exception.NoValidConnectionsError:
            raise NodeTimeoutException("Failed to reach node, try again later")

    def disconnect(self):
        """Close the SSH connection if one is open; safe to call repeatedly."""
        if self.__conn is not None:
            self.__conn.close()
            self.__conn = None

    def copyfile(self, direction, src, dst):
        """Transfer a file over SFTP; direction is "get" (download) or "put" (upload)."""
        if direction not in ("get", "put"):
            raise ValueError("Invalid value for direction (not 'get' or 'put')")

        self.__connect_sftp()
        if direction == "get":
            self.__sftp.get(src, dst)
        else:
            self.__sftp.put(src, dst)

    def file(self, *args, **kwargs):
        """Open a remote file object over SFTP (same signature as SFTPClient.file)."""
        self.__connect_sftp()
        f = self.__sftp.file(*args, **kwargs)
        return f

    def run(self, command, env=None):
        """Execute a command remotely; returns (exit_status, stdout, stderr)."""
        self.__connect_ssh()
        _stdin, stdout, stderr = self.__conn.exec_command(command, environment=env)

        # TODO: merge stdout and stderr!
        out = stdout.read().decode().strip()
        err = stderr.read().decode().strip()
        retval = stdout.channel.recv_exit_status()

        # WARNING: anything that comes out here can be MALICIOUS!
        return retval, out, err

    def invoke_shell(self, term="vt100", width=80, height=24):
        """Start an interactive shell channel on the node."""
        self.__connect_ssh()
        chan = self.__conn.invoke_shell(term=term, width=width, height=height)
        return chan
29.895238
114
0.638738
372
3,139
5.094086
0.365591
0.037995
0.029551
0.018997
0.020053
0.020053
0
0
0
0
0
0.003036
0.265371
3,139
104
115
30.182692
0.818734
0.090793
0
0.15942
0
0
0.063598
0
0
0
0
0.009615
0
1
0.15942
false
0.057971
0.028986
0.014493
0.304348
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
1
0
0
0
0
0
1
5cf85cc05d2e35117fbd77e06d0033570013d07b
1,676
py
Python
tests/test_texture.py
alTeska/ratcave
954d638544330970c74532339b6696f7ec6376db
[ "MIT" ]
null
null
null
tests/test_texture.py
alTeska/ratcave
954d638544330970c74532339b6696f7ec6376db
[ "MIT" ]
null
null
null
tests/test_texture.py
alTeska/ratcave
954d638544330970c74532339b6696f7ec6376db
[ "MIT" ]
null
null
null
from ratcave import texture
import pytest


@pytest.fixture
def tex():
    """A plain 2D texture with default settings."""
    return texture.Texture()


@pytest.fixture
def cubetex():
    """A cube-map texture with default settings."""
    return texture.TextureCube()


@pytest.fixture
def depthtex():
    """A depth texture with default settings."""
    return texture.DepthTexture()


def test_texture_attributes_created():
    """Width/height are stored as given and each texture gets a fresh GL id."""
    previous_id = 0
    for w, h in [(1024, 1024), (256, 128), (200, 301)]:
        tex = texture.Texture(width=w, height=h)
        assert tex.width == w
        assert tex.height == h
        assert tex.id != previous_id
        previous_id = tex.id

    cube = texture.TextureCube(width=1024, height=1024)
    assert cube.width == 1024
    assert cube.height == 1024

    # Cube maps must be square: mismatched dimensions are rejected.
    with pytest.raises(ValueError):
        cube = texture.TextureCube(width=400, height=600)


def test_texture_default_uniform_names(tex, cubetex, depthtex):
    """Each texture class exposes its own pair of uniforms and no others'."""
    for obj, uniform in ((tex, 'TextureMap'),
                         (cubetex, 'CubeMap'),
                         (depthtex, 'DepthMap')):
        assert uniform in obj.uniforms
        assert uniform + '_isBound' in obj.uniforms

    # No cross-contamination between texture types.
    assert 'CubeMap' not in tex.uniforms
    assert 'CubeMap_isBound' not in tex.uniforms
    assert 'TextureMap' not in cubetex.uniforms

    # A custom name replaces the default uniform names entirely.
    newtex = texture.Texture(name='NewMap')
    assert newtex.name == 'NewMap'
    assert 'NewMap' in newtex.uniforms
    assert 'NewMap_isBound' in newtex.uniforms
    assert 'TextureMap' not in newtex.uniforms

    # Renaming after construction swaps the uniform keys to the new name.
    newtex.name = 'Changed'
    assert newtex.name == 'Changed'
    assert 'Changed' in newtex.uniforms
    assert 'Changed_isBound' in newtex.uniforms
    assert 'NewMap' not in newtex.uniforms
    assert 'NewMap_isBound' not in newtex.uniforms
29.403509
73
0.701074
215
1,676
5.386047
0.24186
0.157168
0.096718
0.094991
0.227116
0.060449
0
0
0
0
0
0.032114
0.201074
1,676
56
74
29.928571
0.832711
0
0
0.066667
0
0
0.121864
0
0
0
0
0
0.511111
1
0.111111
false
0
0.044444
0.066667
0.222222
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
1
0
0
0
0
0
0
0
0
0
1
5cf90931f0e1b598361245b75fb7cb9889936a8b
1,829
py
Python
kaggle_environments/envs/identity/identity.py
hubcity/kaggle-environments
7fe29d7ed96f9d910cbf367f26663a9a8440bfd9
[ "Apache-2.0" ]
null
null
null
kaggle_environments/envs/identity/identity.py
hubcity/kaggle-environments
7fe29d7ed96f9d910cbf367f26663a9a8440bfd9
[ "Apache-2.0" ]
null
null
null
kaggle_environments/envs/identity/identity.py
hubcity/kaggle-environments
7fe29d7ed96f9d910cbf367f26663a9a8440bfd9
[ "Apache-2.0" ]
null
null
null
# Copyright 2020 Kaggle Inc # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import json from os import path from random import choice, gauss def random_agent(obs, config): return choice(range(config.min, config.max)) def max_agent(obs, config): return config.max def min_agent(obs, config): return config.min def avg_agent(obs, config): return (config.min + config.max) // 2 agents = { "random": random_agent, "max": max_agent, "min": min_agent, "avg": avg_agent, } def interpreter(state, env): if env.done: return state # Validate and assign actions as rewards !(min <= action <= max). for agent in state: value = 0 if isinstance(agent.action, (int, float)): value = agent.action if value < env.configuration.min or value > env.configuration.max: agent.status = f"Invalid action: {value}" else: agent.reward = value + \ gauss(0, 1) * env.configuration.noise // 1 agent.status = "DONE" return state def renderer(state, env): return json.dumps([{"action": a.action, "reward": a.reward} for a in state]) dirpath = path.dirname(__file__) jsonpath = path.abspath(path.join(dirpath, "identity.json")) with open(jsonpath) as f: specification = json.load(f)
25.760563
80
0.669218
256
1,829
4.734375
0.445313
0.049505
0.046205
0.066007
0.069307
0.047855
0
0
0
0
0
0.0092
0.227447
1,829
70
81
26.128571
0.848549
0.334609
0
0.054054
0
0
0.05574
0
0
0
0
0
0
1
0.162162
false
0
0.081081
0.135135
0.432432
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
1
0
0
0
1
5cfa55d58decf3e1c5433b4c930ab763da369af0
392
py
Python
api/app.py
loudest/vision_zero
91b094d864fabedbaa56cb9d1639aa75aa19bb00
[ "MIT" ]
2
2015-03-25T00:51:45.000Z
2015-06-18T10:54:24.000Z
api/app.py
loudest/vision_zero
91b094d864fabedbaa56cb9d1639aa75aa19bb00
[ "MIT" ]
null
null
null
api/app.py
loudest/vision_zero
91b094d864fabedbaa56cb9d1639aa75aa19bb00
[ "MIT" ]
null
null
null
#!flask/bin/python
from flask import Flask, jsonify
import requests

app = Flask(__name__)


@app.route('/')
def index():
    """Trivial health-check route."""
    return "Hello, World!"


@app.route('/signed_data', methods=['GET'])
def signed_map():
    """Proxy the Seattle open-data feed and return it wrapped in JSON."""
    resp = requests.get('http://data.seattle.gov/resource/kb3s-zi3s.json')
    payload = resp.json()
    return jsonify({'data': payload})


if __name__ == '__main__':
    app.run(debug=True)
18.666667
69
0.683673
56
392
4.5
0.589286
0.063492
0
0
0
0
0
0
0
0
0
0.005865
0.130102
392
20
70
19.6
0.733138
0.043367
0
0
0
0
0.236559
0
0
0
0
0
0
1
0.153846
false
0
0.153846
0.076923
0.461538
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
5cfb44436bfafdbb15a5fdebd26adcefad3628c4
1,809
py
Python
Ago-Dic-2021/valera-rangel-pablo/Practica 3/test_calculator_pytest.py
AnhellO/DAS_Sistemas
07b4eca78357d02d225d570033d05748d91383e3
[ "MIT" ]
41
2017-09-26T09:36:32.000Z
2022-03-19T18:05:25.000Z
Ago-Dic-2021/valera-rangel-pablo/Practica 3/test_calculator_pytest.py
AnhellO/DAS_Sistemas
07b4eca78357d02d225d570033d05748d91383e3
[ "MIT" ]
67
2017-09-11T05:06:12.000Z
2022-02-14T04:44:04.000Z
Ago-Dic-2021/valera-rangel-pablo/Practica 3/test_calculator_pytest.py
AnhellO/DAS_Sistemas
07b4eca78357d02d225d570033d05748d91383e3
[ "MIT" ]
210
2017-09-01T00:10:08.000Z
2022-03-19T18:05:12.000Z
import pytest
from calculator import *


# Square root of a negative number reports an error string instead of raising.
@pytest.mark.parametrize("input_a, input_b, expected_result", [
    (-5, 2, 'Imposible Raiz de un Negativo')
])
def testRaizDeNegativo(input_a, input_b, expected_result):
    """raiz() flags a negative radicand with an error message."""
    assert Calculator(input_a, input_b).raiz() == expected_result


# Division by zero returns an error string rather than raising.
@pytest.mark.parametrize("input_a, input_b, expected_result", [
    (5, 0, 'ZeroDivisionError: division by zero')
])
def testSobreCero(input_a, input_b, expected_result):
    """division() reports division-by-zero as a string result."""
    assert Calculator(input_a, input_b).division() == expected_result


@pytest.mark.parametrize("input_a, input_b, expected_result", [
    (100, 54, 154)
])
def testSumaDosNumeros(input_a, input_b, expected_result):
    """suma() adds the two operands."""
    assert Calculator(input_a, input_b).suma() == expected_result


@pytest.mark.parametrize("input_a, input_b, expected_result", [
    (150, 75, 75)
])
def testRestaDosNumeros(input_a, input_b, expected_result):
    """resta() subtracts the second operand from the first."""
    assert Calculator(input_a, input_b).resta() == expected_result


@pytest.mark.parametrize("input_a, input_b, expected_result", [
    (5, 2, 25)
])
def testPotencia(input_a, input_b, expected_result):
    """potencia() raises the first operand to the power of the second."""
    assert Calculator(input_a, input_b).potencia() == expected_result


# Square root with a zero second operand is reported as undefined.
@pytest.mark.parametrize("input_a, input_b, expected_result", [
    (100, 0, 'Sin Definir')
])
def testRaizCero(input_a, input_b, expected_result):
    """raiz() returns 'Sin Definir' for a zero root index."""
    assert Calculator(input_a, input_b).raiz() == expected_result


@pytest.mark.parametrize("input_a, input_b, expected_result", [
    (10, 0, 1)
])
def testPotenciaALaCero(input_a, input_b, expected_result):
    """Anything to the power of zero is 1."""
    assert Calculator(input_a, input_b).potencia() == expected_result


@pytest.mark.parametrize("input_a, input_b, expected_result", [
    (100, 23, 2300)
])
def testMultiplicacion(input_a, input_b, expected_result):
    """multiplicacion() multiplies the two operands."""
    assert Calculator(input_a, input_b).multiplicacion() == expected_result
30.661017
75
0.739635
245
1,809
5.167347
0.2
0.113744
0.208531
0.227488
0.745656
0.745656
0.745656
0.745656
0.745656
0.745656
0
0.025332
0.127142
1,809
58
76
31.189655
0.776441
0
0
0.47619
0
0
0.187396
0
0
0
0
0
0.190476
1
0.190476
false
0
0.047619
0
0.238095
0
0
0
0
null
0
1
1
0
1
1
1
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
7
5cfc0222b944d024264d6196a63452889c5cce0e
5,482
py
Python
Modules/scan.py
mafiamasterhere/EvilNet
5b93d69ff9b6b16edfd3053f1f56857173b59eb1
[ "MIT" ]
91
2020-06-19T22:08:32.000Z
2022-03-28T08:27:10.000Z
scan.py
lunnar211/CRACK_WIFI
654af29306dd6582bf3ece38e9dd2de196f09aab
[ "MIT" ]
null
null
null
scan.py
lunnar211/CRACK_WIFI
654af29306dd6582bf3ece38e9dd2de196f09aab
[ "MIT" ]
22
2020-06-29T13:19:40.000Z
2021-11-26T11:22:40.000Z
import nmap3
from colored import fg, bg, attr
import colored
import socket as sock

from Modules import intro


class nmap3_Scan():
    """Interactive menu around several nmap3 scan techniques.

    Instantiating the class prints a menu, reads the user's choice and
    dispatches to the matching scan method.  Each scan method can also be
    called directly with a host/domain and an optional nmap timing template
    (``-T0`` .. ``-T5``, default 4).

    Fixes vs. the original:
    - the bare ``except:`` in OS() now catches only Exception,
    - the dead ``self.Timing == None`` checks are gone (int(input()) can
      never return None) and an empty/non-numeric timing now falls back to
      4 instead of crashing with ValueError,
    - every scan method now actually uses its ``Host``/``Timing`` parameters
      instead of relying on the caller having set ``self.Host``/``self.Timing``,
    - the duplicated prompt code is factored into two small helpers.
    """

    def __init__(self):
        self.angry1 = colored.fg("green") + colored.attr("bold")
        self.angry = colored.fg("white") + colored.attr("bold")
        print(f"""{self.angry1}
        1 - Os
        2 - Top PORT
        3 - Xmas Scan
        4 - Fin Scan
        5 - Dns brute
        6 - UDP Scan
        7 - TCP Scan
        99 - back
        """)
        self.number = str(input("[?]>>"))
        if self.number == str(1) or "use os" in self.number:
            self.OS(self._ask_host(), self._ask_timing())
        elif self.number == str(2) or "use top port" in self.number:
            self.Top_port(self._ask_host(), self._ask_timing())
        elif self.number == str(3) or "use xmas" in self.number:
            self.Xmas_Scan(self._ask_host(), self._ask_timing())
        elif self.number == str(4) or "use fin" in self.number:
            self.Fin_Scan(self._ask_host(), self._ask_timing())
        elif self.number == str(5) or "use brute dns" in self.number:
            self.Host = str(input("%s[*] Domain >>" % (self.angry1)))
            self.Dns_Brute(self.Host)
        elif self.number == str(6) or "use udp" in self.number:
            self.UDP_Scan(self._ask_host(), self._ask_timing())
        elif self.number == str(7) or "use tcp" in self.number:
            self.TCP_Scan(self._ask_host(), self._ask_timing())
        elif self.number == str(99) or "back" in self.number:
            intro.main()

    def _ask_host(self):
        """Prompt for a target host, using the green/bold menu colour."""
        self.Host = str(input("%s[*] Host >>" % (self.angry1)))
        return self.Host

    def _ask_timing(self):
        """Prompt for the nmap timing template; default to 4 on bad input."""
        try:
            self.Timing = int(input("[*] Timing >>"))
        except ValueError:
            self.Timing = 4
        return self.Timing

    def OS(self, Host, Timing=4):
        """Run nmap OS detection against Host and print name/CPE matches."""
        self.Host = Host
        self.Timing = Timing
        print("Loading ........................................")
        try:
            scanner = nmap3.Nmap()
            detected = scanner.nmap_os_detection(str(self.Host),
                                                 args=f"-T{self.Timing} -vv")
            for entry in detected:
                print(f"System:{entry['name']} CPE : {entry['cpe']} ")
        except Exception:
            # Best-effort like the original, but no longer a bare `except:`
            # that would also swallow KeyboardInterrupt/SystemExit.
            pass

    def Top_port(self, Host, Timing=4):
        """Scan the most common ports of Host and print port/service/state."""
        self.Host = Host
        self.Timing = Timing
        print("Loading ........................................")
        self.Host = sock.gethostbyname(self.Host)
        scanner = nmap3.Nmap()
        report = scanner.scan_top_ports(self.Host, self.Timing)
        for port in report[self.Host]:
            print(port['portid'], port['service']['name'], port['state'])

    def Dns_Brute(self, Host, Timing=4):
        """Brute-force DNS subdomains of Host and print address/hostname pairs."""
        self.Host = Host  # fix: the Host parameter was previously ignored
        print("Loading ........................................")
        scanner = nmap3.NmapHostDiscovery()
        report = scanner.nmap_dns_brute_script(self.Host)
        for output in report:
            print(" " + output['address'], " " + output['hostname'] + self.angry)

    def Xmas_Scan(self, Host, Timing=4):
        """TCP Xmas scan (-sX) of Host; prints port/service/state."""
        self.Host = Host
        self.Timing = Timing
        print("Loading ........................................")
        self.Host = sock.gethostbyname(self.Host)
        scanner = nmap3.NmapHostDiscovery()
        report = scanner.nmap_portscan_only(str(self.Host),
                                            args=f" -sX -T{self.Timing} -vv")
        for port in report[self.Host]:
            print(port['portid'], port['service']['name'],
                  port['state'] + self.angry)

    def Fin_Scan(self, Host, Timing=4):
        """TCP FIN scan (-sF) of Host; prints port/service/state."""
        self.Host = Host
        self.Timing = Timing
        print("Loading ........................................")
        self.Host = sock.gethostbyname(self.Host)
        scanner = nmap3.NmapHostDiscovery()
        report = scanner.nmap_portscan_only(str(self.Host),
                                            args=f" -sF -T{self.Timing} -vv")
        for port in report[self.Host]:
            print(port['portid'], port['service']['name'],
                  port['state'] + self.angry)

    def UDP_Scan(self, Host, Timing=4):
        """UDP scan of Host; prints port/service/state."""
        self.Host = Host
        self.Timing = Timing
        print("Loading ........................................")
        self.Host = sock.gethostbyname(self.Host)
        scanner = nmap3.NmapScanTechniques()
        report = scanner.nmap_udp_scan(str(self.Host),
                                       args=f"-T{self.Timing} -vv")
        for port in report[self.Host]:
            print(port['portid'], port['service']['name'],
                  port['state'] + self.angry)

    def TCP_Scan(self, Host, Timing=4):
        """TCP scan of Host; prints port/service/state."""
        self.Host = Host
        self.Timing = Timing
        print("Loading ........................................")
        self.Host = sock.gethostbyname(self.Host)
        scanner = nmap3.NmapScanTechniques()
        report = scanner.nmap_tcp_scan(str(self.Host),
                                       args=f"-T{self.Timing} -vv")
        for port in report[self.Host]:
            print(port['portid'], port['service']['name'],
                  port['state'] + self.angry)
42.169231
91
0.506202
669
5,482
4.071749
0.13154
0.143906
0.052863
0.044053
0.733113
0.68025
0.670338
0.653451
0.601689
0.547357
0
0.011082
0.292229
5,482
129
92
42.496124
0.690979
0
0
0.431034
0
0
0.191901
0.051076
0
0
0
0
0
1
0.068966
false
0.008621
0.043103
0
0.12069
0.12931
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
5cfea876092666973d9916499f12af7785c199a1
1,524
py
Python
api/serializers.py
Wholefolio/marketmanager
5a8314707806a6790c507e1bd817891e8dc88811
[ "Apache-2.0" ]
null
null
null
api/serializers.py
Wholefolio/marketmanager
5a8314707806a6790c507e1bd817891e8dc88811
[ "Apache-2.0" ]
null
null
null
api/serializers.py
Wholefolio/marketmanager
5a8314707806a6790c507e1bd817891e8dc88811
[ "Apache-2.0" ]
null
null
null
"""Serializers module.""" from rest_framework import serializers from django_celery_results.models import TaskResult from api import models class ExchangeSerializer(serializers.ModelSerializer): """Serializer to map the Model instance into JSON format.""" class Meta: """Meta class to map serializer's fields with the model fields.""" model = models.Exchange fields = ('id', 'name', 'created', 'updated', "url", "api_url", "volume", "top_pair", "top_pair_volume", "interval", "enabled", "last_data_fetch", "logo") read_only_fields = ('created', 'updated') def get_type(self, obj): return obj.get_type_display() class MarketSerializer(serializers.ModelSerializer): class Meta: model = models.Market fields = ("id", "name", "exchange", "volume", "last", "bid", "ask", "base", "quote", "updated") class ExchangeStatusSerializer(serializers.ModelSerializer): """Serializer to map the Model instance into JSON format.""" class Meta: """Meta class to map serializer's fields with the model fields.""" model = models.ExchangeStatus fields = ('id', 'exchange', 'last_run', 'last_run_id', 'last_run_status', 'time_started', 'running') class TaskResultSerializer(serializers.ModelSerializer): class Meta: model = TaskResult fields = ("id", "date_done", "meta", "status", "result", "traceback", "task_id")
31.75
75
0.631234
163
1,524
5.766871
0.435583
0.110638
0.076596
0.080851
0.382979
0.297872
0.297872
0.297872
0.297872
0.297872
0
0
0.238189
1,524
47
76
32.425532
0.809647
0.164698
0
0.148148
0
0
0.207698
0
0
0
0
0
0
1
0.037037
false
0
0.111111
0.037037
0.481481
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
5cfff87f1a992e437041fea9fa36fffc753143d6
3,228
py
Python
invenio_oaiserver/views/server.py
ParthS007/invenio-oaiserver
6fa5d2e2a770377ffe34a44bc60b0a817853da95
[ "MIT" ]
null
null
null
invenio_oaiserver/views/server.py
ParthS007/invenio-oaiserver
6fa5d2e2a770377ffe34a44bc60b0a817853da95
[ "MIT" ]
null
null
null
invenio_oaiserver/views/server.py
ParthS007/invenio-oaiserver
6fa5d2e2a770377ffe34a44bc60b0a817853da95
[ "MIT" ]
null
null
null
# -*- coding: utf-8 -*-
#
# This file is part of Invenio.
# Copyright (C) 2015-2018 CERN.
# Copyright (C) 2022 Graz University of Technology.
#
# Invenio is free software; you can redistribute it and/or modify it
# under the terms of the MIT License; see LICENSE file for more details.

"""OAI-PMH 2.0 server."""

from flask import Blueprint, make_response
from invenio_pidstore.errors import PIDDoesNotExistError
from itsdangerous import BadSignature
from lxml import etree
from marshmallow.exceptions import ValidationError
from webargs.flaskparser import use_args

from .. import response as xml
from ..errors import OAINoRecordsMatchError
from ..verbs import make_request_validator

blueprint = Blueprint(
    'invenio_oaiserver',
    __name__,
    static_folder='../static',
    template_folder='../templates',
)


@blueprint.errorhandler(ValidationError)
@blueprint.errorhandler(422)
def validation_error(exception):
    """Return formatter validation error.

    Maps marshmallow/webargs validation failures onto OAI-PMH error codes:
    problems with the ``verb`` argument become ``badVerb``, everything else
    becomes ``badArgument``.
    """
    # webargs may attach messages directly or nest them under `.data`.
    messages = getattr(exception, 'messages', None)
    if messages is None:
        messages = getattr(exception, 'data', {'messages': None})['messages']

    def extract_errors():
        """Extract errors from exception."""
        if isinstance(messages, dict):
            # Per-field messages: emit one OAI error per offending field.
            for field, message in messages.items():
                if field == 'verb':
                    yield 'badVerb', '\n'.join(message)
                else:
                    yield 'badArgument', '\n'.join(message)
        else:
            # Flat message list: attribute it to each named field, or to a
            # generic badArgument when no field names are available.
            for field in exception.field_names:
                if field == 'verb':
                    yield 'badVerb', '\n'.join(messages)
                else:
                    yield 'badArgument', '\n'.join(messages)
            if not exception.field_names:
                yield 'badArgument', '\n'.join(messages)

    return (etree.tostring(xml.error(extract_errors())),
            422,
            {'Content-Type': 'text/xml'})


@blueprint.errorhandler(PIDDoesNotExistError)
def pid_error(exception):
    """Handle PID Exceptions."""
    # Unknown identifier -> OAI-PMH `idDoesNotExist` error document.
    return (etree.tostring(xml.error([('idDoesNotExist',
                                       'No matching identifier')])),
            422,
            {'Content-Type': 'text/xml'})


@blueprint.errorhandler(BadSignature)
def resumptiontoken_error(exception):
    """Handle resumption token exceptions."""
    # itsdangerous signature failure -> OAI-PMH `badResumptionToken`.
    return (etree.tostring(xml.error([(
        'badResumptionToken',
        'The value of the resumptionToken argument is invalid or expired.')
    ])),
        422,
        {'Content-Type': 'text/xml'})


@blueprint.errorhandler(OAINoRecordsMatchError)
def no_records_error(exception):
    """Handle no records match Exceptions."""
    return (etree.tostring(xml.error([('noRecordsMatch', '')])),
            422, {'Content-Type': 'text/xml'})


@blueprint.route('/oai2d', methods=['GET', 'POST'])
@use_args(make_request_validator)
def response(args):
    """Response endpoint.

    Dispatches to the handler in ``..response`` named after the (validated,
    lowercased) OAI-PMH verb and serializes its element tree as XML.
    """
    e_tree = getattr(xml, args['verb'].lower())(**args)
    response = make_response(etree.tostring(
        e_tree,
        pretty_print=True,
        xml_declaration=True,
        encoding='UTF-8',
    ))
    response.headers['Content-Type'] = 'text/xml'
    return response
31.339806
77
0.629492
340
3,228
5.891176
0.405882
0.052421
0.037444
0.044933
0.218173
0.161258
0.090864
0
0
0
0
0.013109
0.243804
3,228
102
78
31.647059
0.807456
0.145601
0
0.188406
0
0
0.141805
0
0
0
0
0
0
1
0.086957
false
0
0.130435
0
0.289855
0.130435
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
cf01e9ecb22b1e70b4470ec1161d194bd76c4e67
6,847
py
Python
searching/models.py
netvigator/auctions
f88bcce800b60083a5d1a6f272c51bb540b8342a
[ "MIT" ]
null
null
null
searching/models.py
netvigator/auctions
f88bcce800b60083a5d1a6f272c51bb540b8342a
[ "MIT" ]
13
2019-12-12T03:07:55.000Z
2022-03-07T12:59:27.000Z
searching/models.py
netvigator/auctions
f88bcce800b60083a5d1a6f272c51bb540b8342a
[ "MIT" ]
null
null
null
from django.db import models

from core.utils import getReverseWithUpdatedQuery
from ebayinfo.models import EbayCategory
from categories.models import Category
from core.dj_import import get_user_model

User = get_user_model()

from searching import ALL_PRIORITIES

from pyPks.Time.Output import getIsoDateTimeFromDateTime

# ### models can be FAT but not too FAT! ###


class Search(models.Model):
    """A saved ebay search: key words and/or an ebay category, plus
    processing bookkeeping (priority, last run times, owner)."""

    # Short human-readable label for the search (not sent to ebay).
    cTitle = models.CharField( 'short description',
            help_text = 'This is just a short description -- ebay will not search for this<br>'
                        'you must have a) key word(s) and/or b) an ebay category',
            max_length = 38, null = True )
    # The actual terms passed to the ebay search box.
    cKeyWords = models.TextField(
            'key words -- search for these (maximum length 350 characters)',
            max_length = 350, null = True, blank = True,
            help_text = 'What you type here will go into the ebay search box '
                        '-- mulitple terms will result in an AND search '
                        '(ebay will look for all terms).<br>'
                        'search for red OR green handbags as follows: '
                        'handbags (red,green)<br>'
                        'TIPS: to exclude words, put a - in front '
                        '(without any space),<br>'
                        'search handbags but exclude red as follows: '
                        'handbags -red<br>'
                        'search for handbags but '
                        'exclude red and green as follows: handbags -red -green' )
    # max length for a single key word is 98
    #models.ForeignKey( EbayCategory, models.PositiveIntegerField(
    iEbayCategory = models.ForeignKey( EbayCategory,
            on_delete=models.CASCADE,
            verbose_name = 'ebay category',
            null = True, blank = True,
            help_text = 'Limit search to items listed in this category' )
    # ### after updating ebay categories, check whether     ###
    # ### searches that were connected are still connected !!! ###
    # Raw ebay category number pasted by the user (fallback for the FK above).
    iDummyCategory = models.PositiveIntegerField( 'ebay category number',
            null = True, blank = True,
            help_text = 'Limit search to items listed in this category<br>'
                        'copy the category number from ebay and paste here!!! (sorry)' )
    # Two-character priority code, e.g. 'A1' (high) .. 'Z9' (low).
    cPriority = models.CharField( 'processing priority',
            max_length = 2, null = True,
            choices = ALL_PRIORITIES,
            help_text = 'high priority A1 A2 A3 ... Z9 low priority' )
    bGetBuyItNows = models.BooleanField(
            "also get 'Buy It Nows' (fixed price non auctions)?",
            help_text = 'You may get an avalanche of useless junk '
                        'if you turn this on -- be careful!',
            blank = True, null = True, default = False )
    bInventory = models.BooleanField(
            "also get 'Store Inventory' "
            "(fixed price items in ebay stores)?",
            help_text = 'You may get an avalanche of useless junk '
                        'if you turn this on -- be careful!',
            blank = True, null = True, default = False )
    # Local category this search maps to, used to classify found items.
    iMyCategory = models.ForeignKey( Category,
            on_delete=models.DO_NOTHING,
            verbose_name = 'my category that matches ebay category',
            null = True, blank = True,
            help_text = 'Example: if you have a category for "Manuals" and '
                        'this search is in the ebay category "Vintage Manuals" '
                        'put your "Manuals" category here.<br>If you have a '
                        'category "Widgets" and this search finds an item '
                        'with "Widget Manual" in the title, the bot will know '
                        'this item is for a manual, NOT a widget.')
    # Bookkeeping for the most recent run of this search.
    tBegSearch = models.DateTimeField( 'last search started', null = True )
    tEndSearch = models.DateTimeField( 'last search completed', null = True )
    cLastResult = models.TextField( 'last search outcome', null = True )
    iUser = models.ForeignKey( User, on_delete=models.CASCADE,
            verbose_name = 'Owner' )
    tCreate = models.DateTimeField( 'created on', auto_now_add= True )
    tModify = models.DateTimeField( 'updated on', auto_now = True )

    def __str__(self):
        return self.cTitle

    class Meta:
        verbose_name_plural = 'searches'
        db_table = 'searching'
        # A user cannot reuse a priority code, a title, or the same
        # keywords+category combination across two searches.
        unique_together = (
                ( 'iUser', 'cPriority' ),
                ( 'iUser', 'cTitle' ),
                ( 'iUser', 'cKeyWords', 'iEbayCategory',) )
        ordering = ('cTitle',)

    def get_absolute_url(self):
        # tModify is passed in the query string (presumably for cache
        # busting — TODO confirm against getReverseWithUpdatedQuery).
        #
        return getReverseWithUpdatedQuery(
                'searching:detail',
                kwargs = { 'pk': self.pk, 'tModify': self.tModify } )


class SearchLog(models.Model):
    """One execution record of a Search: timings, counts and outcome."""

    iSearch = models.ForeignKey( Search, on_delete=models.CASCADE,
            verbose_name = 'Search that first found this item' )
    tBegSearch = models.DateTimeField( 'search started', db_index = True )
    tEndSearch = models.DateTimeField( 'search completed', null = True )
    tBegStore = models.DateTimeField( 'processing started', null = True )
    tEndStore = models.DateTimeField( 'processing completed', null = True )
    # Counters gathered while storing results.
    iItems = models.PositiveIntegerField( 'items found', null = True )
    iStoreItems = models.PositiveIntegerField( 'items stored', null = True )
    iStoreUsers = models.PositiveIntegerField( 'stored for owner', null = True )
    iItemHits = models.PositiveIntegerField( 'have category, brand & model',
            null = True )
    cResult = models.TextField( 'search outcome', null = True )
    cStoreDir = models.CharField( 'search files directory',
            max_length = 10, null = True, blank = True )

    def __str__(self):
        # Prefer the storage directory as the label; fall back to the
        # ISO-formatted start time when no directory was recorded.
        sSayDir = ( self.cStoreDir
                    if self.cStoreDir else
                    getIsoDateTimeFromDateTime( self.tBegSearch ) )
        return '%s - %s' % ( sSayDir, self.iSearch.cTitle )

    class Meta:
        verbose_name_plural = 'searchlogs'
        db_table = verbose_name_plural
47.548611
91
0.535417
668
6,847
5.411677
0.33982
0.04426
0.017981
0.023513
0.17538
0.151591
0.100415
0.100415
0.08686
0.08686
0
0.004033
0.384402
6,847
143
92
47.881119
0.853618
0.035636
0
0.205128
0
0
0.282781
0
0
0
0
0
0
1
0.025641
false
0
0.059829
0.017094
0.358974
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
cf025b07d576f1c46ac2887dea5c3dde0c945bf5
367
py
Python
Maths/fibonacciSeries.py
baiyongzhen/python
a8f367d2136f1aaeab63345e160e59fe16d62a11
[ "MIT" ]
1
2018-10-16T13:41:06.000Z
2018-10-16T13:41:06.000Z
Maths/fibonacciSeries.py
baiyongzhen/python
a8f367d2136f1aaeab63345e160e59fe16d62a11
[ "MIT" ]
null
null
null
Maths/fibonacciSeries.py
baiyongzhen/python
a8f367d2136f1aaeab63345e160e59fe16d62a11
[ "MIT" ]
2
2018-10-03T15:47:30.000Z
2019-10-23T16:35:48.000Z
# Fibonacci Sequence Using Recursion


def recur_fibo(n):
    """Return the n-th Fibonacci number (F(0)=0, F(1)=1) recursively.

    Note: naive recursion is O(2**n); acceptable only for the small
    inputs this script expects.
    """
    if n <= 1:
        return n
    return recur_fibo(n - 1) + recur_fibo(n - 2)


def main():
    """Prompt for a term count and print that many Fibonacci numbers."""
    limit = int(input("How many terms to include in fionacci series:"))
    if limit <= 0:
        print("Plese enter a positive integer")
    else:
        print("Fibonacci series:")
        for i in range(limit):
            print(recur_fibo(i))


# Fix: the prompt/print logic previously ran at module import time, which
# made the module impossible to import (it blocked on input()) and
# untestable.  Guarding it preserves the script behavior.
if __name__ == "__main__":
    main()
21.588235
67
0.640327
56
367
4.125
0.607143
0.155844
0.12987
0
0
0
0
0
0
0
0
0.014286
0.237057
367
16
68
22.9375
0.810714
0.092643
0
0.166667
0
0
0.277946
0
0
0
0
0
0
1
0.083333
false
0
0
0
0.166667
0.25
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
cf02f2ac21df464b0428c1c4c3f886070ec8055f
6,319
py
Python
extract_tokens.py
anuprulez/similar_galaxy_tools
19eefa567fdb56781dc5f42a0bea8af0969f5978
[ "MIT" ]
2
2018-02-02T18:52:12.000Z
2018-02-03T08:36:44.000Z
extract_tokens.py
anuprulez/similar_galaxy_tools
19eefa567fdb56781dc5f42a0bea8af0969f5978
[ "MIT" ]
null
null
null
extract_tokens.py
anuprulez/similar_galaxy_tools
19eefa567fdb56781dc5f42a0bea8af0969f5978
[ "MIT" ]
1
2018-02-03T08:36:57.000Z
2018-02-03T08:36:57.000Z
""" Extract useful tokens from multiple attributes of Galaxy tools """ import os import numpy as np import pandas as pd import operator import json import utils class ExtractTokens: @classmethod def __init__( self, tools_data_path ): self.tools_data_path = tools_data_path @classmethod def _read_file( self ): """ Read the description of all tools """ return pd.read_csv( self.tools_data_path ) @classmethod def _extract_tokens( self, file, tokens_source ): """ Extract tokens from the description of all tools """ tools_tokens_source = dict() for source in tokens_source: tools_tokens = dict() for row in file.iterrows(): tokens = self._get_tokens_from_source( row[ 1 ], source ) tools_tokens[ row[ 1 ][ "id" ] ] = tokens tools_tokens_source[ source ] = tools_tokens return tools_tokens_source @classmethod def _get_tokens_from_source( self, row, source ): """ Fetch tokens from different sources namely input and output files, names and desc of tools and further help and EDAM sources """ tokens = '' if source == 'input_output': # remove duplicate file type individually from input and output file types and merge input_tokens = utils._restore_space( utils._get_text( row, "inputs" ) ) input_tokens = utils._remove_duplicate_file_types( input_tokens ) output_tokens = utils._restore_space( utils._get_text( row, "outputs" ) ) output_tokens = utils._remove_duplicate_file_types( output_tokens ) if input_tokens is not "" and output_tokens is not "": tokens = input_tokens + ' ' + output_tokens elif output_tokens is not "": tokens = output_tokens elif input_tokens is not "": tokens = input_tokens elif source == 'name_desc_edam': tokens = utils._restore_space( utils._get_text( row, "name" ) ) + ' ' tokens += utils._restore_space( utils._get_text( row, "description" ) ) + ' ' tokens += utils._get_text( row, "edam_topics" ) elif source == "help_text": tokens = utils._get_text( row, "help" ) return utils._remove_special_chars( tokens ) @classmethod def _refine_tokens( self, tokens ): """ 
Refine the set of tokens by removing words like 'to', 'with' """ k = 1.75 b = 0.75 stop_words_file = "stop_words.txt" all_stopwords = list() refined_tokens_sources = dict() # collect all the stopwords with open( stop_words_file ) as file: lines = file.read() all_stopwords = lines.split( "\n" ) for source in tokens: refined_tokens = dict() files = dict() inverted_frequency = dict() file_id = -1 total_file_length = 0 for item in tokens[ source ]: file_id += 1 file_tokens = tokens[ source ][ item ].split(" ") if source in "name_desc_edam" or source in "help_text": file_tokens = utils._clean_tokens( file_tokens, all_stopwords ) total_file_length += len( file_tokens ) term_frequency = dict() for token in file_tokens: if token is not '': file_ids = list() if token not in inverted_frequency: file_ids.append( file_id ) else: file_ids = inverted_frequency[ token ] if file_id not in file_ids: file_ids.append( file_id ) inverted_frequency[ token ] = file_ids # for term frequency if token not in term_frequency: term_frequency[ token ] = 1 else: term_frequency[ token ] += 1 files[ item ] = term_frequency N = len( files ) average_file_length = float( total_file_length ) / N # find BM25 score for each token of each tool. It helps to determine # how important each word is with respect to the tool and other tools for item in files: file_item = files[ item ] file_length = len( file_item ) for token in file_item: tf = file_item[ token ] # normalize the term freq of token for each document tf = float( tf ) / file_length idf = np.log2( N / len( inverted_frequency[ token ] ) ) alpha = ( 1 - b ) + ( float( b * file_length ) / average_file_length ) tf_star = tf * float( ( k + 1 ) ) / ( k * alpha + tf ) tf_idf = tf_star * idf file_item[ token ] = tf_idf # filter tokens based on the BM25 scores and stop words. 
Not all tokens are important for item in files: file_tokens = files[ item ] tokens_scores = [ ( token, score ) for ( token, score ) in file_tokens.items() ] sorted_tokens = sorted( tokens_scores, key=operator.itemgetter( 1 ), reverse=True ) refined_tokens[ item ] = sorted_tokens tokens_file_name = 'tokens_' + source + '.txt' token_file_path = os.path.join( os.path.dirname( self.tools_data_path ) + '/' + tokens_file_name ) with open( token_file_path, 'w' ) as file: file.write( json.dumps( refined_tokens ) ) file.close() refined_tokens_sources[ source ] = refined_tokens return refined_tokens_sources @classmethod def get_tokens( self, data_source ): """ Get refined tokens """ print( "Extracting tokens..." ) dataframe = self._read_file() tokens = self._extract_tokens( dataframe, data_source ) return dataframe, self._refine_tokens( tokens )
42.126667
110
0.549929
704
6,319
4.673295
0.21733
0.027356
0.021885
0.027356
0.159574
0.084498
0.046201
0.046201
0
0
0
0.005304
0.373477
6,319
149
111
42.409396
0.825714
0.118531
0
0.106195
0
0
0.028713
0
0
0
0
0
0
1
0.053097
false
0
0.053097
0
0.159292
0.00885
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
cf064ee4360718e1bf827036501177dcef34c6bd
2,209
py
Python
server/gestion/models/product.py
JetLightStudio/Jet-Gest-stock-management
333cbc3dd1b379f53f67250fbd581cbce8e20ca8
[ "MIT" ]
1
2021-08-18T18:53:02.000Z
2021-08-18T18:53:02.000Z
server/gestion/models/product.py
JetLightStudio/Jet-Gest-stock-management
333cbc3dd1b379f53f67250fbd581cbce8e20ca8
[ "MIT" ]
null
null
null
server/gestion/models/product.py
JetLightStudio/Jet-Gest-stock-management
333cbc3dd1b379f53f67250fbd581cbce8e20ca8
[ "MIT" ]
1
2021-08-04T23:53:52.000Z
2021-08-04T23:53:52.000Z
from django.core.files import storage from django.db import models from gestion.utils import products_path_and_rename from gestion.models.category import Category from gestion.models.subCategory import SubCategory class Product(models.Model): title = models.CharField(max_length=40, unique=True) description = models.TextField(default="", blank=True) creationDate = models.DateField(auto_now_add=True) image = models.ImageField(upload_to=products_path_and_rename ,blank=True , null=True) forcePrice = models.BooleanField(default=False) price = models.FloatField(default=0) categories = models.ManyToManyField(Category, blank=True, through="ProductCategoriesMany") subCategories = models.ManyToManyField(SubCategory, blank=True, through="ProductSubCategoriesMany") def getPrice(self): from .entries import Entries if self.forcePrice: return self.price else: entries = Entries.objects.filter(product=self) sumPrices = 0 count = entries.count() if count <= 0: count = 1 for entry in entries: sumPrices += entry.unitPrice if sumPrices == 0: return self.price else: return sumPrices / count def quantity(self): from .SalesAndBalance import SalesAndBalance sales, c = SalesAndBalance.objects.get_or_create(chooseProduct=self) return sales.balance def salesCount(self): from .SalesAndBalance import SalesAndBalance sales, c = SalesAndBalance.objects.get_or_create(chooseProduct=self) return sales.totalSales def __str__(self) -> str: return "{} - {}".format(self.id, self.title) class ProductCategoriesMany(models.Model): product = models.ForeignKey(Product, on_delete=models.CASCADE) category = models.ForeignKey(Category, on_delete=models.CASCADE) class ProductSubCategoriesMany(models.Model): product = models.ForeignKey(Product, on_delete=models.CASCADE) subCategory = models.ForeignKey(SubCategory, on_delete=models.CASCADE)
37.440678
104
0.673155
230
2,209
6.369565
0.373913
0.024573
0.038225
0.057338
0.236177
0.236177
0.236177
0.236177
0.236177
0.236177
0
0.004192
0.244002
2,209
58
105
38.086207
0.873054
0
0
0.217391
0
0
0.024175
0.020921
0
0
0
0
0
1
0.086957
false
0
0.173913
0.021739
0.717391
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
1
0
0
1
cf07119cf6e3bdb2abce6a77371ad7da0041ab09
2,260
py
Python
muk_utils/tests/test_search_parents.py
juazisco/gestion_rifa
bce6b75f17cb5ab2df7e2f7dd5141fc85a1a5bfb
[ "MIT" ]
null
null
null
muk_utils/tests/test_search_parents.py
juazisco/gestion_rifa
bce6b75f17cb5ab2df7e2f7dd5141fc85a1a5bfb
[ "MIT" ]
null
null
null
muk_utils/tests/test_search_parents.py
juazisco/gestion_rifa
bce6b75f17cb5ab2df7e2f7dd5141fc85a1a5bfb
[ "MIT" ]
null
null
null
########################################################################## # # Copyright (C) 2017 MuK IT GmbH # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU Affero General Public License as # published by the Free Software Foundation, either version 3 of the # License, or (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Affero General Public License for more details. # # You should have received a copy of the GNU Affero General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. # ########################################################################## import os import base64 import logging from odoo import exceptions from odoo.tests import common _path = os.path.dirname(os.path.dirname(__file__)) _logger = logging.getLogger(__name__) class SearchParentTestCase(common.TransactionCase): def setUp(self): super(SearchParentTestCase, self).setUp() self.model = self.env['res.partner.category'] def tearDown(self): super(SearchParentTestCase, self).tearDown() def _evaluate_parent_result(self, parents, records): for parent in parents: self.assertTrue( not parent.parent_id or parent.parent_id.id not in records.ids ) def test_search_parents(self): records = self.model.search([]) parents = self.model.search_parents([]) self._evaluate_parent_result(parents, records) def test_search_parents_domain(self): records = self.model.search([('id', '!=', 1)]) parents = self.model.search_parents([('id', '!=', 1)]) self._evaluate_parent_result(parents, records) def test_search_read_parents(self): parents = self.model.search_parents([]) read_names = parents.read(['name']) search_names = self.model.search_read_parents([], ['name']) self.assertTrue(read_names == search_names)
36.451613
78
0.620354
266
2,260
5.12782
0.424812
0.046188
0.065982
0.064516
0.25
0.148094
0.124633
0.07478
0.07478
0
0
0.005155
0.227434
2,260
61
79
37.04918
0.77606
0.305752
0
0.125
0
0
0.026866
0
0
0
0
0
0.0625
1
0.1875
false
0
0.15625
0
0.375
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
cf07d61453467fdaf7a52e8c01b18b42bfc7226e
4,514
py
Python
daal4py/df_regr.py
PivovarA/scikit-learn_bench
52e96f28eda3ca25d0f51594041fd06ee3f8d4c2
[ "MIT" ]
null
null
null
daal4py/df_regr.py
PivovarA/scikit-learn_bench
52e96f28eda3ca25d0f51594041fd06ee3f8d4c2
[ "MIT" ]
null
null
null
daal4py/df_regr.py
PivovarA/scikit-learn_bench
52e96f28eda3ca25d0f51594041fd06ee3f8d4c2
[ "MIT" ]
2
2020-08-07T16:19:32.000Z
2020-08-07T16:22:12.000Z
# Copyright (C) 2018-2020 Intel Corporation # # SPDX-License-Identifier: MIT import argparse from bench import ( parse_args, measure_function_time, load_data, print_output, rmse_score, float_or_int, getFPType ) from daal4py import ( decision_forest_regression_training, decision_forest_regression_prediction, engines_mt2203 ) def df_regr_fit(X, y, n_trees=100, seed=12345, n_features_per_node=0, max_depth=0, min_impurity=0, bootstrap=True): fptype = getFPType(X) features_per_node = X.shape[1] if n_features_per_node > 0 and n_features_per_node <= features_per_node: features_per_node = n_features_per_node engine = engines_mt2203(seed=seed, fptype=fptype) algorithm = decision_forest_regression_training( fptype=fptype, method='defaultDense', nTrees=n_trees, observationsPerTreeFraction=1., featuresPerNode=features_per_node, maxTreeDepth=max_depth, minObservationsInLeafNode=1, engine=engine, impurityThreshold=min_impurity, varImportance='MDI', resultsToCompute='', memorySavingMode=False, bootstrap=bootstrap ) df_regr_result = algorithm.compute(X, y) return df_regr_result def df_regr_predict(X, training_result): algorithm = decision_forest_regression_prediction( fptype='float' ) result = algorithm.compute(X, training_result.model) return result.prediction if __name__ == '__main__': parser = argparse.ArgumentParser(description='daal4py random forest ' 'regression benchmark') parser.add_argument('--criterion', type=str, default='mse', choices=('mse'), help='The function to measure the quality of a split') parser.add_argument('--num-trees', type=int, default=100, help='Number of trees in the forest') parser.add_argument('--max-features', type=float_or_int, default=0, help='Upper bound on features used at each split') parser.add_argument('--max-depth', type=int, default=0, help='Upper bound on depth of constructed trees') parser.add_argument('--min-samples-split', type=float_or_int, default=2, help='Minimum samples number for node splitting') 
parser.add_argument('--max-leaf-nodes', type=int, default=None, help='Grow trees with max_leaf_nodes in best-first fashion' 'if it is not None') parser.add_argument('--min-impurity-decrease', type=float, default=0., help='Needed impurity decrease for node splitting') parser.add_argument('--no-bootstrap', dest='bootstrap', default=True, action='store_false', help="Don't control bootstraping") parser.add_argument('--use-sklearn-class', action='store_true', help='Force use of ' 'sklearn.ensemble.RandomForestRegressor') params = parse_args(parser, prefix='daal4py') # Load data X_train, X_test, y_train, y_test = load_data( params, add_dtype=True, label_2d=True) columns = ('batch', 'arch', 'prefix', 'function', 'threads', 'dtype', 'size', 'num_trees', 'time') if isinstance(params.max_features, float): params.max_features = int(X_train.shape[1] * params.max_features) # Time fit and predict fit_time, res = measure_function_time( df_regr_fit, X_train, y_train, n_trees=params.num_trees, n_features_per_node=params.max_features, max_depth=params.max_depth, min_impurity=params.min_impurity_decrease, bootstrap=params.bootstrap, seed=params.seed, params=params) yp = df_regr_predict(X_train, res) train_rmse = rmse_score(yp, y_train) predict_time, yp = measure_function_time( df_regr_predict, X_test, res, params=params) test_rmse = rmse_score(yp, y_test) print_output(library='daal4py', algorithm='decision_forest_regression', stages=['training', 'prediction'], columns=columns, params=params, functions=['df_regr.fit', 'df_regr.predict'], times=[fit_time, predict_time], accuracy_type='rmse', accuracies=[train_rmse, test_rmse], data=[X_train, X_test])
36.699187
83
0.636907
530
4,514
5.158491
0.316981
0.019751
0.049378
0.029261
0.125457
0.060351
0.019751
0
0
0
0
0.013209
0.262074
4,514
122
84
37
0.807565
0.022375
0
0
0
0
0.175176
0.019741
0
0
0
0
0
1
0.022222
false
0
0.044444
0
0.088889
0.022222
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
cf0923f79d25f11e0f15404bd9b6b8cdac76eb86
8,658
py
Python
src/feature/eme_data_loader.py
0shimax/Pytorch-DRN
a5e70784d0097069e9e1cf958a446f819dbdb7f1
[ "MIT" ]
null
null
null
src/feature/eme_data_loader.py
0shimax/Pytorch-DRN
a5e70784d0097069e9e1cf958a446f819dbdb7f1
[ "MIT" ]
null
null
null
src/feature/eme_data_loader.py
0shimax/Pytorch-DRN
a5e70784d0097069e9e1cf958a446f819dbdb7f1
[ "MIT" ]
null
null
null
from pathlib import Path import pandas as pd import numpy as np import random import torch from torch.utils.data import Dataset from sklearn.model_selection import train_test_split def get_id_columns(df): user_and_target_id_columns = ["user_id", "target_user_id"] return df[user_and_target_id_columns] def extranct_interacted_user_rows(df): tmp = df[["user_id", "label"]].groupby('user_id').sum() interacted_user_id = tmp[tmp.label>0].reset_index() return df[df.user_id.isin(interacted_user_id.user_id)] def get_ethnicity_columns(df): ethnicity_user = df.ethnicity_user ethnicity_target = df.ethnicity_target ethnicity_columns = [c for c in df.columns if "ethnicity_" in c] df.drop(ethnicity_columns, axis=1, inplace=True) df = df.assign(ethnicity_user=ethnicity_user, ethnicity_target=ethnicity_target) return df def calculate_user_features(df): c_id = 'user_id' user_feature_columns = [c for c in df.columns if '_user' in c and 'target_user_id' != c] user_features = df.groupby(c_id)[user_feature_columns].head(1) user_features[c_id] = df.loc[user_features.index].user_id return user_features def calculate_target_features(df): c_id = 'target_user_id' target_feature_columns =\ [c for c in df.columns.values if '_target' in c] target_features = df[[c_id] + target_feature_columns] return target_features def calcurate_target_clicked(df): result = df[['target_user_id', 'label']]\ .groupby('target_user_id')\ .agg(['sum', 'count'])\ .reset_index() result.columns = ['target_user_id', 'label_sum', 'label_cnt'] result = result.assign(label_rate=result.label_sum/result.label_cnt) result.index = df.groupby('target_user_id').head(1).index return result def get_target_ids_for_train_input(squewed_user_target_labels, valued_target_idxs, n_high, n_low): # 全て返す return squewed_user_target_labels.index.values n_total = n_high + n_low high_rate_flag = squewed_user_target_labels.label > 0 if len(valued_target_idxs) >= n_total: idxs = np.random.permutation(len(valued_target_idxs))[:n_total] return 
valued_target_idxs[idxs] query = ~squewed_user_target_labels.index.isin(valued_target_idxs) query &= high_rate_flag n_rest = n_total - len(valued_target_idxs) if n_rest == 1: hight = squewed_user_target_labels[query].sample(n_rest).index.values return np.concatenate([valued_target_idxs, hight]) m_n_high = int(n_rest * n_high / n_total) m_n_low = n_rest - m_n_high hight = squewed_user_target_labels[query].sample(m_n_high, replace=True).index.values low = squewed_user_target_labels[ squewed_user_target_labels.label == 0].sample(m_n_low, replace=True).index.values idxs = np.concatenate([valued_target_idxs, hight, low]) return idxs def get_target_ids_for_test_input(squewed_user_target_labels, n_high, n_low): # 全て返す return squewed_user_target_labels.index.values n_total = n_high + n_low high_rate_flag = squewed_user_target_labels.label > 0 if sum(high_rate_flag) < n_high: hight = squewed_user_target_labels[high_rate_flag].index.values n_low = n_total - sum(high_rate_flag) else: hight = squewed_user_target_labels[high_rate_flag].sample(n_high).index.values low = squewed_user_target_labels[ squewed_user_target_labels.label == 0].sample(n_low, replace=True).index.values idxs = np.concatenate([hight, low]) return idxs def get_target_ids_for_input(squewed_user_target_labels, valued_target_idxs, n_high, n_low, train=True): if train: return get_target_ids_for_train_input(squewed_user_target_labels, valued_target_idxs, n_high, n_low) else: return get_target_ids_for_test_input(squewed_user_target_labels, n_high, n_low) class OwnDataset(Dataset): def __init__(self, file_name, root_dir, n_high, n_low, subset=False, transform=None, train=True, split_seed=555): super().__init__() print("Train:", train) self.file_name = file_name self.root_dir = root_dir self.transform = transform self.n_high = n_high self.n_low = n_low self._train = train self.split_seed = split_seed self.prepare_data() self.user_features_orig = self.user_features def __len__(self): return 
len(self.user_and_target_ids) def reset(self): self.user_features = self.user_features_orig def prepare_data(self): data_path = Path(self.root_dir, self.file_name) eme_data = pd.read_csv(data_path) extracted_interacted_rows = extranct_interacted_user_rows(eme_data) unique_user_ids = extracted_interacted_rows.user_id.unique() train_user_ids, test_user_ids = train_test_split(unique_user_ids, random_state=self.split_seed, shuffle=True, test_size=0.2) if self._train: _data = eme_data[eme_data.user_id.isin(train_user_ids)] self.user_features = calculate_user_features(_data) self.user_and_target_ids = get_id_columns(_data) self.rewards = eme_data[["user_id", "target_user_id", "label"]] self.target_features_all = calculate_target_features(eme_data) # _data else: _data = eme_data[eme_data.user_id.isin(test_user_ids)] self.user_and_target_ids = get_id_columns(_data) self.user_features = calculate_user_features(_data) self.rewards = eme_data[["user_id", "target_user_id", "label"]] self.target_features_all = calculate_target_features(eme_data) print("user", self.user_features.shape) print("target", len(self.target_features_all.target_user_id.unique())) def __getitem__(self, idx): ids = self.user_and_target_ids.iloc[idx].values current_user_id = ids[0] user_feature = self.user_features[self.user_features.user_id == current_user_id] user_feature = user_feature.copy().drop("user_id", axis=1) user_feature = user_feature.astype(np.float32).values user_feature = user_feature.reshape(-1) query = (self.rewards.user_id == current_user_id) query &= (self.rewards.label == 1) valued_target_idxs = self.rewards[query].index.values # TODO: 後で名前変えたる squewed_user_target_labels =\ self.rewards.groupby("target_user_id").head(1) target_idxs = get_target_ids_for_input( squewed_user_target_labels, valued_target_idxs, self.n_high, self.n_low, self._train) target_features = self.target_features_all.loc[target_idxs].copy().reindex() target_ids = target_features.target_user_id.values target_features 
=\ target_features.copy().drop("target_user_id", axis=1) target_features = target_features.astype(np.float32).values eliminate_teacher = self.target_features_all.loc[valued_target_idxs].copy().reindex() eliminate_teacher_ids = eliminate_teacher.target_user_id.values eliminate_teacher_val = target_ids == eliminate_teacher_ids[0] for v in eliminate_teacher_ids[1:]: eliminate_teacher_val += target_ids == v eliminate_teacher_val = eliminate_teacher_val.astype(np.float32) return (torch.FloatTensor(user_feature), torch.FloatTensor(target_features), current_user_id, target_ids, eliminate_teacher_val) def get_reward(self, current_user_id, target_ids): query_user = self.rewards.user_id == current_user_id query_target = self.rewards.target_user_id.isin(target_ids) query = (query_user) & (query_target) reward = self.rewards[query].label.values if len(reward) == 0: return 0. else: return float(reward.max()) def loader(dataset, batch_size, shuffle=True): loader = torch.utils.data.DataLoader( dataset, batch_size=batch_size, shuffle=shuffle, num_workers=0) return loader
39.176471
109
0.661354
1,156
8,658
4.544118
0.121972
0.044546
0.064725
0.087569
0.416905
0.364744
0.300781
0.284028
0.214163
0.189796
0
0.00477
0.249365
8,658
220
110
39.354545
0.803508
0.003465
0
0.127907
0
0
0.034154
0
0
0
0
0.004545
0
1
0.093023
false
0
0.040698
0.005814
0.25
0.017442
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
cf09fa6ef4f4a3fb801b920743bebb3502eaa28b
2,320
py
Python
mp/plot_process.py
RawPikachu/valor
02e1eb0e599904d3f0c49b52534fcb6c3762951d
[ "MIT" ]
null
null
null
mp/plot_process.py
RawPikachu/valor
02e1eb0e599904d3f0c49b52534fcb6c3762951d
[ "MIT" ]
null
null
null
mp/plot_process.py
RawPikachu/valor
02e1eb0e599904d3f0c49b52534fcb6c3762951d
[ "MIT" ]
null
null
null
from sql import ValorSQL from util import guild_name_from_tag import matplotlib.pyplot as plt import matplotlib.dates as md from scipy.interpolate import make_interp_spline from matplotlib.ticker import MaxNLocator import numpy as np from datetime import datetime import time def plot_process(lock, opt, query): a = [] b = [] xfmt = md.DateFormatter('%Y-%m-%d %H:%M:%S') fig = plt.figure() fig.set_figwidth(20) fig.set_figheight(10) ax = plt.gca() ax.xaxis.set_major_formatter(xfmt) plt.xticks(rotation=25) data_pts = 0 for name in opt.guild: with lock: res = ValorSQL.execute_sync(query % name) if opt.split: b = np.array([x[2] for x in res]) a = np.array([x[1] for x in res]) if opt.moving_average > 1: a = np.convolve(a, np.ones(opt.moving_average)/opt.moving_average, mode="valid") b = b[:len(b)-opt.moving_average+1] if opt.smooth: spline = make_interp_spline(b, a) b = np.linspace(b.min(), b.max(), 500) a = spline(b) plt.plot([datetime.fromtimestamp(x) for x in b], a, label=name) plt.legend(loc="upper left") else: for i in range(len(res)): if i >= len(a): a.append(0) b.append(res[i][2]) a[i] += res[i][1] a = np.array(a) b = np.array(b) data_pts += len(res) content = "Plot" if opt.split: content = "Split graph" else: content =f"""``` Mean: {sum(a)/len(a):.7} Max: {max(a)} Min: {min(a)}```""" if opt.moving_average > 1: a = np.convolve(a, np.ones(opt.moving_average)/opt.moving_average, mode="valid") b = b[:len(b)-opt.moving_average+1] if opt.smooth: spline = make_interp_spline(b, a) b = np.linspace(b.min(), b.max(), 500) a = spline(b) plt.plot([datetime.fromtimestamp(x) for x in b], a) ax.xaxis.set_major_locator(MaxNLocator(30)) plt.title("Online Player Activity") plt.ylabel("Player Count") plt.xlabel("Date Y-m-d H:M:S") fig.savefig("/tmp/valor_guild_plot.png") return data_pts, content
25.494505
96
0.551293
337
2,320
3.706231
0.335312
0.057646
0.102482
0.054444
0.339472
0.339472
0.339472
0.32506
0.32506
0.32506
0
0.015645
0.311207
2,320
90
97
25.777778
0.765957
0
0
0.272727
0
0
0.080172
0.010776
0
0
0
0
0
1
0.015152
false
0
0.136364
0
0.166667
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
cf0d0e8e618571c35cece51daafa83cd4f90bede
47,627
py
Python
bot.py
SHIA1204/kyaru
c76a5df7c26fb30136ac473bd3f1ca90a2b65739
[ "Apache-2.0" ]
null
null
null
bot.py
SHIA1204/kyaru
c76a5df7c26fb30136ac473bd3f1ca90a2b65739
[ "Apache-2.0" ]
null
null
null
bot.py
SHIA1204/kyaru
c76a5df7c26fb30136ac473bd3f1ca90a2b65739
[ "Apache-2.0" ]
null
null
null
import os import shutil from os import system import discord import asyncio import os.path import linecache import datetime import urllib import requests from bs4 import BeautifulSoup from discord.utils import get from discord.ext import commands from discord.ext.commands import CommandNotFound import logging import itertools import sys import traceback import random import itertools import math from async_timeout import timeout from functools import partial import functools from youtube_dl import YoutubeDL import youtube_dl from io import StringIO import time import urllib.request from gtts import gTTS from urllib.request import URLError from urllib.request import HTTPError from urllib.request import urlopen from urllib.request import Request, urlopen from urllib.parse import quote import re import warnings import unicodedata import json from googleapiclient.discovery import build from googleapiclient.errors import HttpError from oauth2client.tools import argparser ##################### 로깅 ########################### log_stream = StringIO() logging.basicConfig(stream=log_stream, level=logging.WARNING) #ilsanglog = logging.getLogger('discord') #ilsanglog.setLevel(level = logging.WARNING) #handler = logging.StreamHandler() #handler.setFormatter(logging.Formatter('%(asctime)s:%(levelname)s:%(name)s: %(message)s')) #ilsanglog.addHandler(handler) ##################################################### def init(): global command command = [] fc = [] command_inidata = open('command.ini', 'r', encoding = 'utf-8') command_inputData = command_inidata.readlines() ############## 뮤직봇 명령어 리스트 ##################### for i in range(len(command_inputData)): tmp_command = command_inputData[i][12:].rstrip('\n') fc = tmp_command.split(', ') command.append(fc) fc = [] del command[0] command_inidata.close() #print (command) init() #mp3 파일 생성함수(gTTS 이용, 남성목소리) async def MakeSound(saveSTR, filename): tts = gTTS(saveSTR, lang = 'ko') tts.save('./' + filename + '.wav') ''' try: encText = 
urllib.parse.quote(saveSTR) urllib.request.urlretrieve("https://clova.ai/proxy/voice/api/tts?text=" + encText + "%0A&voicefont=1&format=wav",filename + '.wav') except Exception as e: print (e) tts = gTTS(saveSTR, lang = 'ko') tts.save('./' + filename + '.wav') pass ''' #mp3 파일 재생함수 async def PlaySound(voiceclient, filename): source = discord.FFmpegPCMAudio(filename) try: voiceclient.play(source) except discord.errors.ClientException: while voiceclient.is_playing(): await asyncio.sleep(1) while voiceclient.is_playing(): await asyncio.sleep(1) voiceclient.stop() source.cleanup() # Silence useless bug reports messages youtube_dl.utils.bug_reports_message = lambda: '' class VoiceError(Exception): pass class YTDLError(Exception): pass class YTDLSource(discord.PCMVolumeTransformer): YTDL_OPTIONS = { 'format': 'bestaudio/best', 'extractaudio': True, 'audioformat': 'mp3', 'outtmpl': '%(extractor)s-%(id)s-%(title)s.%(ext)s', 'restrictfilenames': True, 'noplaylist': False, 'nocheckcertificate': True, 'ignoreerrors': False, 'logtostderr': False, 'quiet': True, 'no_warnings': True, 'default_search': 'auto', 'source_address': '0.0.0.0', 'force-ipv4' : True, '-4': True } FFMPEG_OPTIONS = { 'before_options': '-reconnect 1 -reconnect_streamed 1 -reconnect_delay_max 5', 'options': '-vn', } ytdl = youtube_dl.YoutubeDL(YTDL_OPTIONS) def __init__(self, ctx: commands.Context, source: discord.FFmpegPCMAudio, *, data: dict, volume: float = 0.5): super().__init__(source, volume) self.requester = ctx.author self.channel = ctx.channel self.data = data self.uploader = data.get('uploader') self.uploader_url = data.get('uploader_url') date = data.get('upload_date') self.upload_date = date[6:8] + '.' + date[4:6] + '.' 
+ date[0:4] self.title = data.get('title') self.thumbnail = data.get('thumbnail') self.description = data.get('description') self.duration = self.parse_duration(int(data.get('duration'))) self.tags = data.get('tags') self.url = data.get('webpage_url') self.views = data.get('view_count') self.likes = data.get('like_count') self.dislikes = data.get('dislike_count') self.stream_url = data.get('url') def __str__(self): return '**{0.title}** by **{0.uploader}**'.format(self) @classmethod async def create_source(cls, bot, ctx: commands.Context, search: str, *, loop: asyncio.BaseEventLoop = None): loop = loop or asyncio.get_event_loop() if "http" not in search: partial = functools.partial(cls.ytdl.extract_info, f"ytsearch5:{search}", download=False, process=False) data = await loop.run_in_executor(None, partial) if data is None: raise YTDLError('Couldn\'t find anything that matches `{}`'.format(search)) emoji_list : list = ["1️⃣", "2️⃣", "3️⃣", "4️⃣", "5️⃣", "🚫"] song_list_str : str = "" cnt : int = 0 song_index : int = 0 for data_info in data["entries"]: cnt += 1 if 'title' not in data_info: data_info['title'] = f"{search} - 제목 정보 없음" song_list_str += f"`{cnt}.` [**{data_info['title']}**](https://www.youtube.com/watch?v={data_info['url']})\n" embed = discord.Embed(description= song_list_str) embed.set_footer(text=f"10초 안에 미선택시 취소됩니다.") song_list_message = await ctx.send(embed = embed) for emoji in emoji_list: await song_list_message.add_reaction(emoji) def reaction_check(reaction, user): return (reaction.message.id == song_list_message.id) and (user.id == ctx.author.id) and (str(reaction) in emoji_list) try: reaction, user = await bot.wait_for('reaction_add', check = reaction_check, timeout = 10) except asyncio.TimeoutError: reaction = "🚫" for emoji in emoji_list: await song_list_message.remove_reaction(emoji, bot.user) await song_list_message.delete(delay = 10) if str(reaction) == "1️⃣": song_index = 0 elif str(reaction) == "2️⃣": song_index = 1 elif str(reaction) == 
"3️⃣": song_index = 2 elif str(reaction) == "4️⃣": song_index = 3 elif str(reaction) == "5️⃣": song_index = 4 else: return False result_url = f"https://www.youtube.com/watch?v={data['entries'][song_index]['url']}" else: result_url = search webpage_url = result_url partial = functools.partial(cls.ytdl.extract_info, webpage_url, download=False) processed_info = await loop.run_in_executor(None, partial) if processed_info is None: raise YTDLError('Couldn\'t fetch `{}`'.format(webpage_url)) if 'entries' not in processed_info: info = processed_info else: info = None while info is None: try: info = processed_info['entries'].pop(0) except IndexError: raise YTDLError('Couldn\'t retrieve any matches for `{}`'.format(webpage_url)) return cls(ctx, discord.FFmpegPCMAudio(info['url'], **cls.FFMPEG_OPTIONS), data=info) @staticmethod def parse_duration(duration: int): return time.strftime('%H:%M:%S', time.gmtime(duration)) class Song: __slots__ = ('source', 'requester') def __init__(self, source: YTDLSource): self.source = source self.requester = source.requester def create_embed(self): embed = (discord.Embed(title='Now playing', description='**```fix\n{0.source.title}\n```**'.format(self), color=discord.Color.blurple()) .add_field(name='Duration', value=self.source.duration) .add_field(name='Requested by', value=self.requester.mention) .add_field(name='Uploader', value='[{0.source.uploader}]({0.source.uploader_url})'.format(self)) .add_field(name='URL', value='[Click]({0.source.url})'.format(self)) .set_thumbnail(url=self.source.thumbnail)) return embed class SongQueue(asyncio.Queue): def __getitem__(self, item): if isinstance(item, slice): return list(itertools.islice(self._queue, item.start, item.stop, item.step)) else: return self._queue[item] def __iter__(self): return self._queue.__iter__() def __len__(self): return self.qsize() def clear(self): self._queue.clear() def shuffle(self): random.shuffle(self._queue) def select(self, index : int, loop : bool = False): for i in 
range(index-1): if not loop: del self._queue[0] else: self._queue.append(self._queue[0]) del self._queue[0] def remove(self, index: int): del self._queue[index] class VoiceState: def __init__(self, bot: commands.Bot, ctx: commands.Context): self.bot = bot self._ctx = ctx self._cog = ctx.cog self.current = None self.voice = None self.next = asyncio.Event() self.songs = SongQueue() self._loop = False self._volume = 0.5 self.skip_votes = set() self.audio_player = bot.loop.create_task(self.audio_player_task()) def __del__(self): self.audio_player.cancel() @property def loop(self): return self._loop @loop.setter def loop(self, value: bool): self._loop = value @property def volume(self): return self._volume @volume.setter def volume(self, value: float): self._volume = value @property def is_playing(self): return self.voice and self.current async def audio_player_task(self): while True: self.next.clear() if self.loop and self.current is not None: source1 = await YTDLSource.create_source(self.bot, self._ctx, self.current.source.url, loop=self.bot.loop) song1 = Song(source1) await self.songs.put(song1) else: pass try: async with timeout(180): # 3 minutes self.current = await self.songs.get() except asyncio.TimeoutError: self.bot.loop.create_task(self.stop()) return self.current.source.volume = self._volume self.voice.play(self.current.source, after=self.play_next_song) play_info_msg = await self.current.source.channel.send(embed=self.current.create_embed()) # await play_info_msg.delete(delay = 20) await self.next.wait() def play_next_song(self, error=None): if error: raise VoiceError(str(error)) self.next.set() def skip(self): self.skip_votes.clear() if self.is_playing: self.voice.stop() async def stop(self): self.songs.clear() if self.voice: await self.voice.disconnect() self.voice = None self.bot.loop.create_task(self._cog.cleanup(self._ctx)) class Music(commands.Cog): def __init__(self, bot: commands.Bot): self.bot = bot self.voice_states = {} def get_voice_state(self, 
ctx: commands.Context): state = self.voice_states.get(ctx.guild.id) if not state: state = VoiceState(self.bot, ctx) self.voice_states[ctx.guild.id] = state return state def cog_unload(self): for state in self.voice_states.values(): self.bot.loop.create_task(state.stop()) def cog_check(self, ctx: commands.Context): if not ctx.guild: raise commands.NoPrivateMessage('This command can\'t be used in DM channels.') return True async def cog_before_invoke(self, ctx: commands.Context): ctx.voice_state = self.get_voice_state(ctx) async def cog_command_error(self, ctx: commands.Context, error: commands.CommandError): await ctx.send('에러 : {}'.format(str(error))) ''' @commands.command(name='join', invoke_without_subcommand=True) async def _join(self, ctx: commands.Context): destination = ctx.author.voice.channel if ctx.voice_state.voice: await ctx.voice_state.voice.move_to(destination) return ctx.voice_state.voice = await destination.connect() ''' async def cleanup(self, ctx: commands.Context): del self.voice_states[ctx.guild.id] @commands.command(name=command[0][0], aliases=command[0][1:]) #음성 채널 입장 #@commands.has_permissions(manage_guild=True) async def _summon(self, ctx: commands.Context, *, channel: discord.VoiceChannel = None): channel = ctx.message.author.voice.channel if not channel and not ctx.author.voice: raise VoiceError(':no_entry_sign: 현재 접속중인 음악채널이 없습니다.') destination = channel or ctx.author.voice.channel if ctx.voice_state.voice: await ctx.voice_state.voice.move_to(destination) return ctx.voice_state.voice = await destination.connect() @commands.command(name=command[1][0], aliases=command[1][1:]) #음성 채널 퇴장 #@commands.has_permissions(manage_guild=True) async def _leave(self, ctx: commands.Context): if not ctx.voice_state.voice: return await ctx.send(embed=discord.Embed(title=":no_entry_sign: 현재 접속중인 음악채널이 없습니다.",colour = 0x2EFEF7)) await ctx.voice_state.stop() del self.voice_states[ctx.guild.id] @commands.command(name=command[8][0], aliases=command[8][1:]) #볼륨 조절 
async def _volume(self, ctx: commands.Context, *, volume: int): vc = ctx.voice_client if not ctx.voice_state.is_playing: return await ctx.send(embed=discord.Embed(title=":mute: 현재 재생중인 음악이 없습니다.",colour = 0x2EFEF7)) if not 0 < volume < 101: return await ctx.send(embed=discord.Embed(title=":no_entry_sign: 볼륨은 1 ~ 100 사이로 입력 해주세요.",colour = 0x2EFEF7)) if vc.source: vc.source.volume = volume / 100 ctx.voice_state.volume = volume / 100 await ctx.send(embed=discord.Embed(title=f":loud_sound: 볼륨을 {volume}%로 조정하였습니다.",colour = 0x2EFEF7)) @commands.command(name=command[7][0], aliases=command[7][1:]) #현재 재생 중인 목록 async def _now(self, ctx: commands.Context): await ctx.send(embed=ctx.voice_state.current.create_embed()) @commands.command(name=command[3][0], aliases=command[3][1:]) #음악 일시 정지 #@commands.has_permissions(manage_guild=True) async def _pause(self, ctx: commands.Context): if ctx.voice_state.is_playing and ctx.voice_state.voice.is_playing(): ctx.voice_state.voice.pause() await ctx.message.add_reaction('⏸') @commands.command(name=command[4][0], aliases=command[4][1:]) #음악 다시 재생 #@commands.has_permissions(manage_guild=True) async def _resume(self, ctx: commands.Context): if ctx.voice_state.is_playing and ctx.voice_state.voice.is_paused(): ctx.voice_state.voice.resume() await ctx.message.add_reaction('⏯') @commands.command(name=command[9][0], aliases=command[9][1:]) #음악 정지 #@commands.has_permissions(manage_guild=True) async def _stop(self, ctx: commands.Context): ctx.voice_state.songs.clear() if ctx.voice_state.is_playing: ctx.voice_state.voice.stop() await ctx.message.add_reaction('⏹') @commands.command(name=command[5][0], aliases=command[5][1:]) #현재 음악 스킵 async def _skip(self, ctx: commands.Context, *, args: int = 1): if not ctx.voice_state.is_playing: return await ctx.send(embed=discord.Embed(title=':mute: 현재 재생중인 음악이 없습니다.',colour = 0x2EFEF7)) await ctx.message.add_reaction('⏭') if args != 1: ctx.voice_state.songs.select(args, ctx.voice_state.loop) 
ctx.voice_state.skip() ''' voter = ctx.message.author if voter == ctx.voice_state.current.requester: await ctx.message.add_reaction('⏭') ctx.voice_state.skip() elif voter.id not in ctx.voice_state.skip_votes: ctx.voice_state.skip_votes.add(voter.id) total_votes = len(ctx.voice_state.skip_votes) if total_votes >= 3: await ctx.message.add_reaction('⏭') ctx.voice_state.skip() else: await ctx.send('Skip vote added, currently at **{}/3**'.format(total_votes)) else: await ctx.send('```이미 투표하셨습니다.```') ''' @commands.command(name=command[6][0], aliases=command[6][1:]) #재생 목록 async def _queue(self, ctx: commands.Context, *, page: int = 1): if len(ctx.voice_state.songs) == 0: return await ctx.send(embed=discord.Embed(title=':mute: 재생목록이 없습니다.',colour = 0x2EFEF7)) items_per_page = 10 pages = math.ceil(len(ctx.voice_state.songs) / items_per_page) start = (page - 1) * items_per_page end = start + items_per_page queue = '' for i, song in enumerate(ctx.voice_state.songs[start:end], start=start): queue += '`{0}.` [**{1.source.title}**]({1.source.url})\n'.format(i + 1, song) if ctx.voice_state.loop: embed = discord.Embed(title = '🔁 Now playing', description='**```fix\n{0.source.title}\n```**'.format(ctx.voice_state.current)) else: embed = discord.Embed(title = 'Now playing', description='**```fix\n{0.source.title}\n```**'.format(ctx.voice_state.current)) embed.add_field(name ='\u200B\n**{} tracks:**\n'.format(len(ctx.voice_state.songs)), value = f"\u200B\n{queue}") embed.set_thumbnail(url=ctx.voice_state.current.source.thumbnail) embed.set_footer(text='Viewing page {}/{}'.format(page, pages)) await ctx.send(embed=embed) @commands.command(name=command[11][0], aliases=command[11][1:]) #음악 셔플 async def _shuffle(self, ctx: commands.Context): if len(ctx.voice_state.songs) == 0: return await ctx.send(embed=discord.Embed(title=':mute: 재생목록이 없습니다.',colour = 0x2EFEF7)) ctx.voice_state.songs.shuffle() result = await ctx.send(embed=discord.Embed(title=':twisted_rightwards_arrows: 셔플 
완료!',colour = 0x2EFEF7)) await result.add_reaction('🔀') @commands.command(name=command[10][0], aliases=command[10][1:]) #음악 삭제 async def _remove(self, ctx: commands.Context, index: int): if len(ctx.voice_state.songs) == 0: return ctx.send(embed=discord.Embed(title=':mute: 재생목록이 없습니다.',colour = 0x2EFEF7)) # remove_result = '`{0}.` [**{1.source.title}**] 삭제 완료!\n'.format(index, ctx.voice_state.songs[index - 1]) result = await ctx.send(embed=discord.Embed(title='`{0}.` [**{1.source.title}**] 삭제 완료!\n'.format(index, ctx.voice_state.songs[index - 1]),colour = 0x2EFEF7)) ctx.voice_state.songs.remove(index - 1) await result.add_reaction('✅') @commands.command(name=command[14][0], aliases=command[14][1:]) #음악 반복 async def _loop(self, ctx: commands.Context): if not ctx.voice_state.is_playing: return await ctx.send(embed=discord.Embed(title=':mute: 현재 재생중인 음악이 없습니다.',colour = 0x2EFEF7)) # Inverse boolean value to loop and unloop. ctx.voice_state.loop = not ctx.voice_state.loop if ctx.voice_state.loop : result = await ctx.send(embed=discord.Embed(title=':repeat: 반복재생이 설정되었습니다!',colour = 0x2EFEF7)) else: result = await ctx.send(embed=discord.Embed(title=':repeat_one: 반복재생이 취소되었습니다!',colour = 0x2EFEF7)) await result.add_reaction('🔁') @commands.command(name=command[2][0], aliases=command[2][1:]) #음악 재생 async def _play(self, ctx: commands.Context, *, search: str): if not ctx.voice_state.voice: await ctx.invoke(self._summon) async with ctx.typing(): try: source = await YTDLSource.create_source(self.bot, ctx, search, loop=self.bot.loop) if not source: return await ctx.send(f"노래 재생/예약이 취소 되었습니다.") except YTDLError as e: await ctx.send('에러가 발생했습니다 : {}'.format(str(e))) else: song = Song(source) await ctx.channel.purge(limit=1) await ctx.voice_state.songs.put(song) await ctx.send(embed=discord.Embed(title=f':musical_note: 재생목록 추가 : {str(source)}',colour = 0x2EFEF7)) # @commands.command(name=command[13][0], aliases=command[13][1:]) #지우기 # async def clear_channel_(self, ctx: 
commands.Context, *, msg: int = 1): # try: # msg = int(msg) # except: # await ctx.send(f"```지우고 싶은 줄수는 [숫자]로 입력해주세요!```") # await ctx.channel.purge(limit = msg) @_summon.before_invoke @_play.before_invoke async def ensure_voice_state(self, ctx: commands.Context): if not ctx.author.voice or not ctx.author.voice.channel: raise commands.CommandError('음성채널에 접속 후 사용해주십시오.') if ctx.voice_client: if ctx.voice_client.channel != ctx.author.voice.channel: raise commands.CommandError('봇이 이미 음성채널에 접속해 있습니다.') # @commands.command(name=command[12][0], aliases=command[12][1:]) #도움말 # async def menu_(self, ctx): # command_list = '' # command_list += '!인중 : 봇상태가 안좋을 때 쓰세요!' #! # command_list += ','.join(command[0]) + '\n' #!들어가자 # command_list += ','.join(command[1]) + '\n' #!나가자 # command_list += ','.join(command[2]) + ' [검색어] or [url]\n' #!재생 # command_list += ','.join(command[3]) + '\n' #!일시정지 # command_list += ','.join(command[4]) + '\n' #!다시재생 # command_list += ','.join(command[5]) + ' (숫자)\n' #!스킵 # command_list += ','.join(command[6]) + ' 혹은 [명령어] + [숫자]\n' #!목록 # command_list += ','.join(command[7]) + '\n' #!현재재생 # command_list += ','.join(command[8]) + ' [숫자 1~100]\n' #!볼륨 # command_list += ','.join(command[9]) + '\n' #!정지 # command_list += ','.join(command[10]) + '\n' #!삭제 # command_list += ','.join(command[11]) + '\n' #!섞기 # command_list += ','.join(command[14]) + '\n' #! # command_list += ','.join(command[13]) + ' [숫자]\n' #!경주 # embed = discord.Embed( # title = "----- 명령어 -----", # description= '```' + command_list + '```', # color=0xff00ff # ) # await ctx.send( embed=embed, tts=False) ################ 음성파일 생성 후 재생 ################ @commands.command(name="==인중") async def playText_(self, ctx): #msg = ctx.message.content[len(ctx.invoked_with)+1:] #sayMessage = msg await MakeSound('뮤직봇이 많이 아파요. 잠시 후 사용해주세요.', './say' + str(ctx.guild.id)) await ctx.send("```뮤직봇이 많이 아파요. 
잠시 후 사용해주세요.```", tts=False) if not ctx.voice_state.voice: await ctx.invoke(self._summon) if ctx.voice_state.is_playing: ctx.voice_state.voice.stop() await PlaySound(ctx.voice_state.voice, './say' + str(ctx.guild.id) + '.wav') await ctx.voice_state.stop() del self.voice_states[ctx.guild.id] #client = commands.Bot(command_prefix='==', help_command = None) client = commands.Bot('', help_command = None) client.add_cog(Music(client)) access_client_id = os.environ["client_id"] access_client_secret = os.environ["client_secret"] client_id = access_client_id client_secret = access_client_secret def create_soup(url, headers): res = requests.get(url, headers=headers) res.raise_for_status() soup = BeautifulSoup(res.text, 'lxml') return soup @client.event async def on_ready(): print(f'로그인 성공: {client.user.name}!') game = discord.Game("==명령어") await client.change_presence(status=discord.Status.online, activity=game) @client.event async def on_command_error(ctx, error): if isinstance(error, CommandNotFound): return elif isinstance(error, discord.ext.commands.MissingRequiredArgument): return raise error @client.command(pass_context = True, aliases=['==명령어']) async def cmd_cmd_abc(ctx): await ctx.channel.purge(limit=1) emoji_list : list = ["🅰️", "1️⃣", "2️⃣", "3️⃣", "🚫"] embed = discord.Embed(title = "캬루봇 명령어 목록", colour = 0x30e08b) embed.add_field(name = ':a: 전체', value = '전체 명령어 보기', inline = False) embed.add_field(name = ':one: 일반', value = '일반 명령어 보기', inline = False) embed.add_field(name = ':two: TruckersMP', value = 'TruckersMP 관련 명령어 보기', inline = False) embed.add_field(name = ':three: 음악', value = '음악 재생 관련 명령어 보기', inline = False) embed.add_field(name = ':no_entry_sign: 취소', value = '실행 취소', inline = False) cmd_message = await ctx.send(embed = embed) for emoji in emoji_list: await cmd_message.add_reaction(emoji) def reaction_check(reaction, user): return (reaction.message.id == cmd_message.id) and (user.id == ctx.author.id) and (str(reaction) in emoji_list) try: reaction, 
user = await client.wait_for('reaction_add', check = reaction_check, timeout = 10) except asyncio.TimeoutError: reaction = "🚫" for emoji in emoji_list: # await cmd_message.remove_reaction(emoji, client.user) await cmd_message.delete(delay = 0) await cmd_message.delete(delay = 10) if str(reaction) == "1️⃣": embed1 = discord.Embed(title = "캬루봇 명령어 목록 [일반 명령어]", colour = 0x30e08b) embed1.add_field(name = '==지우기 <숫자>', value = '최근 1~99개의 메세지를 삭제합니다.', inline = False) embed1.add_field(name = '==내정보', value = '자신의 디스코드 정보를 보여줍니다.', inline = False) embed1.add_field(name = '==실검', value = '네이버의 급상승 검색어 TOP10을 보여줍니다.', inline = False) embed1.add_field(name = '==날씨 <지역>', value = '<지역>의 날씨를 알려줍니다.', inline = False) embed1.add_field(name = '==말해 <text>', value = '<text>를 말합니다.', inline = False) embed1.add_field(name = '==번역 <언어> <text>', value = '<text>를 번역합니다.', inline = False) embed1.add_field(name = '==유튜브 <text>', value = '유튜브에서 <text>를 검색합니다.', inline = False) embed1.set_footer(text = 'Service provided by RyuZU', icon_url="https://cdn.discordapp.com/attachments/740877681209507880/755440825667813497/20200817_184231.jpg") await ctx.channel.send(embed = embed1) elif str(reaction) == "2️⃣": embed2 = discord.Embed(title = "캬루봇 명령어 목록 [TruckersMP]", colour = 0x30e08b) embed2.add_field(name = '==T정보, ==ts', value = 'TruckersMP의 정보를 보여줍니다.', inline = False) embed2.add_field(name = '==T프로필 <TMPID>, ==tp', value = '해당 TMPID 아이디를 가진 사람의 프로필을 보여줍니다.', inline = False) embed2.add_field(name = '==T트래픽순위, ==ttr', value = 'TruckersMP의 트래픽 순위 TOP5를 보여줍니다.', inline = False) embed2.set_footer(text = 'Service provided by RyuZU', icon_url="https://cdn.discordapp.com/attachments/740877681209507880/755440825667813497/20200817_184231.jpg") await ctx.channel.send(embed = embed2) elif str(reaction) == "3️⃣": embed3 = discord.Embed(title = "캬루봇 명령어 목록 [음악 재생]", colour = 0x30e08b) embed3.add_field(name = '==들어와', value = '봇이 음성 통화방에 들어옵니다.', inline = False) embed3.add_field(name = '==나가', value = 
'봇이 음성 통화방에서 나갑니다.', inline = False) embed3.add_field(name = '==재생', value = '봇이 음악을 재생합니다.', inline = False) embed3.add_field(name = '==일시정지', value = '현재 재생 중인 음악을 일시 정지합니다.', inline = False) embed3.add_field(name = '==다시재생', value = '일시 정지한 음악을 다시 재생합니다.', inline = False) embed3.add_field(name = '==스킵', value = '현재 재생 중인 음악을 스킵합니다.', inline = False) embed3.add_field(name = '==목록', value = '재생 목록을 보여줍니다.', inline = False) embed3.add_field(name = '==현재재생', value = '현재 재생 중인 음악을 보여줍니다.', inline = False) embed3.add_field(name = '==볼륨', value = '봇의 볼륨을 조절합니다.', inline = False) embed3.add_field(name = '==정지', value = '현재 재생 중인 음악을 정지합니다.', inline = False) embed3.add_field(name = '==삭제 <트랙 번호>', value = '재생 목록에 있는 특정 음악을 삭제합니다.', inline = False) embed3.add_field(name = '==섞기', value = '재생 목록을 섞습니다.', inline = False) embed3.add_field(name = '==반복', value = '현재 재생 중인 음악을 반복 재생합니다.', inline = False) embed3.set_footer(text = 'Service provided by RyuZU', icon_url="https://cdn.discordapp.com/attachments/740877681209507880/755440825667813497/20200817_184231.jpg") await ctx.channel.send(embed = embed3) elif str(reaction) == "🅰️": embed6 = discord.Embed(title = "캬루봇 명령어 목록 [전체 명령어]", colour = 0x30e08b) embed6.add_field(name = '==지우기 <숫자>', value = '최근 1~99개의 메세지를 삭제합니다.', inline = False) embed6.add_field(name = '==내정보', value = '자신의 디스코드 정보를 보여줍니다.', inline = False) embed6.add_field(name = '==실검', value = '네이버의 급상승 검색어 TOP10을 보여줍니다.', inline = False) embed6.add_field(name = '==날씨 <지역>', value = '<지역>의 날씨를 알려줍니다.', inline = False) embed6.add_field(name = '==말해 <내용>', value = '<내용>을 말합니다.', inline = False) embed6.add_field(name = '==번역 <언어> <내용>', value = '<내용>을 번역합니다.', inline = False) embed6.add_field(name = '==유튜브 <text>', value = '유튜브에서 <text>를 검색합니다.', inline = False) embed6.add_field(name = '==T정보, ==ts', value = 'TruckersMP의 서버 정보를 보여줍니다.', inline = False) embed6.add_field(name = '==T프로필 <TMPID>, ==tp', value = '해당 TMPID 아이디를 가진 사람의 프로필을 보여줍니다.', inline = False) 
embed6.add_field(name = '==T트래픽순위, ==ttr', value = 'TruckersMP의 트래픽 순위 TOP5를 보여줍니다.', inline = False) embed6.add_field(name = '==들어와', value = '봇이 음성 통화방에 들어옵니다.', inline = False) embed6.add_field(name = '==나가', value = '봇이 음성 통화방에서 나갑니다.', inline = False) embed6.add_field(name = '==재생', value = '봇이 음악을 재생합니다.', inline = False) embed6.add_field(name = '==일시정지', value = '현재 재생 중인 음악을 일시 정지합니다.', inline = False) embed6.add_field(name = '==다시재생', value = '일시 정지한 음악을 다시 재생합니다.', inline = False) embed6.add_field(name = '==스킵', value = '현재 재생 중인 음악을 스킵합니다.', inline = False) embed6.add_field(name = '==목록', value = '재생 목록을 보여줍니다.', inline = False) embed6.add_field(name = '==현재재생', value = '현재 재생 중인 음악을 보여줍니다.', inline = False) embed6.add_field(name = '==볼륨', value = '봇의 볼륨을 조절합니다.', inline = False) embed6.add_field(name = '==정지', value = '현재 재생 중인 음악을 정지합니다.', inline = False) embed6.add_field(name = '==삭제 <트랙 번호>', value = '재생 목록에 있는 특정 음악을 삭제합니다.', inline = False) embed6.add_field(name = '==섞기', value = '재생 목록을 섞습니다.', inline = False) embed6.add_field(name = '==반복', value = '현재 재생 중인 음악을 반복 재생합니다.', inline = False) embed6.set_footer(text = 'Service provided by RyuZU', icon_url="https://cdn.discordapp.com/attachments/740877681209507880/755440825667813497/20200817_184231.jpg") await ctx.channel.send(embed = embed6) elif str(reaction) == "🚫": await cmd_message.delete(delay = 0) else: return False @client.command(pass_context = True, aliases=['==지우기']) @commands.has_permissions(administrator=True) async def claer_clear_abc(ctx, amount): amount = int(amount) if amount < 100: await ctx.channel.purge(limit=amount) embed = discord.Embed(title=f":put_litter_in_its_place: {amount}개의 채팅을 삭제했어요.",colour = 0x2EFEF7) embed.set_footer(text = 'Service provided by RyuZU') await ctx.channel.send(embed=embed) else: await ctx.channel.purge(limit=1) await ctx.channel.send(embed=discord.Embed(title=f":no_entry_sign: 숫자를 99 이하로 입력해 주세요.",colour = 0x2EFEF7)) embed = 
discord.Embed(title=f":put_litter_in_its_place: {amount}개의 채팅을 삭제했어요.",colour = 0x2EFEF7) embed.set_footer(text = 'Service provided by RyuZU', icon_url="https://cdn.discordapp.com/attachments/740877681209507880/755440825667813497/20200817_184231.jpg") await ctx.channel.send(embed=embed) @client.command(aliases=['==핑']) async def ping_ping_abc(ctx): await ctx.channel.send('퐁! `{}ms`'.format(round(client.latency * 1000))) @client.command(pass_context = True, aliases=['==내정보']) async def my_my_abc_profile(ctx): date = datetime.datetime.utcfromtimestamp(((int(ctx.author.id) >> 22) + 1420070400000) / 1000) embed = discord.Embed(title = ctx.author.display_name + "님의 정보", colour = 0x2EFEF7) embed.add_field(name = '사용자명', value = ctx.author.name, inline = False) embed.add_field(name = '가입일', value = str(date.year) + "년" + str(date.month) + "월" + str(date.day) + "일", inline = False) embed.add_field(name = '아이디', value = ctx.author.id, inline = False) embed.set_thumbnail(url = ctx.author.avatar_url) embed.set_footer(text = 'Service provided by RyuZU', icon_url="https://cdn.discordapp.com/attachments/740877681209507880/755440825667813497/20200817_184231.jpg") await ctx.channel.send(embed = embed) @client.command(pass_context = True, aliases=['==카페']) async def cafe_cafe_abc(ctx): embed = discord.Embed(title = "KCTG 공식 카페", colour = 0x2EFEF7) embed.add_field(name = 'https://cafe.naver.com/kctgofficial', value = "\n\u200b", inline = False) embed.set_thumbnail(url = "https://cdn.discordapp.com/attachments/740877681209507880/744451389396353106/KCTG_Wolf_1.png") embed.set_footer(text = 'Service provided by RyuZU', icon_url="https://cdn.discordapp.com/attachments/740877681209507880/755440825667813497/20200817_184231.jpg") await ctx.channel.send(embed = embed) @client.command(pass_context = True, aliases=['==실검']) async def search_search_abc_rank(ctx): headers = {'User-Agent':'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/83.0.4103.106 
Whale/2.8.105.22 Safari/537.36'} url = "https://datalab.naver.com/keyword/realtimeList.naver?where=main" soup = create_soup(url, headers) rank_list = soup.find("ul", attrs={"class":"ranking_list"}) one = rank_list.find_all("span", attrs={"class":"item_title"})[0].get_text().strip().replace("1", "") #순서대로 실검 1~10위 two = rank_list.find_all("span", attrs={"class":"item_title"})[1].get_text().strip().replace("2", "") three = rank_list.find_all("span", attrs={"class":"item_title"})[2].get_text().strip().replace("3", "") four = rank_list.find_all("span", attrs={"class":"item_title"})[3].get_text().strip().replace("4", "") five = rank_list.find_all("span", attrs={"class":"item_title"})[4].get_text().strip().replace("5", "") six = rank_list.find_all("span", attrs={"class":"item_title"})[5].get_text().strip().replace("6", "") seven = rank_list.find_all("span", attrs={"class":"item_title"})[6].get_text().strip().replace("7", "") eight = rank_list.find_all("span", attrs={"class":"item_title"})[7].get_text().strip().replace("8", "") nine = rank_list.find_all("span", attrs={"class":"item_title"})[8].get_text().strip().replace("9", "") ten = rank_list.find_all("span", attrs={"class":"item_title"})[9].get_text().strip().replace("10", "") time = soup.find("span", attrs={"class":"time_txt _title_hms"}).get_text() #현재 시간 await ctx.channel.send(f'Ⅰ ``{one}``\nⅡ ``{two}``\nⅢ ``{three}``\nⅣ ``{four}``\nⅤ ``{five}``\nⅥ ``{six}``\nⅦ ``{seven}``\nⅧ ``{eight}``\nⅨ ``{nine}``\nⅩ ``{ten}``\n\n``Time[{time}]``') @client.command(pass_context = True, aliases=['==날씨']) async def weather_weather_abc(ctx, arg1): headers = {'User-Agent':'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/83.0.4103.106 Whale/2.8.105.22 Safari/537.36'} url = f"https://search.naver.com/search.naver?sm=tab_hty.top&where=nexearch&query={arg1}+날씨&oquery=날씨&tqi=U1NQ%2FsprvmsssUNA1MVssssssPN-224813" soup = create_soup(url, headers) rotate = soup.find("span", 
attrs={"class":"btn_select"}).get_text() #지역 cast = soup.find("p", attrs={"class":"cast_txt"}).get_text() #맑음, 흐림 같은거 curr_temp = soup.find("p", attrs={"class":"info_temperature"}).get_text().replace("도씨", "") #현재 온도 sen_temp = soup.find("span", attrs={"class":"sensible"}).get_text().replace("체감온도", "체감") #체감 온도 min_temp = soup.find("span", attrs={"class":"min"}).get_text() #최저 온도 max_temp = soup.find("span", attrs={"class":"max"}).get_text() #최고 온도 # 오전, 오후 강수 확률 morning_rain_rate = soup.find("span", attrs={"class":"point_time morning"}).get_text().strip() #오전 afternoon_rain_rate = soup.find("span", attrs={"class":"point_time afternoon"}).get_text().strip() #오후 # 미세먼지, 초미세먼지 dust = soup.find("dl", attrs={"class":"indicator"}) pm10 = dust.find_all("dd")[0].get_text() #미세먼지 pm25 = dust.find_all("dd")[1].get_text() #초미세먼지 daylist = soup.find("ul", attrs={"class":"list_area _pageList"}) tomorrow = daylist.find_all("li")[1] #내일 온도 to_min_temp = tomorrow.find_all("span")[12].get_text() #최저 to_max_temp = tomorrow.find_all("span")[14].get_text() #최고 #내일 강수 to_morning_rain_rate = daylist.find_all("span", attrs={"class":"point_time morning"})[1].get_text().strip() #오전 to_afternoon_rain_rate = daylist.find_all("span", attrs={"class":"point_time afternoon"})[1].get_text().strip() #오후 await ctx.channel.send((rotate) + f'\n오늘의 날씨 ``' + (cast) + f'``\n__기온__ ``현재 {curr_temp}({sen_temp}) 최저 {min_temp} 최고 {max_temp}``\n__강수__ ``오전 {morning_rain_rate}`` ``오후 {afternoon_rain_rate}``\n__대기__ ``미세먼지 {pm10}`` ``초미세먼지 {pm25}``\n\n내일의 날씨\n__기온__ ``최저 {to_min_temp}˚`` ``최고 {to_max_temp}˚``\n__강수__ ``오전 {to_morning_rain_rate}`` ``오후 {to_afternoon_rain_rate}``') @client.command(pass_context = True, aliases=['==말해']) async def tell_tell_abc(ctx, *, arg): tell = str(arg) await ctx.channel.purge(limit=1) await ctx.channel.send(tell) @client.command(pass_context = True, aliases=['==T정보', '==TS', '==t정보', '==ts']) async def tmp_tmp_abc_server_status(ctx): headers = {'User-Agent':'Mozilla/5.0 
(Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/83.0.4103.106 Whale/2.8.105.22 Safari/537.36'} url = "https://stats.truckersmp.com/" soup = create_soup(url, headers) #현재 접속중인 플레이어 curr_status = soup.find("div", attrs={"class":"container-fluid"}) sim1 = curr_status.find_all("div", attrs={"class":"server-count"})[0].get_text().strip() sim2 = curr_status.find_all("div", attrs={"class":"server-count"})[1].get_text().strip() sim_us = curr_status.find_all("div", attrs={"class":"server-count"})[2].get_text().strip() sim_sgp = curr_status.find_all("div", attrs={"class":"server-count"})[3].get_text().strip() arc = curr_status.find_all("div", attrs={"class":"server-count"})[4].get_text().strip() pro = curr_status.find_all("div", attrs={"class":"server-count"})[5].get_text().strip() pro_arc = curr_status.find_all("div", attrs={"class":"server-count"})[6].get_text().strip() #서버 온오프 여부 sim1_sta = curr_status.find_all("div", attrs={"class":"server-status ONLINE"})[0].get_text().strip().replace("LINE", "") sim2_sta = curr_status.find_all("div", attrs={"class":"server-status ONLINE"})[1].get_text().strip().replace("LINE", "") sim_us_sta = curr_status.find_all("div", attrs={"class":"server-status ONLINE"})[2].get_text().strip().replace("LINE", "") sim_sgp_sta = curr_status.find_all("div", attrs={"class":"server-status ONLINE"})[3].get_text().strip().replace("LINE", "") arc_sta = curr_status.find_all("div", attrs={"class":"server-status ONLINE"})[4].get_text().strip().replace("LINE", "") pro_sta = curr_status.find_all("div", attrs={"class":"server-status ONLINE"})[5].get_text().strip().replace("LINE", "") pro_arc_sta = curr_status.find_all("div", attrs={"class":"server-status ONLINE"})[6].get_text().strip().replace("LINE", "") #서버 시간 curr_game_time = soup.find("span", attrs={"id":"game_time"}).get_text().strip() embed = discord.Embed(title = "[ETS2] TruckersMP 서버 현황", colour = 0x2EFEF7) embed.add_field(name = f'`[{sim1_sta}]` Simulation 1', value = 
f"{sim1}", inline = False) embed.add_field(name = f'`[{sim2_sta}]` Simulation 2', value = f"{sim2}", inline = False) embed.add_field(name = f'`[{sim_us_sta}]` [US] Simulation', value = f"{sim_us}", inline = False) embed.add_field(name = f'`[{sim_sgp_sta}]` [SGP] Simulation', value = f"{sim_sgp}", inline = False) embed.add_field(name = f'`[{arc_sta}]` Arcade', value = f"{arc}", inline = False) embed.add_field(name = f'`[{pro_sta}]` ProMods', value = f"{pro}", inline = False) embed.add_field(name = f'`[{pro_arc_sta}]` ProMods Arcade', value = f"{pro_arc}", inline = False) embed.set_footer(text=f"서버 시간: {curr_game_time}") await ctx.channel.send(embed = embed) @client.command(pass_context = True, aliases=['==T트래픽순위', '==TTR', '==t트래픽순위', '==ttr']) async def tmp_tmp_abc_traffic(ctx): headers = {'User-Agent':'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/83.0.4103.106 Whale/2.8.105.22 Safari/537.36'} url = "https://traffic.krashnz.com/" soup = create_soup(url, headers) #실시간 트래픽 순위 traffic_top = soup.find("ul", attrs={"class":"list-group mb-3"}) rank1 = traffic_top.find_all("div")[1].get_text().strip() rank2 = traffic_top.find_all("div")[2].get_text().strip() rank3 = traffic_top.find_all("div")[3].get_text().strip() rank4 = traffic_top.find_all("div")[4].get_text().strip() rank5 = traffic_top.find_all("div")[5].get_text().strip() g_set = soup.find("div", attrs={"class":"row text-center mb-2"}) g_player = g_set.find_all("span", attrs={"class":"stats-number"})[0].get_text().strip() g_time = g_set.find_all("span", attrs={"class":"stats-number"})[1].get_text().strip() embed = discord.Embed(title = "[ETS2] TruckersMP 실시간 트래픽 TOP5", colour = 0x2EFEF7) embed.add_field(name = f'{rank1}', value = "\n\u200b", inline = False) embed.add_field(name = f'{rank2}', value = "\n\u200b", inline = False) embed.add_field(name = f'{rank3}', value = "\n\u200b", inline = False) embed.add_field(name = f'{rank4}', value = "\n\u200b", inline = False) 
embed.add_field(name = f'{rank5}', value = f"\n{g_player} players tracked / {g_time} in-game time", inline = False) await ctx.channel.send(embed = embed) @client.command(pass_context = True, aliases=['==T프로필', '==TP', '==t프로필', '==tp']) async def tmp_tmp_abc_user_profile(ctx, arg): headers = {'User-Agent':'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/83.0.4103.106 Whale/2.8.105.22 Safari/537.36'} url = f"https://truckersmp.com/user/{arg}" soup = create_soup(url, headers) #플레이어 정보 user_status = soup.find("div", attrs={"class":"profile-bio"}) name = user_status.find_all("span")[0].get_text().strip() check = user_status.find_all("strong")[0].get_text() if check == "Also known as": steam = user_status.find_all("span")[3].get_text().strip().replace("Steam ID:", "") birt = user_status.find_all("span")[5].get_text().strip().replace("Member since:", "") bans = user_status.find_all("span")[6].get_text().strip().replace("Active bans:", "") else: steam = user_status.find_all("span")[2].get_text().strip().replace("Steam ID:", "") birt = user_status.find_all("span")[4].get_text().strip().replace("Member since:", "") bans = user_status.find_all("span")[5].get_text().strip().replace("Active bans:", "") vtc_check = soup.find_all("h2", attrs={"class":"panel-title heading-sm pull-left"})[2].get_text() if vtc_check == " VTC": vtc_find = soup.find_all("div", attrs={"class":"panel panel-profile"})[2] vtc_name = vtc_find.find("h5", attrs={"class":"text-center break-all"}).get_text().strip() else: vtc_name = "없음" #프로필 이미지 img = soup.find_all("div", attrs={"class": "col-md-3 md-margin-bottom-40"})[0] imgs = img.find("img", attrs={"class": "img-responsive profile-img margin-bottom-20 shadow-effect-1"}) prof_image = imgs.get("src") embed = discord.Embed(title = f"[TruckersMP] {arg}'s 프로필", colour = 0x2EFEF7) embed.add_field(name = 'Name', value = f"{name}", inline = False) embed.add_field(name = 'Steam ID', value = f"{steam}", inline = False) 
embed.add_field(name = 'Member since', value = f"{birt}", inline = False) embed.add_field(name = 'Active bans', value = f"{bans}", inline = False) embed.add_field(name = 'VTC', value = f"{vtc_name}", inline = False) embed.set_thumbnail(url=prof_image) await ctx.channel.send(embed = embed) @client.command(aliases=['==번역']) async def _translator_abc(ctx, arg, *, content): content = str(content) if arg[0] == '한': langso = "Korean" so = "ko" elif arg[0] == '영': langso = "English" so = "en" elif arg[0] == '일': langso = "Japanese" so = "ja" elif arg[0] == '중': langso = "Chinese" so = "zh-CN" else: pass if arg[1] == '한': langta = "Korean" ta = "ko" elif arg[1] == '영': langta = "English" ta = "en" elif arg[1] == '일': langta = "Japanese" ta = "ja" elif arg[1] == '중': langta = "Chinese" ta = "zh-CN" else: pass url = "https://openapi.naver.com/v1/papago/n2mt" #띄어쓰기 : split처리후 [1:]을 for문으로 붙인다. trsText = str(content) try: if len(trsText) == 1: await ctx.channel.send("단어 혹은 문장을 입력해주세요.") else: trsText = trsText[0:] combineword = "" for word in trsText: combineword += "" + word sourcetext = combineword.strip() combineword = quote(sourcetext) dataParmas = f"source={so}&target={ta}&text=" + combineword request = Request(url) request.add_header("X-Naver-Client-Id", client_id) request.add_header("X-Naver-Client-Secret", client_secret) response = urlopen(request, data=dataParmas.encode("utf-8")) responsedCode = response.getcode() if (responsedCode == 200): response_body = response.read() # response_body -> byte string : decode to utf-8 api_callResult = response_body.decode('utf-8') # JSON data will be printed as string type. 
So need to make it back to type JSON(like dictionary) api_callResult = json.loads(api_callResult) #번역 결과 translatedText = api_callResult['message']['result']["translatedText"] embed = discord.Embed(title=f"번역 ┃ {langso} → {langta}", description="", color=0x2e9fff) embed.add_field(name=f"{langso}", value=sourcetext, inline=False) embed.add_field(name=f"{langta}", value=translatedText, inline=False) embed.set_thumbnail(url="https://cdn.discordapp.com/attachments/740877681209507880/755471340227526706/papago_og.png") embed.set_footer(text="Provided by Naver Open API", icon_url='https://cdn.discordapp.com/attachments/740877681209507880/755471340227526706/papago_og.png') await ctx.channel.send(embed=embed) else: await ctx.channel.send("Error Code : " + responsedCode) except HTTPError as e: await ctx.channel.send("번역 실패. HTTP에러 발생.") @client.command(pass_context = True, aliases=['==유튜브']) async def _youtube_abc_search(ctx, * , arg): arg_title = str(arg) arg = str(arg).replace(" ", "%20") DEVELOPER_KEY = os.environ["DEVELOPER_KEY"] YOUTUBE_API_SERVICE_NAME="youtube" YOUTUBE_API_VERSION="v3" youtube = build(YOUTUBE_API_SERVICE_NAME,YOUTUBE_API_VERSION,developerKey=DEVELOPER_KEY) search_response = youtube.search().list( q = f"{arg_title}", order = "relevance", part = "snippet", maxResults = 6 ).execute() thumbnail_img = search_response['items'][1]['snippet']['thumbnails']['high']['url'] title1 = search_response['items'][1]['snippet']['title'].replace('&quot;', '"').replace("&#39;", "'") title2 = search_response['items'][2]['snippet']['title'].replace('&quot;', '"').replace("&#39;", "'") title3 = search_response['items'][3]['snippet']['title'].replace('&quot;', '"').replace("&#39;", "'") title4 = search_response['items'][4]['snippet']['title'].replace('&quot;', '"').replace("&#39;", "'") title5 = search_response['items'][5]['snippet']['title'].replace('&quot;', '"').replace("&#39;", "'") link = "https://www.youtube.com/watch?v=" link1 = link + 
search_response['items'][1]['id']['videoId'] link2 = link + search_response['items'][2]['id']['videoId'] link3 = link + search_response['items'][3]['id']['videoId'] link4 = link + search_response['items'][4]['id']['videoId'] link5 = link + search_response['items'][5]['id']['videoId'] url = f"https://www.youtube.com/results?search_query={arg}" embed = discord.Embed(title = f":movie_camera: {arg_title} 검색 결과", colour = 0xb30e11) embed.set_author(name = '더보기', url = url) embed.add_field(name = "\n\u200b", value = f'**1. [{title1}]({link1})**', inline = False) embed.add_field(name = "\n\u200b", value = f'**2. [{title2}]({link2})**', inline = False) embed.add_field(name = "\n\u200b", value = f'**3. [{title3}]({link3})**', inline = False) embed.add_field(name = "\n\u200b", value = f'**4. [{title4}]({link4})**', inline = False) embed.add_field(name = "\n\u200b", value = f'**5. [{title5}]({link5})**\n\u200b', inline = False) embed.set_thumbnail(url=thumbnail_img) embed.set_footer(text='Provided by Youtube API') await ctx.channel.send(embed = embed) access_token = os.environ["BOT_TOKEN"] client.run(access_token)
43.140399
374
0.659752
6,782
47,627
4.506488
0.143763
0.021987
0.032981
0.018912
0.491608
0.43749
0.358538
0.331577
0.30478
0.259759
0
0.032297
0.161358
47,627
1,103
375
43.17951
0.730985
0.058538
0
0.154501
0
0.014599
0.21004
0.015797
0
0
0.005343
0
0
1
0.03528
false
0.019465
0.051095
0.010949
0.13747
0.001217
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
cf0d1e9a030a45715261e6af31490bcb4b8a55b2
3,361
py
Python
tapioca_disqus/resource_mapping/forums.py
marctc/tapioca-disqus
a397d21ad1b6c69f7691015338e491d106e58da4
[ "MIT" ]
5
2015-08-13T10:21:13.000Z
2019-01-05T21:12:24.000Z
tapioca_disqus/resource_mapping/forums.py
marctc/tapioca-disqus
a397d21ad1b6c69f7691015338e491d106e58da4
[ "MIT" ]
3
2015-12-09T05:41:13.000Z
2016-03-25T17:34:45.000Z
tapioca_disqus/resource_mapping/forums.py
marctc/tapioca-disqus
a397d21ad1b6c69f7691015338e491d106e58da4
[ "MIT" ]
4
2015-12-09T05:41:18.000Z
2019-01-05T21:12:27.000Z
FORUMS_MAPPING = { 'forums_add_moderator': { 'resource': 'forums/addModerator.json', 'docs': 'https://disqus.com/api/docs/forums/addModerator/', 'methods': ['POST'], }, 'forums_create': { 'resource': 'forums/create.json', 'docs': 'https://disqus.com/api/docs/forums/create/', 'methods': ['POST'], }, 'forums_details': { 'resource': 'forums/details.json', 'docs': 'https://disqus.com/api/docs/forums/details/', 'methods': ['GET'], }, 'forums_fix_fav_icons_for_classified_forums': { 'resource': 'forums/fixFavIconsForClassifiedForums.json', 'docs': 'https://disqus.com/api/docs/forums/fixFavIconsForClassifiedForums/', 'methods': ['GET'], }, 'forums_follow': { 'resource': 'forums/follow.json', 'docs': 'https://disqus.com/api/docs/forums/follow/', 'methods': ['POST'], }, 'forums_generate_interesting_content': { 'resource': 'forums/generateInterestingContent.json', 'docs': 'https://disqus.com/api/docs/forums/generateInterestingContent/', 'methods': ['GET'], }, 'forums_interesting_forums': { 'resource': 'forums/interestingForums.json', 'docs': 'https://disqus.com/api/docs/forums/interestingForums/', 'methods': ['GET'], }, 'forums_list_categories': { 'resource': 'forums/listCategories.json', 'docs': 'https://disqus.com/api/docs/forums/listCategories/', 'methods': ['GET'], }, 'forums_list_followers': { 'resource': 'forums/listFollowers.json', 'docs': 'https://disqus.com/api/docs/forums/listFollowers/', 'methods': ['GET'], }, 'forums_list_moderators': { 'resource': 'forums/listModerators.json', 'docs': 'https://disqus.com/api/docs/forums/listModerators/', 'methods': ['GET'], }, 'forums_list_most_active_users': { 'resource': 'forums/listMostActiveUsers.json', 'docs': 'https://disqus.com/api/docs/forums/listMostActiveUsers/', 'methods': ['GET'], }, 'forums_list_most_liked_users': { 'resource': 'forums/listMostLikedUsers.json', 'docs': 'https://disqus.com/api/docs/forums/listMostLikedUsers/', 'methods': ['GET'], }, 'forums_list_posts': { 'resource': 'forums/listPosts.json', 'docs': 
'https://disqus.com/api/docs/forums/listPosts/', 'methods': ['GET'], }, 'forums_list_threads': { 'resource': 'forums/listThreads.json', 'docs': 'https://disqus.com/api/docs/forums/listThreads/', 'methods': ['GET'], }, 'forums_list_users': { 'resource': 'forums/listUsers.json', 'docs': 'https://disqus.com/api/docs/forums/listUsers/', 'methods': ['GET'], }, 'forums_remove_moderator': { 'resource': 'forums/removeModerator.json', 'docs': 'https://disqus.com/api/docs/forums/removeModerator/', 'methods': ['POST'], }, 'forums_unfollow': { 'resource': 'forums/unfollow.json', 'docs': 'https://disqus.com/api/docs/forums/unfollow/', 'methods': ['POST'], }, 'forums_update': { 'resource': 'forums/update.json', 'docs': 'https://disqus.com/api/docs/forums/update/', 'methods': ['POST'], }, }
36.139785
85
0.580184
312
3,361
6.11859
0.163462
0.132006
0.122577
0.179151
0.35516
0.330016
0.330016
0.330016
0
0
0
0
0.220768
3,361
92
86
36.532609
0.728904
0
0
0.195652
0
0
0.63493
0.181494
0
0
0
0
0
1
0
false
0
0
0
0
0
0
0
0
null
0
0
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
2
cf0dd9eac4b7a622418f1535040a8fa0ae6f0b9d
50,168
py
Python
experiment_extra.py
Shihab-Shahriar/500-miles
49fc9c6d037521f454da4bc02cccd62117c0ac5f
[ "MIT" ]
null
null
null
experiment_extra.py
Shihab-Shahriar/500-miles
49fc9c6d037521f454da4bc02cccd62117c0ac5f
[ "MIT" ]
null
null
null
experiment_extra.py
Shihab-Shahriar/500-miles
49fc9c6d037521f454da4bc02cccd62117c0ac5f
[ "MIT" ]
1
2018-10-03T21:17:27.000Z
2018-10-03T21:17:27.000Z
from __future__ import division, print_function import pickle import pdb import os import time from sklearn.cross_validation import StratifiedKFold from sklearn import svm from sklearn import metrics import gensim import random from learners import SK_SVM,SK_KNN,SK_LDA from tuner import DE_Tune_ML from model import PaperData from utility import study from results import results_process import numpy as np #import wget import zipfile from sklearn import neighbors from sklearn.neighbors import KNeighborsClassifier from sklearn import metrics import threading from threading import Barrier import timeit import multiprocessing from sklearn.discriminant_analysis import LinearDiscriminantAnalysis from sklearn.lda import LDA from sklearn.decomposition import NMF, LatentDirichletAllocation from sklearn.neighbors import NearestNeighbors from sklearn.cluster import KMeans from sklearn.cluster import AffinityPropagation import collections from multiprocessing import Queue import pandas as pd def tune_learner(learner, train_X, train_Y, tune_X, tune_Y, goal, target_class=None): """ :param learner: :param train_X: :param train_Y: :param tune_X: :param tune_Y: :param goal: :param target_class: :return: """ if not target_class: target_class = goal clf = learner(train_X, train_Y, tune_X, tune_Y, goal) tuner = DE_Tune_ML(clf, clf.get_param(), goal, target_class) return tuner.Tune() def load_vec(d, data, use_pkl=False, file_name=None): if use_pkl: if os.path.isfile(file_name): with open(file_name, "rb") as my_pickle: return pickle.load(my_pickle) else: # print("call get_document_vec") return d.get_document_vec(data, file_name) def print_results(clfs): file_name = time.strftime(os.path.sep.join([".", "results", "%Y%m%d_%H:%M:%S.txt"])) file_name = os.path.sep.join(["20171103.txt"]) content = "" for each in clfs: content += each.confusion print(content) with open(file_name, "w") as f: f.write(content) results_process.reports(file_name) def get_acc(cm): out = [] for i in range(4): 
out.append(cm[i][i] / 400) return out @study def run_tuning_SVM(word2vec_src, repeats=1, fold=10, tuning=True): """ :param word2vec_src:str, path of word2vec model :param repeats:int, number of repeats :param fold: int,number of folds :param tuning: boolean, tuning or not. :return: None """ print("# word2vec:", word2vec_src) word2vec_model = gensim.models.Word2Vec.load(word2vec_src) data = PaperData(word2vec=word2vec_model) train_pd = load_vec(data, data.train_data, file_name=False) print(train_pd) test_pd = load_vec(data, data.test_data, file_name=False) learner = [SK_SVM][0] goal = {0: "PD", 1: "PF", 2: "PREC", 3: "ACC", 4: "F", 5: "G", 6: "Macro_F", 7: "Micro_F"}[6] print(goal) F = {} clfs = [] start = timeit.default_timer() for i in range(repeats): # repeat n times here kf = StratifiedKFold(train_pd.loc[:, "LinkTypeId"].values, fold, shuffle=True) for train_index, tune_index in kf: print(train_pd) print(train_index) train_data = train_pd.ix[train_index] print(train_data) tune_data = train_pd.ix[tune_index] train_X = train_data.loc[:, "Output"].values train_Y = train_data.loc[:, "LinkTypeId"].values tune_X = tune_data.loc[:, "Output"].values tune_Y = tune_data.loc[:, "LinkTypeId"].values test_X = test_pd.loc[:, "Output"].values test_Y = test_pd.loc[:, "LinkTypeId"].values params, evaluation = tune_learner(learner, train_X, train_Y, tune_X, tune_Y, goal) if tuning else ({}, 0) clf = learner(train_X, train_Y, test_X, test_Y, goal) F = clf.learn(F, **params) clfs.append(clf) stop = timeit.default_timer() print("Model training time: ", stop - start) print_results(clfs) @study def run_tuning_KNN(word2vec_src, repeats=1, fold=10, tuning=True): """ :param word2vec_src:str, path of word2vec model :param repeats:int, number of repeats :param fold: int,number of folds :param tuning: boolean, tuning or not. 
:return: None """ print("# word2vec:", word2vec_src) word2vec_model = gensim.models.Word2Vec.load(word2vec_src) data = PaperData(word2vec=word2vec_model) train_pd = load_vec(data, data.train_data, file_name=False) test_pd = load_vec(data, data.test_data, file_name=False) learner = [SK_KNN][0] goal = {0: "PD", 1: "PF", 2: "PREC", 3: "ACC", 4: "F", 5: "G", 6: "Macro_F", 7: "Micro_F"}[6] F = {} clfs = [] start = timeit.default_timer() for i in range(repeats): # repeat n times here kf = StratifiedKFold(train_pd.loc[:, "LinkTypeId"].values, fold, shuffle=True) for train_index, tune_index in kf: train_data = train_pd.ix[train_index] tune_data = train_pd.ix[tune_index] train_X = train_data.loc[:, "Output"].values train_Y = train_data.loc[:, "LinkTypeId"].values tune_X = tune_data.loc[:, "Output"].values tune_Y = tune_data.loc[:, "LinkTypeId"].values test_X = test_pd.loc[:, "Output"].values test_Y = test_pd.loc[:, "LinkTypeId"].values params, evaluation = tune_learner(learner, train_X, train_Y, tune_X, tune_Y, goal) if tuning else ({}, 0) clf = learner(train_X, train_Y, test_X, test_Y, goal) F = clf.learn(F, **params) clfs.append(clf) stop = timeit.default_timer() print("Model training time: ", stop - start) print_results(clfs) @study def run_tuning_LDA(word2vec_src, repeats=1, fold=10, tuning=True): """ :param word2vec_src:str, path of word2vec model :param repeats:int, number of repeats :param fold: int,number of folds :param tuning: boolean, tuning or not. 
:return: None """ print("# word2vec:", word2vec_src) word2vec_model = gensim.models.Word2Vec.load(word2vec_src) data = PaperData(word2vec=word2vec_model) train_pd = load_vec(data, data.train_data, file_name=False) test_pd = load_vec(data, data.test_data, file_name=False) learner = [SK_LDA][0] goal = {0: "PD", 1: "PF", 2: "PREC", 3: "ACC", 4: "F", 5: "G", 6: "Macro_F", 7: "Micro_F"}[6] F = {} clfs = [] for i in range(repeats): # repeat n times here kf = StratifiedKFold(train_pd.loc[:, "LinkTypeId"].values, fold, shuffle=True) for train_index, tune_index in kf: print(train_index) train_data = train_pd.ix[train_index] print(train_data) tune_data = train_pd.ix[tune_index] train_X = train_data.loc[:, "Output"].values train_Y = train_data.loc[:, "LinkTypeId"].values tune_X = tune_data.loc[:, "Output"].values tune_Y = tune_data.loc[:, "LinkTypeId"].values test_X = test_pd.loc[:, "Output"].values test_Y = test_pd.loc[:, "LinkTypeId"].values params, evaluation = tune_learner(learner, train_X, train_Y, tune_X, tune_Y, goal) if tuning else ({}, 0) clf = learner(train_X, train_Y, test_X, test_Y, goal) F = clf.learn(F, **params) clfs.append(clf) print_results(clfs) @study def run_SVM_baseline(word2vec_src): """ Run SVM+word embedding experiment ! This is the baseline method. 
:return:None """ # Create a subplot with 1 row and 2 columns print("# word2vec:", word2vec_src) clf = svm.SVC(kernel="rbf", gamma=0.005) word2vec_model = gensim.models.Word2Vec.load(word2vec_src) data = PaperData(word2vec=word2vec_model) train_pd = load_vec(data, data.train_data, use_pkl=False) test_pd = load_vec(data, data.test_data, use_pkl=False) train_X = train_pd.loc[:, "Output"].tolist() train_Y = train_pd.loc[:, "LinkTypeId"].tolist() test_X = test_pd.loc[:, "Output"].tolist() test_Y = test_pd.loc[:, "LinkTypeId"].tolist() start = timeit.default_timer() clf.fit(train_X, train_Y) stop = timeit.default_timer() predicted = clf.predict(test_X) print(metrics.classification_report(test_Y, predicted, labels=["1", "2", "3", "4"], digits=3)) cm=metrics.confusion_matrix(test_Y, predicted, labels=["1", "2", "3", "4"]) print("accuracy ", get_acc(cm)) print("Model training time: ", stop - start) @study def run_LDA(word2vec_src): """ Run LDA+word embedding experiment ! This is the baseline method. 
:return:None """ # Create a subplot with 1 row and 2 columns print("# word2vec:", word2vec_src) clf = LDA(solver='lsqr', shrinkage='auto') word2vec_model = gensim.models.Word2Vec.load(word2vec_src) data = PaperData(word2vec=word2vec_model) train_pd = load_vec(data, data.train_data, use_pkl=False) test_pd = load_vec(data, data.test_data, use_pkl=False) train_X = train_pd.loc[:, "Output"].tolist() train_Y = train_pd.loc[:, "LinkTypeId"].tolist() test_X = test_pd.loc[:, "Output"].tolist() test_Y = test_pd.loc[:, "LinkTypeId"].tolist() start = timeit.default_timer() clf.fit(train_X, train_Y) stop = timeit.default_timer() predicted = clf.predict(test_X) print(metrics.classification_report(test_Y, predicted, labels=["1", "2", "3", "4"], digits=3)) cm=metrics.confusion_matrix(test_Y, predicted, labels=["1", "2", "3", "4"]) print("accuracy ", get_acc(cm)) print("Model training time: ", stop - start) @study def run_LinearDiscriminantAnalysis(word2vec_src): """ Run LinearDiscriminantAnalysis+word embedding experiment ! This is the baseline method. 
:return:None """ # Create a subplot with 1 row and 2 columns def select_n_components(var_ratio, goal_var: float) -> int: # Set initial variance explained so far total_variance = 0.0 # Set initial number of features n_components = 0 # For the explained variance of each feature: for explained_variance in var_ratio: # Add the explained variance to the total total_variance += explained_variance # Add one to the number of components n_components += 1 # If we reach our goal level of explained variance if total_variance >= goal_var: # End the loop break # Return the number of components return n_components print("# word2vec:", word2vec_src) clf = LinearDiscriminantAnalysis(n_components=None) word2vec_model = gensim.models.Word2Vec.load(word2vec_src) data = PaperData(word2vec=word2vec_model) train_pd = load_vec(data, data.train_data, use_pkl=False) test_pd = load_vec(data, data.test_data, use_pkl=False) train_X = train_pd.loc[:, "Output"].tolist() train_Y = train_pd.loc[:, "LinkTypeId"].tolist() test_X = test_pd.loc[:, "Output"].tolist() test_Y = test_pd.loc[:, "LinkTypeId"].tolist() start = timeit.default_timer() clf.fit(train_X, train_Y) lda_var_ratios = clf.explained_variance_ratio_ n_com = select_n_components(lda_var_ratios, 0.99) clf = LinearDiscriminantAnalysis(n_components=n_com) clf.fit(train_X, train_Y) stop = timeit.default_timer() predicted = clf.predict(test_X) print(metrics.classification_report(test_Y, predicted, labels=["1", "2", "3", "4"], digits=3)) cm=metrics.confusion_matrix(test_Y, predicted, labels=["1", "2", "3", "4"]) print("accuracy ", get_acc(cm)) print("Model training time: ", stop - start) @study def run_KNN(word2vec_src): """ Run KNN+word embedding experiment ! This is the baseline method. 
:return:None """ # Create a subplot with 1 row and 2 columns print("# word2vec:", word2vec_src) clf = neighbors.KNeighborsClassifier(n_neighbors = 5) word2vec_model = gensim.models.Word2Vec.load(word2vec_src) data = PaperData(word2vec=word2vec_model) train_pd = load_vec(data, data.train_data, use_pkl=False) test_pd = load_vec(data, data.test_data, use_pkl=False) train_X = train_pd.loc[:, "Output"].tolist() train_Y = train_pd.loc[:, "LinkTypeId"].tolist() test_X = test_pd.loc[:, "Output"].tolist() test_Y = test_pd.loc[:, "LinkTypeId"].tolist() start = timeit.default_timer() clf.fit(train_X, train_Y) stop = timeit.default_timer() predicted = clf.predict(test_X) print(metrics.classification_report(test_Y, predicted, labels=["1", "2", "3", "4"], digits=3)) cm=metrics.confusion_matrix(test_Y, predicted, labels=["1", "2", "3", "4"]) print("accuracy ", get_acc(cm)) print("Model training time: ", stop - start) @study def run_RNN(word2vec_src): """ Run KNN+word embedding experiment ! This is the baseline method. 
:return:None """ # Create a subplot with 1 row and 2 columns print("# word2vec:", word2vec_src) clf = neighbors.RadiusNeighborsClassifier(radius=5.0) word2vec_model = gensim.models.Word2Vec.load(word2vec_src) data = PaperData(word2vec=word2vec_model) train_pd = load_vec(data, data.train_data, use_pkl=False) test_pd = load_vec(data, data.test_data, use_pkl=False) train_X = train_pd.loc[:, "Output"].tolist() train_Y = train_pd.loc[:, "LinkTypeId"].tolist() test_X = test_pd.loc[:, "Output"].tolist() test_Y = test_pd.loc[:, "LinkTypeId"].tolist() start = timeit.default_timer() clf.fit(train_X, train_Y) stop = timeit.default_timer() predicted = clf.predict(test_X) print(metrics.classification_report(test_Y, predicted, labels=["1", "2", "3", "4"], digits=3)) cm=metrics.confusion_matrix(test_Y, predicted, labels=["1", "2", "3", "4"]) print("accuracy ", get_acc(cm)) print("Model training time: ", stop - start) @study def run_SVM_KNN(word2vec_src): """ Run SVM->KNN+word embedding experiment ! This is the baseline method. 
:return:None """ classX1 = [] classX2 = [] classX3 = [] classX4 = [] classY1 = [] classY2 = [] classY3 = [] classY4 = [] classTX1 = [] classTX2 = [] classTX3 = [] classTX4 = [] classTY1 = [] classTY2 = [] classTY3 = [] classTY4 = [] predicted_F = [] finalY = [] # Create a subplot with 1 row and 2 columns print("# word2vec:", word2vec_src) clf = svm.SVC(kernel="rbf", gamma=0.005) word2vec_model = gensim.models.Word2Vec.load(word2vec_src) data = PaperData(word2vec=word2vec_model) train_pd = load_vec(data, data.train_data, use_pkl=False) test_pd = load_vec(data, data.test_data, use_pkl=False) train_X = train_pd.loc[:, "Output"].tolist() train_Y = train_pd.loc[:, "LinkTypeId"].tolist() test_X = test_pd.loc[:, "Output"].tolist() test_Y = test_pd.loc[:, "LinkTypeId"].tolist() start = timeit.default_timer() clf.fit(train_X, train_Y) predicted = clf.predict(train_X) # predicted = pd.DataFrame(predicted) # train_X = pd.DataFrame(train_X) # t = predicted.index[predicted.loc[1] == 1].tolist() # print(predicted.axes) # print(t) for i in range(len(predicted)): if predicted[i] == '1': classX1.append(train_X[i]) classY1.append(train_Y[i]) elif predicted[i] == '2': classX2.append(train_X[i]) classY2.append(train_Y[i]) elif predicted[i] == '3': classX3.append(train_X[i]) classY3.append(train_Y[i]) elif predicted[i] == '4': classX4.append(train_X[i]) classY4.append(train_Y[i]) clf2 = neighbors.KNeighborsClassifier(n_neighbors = 5) clf3 = neighbors.KNeighborsClassifier(n_neighbors = 5) clf4 = neighbors.KNeighborsClassifier(n_neighbors = 5) clf5 = neighbors.KNeighborsClassifier(n_neighbors = 5) clf2.fit(classX1,classY1) clf3.fit(classX2,classY2) clf4.fit(classX3,classY3) clf5.fit(classX4,classY4) stop = timeit.default_timer() predicted0 = clf.predict(test_X) for i in range(len(predicted0)): if predicted0[i] == '1': classTX1.append(test_X[i]) classTY1.append(test_Y[i]) elif predicted0[i] == '2': classTX2.append(test_X[i]) classTY2.append(test_Y[i]) elif predicted0[i] == '3': 
classTX3.append(test_X[i]) classTY3.append(test_Y[i]) elif predicted0[i] == '4': classTX4.append(test_X[i]) classTY4.append(test_Y[i]) predicted1 = clf2.predict(classTX1) predicted2 = clf3.predict(classTX2) predicted3 = clf4.predict(classTX3) predicted4 = clf5.predict(classTX4) finalY = np.append(classTY1, classTY2) finalY = np.append(finalY, classTY3) finalY = np.append(finalY, classTY4) predicted_F = np.append(predicted1, predicted2) predicted_F = np.append(predicted_F, predicted3) predicted_F = np.append(predicted_F, predicted4) print("+++++++++++++++++++Original Predcition Result+++++++++++++++++++++++++") print(metrics.classification_report(test_Y, predicted0, labels=["1", "2", "3", "4"], digits=3)) #print("print classification data") cm=metrics.confusion_matrix(test_Y, predicted0, labels=["1", "2", "3", "4"]) print("accuracy ", get_acc(cm)) print("+++++++++++++++++++2nd Layer 1st Prediction Model+++++++++++++++++++++++++") print(metrics.classification_report(classTY1, predicted1, labels=["1", "2", "3", "4"], digits=3)) #print("print classification data") cm=metrics.confusion_matrix(classTY1, predicted1, labels=["1", "2", "3", "4"]) print("+++++++++++++++++++2nd Layer 2nd Prediction Model+++++++++++++++++++++++++") print(metrics.classification_report(classTY2, predicted2, labels=["1", "2", "3", "4"], digits=3)) #print("print classification data") cm=metrics.confusion_matrix(classTY2, predicted2, labels=["1", "2", "3", "4"]) print("+++++++++++++++++++2nd Layer 3rd Prediction Model+++++++++++++++++++++++++") print(metrics.classification_report(classTY3, predicted3, labels=["1", "2", "3", "4"], digits=3)) #print("print classification data") cm=metrics.confusion_matrix(classTY3, predicted3, labels=["1", "2", "3", "4"]) print("+++++++++++++++++++2nd Layer 4th Prediction Model+++++++++++++++++++++++++") print(metrics.classification_report(classTY4, predicted4, labels=["1", "2", "3", "4"], digits=3)) #print("print classification data") 
cm=metrics.confusion_matrix(classTY4, predicted4, labels=["1", "2", "3", "4"]) print("+++++++++++++++++++combined result+++++++++++++++++++++++++") print(metrics.classification_report(finalY, predicted_F, labels=["1", "2", "3", "4"], digits=3)) cm=metrics.confusion_matrix(finalY, predicted_F, labels=["1", "2", "3", "4"]) print("accuracy ", get_acc(cm)) print("Model training time: ", stop - start) @study def run_SVM_KNN_thread(word2vec_src): """ Run SVM->KNN+word embedding experiment ! This is the baseline method. :return:None """ classX1 = [] classX2 = [] classX3 = [] classX4 = [] classY1 = [] classY2 = [] classY3 = [] classY4 = [] classTX1 = [] classTX2 = [] classTX3 = [] classTX4 = [] classTY1 = [] classTY2 = [] classTY3 = [] classTY4 = [] TrainingSamplesX = [] TrainingSamplesY = [] models = [] predicted_F = [] finalY = [] # Create a subplot with 1 row and 2 columns print("# word2vec:", word2vec_src) clf = svm.SVC(kernel="rbf", gamma=0.005) word2vec_model = gensim.models.Word2Vec.load(word2vec_src) data = PaperData(word2vec=word2vec_model) train_pd = load_vec(data, data.train_data, use_pkl=False) test_pd = load_vec(data, data.test_data, use_pkl=False) train_X = train_pd.loc[:, "Output"].tolist() train_Y = train_pd.loc[:, "LinkTypeId"].tolist() test_X = test_pd.loc[:, "Output"].tolist() test_Y = test_pd.loc[:, "LinkTypeId"].tolist() start0 = timeit.default_timer() clf.fit(train_X, train_Y) stop0 = timeit.default_timer() predicted = clf.predict(train_X) for i in range(len(predicted)): if predicted[i] == '1': classX1.append(train_X[i]) classY1.append(train_Y[i]) elif predicted[i] == '2': classX2.append(train_X[i]) classY2.append(train_Y[i]) elif predicted[i] == '3': classX3.append(train_X[i]) classY3.append(train_Y[i]) elif predicted[i] == '4': classX4.append(train_X[i]) classY4.append(train_Y[i]) TrainingSamplesX.append(classX1) TrainingSamplesY.append(classY1) TrainingSamplesX.append(classX2) TrainingSamplesY.append(classY2) TrainingSamplesX.append(classX3) 
TrainingSamplesY.append(classY3) TrainingSamplesX.append(classX4) TrainingSamplesY.append(classY4) clf2 = neighbors.KNeighborsClassifier(n_neighbors = 5) clf3 = neighbors.KNeighborsClassifier(n_neighbors = 5) clf4 = neighbors.KNeighborsClassifier(n_neighbors = 5) clf5 = neighbors.KNeighborsClassifier(n_neighbors = 5) models.append(clf2) models.append(clf3) models.append(clf4) models.append(clf5) start1 = timeit.default_timer() for i in range((len(TrainingSamplesX))): t = threading.Thread(target= models[i].fit, args = [TrainingSamplesX[i],TrainingSamplesY[i]]) threads.append(t) t.start() stop1 = timeit.default_timer() predicted0 = clf.predict(test_X) for i in range(len(predicted0)): if predicted0[i] == '1': classTX1.append(test_X[i]) classTY1.append(test_Y[i]) elif predicted0[i] == '2': classTX2.append(test_X[i]) classTY2.append(test_Y[i]) elif predicted0[i] == '3': classTX3.append(test_X[i]) classTY3.append(test_Y[i]) elif predicted0[i] == '4': classTX4.append(test_X[i]) classTY4.append(test_Y[i]) predicted1 = clf2.predict(classTX1) predicted2 = clf3.predict(classTX2) predicted3 = clf4.predict(classTX3) predicted4 = clf5.predict(classTX4) finalY = np.append(classTY1, classTY2) finalY = np.append(finalY, classTY3) finalY = np.append(finalY, classTY4) predicted_F = np.append(predicted1, predicted2) predicted_F = np.append(predicted_F, predicted3) predicted_F = np.append(predicted_F, predicted4) print("+++++++++++++++++++Original Predcition Result+++++++++++++++++++++++++") print(metrics.classification_report(test_Y, predicted0, labels=["1", "2", "3", "4"], digits=3)) #print("print classification data") cm=metrics.confusion_matrix(test_Y, predicted0, labels=["1", "2", "3", "4"]) print("accuracy ", get_acc(cm)) print("+++++++++++++++++++2nd Layer 1st Prediction Model+++++++++++++++++++++++++") print(metrics.classification_report(classTY1, predicted1, labels=["1", "2", "3", "4"], digits=3)) #print("print classification data") cm=metrics.confusion_matrix(classTY1, 
predicted1, labels=["1", "2", "3", "4"]) print("+++++++++++++++++++2nd Layer 2nd Prediction Model+++++++++++++++++++++++++") print(metrics.classification_report(classTY2, predicted2, labels=["1", "2", "3", "4"], digits=3)) #print("print classification data") cm=metrics.confusion_matrix(classTY2, predicted2, labels=["1", "2", "3", "4"]) print("+++++++++++++++++++2nd Layer 3rd Prediction Model+++++++++++++++++++++++++") print(metrics.classification_report(classTY3, predicted3, labels=["1", "2", "3", "4"], digits=3)) #print("print classification data") cm=metrics.confusion_matrix(classTY3, predicted3, labels=["1", "2", "3", "4"]) print("+++++++++++++++++++2nd Layer 4th Prediction Model+++++++++++++++++++++++++") print(metrics.classification_report(classTY4, predicted4, labels=["1", "2", "3", "4"], digits=3)) #print("print classification data") cm=metrics.confusion_matrix(classTY4, predicted4, labels=["1", "2", "3", "4"]) print("+++++++++++++++++++combined result+++++++++++++++++++++++++") print(metrics.classification_report(finalY, predicted_F, labels=["1", "2", "3", "4"], digits=3)) cm=metrics.confusion_matrix(finalY, predicted_F, labels=["1", "2", "3", "4"]) print("accuracy ", get_acc(cm)) print("1st Model training time: ", (stop0 - start0)) print("layer 2 Models training time: ", (stop1 - start1)) print("Total Model training time: ", (stop1 - start0)) @study def run_KNN_SVM(word2vec_src): """ Run KNN -> SVM+word embedding experiment ! This is the baseline method. 
:return:None """ classX1 = [] classX2 = [] classX3 = [] classX4 = [] classY1 = [] classY2 = [] classY3 = [] classY4 = [] classTX1 = [] classTX2 = [] classTX3 = [] classTX4 = [] classTY1 = [] classTY2 = [] classTY3 = [] classTY4 = [] TrainingSamplesX = [] TrainingSamplesY = [] models = [] predicted_F = [] finalY = [] # Create a subplot with 1 row and 2 columns print("# word2vec:", word2vec_src) clf = neighbors.KNeighborsClassifier(n_neighbors = 5) word2vec_model = gensim.models.Word2Vec.load(word2vec_src) data = PaperData(word2vec=word2vec_model) train_pd = load_vec(data, data.train_data, use_pkl=False) test_pd = load_vec(data, data.test_data, use_pkl=False) train_X = train_pd.loc[:, "Output"].tolist() train_Y = train_pd.loc[:, "LinkTypeId"].tolist() test_X = test_pd.loc[:, "Output"].tolist() test_Y = test_pd.loc[:, "LinkTypeId"].tolist() #print("before train") start0 = timeit.default_timer() clf.fit(train_X, train_Y) stop0 = timeit.default_timer() predicted = clf.predict(train_X) for i in range(len(predicted)): if predicted[i] == '1': classX1.append(train_X[i]) classY1.append(train_Y[i]) elif predicted[i] == '2': classX2.append(train_X[i]) classY2.append(train_Y[i]) elif predicted[i] == '3': classX3.append(train_X[i]) classY3.append(train_Y[i]) elif predicted[i] == '4': classX4.append(train_X[i]) classY4.append(train_Y[i]) TrainingSamplesX.append(classX1) TrainingSamplesY.append(classY1) TrainingSamplesX.append(classX2) TrainingSamplesY.append(classY2) TrainingSamplesX.append(classX3) TrainingSamplesY.append(classY3) TrainingSamplesX.append(classX4) TrainingSamplesY.append(classY4) clf2 = svm.SVC(kernel="rbf", gamma=0.005) clf3 = svm.SVC(kernel="rbf", gamma=0.005) clf4 = svm.SVC(kernel="rbf", gamma=0.005) clf5 = svm.SVC(kernel="rbf", gamma=0.005) models.append(clf2) models.append(clf3) models.append(clf4) models.append(clf5) start1 = timeit.default_timer() for i in range((len(TrainingSamplesX))): t = threading.Thread(target= models[i].fit, args = 
[TrainingSamplesX[i],TrainingSamplesY[i]]) threads.append(t) t.start() stop1 = timeit.default_timer() predicted0 = clf.predict(test_X) for i in range(len(predicted0)): if predicted0[i] == '1': classTX1.append(test_X[i]) classTY1.append(test_Y[i]) elif predicted0[i] == '2': classTX2.append(test_X[i]) classTY2.append(test_Y[i]) elif predicted0[i] == '3': classTX3.append(test_X[i]) classTY3.append(test_Y[i]) elif predicted0[i] == '4': classTX4.append(test_X[i]) classTY4.append(test_Y[i]) predicted1 = clf2.predict(classTX1) predicted2 = clf3.predict(classTX2) predicted3 = clf4.predict(classTX3) predicted4 = clf5.predict(classTX4) finalY = np.append(classTY1, classTY2) finalY = np.append(finalY, classTY3) finalY = np.append(finalY, classTY4) predicted_F = np.append(predicted1, predicted2) predicted_F = np.append(predicted_F, predicted3) predicted_F = np.append(predicted_F, predicted4) print("+++++++++++++++++++Original Predcition Result+++++++++++++++++++++++++") print(metrics.classification_report(test_Y, predicted0, labels=["1", "2", "3", "4"], digits=3)) #print("print classification data") cm=metrics.confusion_matrix(test_Y, predicted0, labels=["1", "2", "3", "4"]) print("accuracy ", get_acc(cm)) print("+++++++++++++++++++2nd Layer 1st Prediction Model+++++++++++++++++++++++++") print(metrics.classification_report(classTY1, predicted1, labels=["1", "2", "3", "4"], digits=3)) #print("print classification data") cm=metrics.confusion_matrix(classTY1, predicted1, labels=["1", "2", "3", "4"]) print("+++++++++++++++++++2nd Layer 2nd Prediction Model+++++++++++++++++++++++++") print(metrics.classification_report(classTY2, predicted2, labels=["1", "2", "3", "4"], digits=3)) #print("print classification data") cm=metrics.confusion_matrix(classTY2, predicted2, labels=["1", "2", "3", "4"]) print("+++++++++++++++++++2nd Layer 3rd Prediction Model+++++++++++++++++++++++++") print(metrics.classification_report(classTY3, predicted3, labels=["1", "2", "3", "4"], digits=3)) 
#print("print classification data") cm=metrics.confusion_matrix(classTY3, predicted3, labels=["1", "2", "3", "4"]) print("+++++++++++++++++++2nd Layer 4th Prediction Model+++++++++++++++++++++++++") print(metrics.classification_report(classTY4, predicted4, labels=["1", "2", "3", "4"], digits=3)) #print("print classification data") cm=metrics.confusion_matrix(classTY4, predicted4, labels=["1", "2", "3", "4"]) print("+++++++++++++++++++combined result+++++++++++++++++++++++++") print(metrics.classification_report(finalY, predicted_F, labels=["1", "2", "3", "4"], digits=3)) cm=metrics.confusion_matrix(finalY, predicted_F, labels=["1", "2", "3", "4"]) print("accuracy ", get_acc(cm)) print("1st Model training time: ", (stop0 - start0)) print("layer 2 Models training time: ", (stop1 - start1)) print("Total Model training time: ", (stop1 - start0)) @study def run_KNN_KNN(word2vec_src): """ Run KNN+word embedding experiment ! This is the baseline method. :return:None """ classX1 = [] classX2 = [] classX3 = [] classX4 = [] classY1 = [] classY2 = [] classY3 = [] classY4 = [] classTX1 = [] classTX2 = [] classTX3 = [] classTX4 = [] classTY1 = [] classTY2 = [] classTY3 = [] classTY4 = [] TrainingSamplesX = [] TrainingSamplesY = [] models = [] predicted_F = [] finalY = [] # Create a subplot with 1 row and 2 columns print("# word2vec:", word2vec_src) #clf = svm.SVC(kernel="rbf", gamma=0.005) clf = neighbors.KNeighborsClassifier(n_neighbors = 5) #clf = KMeans(n_clusters=4, init='k-means++', max_iter=100, n_init=1) word2vec_model = gensim.models.Word2Vec.load(word2vec_src) data = PaperData(word2vec=word2vec_model) train_pd = load_vec(data, data.train_data, use_pkl=False) test_pd = load_vec(data, data.test_data, use_pkl=False) train_X = train_pd.loc[:, "Output"].tolist() train_Y = train_pd.loc[:, "LinkTypeId"].tolist() test_X = test_pd.loc[:, "Output"].tolist() test_Y = test_pd.loc[:, "LinkTypeId"].tolist() #print("before train") start0 = timeit.default_timer() clf.fit(train_X, 
train_Y) stop0 = timeit.default_timer() predicted = clf.predict(train_X) for i in range(len(predicted)): if predicted[i] == '1': classX1.append(train_X[i]) classY1.append(train_Y[i]) elif predicted[i] == '2': classX2.append(train_X[i]) classY2.append(train_Y[i]) elif predicted[i] == '3': classX3.append(train_X[i]) classY3.append(train_Y[i]) elif predicted[i] == '4': classX4.append(train_X[i]) classY4.append(train_Y[i]) #print(classX1) TrainingSamplesX.append(classX1) TrainingSamplesY.append(classY1) TrainingSamplesX.append(classX2) TrainingSamplesY.append(classY2) TrainingSamplesX.append(classX3) TrainingSamplesY.append(classY3) TrainingSamplesX.append(classX4) TrainingSamplesY.append(classY4) clf2 = neighbors.KNeighborsClassifier(n_neighbors = 10) clf3 = neighbors.KNeighborsClassifier(n_neighbors = 10) clf4 = neighbors.KNeighborsClassifier(n_neighbors = 10) clf5 = neighbors.KNeighborsClassifier(n_neighbors = 10) models.append(clf2) models.append(clf3) models.append(clf4) models.append(clf5) start1 = timeit.default_timer() for i in range((len(TrainingSamplesX))): t = threading.Thread(target= models[i].fit, args = [TrainingSamplesX[i],TrainingSamplesY[i]]) threads.append(t) t.start() stop1 = timeit.default_timer() predicted0 = clf.predict(test_X) for i in range(len(predicted0)): if predicted0[i] == '1': classTX1.append(test_X[i]) classTY1.append(test_Y[i]) elif predicted0[i] == '2': classTX2.append(test_X[i]) classTY2.append(test_Y[i]) elif predicted0[i] == '3': classTX3.append(test_X[i]) classTY3.append(test_Y[i]) elif predicted0[i] == '4': classTX4.append(test_X[i]) classTY4.append(test_Y[i]) predicted1 = clf2.predict(classTX1) predicted2 = clf3.predict(classTX2) predicted3 = clf4.predict(classTX3) predicted4 = clf5.predict(classTX4) finalY = np.append(classTY1, classTY2) finalY = np.append(finalY, classTY3) finalY = np.append(finalY, classTY4) predicted_F = np.append(predicted1, predicted2) predicted_F = np.append(predicted_F, predicted3) predicted_F = 
np.append(predicted_F, predicted4) print("+++++++++++++++++++Original Predcition Result+++++++++++++++++++++++++") print(metrics.classification_report(test_Y, predicted0, labels=["1", "2", "3", "4"], digits=3)) #print("print classification data") cm=metrics.confusion_matrix(test_Y, predicted0, labels=["1", "2", "3", "4"]) print("accuracy ", get_acc(cm)) print("+++++++++++++++++++2nd Layer 1st Prediction Model+++++++++++++++++++++++++") print(metrics.classification_report(classTY1, predicted1, labels=["1", "2", "3", "4"], digits=3)) #print("print classification data") cm=metrics.confusion_matrix(classTY1, predicted1, labels=["1", "2", "3", "4"]) print("+++++++++++++++++++2nd Layer 2nd Prediction Model+++++++++++++++++++++++++") print(metrics.classification_report(classTY2, predicted2, labels=["1", "2", "3", "4"], digits=3)) #print("print classification data") cm=metrics.confusion_matrix(classTY2, predicted2, labels=["1", "2", "3", "4"]) print("+++++++++++++++++++2nd Layer 3rd Prediction Model+++++++++++++++++++++++++") print(metrics.classification_report(classTY3, predicted3, labels=["1", "2", "3", "4"], digits=3)) #print("print classification data") cm=metrics.confusion_matrix(classTY3, predicted3, labels=["1", "2", "3", "4"]) print("+++++++++++++++++++2nd Layer 4th Prediction Model+++++++++++++++++++++++++") print(metrics.classification_report(classTY4, predicted4, labels=["1", "2", "3", "4"], digits=3)) #print("print classification data") cm=metrics.confusion_matrix(classTY4, predicted4, labels=["1", "2", "3", "4"]) print("+++++++++++++++++++combined result+++++++++++++++++++++++++") print(metrics.classification_report(finalY, predicted_F, labels=["1", "2", "3", "4"], digits=3)) cm=metrics.confusion_matrix(finalY, predicted_F, labels=["1", "2", "3", "4"]) print("accuracy ", get_acc(cm)) print("1st Model training time: ", (stop0 - start0)) print("layer 2 Models training time: ", (stop1 - start1)) print("Total Model training time: ", (stop1 - start0)) @study def 
run_KMeans_Wpair(word2vec_src): """ Run KMeans+word embedding experiment ! This is the baseline method. :return:None """ # Create a subplot with 1 row and 2 columns print("# word2vec:", word2vec_src) #clf = svm.SVC(kernel="rbf", gamma=0.005) #clf = neighbors.KNeighborsClassifier(n_neighbors = 5) clf = KMeans(n_clusters=4, init='k-means++', max_iter=100, n_init=1) word2vec_model = gensim.models.Word2Vec.load(word2vec_src) data = PaperData(word2vec=word2vec_model) train_pd = load_vec(data, data.train_data, use_pkl=False) test_pd = load_vec(data, data.test_data, use_pkl=False) train_X = train_pd.loc[:, "PostIdVec"].tolist() train_Y = train_pd.loc[:, "LinkTypeId"].tolist() train_X1 = train_pd.loc[:, "RelatedPostIdVec"].tolist() train_Y1 = train_pd.loc[:, "LinkTypeId"].tolist() np.append(train_X,train_X1) np.append(train_Y,train_Y1) test_X = test_pd.loc[:, "PostIdVec"].tolist() test_Y = test_pd.loc[:, "LinkTypeId"].tolist() clf.fit(train_X, train_Y) predicted = clf.predict(test_X) print(predicted) x = list(np.asarray(clf.labels_) + 1) print("Homogeneity: %0.3f" % metrics.homogeneity_score(train_Y, x)) print("Completeness: %0.3f" % metrics.completeness_score(train_Y, clf.labels_)) print("V-measure: %0.3f" % metrics.v_measure_score(train_Y, clf.labels_)) print("Adjusted Rand-Index: %.3f" % metrics.adjusted_rand_score(train_Y, clf.labels_)) print("Silhouette Coefficient: %0.3f" % metrics.silhouette_score(train_X, clf.labels_, sample_size=1000)) #################Katie's Code +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ # returns the svm model def run_SVM(word2vec_src, train_pd, queue): clf = svm.SVC(kernel="rbf", gamma=0.005) # word2vec_model = gensim.models.Word2Vec.load(word2vec_src) # data = PaperData(word2vec=word2vec_model) # print("Train data: " + str(train_pd.shape)) # if train_pd is None: train_pd = load_vec( # data, data.train_data, use_pkl=False) train_X = train_pd.loc[:, "Output"].tolist() train_Y = train_pd.loc[:, 
"LinkTypeId"].tolist() start = timeit.default_timer() clf.fit(train_X, train_Y) stop = timeit.default_timer() print("SVM Model Train Time", (stop-start)) queue.put(clf) return clf def run_KNN_clustering(word2vec_src, train_pd, queue): print("# word2vec:", word2vec_src) clf = neighbors.KNeighborsClassifier(n_neighbors = 10) # word2vec_model = gensim.models.Word2Vec.load(word2vec_src) # data = PaperData(word2vec=word2vec_model) # print("Train data: " + str(train_pd.shape)) # if train_pd is None: train_pd = load_vec( # data, data.train_data, use_pkl=False) train_X = train_pd.loc[:, "Output"].tolist() train_Y = train_pd.loc[:, "LinkTypeId"].tolist() start = timeit.default_timer() clf.fit(train_X, train_Y) stop = timeit.default_timer() print("SVM Model Train Time", (stop-start)) queue.put(clf) return clf @study def run_tuning_SVM_C(word2vec_src,train_pd_c,queue, repeats=1, fold=10, tuning=True): """ :param word2vec_src:str, path of word2vec model :param repeats:int, number of repeats :param fold: int,number of folds :param tuning: boolean, tuning or not. 
:return: None """ print("# word2vec:", word2vec_src) word2vec_model = gensim.models.Word2Vec.load(word2vec_src) data = PaperData(word2vec=word2vec_model) train_pd_c = train_pd_c.reset_index() train_pd = train_pd_c test_pd = load_vec(data, data.test_data, file_name=False) learner = [SK_SVM][0] goal = {0: "PD", 1: "PF", 2: "PREC", 3: "ACC", 4: "F", 5: "G", 6: "Macro_F", 7: "Micro_F"}[6] print(goal) F = {} clfs = [] for i in range(repeats): # repeat n times here kf = StratifiedKFold(train_pd.loc[:, "LinkTypeId"].values, fold, shuffle=True) for train_index, tune_index in kf: print(train_pd) train_data = train_pd.ix[train_index] print(train_index) print(train_data) tune_data = train_pd.ix[tune_index] train_X = train_data.loc[:, "Output"].values print(train_X) train_Y = train_data.loc[:, "LinkTypeId"].values print(train_Y) tune_X = tune_data.loc[:, "Output"].values tune_Y = tune_data.loc[:, "LinkTypeId"].values test_X = test_pd.loc[:, "Output"].values test_Y = test_pd.loc[:, "LinkTypeId"].values params, evaluation = tune_learner(learner, train_X, train_Y, tune_X, tune_Y, goal) if tuning else ({}, 0) clf = learner(train_X, train_Y, test_X, test_Y, goal) F = clf.learn(F, **params) clfs.append(clf) queue.put(clfs) print_results(clfs) # parses and returns a given svm in the format of dictionary - # [class](precision, recall, f1score, support) def results_SVM(clf, test_X, test_Y): predicted = clf.predict(test_X) # labels: ["Duplicates", "DirectLink","IndirectLink", "Isolated"] report_gen = metrics.classification_report( test_Y, predicted, labels=["1", "2", "3", "4"], digits=3) parsed_report = parse_classification_report(report_gen) return parsed_report #cm=metrics.confusion_matrix(test_Y, predicted, labels=["1", "2", "3", "4"]) #print("accuracy ", get_acc(cm) def total_summary(result_set, num_rows, start0,start1,stop0,stop1): weightedAvgs = [0, 0, 0] for l in result_set: avg_list = l['avg'] for i in range(3): support_count = avg_list[3] weightedAvgs[i] += (avg_list[i] * 
support_count)/num_rows result = {} result['precision'] = weightedAvgs[0] result['recall'] = weightedAvgs[1] result['f1'] = weightedAvgs[2] print(result) print("1st Model training time: ", (stop0 - start0)) print("layer 2 Models training time: ", (stop1 - start1)) print("Total Model training time: ", (stop1 - start0)) def run_kmeans(word2vec_src): print("# word2vec:", word2vec_src) word2vec_model = gensim.models.Word2Vec.load(word2vec_src) data = PaperData(word2vec=word2vec_model) train_pd = load_vec(data, data.train_data, use_pkl=False) test_pd = load_vec(data, data.test_data, use_pkl=False) train_X = train_pd.loc[:, "Output"].tolist() queue = Queue() numClusters = optimalK(train_X) #numClusters = 5 print("Found optimal k: " + str(numClusters)) clf = KMeans(n_clusters=numClusters, init='k-means++', max_iter=200, n_init=1) start0 = timeit.default_timer() clf.fit(train_X) stop0 = timeit.default_timer() svm_models = [] # maintain a list of svms s1 = timeit.default_timer() data.train_data['clabel'] = clf.labels_ s2 = timeit.default_timer() print("Inter - ", (s2-s1)) start1 = timeit.default_timer() #b = Barrier(numClusters-1) for l in range(numClusters): cluster = data.train_data.loc[data.train_data['clabel'] == l] t = threading.Thread(target=run_tuning_SVM_C, args = [word2vec_src,cluster,queue]) threads.append(t) t.start() response = queue.get() svm_models.append(response) #b.wait() t.join() stop1 = timeit.default_timer() svm_results = [] # maintain a list of svm results test_X = test_pd.loc[:, "Output"].tolist() predicted = clf.predict(test_X) data.test_data['clabel'] = predicted for l in range(numClusters): #print("Label " + str(l)) cluster = data.test_data.loc[data.test_data['clabel'] == l] svm_model = svm_models[l] cluster_X = cluster.loc[:, "Output"].tolist() cluster_Y = cluster.loc[:, "LinkTypeId"].tolist() svm_results.append(results_SVM(svm_model, cluster_X, cluster_Y))# store all the SVM result report in a dictionary # call the helper method to summarize the 
svm results total_summary(svm_results, test_pd.shape[0],start0,start1,stop0,stop1) # Source: https://anaconda.org/milesgranger/gap-statistic/notebook def optimalK(data, nrefs=3, maxClusters=15): """ Calculates KMeans optimal K using Gap Statistic from Tibshirani, Walther, Hastie Params: data: ndarry of shape (n_samples, n_features) nrefs: number of sample reference datasets to create maxClusters: Maximum number of clusters to test for Returns: (gaps, optimalK) """ gaps = np.zeros((len(range(1, maxClusters)),)) resultsdf = pd.DataFrame({'clusterCount': [], 'gap': []}) for gap_index, k in enumerate(range(1, maxClusters)): # Holder for reference dispersion results refDisps = np.zeros(nrefs) # For n references, generate random sample and perform kmeans getting resulting dispersion of each loop for i in range(nrefs): # Create new random reference set # randomReference = np.random.random_sample(size=data.shape) # Fit to it km = KMeans(n_clusters=k, init='k-means++', max_iter=200, n_init=1) km.fit(data) refDisp = km.inertia_ refDisps[i] = refDisp # Fit cluster to original data and create dispersion km = KMeans(k) km.fit(data) origDisp = km.inertia_ # print(str(i+1) + ": " + str(origDisp)) # Calculate gap statistic gap = np.log(np.mean(refDisps)) - np.log(origDisp) # Assign this loop's gap statistic to gaps gaps[gap_index] = gap resultsdf = resultsdf.append( {'clusterCount': k, 'gap': gap}, ignore_index=True) # return (gaps.argmax() + 1, resultsdf) # Plus 1 because index of 0 means 1 cluster is optimal, index 2 = 3 clusters are optimal return gaps.argmax() # Not used, but wanted to put this code somewhere def results_kmeans(clf, train_X, train_Y, test_X, test_Y): predicted = clf.predict(test_X) print("Homogeneity: %0.3f" % metrics.homogeneity_score(train_Y, clf.labels_)) print("Completeness: %0.3f" % metrics.completeness_score(train_Y, clf.labels_)) print("V-measure: %0.3f" % metrics.v_measure_score(train_Y, clf.labels_)) print("Adjusted Rand-Index: %.3f" % 
metrics.adjusted_rand_score(train_Y, clf.labels_)) print("Silhouette Coefficient: %0.3f" % metrics.silhouette_score(train_X, clf.labels_, sample_size=1000)) """ Parse a sklearn classification report into a dict keyed by class name and containing a tuple (precision, recall, fscore, support) for each class Reference: https://gist.github.com/julienr/6b9b9a03bd8224db7b4f """ def parse_classification_report(clfreport): lines = clfreport.split('\n') # Remove empty lines lines = list(filter(lambda l: not len(l.strip()) == 0, lines)) # Starts with a header, then score for each class and finally an average header = lines[0] cls_lines = lines[1:-1] avg_line = lines[-1] assert header.split() == ['precision', 'recall', 'f1-score', 'support'] assert avg_line.split()[0] == 'avg' # class names can have spaces - figure the width of the class field # using indentation of the precision header cls_field_width = len(header) - len(header.lstrip()) # Now, collect all the class names and score in a dict def parse_line(l): """Parse a line of classification_report""" cls_name = l[:cls_field_width].strip() precision, recall, fscore, support = l[cls_field_width:].split() precision = float(precision) recall = float(recall) fscore = float(fscore) support = int(support) return (cls_name, precision, recall, fscore, support) data = collections.OrderedDict() for l in cls_lines: ret = parse_line(l) cls_name = ret[0] scores = ret[1:] data[cls_name] = scores data['avg'] = parse_line(avg_line)[1:] # average return data #################Katie's Code +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ def prepare_word2vec(): print("Downloading pretrained word2vec models") url = "https://zenodo.org/record/807727/files/word2vecs_models.zip" file_name = wget.download(url) with zipfile.ZipFile(file_name, "r") as zip_ref: zip_ref.extractall() if __name__ == "__main__": word_src = "word2vecs_models" threads = [] if not os.path.exists(word_src): prepare_word2vec() elif 
len(os.listdir(word_src)) == 0: os.rmdir(word_src) prepare_word2vec() for x in range(1): random.seed(x) np.random.seed(x) myword2vecs = [os.path.join(word_src, i) for i in os.listdir(word_src) if "syn" not in i] # t = threading.Thread(target=run_tuning_SVM_KNN, args = [myword2vecs[x]]) # threads.append(t) # t.start() run_SVM_baseline(myword2vecs[x]) #run_SVM_KNN_thread(myword2vecs[x]) #run_LinearDiscriminantAnalysis(myword2vecs[x]) #run_KNN(myword2vecs[x]) #run_SVM_KNN(myword2vecs[x]) #run_KMeans_Wpair(myword2vecs[x]) #run_kmeans(myword2vecs[x]) #run_KNN_SVM(myword2vecs[x]) #run_KNN_KNN(myword2vecs[x]) #Srun_LDA(myword2vecs[x]) #run_RNN(myword2vecs[x]) #print("Run completed for baseline model--------------------------------------------------") #run_tuning_SVM(myword2vecs[x]) #run_tuning_LDA(myword2vecs[x]) #run_tuning_KNN(myword2vecs[x]) #print("Run completed for DE model--------------------------------------------------")
38.296183
131
0.62514
6,349
50,168
4.77335
0.074815
0.013859
0.015838
0.017818
0.771167
0.754207
0.750808
0.742163
0.738138
0.725896
0
0.030163
0.212287
50,168
1,310
132
38.296183
0.736728
0.131917
0
0.777126
0
0
0.096464
0.030205
0
0
0
0
0.001955
1
0.028348
false
0
0.032258
0
0.071359
0.135875
0
0
0
null
0
0
0
0
1
1
1
1
1
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
6
cf0de3ba9e20ca39a57f3acbae18add6e56df71b
70
py
Python
core/nn/__init__.py
achaiah/awesome-semantic-segmentation-pytorch
4f945a1989ae8b1bb6b24f1214fa84a7ca8c8e07
[ "Apache-2.0" ]
1
2019-09-09T16:58:48.000Z
2019-09-09T16:58:48.000Z
core/nn/__init__.py
achaiah/awesome-semantic-segmentation-pytorch
4f945a1989ae8b1bb6b24f1214fa84a7ca8c8e07
[ "Apache-2.0" ]
null
null
null
core/nn/__init__.py
achaiah/awesome-semantic-segmentation-pytorch
4f945a1989ae8b1bb6b24f1214fa84a7ca8c8e07
[ "Apache-2.0" ]
1
2019-12-04T03:06:07.000Z
2019-12-04T03:06:07.000Z
"""Seg NN Modules""" from .sync_bn.syncbn import * from .loss import *
23.333333
29
0.7
11
70
4.363636
0.818182
0
0
0
0
0
0
0
0
0
0
0
0.142857
70
3
30
23.333333
0.8
0.2
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
5
cf0ef43a8bc52fd3f88dd05dc9e8f4a26b23551b
760
py
Python
deploy.py
ksksksks-dev/Solidity-Demo
572f26efdfcaeb8721cf9f98c08205dd344848b3
[ "MIT" ]
null
null
null
deploy.py
ksksksks-dev/Solidity-Demo
572f26efdfcaeb8721cf9f98c08205dd344848b3
[ "MIT" ]
null
null
null
deploy.py
ksksksks-dev/Solidity-Demo
572f26efdfcaeb8721cf9f98c08205dd344848b3
[ "MIT" ]
1
2021-10-02T07:23:28.000Z
2021-10-02T07:23:28.000Z
import json import solcx from solcx import compile_standard # solcx.install_solc() with open("./SimpleStorage.sol", "r") as file: simple_storage_file = file.read() compiled_sol = compile_standard( { "language": "Solidity", "sources": {"SimpleStorage.sol": {"content": simple_storage_file}}, "settings": { "outputSelection": { "*": {"*": ["abi", "metadata", "evm.bytecode", "evm.sourceMap"]} } }, }, ) with open("./compiled_code.json", "w") as file: json.dump(compiled_sol, file) bytecode = compiled_sol["contracts"]["SimpleStorage.sol"]["SimpleStorage"]["evm"][ "bytecode" ]["object"] abi = compiled_sol["contracts"]["SimpleStorage.sol"]["SimpleStorage"]["abi"]
25.333333
82
0.610526
76
760
5.947368
0.460526
0.141593
0.075221
0.146018
0.216814
0.216814
0
0
0
0
0
0
0.203947
760
29
83
26.206897
0.747107
0.026316
0
0
0
0
0.334688
0
0
0
0
0
0
1
0
false
0
0.136364
0
0.136364
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
cf0f43f9858fd6194f42e3f825cc15ff13ea7eeb
1,433
py
Python
solutions/036.valid-sudoku/valid-sudoku.py
wangsongiam/leetcode
96ff21bca1871816ae51fccb1fa13587b378dc50
[ "MIT" ]
3
2018-11-25T15:19:57.000Z
2019-09-28T03:01:11.000Z
solutions/036.valid-sudoku/valid-sudoku.py
casprwang/leetcode
96ff21bca1871816ae51fccb1fa13587b378dc50
[ "MIT" ]
null
null
null
solutions/036.valid-sudoku/valid-sudoku.py
casprwang/leetcode
96ff21bca1871816ae51fccb1fa13587b378dc50
[ "MIT" ]
3
2018-02-11T20:23:44.000Z
2020-06-05T15:39:56.000Z
class Solution: def isValidSudoku(self, board): """ :type board: List[List[str]] :rtype: bool 00 01 02 10 11 12 20 21 22 divide 3 : 0 mod 3 : < 3 m = { 'row0-8' : set() 'col0-8' 'div3 + mod3' } """ m = {} # row for i in range(9): for j in range(9): cur = board[i][j] if cur == '.': continue sym_row = 'r' + str(i) if sym_row in m: if cur in m[sym_row]: return False m[sym_row].add(cur) else: m[sym_row] = set() m[sym_row].add(cur) sym_col = 'c' + str(j) if sym_col in m: if cur in m[sym_col]: return False m[sym_col].add(cur) else: m[sym_col] = set() m[sym_col].add(cur) sym_box = str(int(i/3)) + str(int(j/3)) if sym_box in m: if cur in m[sym_box]: return False m[sym_box].add(cur) else: m[sym_box] = set() m[sym_box].add(cur) return True
24.288136
55
0.328681
161
1,433
2.813665
0.322981
0.10596
0.06181
0.05298
0.317881
0.092715
0.092715
0
0
0
0
0.052202
0.572226
1,433
58
56
24.706897
0.686786
0.114445
0
0.363636
0
0
0.002593
0
0
0
0
0
0
1
0.030303
false
0
0
0
0.181818
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
cf10b831b724d9102e64bddbd11566c602b17ffc
2,185
py
Python
test_clouddb/test_instance.py
adregner/python-clouddb
6c77261a0e9cda221980c9240c7fffc93a78f7f7
[ "X11" ]
1
2018-05-21T23:09:36.000Z
2018-05-21T23:09:36.000Z
test_clouddb/test_instance.py
adregner/python-clouddb
6c77261a0e9cda221980c9240c7fffc93a78f7f7
[ "X11" ]
null
null
null
test_clouddb/test_instance.py
adregner/python-clouddb
6c77261a0e9cda221980c9240c7fffc93a78f7f7
[ "X11" ]
null
null
null
"""Primary testing suite for clouddb.models.instance. This code is licensed under the MIT license. See COPYING for more details.""" import time import unittest import clouddb import test_clouddb CLOUDDB_TEST_INSTANCE_OBJECT = None CLOUDDB_TEST_BASELINE_INSTANCE_COUNT = None CLOUDDB_TEST_INSTANCE_NAME = "testsuite-ci-%d" % time.time() class InstanceBaseline(test_clouddb.BaseTestCase): def test_instance_list_baseline(self): instances = self.raxdb.instances() self.assertIsInstance(instances, list) test_clouddb.test_instance.CLOUDDB_TEST_BASELINE_INSTANCE_COUNT = len(instances) class InstanceCreate(test_clouddb.BaseTestCase): def test_create_instance(self): test_clouddb.test_instance.CLOUDDB_TEST_INSTANCE_OBJECT = \ self.raxdb.create_instance(CLOUDDB_TEST_INSTANCE_NAME, 1, 1, wait=True) self.assertIsInstance(test_clouddb.test_instance.CLOUDDB_TEST_INSTANCE_OBJECT, clouddb.models.instance.Instance) class InstanceListGet(test_clouddb.BaseTestCase): def test_instance_list(self): instances = self.raxdb.instances() self.assertIsInstance(instances, list) self.assertEqual(len(instances), test_clouddb.test_instance.CLOUDDB_TEST_BASELINE_INSTANCE_COUNT + 1) self.assertIsInstance(instances[-1], clouddb.models.instance.Instance) class InstanceDestroy(test_clouddb.BaseTestCase): def test_instance_remove(self): test_clouddb.test_instance.CLOUDDB_TEST_INSTANCE_OBJECT.delete(wait=True) class InstanceListFinal(test_clouddb.BaseTestCase): def test_instance_list_baseline_again(self): instances = self.raxdb.instances() self.assertEqual(len(instances), test_clouddb.test_instance.CLOUDDB_TEST_BASELINE_INSTANCE_COUNT) def suite(): suite = unittest.TestSuite() suite.addTest(unittest.makeSuite(InstanceBaseline)) suite.addTest(unittest.makeSuite(InstanceCreate)) suite.addTest(unittest.makeSuite(InstanceListGet)) suite.addTest(unittest.makeSuite(InstanceDestroy)) suite.addTest(unittest.makeSuite(InstanceListFinal)) return suite if __name__ == "__main__": unittest.main()
37.672414
88
0.769794
250
2,185
6.432
0.232
0.109453
0.141791
0.085821
0.522388
0.441542
0.398632
0.372512
0.280473
0.10199
0
0.002142
0.145538
2,185
57
89
38.333333
0.859132
0.058124
0
0.162791
0
0
0.011214
0
0
0
0
0
0.139535
1
0.139535
false
0
0.093023
0
0.372093
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
cf12d221f01553d46a5821f0b5720d8d94341b9e
3,327
py
Python
examples/tensorflow/nlp/bert_large_squad/tune_squad.py
kevinintel/neural-compressor
b57645566aeff8d3c18dc49d2739a583c072f940
[ "Apache-2.0" ]
100
2020-12-01T02:40:12.000Z
2021-09-09T08:14:22.000Z
examples/tensorflow/nlp/bert_large_squad/tune_squad.py
kevinintel/neural-compressor
b57645566aeff8d3c18dc49d2739a583c072f940
[ "Apache-2.0" ]
25
2021-01-05T00:16:17.000Z
2021-09-10T03:24:01.000Z
examples/tensorflow/nlp/bert_large_squad/tune_squad.py
kevinintel/neural-compressor
b57645566aeff8d3c18dc49d2739a583c072f940
[ "Apache-2.0" ]
25
2020-12-01T19:07:08.000Z
2021-08-30T14:20:07.000Z
#!/usr/bin/env python # coding=utf-8 # Copyright 2018 The Google AI Language Team Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Run BERT on SQuAD 1.1 and SQuAD 2.0.""" import tensorflow as tf import numpy as np flags = tf.compat.v1.flags FLAGS = flags.FLAGS ## Required parameters flags.DEFINE_string( 'input_model', None, 'Run inference with specified pb graph.') flags.DEFINE_string( 'output_model', None, 'The output model of the quantized model.') flags.DEFINE_string( 'mode', 'performance', 'define benchmark mode for accuracy or performance') flags.DEFINE_bool( 'tune', False, 'whether to tune the model') flags.DEFINE_bool( 'benchmark', False, 'whether to benchmark the model') flags.DEFINE_string( 'config', 'bert.yaml', 'yaml configuration of the model') flags.DEFINE_bool( 'strip_iterator', False, 'whether to strip the iterator of the model') def strip_iterator(graph_def): from neural_compressor.adaptor.tf_utils.util import strip_unused_nodes input_node_names = ['input_ids', 'input_mask', 'segment_ids'] output_node_names = ['unstack'] # create the placeholder and merge with the graph with tf.compat.v1.Graph().as_default() as g: input_ids = tf.compat.v1.placeholder(tf.int32, shape=(None,384), name="input_ids") input_mask = tf.compat.v1.placeholder(tf.int32, shape=(None,384), name="input_mask") segment_ids = tf.compat.v1.placeholder(tf.int32, shape=(None,384), name="segment_ids") tf.import_graph_def(graph_def, name='') graph_def = g.as_graph_def() # change the input 
from iterator to placeholder for node in graph_def.node: for idx, in_tensor in enumerate(node.input): if 'IteratorGetNext:0' == in_tensor or 'IteratorGetNext' == in_tensor: node.input[idx] = 'input_ids' if 'IteratorGetNext:1' in in_tensor: node.input[idx] = 'input_mask' if 'IteratorGetNext:2' in in_tensor: node.input[idx] = 'segment_ids' graph_def = strip_unused_nodes(graph_def, input_node_names, output_node_names) return graph_def def main(_): tf.compat.v1.logging.set_verbosity(tf.compat.v1.logging.INFO) if FLAGS.benchmark: from neural_compressor.experimental import Benchmark evaluator = Benchmark(FLAGS.config) evaluator.model = FLAGS.input_model evaluator(FLAGS.mode) elif FLAGS.tune: from neural_compressor.experimental import Quantization quantizer = Quantization(FLAGS.config) quantizer.model = FLAGS.input_model q_model = quantizer() if FLAGS.strip_iterator: q_model.graph_def = strip_iterator(q_model.graph_def) q_model.save(FLAGS.output_model) if __name__ == "__main__": tf.compat.v1.app.run()
36.56044
94
0.703937
468
3,327
4.839744
0.326923
0.038852
0.03532
0.025166
0.175717
0.121854
0.065342
0.065342
0.065342
0.065342
0
0.014574
0.195672
3,327
90
95
36.966667
0.831839
0.227833
0
0.127273
0
0
0.198821
0
0
0
0
0
0
1
0.036364
false
0
0.109091
0
0.163636
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
cf1379541ce40bdf7c286012870b15d463d7defa
58
py
Python
gfxlcd/driver/ad7843/__init__.py
bkosciow/gfxlcd
953d013abb9b695c2226b348093cc64391a01f6c
[ "MIT" ]
12
2018-01-31T18:43:23.000Z
2021-10-06T10:23:05.000Z
gfxlcd/driver/ad7843/__init__.py
bkosciow/gfxlcd
953d013abb9b695c2226b348093cc64391a01f6c
[ "MIT" ]
4
2018-04-25T15:15:16.000Z
2021-03-21T09:21:50.000Z
gfxlcd/driver/ad7843/__init__.py
bkosciow/gfxlcd
953d013abb9b695c2226b348093cc64391a01f6c
[ "MIT" ]
4
2018-03-15T09:12:09.000Z
2021-03-19T20:07:33.000Z
"""driver/ad7843 module""" __author__ = 'Bartosz Kosciow'
19.333333
30
0.724138
6
58
6.333333
1
0
0
0
0
0
0
0
0
0
0
0.076923
0.103448
58
2
31
29
0.653846
0.344828
0
0
0
0
0.46875
0
0
0
0
0
0
1
0
false
0
0
0
0
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
2
cf144bab411bff8503d65becf1b966fa838b391b
286
py
Python
utils/targetTools.py
brzx/pydataloader
005c347b8fd9aca0a35ecf8eccce0a35e7e6da52
[ "BSD-2-Clause" ]
null
null
null
utils/targetTools.py
brzx/pydataloader
005c347b8fd9aca0a35ecf8eccce0a35e7e6da52
[ "BSD-2-Clause" ]
null
null
null
utils/targetTools.py
brzx/pydataloader
005c347b8fd9aca0a35ecf8eccce0a35e7e6da52
[ "BSD-2-Clause" ]
null
null
null
#!/usr/bin/python # -*- coding: utf-8 -*- import abc class TargetTools(): __metaclass__ = abc.ABCMeta @abc.abstractmethod def getConnection(self, username, password, url): pass @abc.abstractmethod def validTarget(self, target): pass
19.066667
54
0.611888
29
286
5.896552
0.758621
0.19883
0.233918
0
0
0
0
0
0
0
0
0.004808
0.272727
286
15
55
19.066667
0.817308
0.132867
0
0.444444
0
0
0
0
0
0
0
0
0
1
0.222222
false
0.333333
0.111111
0
0.555556
0
1
0
0
null
0
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
1
0
1
0
0
1
0
0
5
cf151f0c30a45fd3252d2c9ba1d95617b3dd6f69
7,627
py
Python
psi/app/models/product_inventory.py
lusi1990/betterlifepsi
8e7f8562967ab1816d8c25db3251c550a357f39c
[ "MIT" ]
33
2018-10-19T03:41:56.000Z
2022-01-23T16:26:02.000Z
psi/app/models/product_inventory.py
lusi1990/betterlifepsi
8e7f8562967ab1816d8c25db3251c550a357f39c
[ "MIT" ]
318
2018-09-23T15:16:54.000Z
2022-03-31T22:58:55.000Z
psi/app/models/product_inventory.py
lusi1990/betterlifepsi
8e7f8562967ab1816d8c25db3251c550a357f39c
[ "MIT" ]
19
2018-10-22T18:04:18.000Z
2021-12-06T19:49:05.000Z
from sqlalchemy import select, func, or_ from sqlalchemy.ext.hybrid import hybrid_property from psi.app import const from psi.app.models import Product, InventoryTransactionLine, \ InventoryTransaction from psi.app.service import Info from psi.app.utils import format_decimal, get_weeks_between db = Info.get_db() class ProductInventory(Product): @hybrid_property def inventory_advice(self): from psi.app.advice import InventoryAdvice return InventoryAdvice.advice(self) @inventory_advice.setter def inventory_advice(self, value): pass @inventory_advice.expression def inventory_advice(self): pass @hybrid_property def average_purchase_price(self): return self.cal_inv_trans_average(const.PURCHASE_IN_INV_TRANS_KEY) @average_purchase_price.setter def average_purchase_price(self, val): pass @average_purchase_price.expression def average_purchase_price(self): from psi.app.models import EnumValues return (select([func.sum(InventoryTransactionLine.quantity * InventoryTransactionLine.price) / func.sum(InventoryTransactionLine.quantity)]) .where(self.id == InventoryTransactionLine.product_id and InventoryTransactionLine.inventory_transaction_id == InventoryTransaction.id and InventoryTransaction.type_id == EnumValues.id and EnumValues.code == const.PURCHASE_IN_INV_TRANS_KEY) .label('average_purchase_price')) @hybrid_property def average_retail_price(self): return self.cal_inv_trans_average(const.SALES_OUT_INV_TRANS_TYPE_KEY) @average_retail_price.setter def average_retail_price(self, val): pass @average_retail_price.expression def average_retail_price(self): from psi.app.models import EnumValues return (select([func.sum(InventoryTransactionLine.quantity * InventoryTransactionLine.price) / func.greatest(func.sum(InventoryTransactionLine.quantity), 1)]) .where(self.id == InventoryTransactionLine.product_id and InventoryTransactionLine.inventory_transaction_id == InventoryTransaction.id and InventoryTransaction.type_id == EnumValues.id and EnumValues.code == 
const.SALES_OUT_INV_TRANS_TYPE_KEY) .label('average_retail_price')) @hybrid_property def average_unit_profit(self): if self.average_purchase_price != 0 and self.average_retail_price != 0: return self.average_retail_price - self.average_purchase_price return 0 @average_unit_profit.setter def average_unit_profit(self, value): pass @average_unit_profit.expression def average_unit_profit(self): from .enum_values import EnumValues return ((select([-func.sum(InventoryTransactionLine.quantity * InventoryTransactionLine.price) / func.greatest(func.sum(InventoryTransactionLine.quantity), 1)]) .where(self.id == InventoryTransactionLine.product_id) .where(InventoryTransactionLine.inventory_transaction_id == InventoryTransaction.id) .where(InventoryTransaction.type_id == EnumValues.id) .where(or_(EnumValues.code == const.SALES_OUT_INV_TRANS_TYPE_KEY, EnumValues.code == const.PURCHASE_IN_INV_TRANS_KEY))) .label('average_unit_profit')) @hybrid_property def weekly_average_profit(self): if 0 == self.average_unit_profit: return 0 return format_decimal(self.weekly_sold_qty * self.average_unit_profit) @weekly_average_profit.expression def weekly_average_profit(self): from .enum_values import EnumValues return ((select([-func.sum(InventoryTransactionLine.quantity * InventoryTransactionLine.price) / func.greatest(func.sum(InventoryTransactionLine.quantity), 1)]) .where(self.id == InventoryTransactionLine.product_id and InventoryTransactionLine.inventory_transaction_id == InventoryTransaction.id and InventoryTransaction.type_id == EnumValues.id and (EnumValues.code == const.SALES_OUT_INV_TRANS_TYPE_KEY or EnumValues.code == const.PURCHASE_IN_INV_TRANS_KEY))) .label('weekly_average_profit')) @weekly_average_profit.setter def weekly_average_profit(self, value): pass @hybrid_property def gross_profit_rate(self): if self.average_retail_price != 0 and self.average_purchase_price != 0: val = (self.average_retail_price - self.average_purchase_price)/self.average_purchase_price try: fval = 
float(val) percent = "{:.2%}".format(fval) return percent except Exception as e: return '-' return '-' @gross_profit_rate.setter def gross_profit_rate(self, value): pass @hybrid_property def weekly_sold_qty(self): """ SQL: SELECT p.id, p.name, -sum(itl.quantity), -sum(itl.quantity) / (greatest(date_part('days', max(it.date) - min(it.date)), 1)/7), FROM inventory_transaction_line itl, inventory_transaction it, enum_values ev, product p where itl.inventory_transaction_id = it.id AND itl.product_id = p.id AND ev.code = 'SALES_OUT' AND it.type_id = ev.id GROUP BY p.id, p.name; :return: quantity of sold out product averaged by week. """ i_ts = self.inventory_transaction_lines tot_qty = 0 max_date, min_date = None, None if len(i_ts) > 0: for l in i_ts: if l.type.code == const.SALES_OUT_INV_TRANS_TYPE_KEY: if l.quantity is not None and l.price is not None: tot_qty += abs(l.quantity) if max_date is None or l.inventory_transaction.date > max_date: max_date = l.inventory_transaction.date if min_date is None or l.inventory_transaction.date < min_date: min_date = l.inventory_transaction.date weeks = get_weeks_between(min_date, max_date) if weeks == 0: weeks = 1 return format_decimal(tot_qty / weeks) @weekly_sold_qty.setter def weekly_sold_qty(self, value): pass @weekly_sold_qty.expression def weekly_sold_qty(self): from psi.app.models.sales_order import SalesOrderLine, SalesOrder return ((select([func.sum(SalesOrderLine.quantity)]) .where(self.id == SalesOrderLine.product_id) .where(SalesOrderLine.sales_order_id == SalesOrder.id) .where(SalesOrder.order_date > func.now() - 7)).label('weekly_sold_qty')) def cal_inv_trans_average(self, transaction_type): i_ts = self.inventory_transaction_lines tot_amt = 0 tot_qty = 0 if len(i_ts) > 0: for l in i_ts: if l.type.code == transaction_type: if l.quantity is not None and l.price is not None: tot_qty += abs(l.quantity) tot_amt += abs(l.quantity) * l.price if tot_amt != 0 and tot_qty != 0: return format_decimal(tot_amt / tot_qty) return 0
41.005376
136
0.64075
871
7,627
5.342135
0.132032
0.055878
0.047281
0.067054
0.600473
0.457339
0.425317
0.40533
0.363636
0.326886
0
0.004375
0.280713
7,627
185
137
41.227027
0.843784
0.057296
0
0.447552
0
0
0.014902
0.006103
0
0
0
0
0
1
0.146853
false
0.055944
0.083916
0.013986
0.363636
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
1
0
0
0
0
0
1
cf18350dca3c8a011e1f04f49243469e79dd2045
1,484
py
Python
run.py
wallarelvo/SmallCartography
007e621386eb86d904fefef3f518b1d5f1dc7fe6
[ "Apache-2.0" ]
null
null
null
run.py
wallarelvo/SmallCartography
007e621386eb86d904fefef3f518b1d5f1dc7fe6
[ "Apache-2.0" ]
null
null
null
run.py
wallarelvo/SmallCartography
007e621386eb86d904fefef3f518b1d5f1dc7fe6
[ "Apache-2.0" ]
null
null
null
import carto import argparse def main(): parser = argparse.ArgumentParser( description="Runs programs for the carto MapReduce library" ) parser.add_argument( "--host", dest="host", type=str, default="localhost", help="Host of the program" ) parser.add_argument( "--port", dest="port", type=int, default=8000, help="Port of the program" ) parser.add_argument( "--name", dest="name", type=str, help="Name used by the worker" ) parser.add_argument( "--program", dest="program", type=str, default="client", help="Used to determine what program will run" ) parser.add_argument( "--ns-host", dest="ns_host", type=str, default="localhost", help="Host of the name server" ) parser.add_argument( "--ns-port", dest="ns_port", type=int, default="8080", help="Port used by the name server" ) args = parser.parse_args() if args.program == carto.master.worker.WorkerType.MASTER: carto.master.run(args.host, args.port) elif args.program == carto.master.worker.WorkerType.MAPPER: carto.mapper.run(args.host, args.port, args.ns_host, args.ns_port, args.name) elif args.program == carto.master.worker.WorkerType.REDUCER: carto.reducer.run(args.host, args.port, args.ns_host, args.ns_port, args.name) if __name__ == "__main__": main()
26.981818
67
0.607143
185
1,484
4.756757
0.264865
0.061364
0.115909
0.075
0.418182
0.396591
0.293182
0.197727
0.197727
0.106818
0
0.007266
0.258086
1,484
54
68
27.481481
0.792007
0
0
0.195122
0
0
0.209036
0
0
0
0
0
0
1
0.02439
false
0
0.04878
0
0.073171
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
cf1bdfaeda3c9d3dd53a3e8c1108702ddef142c8
3,623
py
Python
microservices_miner/control/issue_mgr.py
IBM/microservices-miner
b7befa1c97930b1e7347c9e386a4bb5c5f2d2198
[ "MIT" ]
null
null
null
microservices_miner/control/issue_mgr.py
IBM/microservices-miner
b7befa1c97930b1e7347c9e386a4bb5c5f2d2198
[ "MIT" ]
4
2021-06-08T22:11:29.000Z
2022-01-14T21:21:04.000Z
microservices_miner/control/issue_mgr.py
IBM/microservices-miner
b7befa1c97930b1e7347c9e386a4bb5c5f2d2198
[ "MIT" ]
1
2020-08-06T14:53:05.000Z
2020-08-06T14:53:05.000Z
# (C) Copyright IBM Corporation 2017, 2018, 2019 # U.S. Government Users Restricted Rights: Use, duplication or disclosure restricted # by GSA ADP Schedule Contract with IBM Corp. # # Author: Leonardo P. Tizzei <ltizzei@br.ibm.com> from microservices_miner.control.database_conn import IssueConn, UserConn, RepositoryConn from microservices_miner.model.repository import Repository import logging logging.basicConfig(filename='github_miner.log', level=logging.DEBUG, format='%(asctime)s %(message)s') class IssueMgr: def __init__(self, path_to_db): self.db_path = path_to_db self.issue_conn = IssueConn(path_to_db) self.user_conn = UserConn(path_to_db) self.repo_conn = RepositoryConn(path_to_db) def insert_issue_into_db(self, repo): """ Parameters ---------- repo: Repository Returns ------- """ for issue in repo.issues: updated_at = issue.updated_at if updated_at is not None: updated_at_str = updated_at.isoformat() else: updated_at_str = None if issue.closed_at is None: closed_at_str = None else: closed_at_str = issue.closed_at.isoformat() user_id = issue.user.commit_id issue_id = self.issue_conn.insert_issue(title=issue.title, body=issue.body, repository_id=repo.repository_id, closed_at=closed_at_str, updated_at=updated_at_str, created_at=issue.created_at.isoformat(), user_id=user_id, state=issue.state) for assignee in issue.assignees: assignee_id = self.issue_conn.insert_assignee(assignee) self.issue_conn.insert_issue_assignee(assignee_id=assignee_id, issue_id=issue_id) for label in issue.labels: label_id = self.issue_conn.insert_label(label) self.issue_conn.insert_issue_label(issue_id=issue_id, label_id=label_id) def get_issues_by_label(self, repository_id: int): """ Parameters ---------- repository_id: int Returns ------- List[Issue] """ issues = self.issue_conn.get_issues(repository_id=repository_id) return issues def get_label(self, name): """ Parameters ---------- name Returns ------- Label """ labels = self.issue_conn.get_labels(name=name) if len(labels) == 0: 
return None else: label = labels.pop() return label def get_assignee(self, login): """ Parameters ---------- login Returns ------- Assignee """ assignees = self.issue_conn.get_assignee(login) if len(assignees) == 0: return None else: assignee = assignees.pop() return assignee def insert_assignee(self, assignee): """ Parameters ---------- assignee: Assignee Returns ------- int """ rowid = self.issue_conn.insert_assignee(assignee) return rowid def insert_label(self, label): """ Parameters ---------- label: Label Returns ------- int """ row_id = self.issue_conn.insert_label(label) return row_id
27.44697
121
0.548993
384
3,623
4.921875
0.265625
0.052381
0.075661
0.07037
0.110053
0.069841
0.032804
0
0
0
0
0.005957
0.351366
3,623
131
122
27.656489
0.798298
0.157604
0
0.109091
0
0
0.014607
0
0
0
0
0
0
1
0.127273
false
0
0.054545
0
0.327273
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
cf1d3c9ee4fa3f3a46513695b9bd7c1714c7aef5
10,893
py
Python
custom_components/skyq/config_flow.py
TomBrien/Home_Assistant_SkyQ_MediaPlayer
50f9ad0d3b7a3bc2acc652415ff59740bf3ace10
[ "MIT" ]
null
null
null
custom_components/skyq/config_flow.py
TomBrien/Home_Assistant_SkyQ_MediaPlayer
50f9ad0d3b7a3bc2acc652415ff59740bf3ace10
[ "MIT" ]
null
null
null
custom_components/skyq/config_flow.py
TomBrien/Home_Assistant_SkyQ_MediaPlayer
50f9ad0d3b7a3bc2acc652415ff59740bf3ace10
[ "MIT" ]
null
null
null
"""Configuration flow for the skyq platform.""" import ipaddress import json import logging import re from operator import attrgetter import homeassistant.helpers.config_validation as cv import pycountry import voluptuous as vol from homeassistant import config_entries, exceptions from homeassistant.const import CONF_HOST, CONF_NAME from homeassistant.core import callback from pyskyqremote.const import KNOWN_COUNTRIES from pyskyqremote.skyq_remote import SkyQRemote from .const import ( CHANNEL_DISPLAY, CHANNEL_SOURCES_DISPLAY, CONF_CHANNEL_SOURCES, CONF_COUNTRY, CONF_EPG_CACHE_LEN, CONF_GEN_SWITCH, CONF_LIVE_TV, CONF_OUTPUT_PROGRAMME_IMAGE, CONF_ROOM, CONF_SOURCES, CONF_VOLUME_ENTITY, CONST_DEFAULT, CONST_DEFAULT_EPGCACHELEN, DOMAIN, LIST_EPGCACHELEN, SKYQREMOTE, ) from .schema import DATA_SCHEMA from .utils import convert_sources_JSON SORT_CHANNELS = False _LOGGER = logging.getLogger(__name__) def host_valid(host): """Return True if hostname or IP address is valid.""" try: if ipaddress.ip_address(host).version == (4 or 6): return True except ValueError: disallowed = re.compile(r"[^a-zA-Z\d\-]") return all(x and not disallowed.search(x) for x in host.split(".")) class SkyqConfigFlow(config_entries.ConfigFlow, domain=DOMAIN): """Example config flow.""" VERSION = 1 CONNECTION_CLASS = config_entries.CONN_CLASS_LOCAL_POLL def __init__(self): """Initiliase the configuration flow.""" @staticmethod @callback def async_get_options_flow(config_entry): """Sky Q options callback.""" return SkyQOptionsFlowHandler(config_entry) async def async_step_user(self, user_input=None): """Handle the initial step.""" errors = {} if user_input: if host_valid(user_input[CONF_HOST]): host = user_input[CONF_HOST] name = user_input[CONF_NAME] try: await self._async_setUniqueID(host) except CannotConnect: errors["base"] = "cannot_connect" else: return self.async_create_entry(title=name, data=user_input) errors[CONF_HOST] = "invalid_host" return self.async_show_form( step_id="user", 
data_schema=vol.Schema(DATA_SCHEMA), errors=errors ) async def _async_setUniqueID(self, host): remote = await self.hass.async_add_executor_job(SkyQRemote, host) if not remote.deviceSetup: raise CannotConnect() deviceInfo = await self.hass.async_add_executor_job(remote.getDeviceInformation) await self.async_set_unique_id( deviceInfo.countryCode + "".join(e for e in deviceInfo.serialNumber.casefold() if e.isalnum()) ) self._abort_if_unique_id_configured() class SkyQOptionsFlowHandler(config_entries.OptionsFlow): """Config flow options for Sky Q.""" def __init__(self, config_entry): """Initialize Sky Q options flow.""" self._name = config_entry.title self._config_entry = config_entry self._remote = None self._channel_sources = config_entry.options.get(CONF_CHANNEL_SOURCES, []) self._sources = convert_sources_JSON( sources_list=config_entry.options.get(CONF_SOURCES) ) self._room = config_entry.options.get(CONF_ROOM) self._volume_entity = config_entry.options.get(CONF_VOLUME_ENTITY) self._gen_switch = config_entry.options.get(CONF_GEN_SWITCH, False) self._live_tv = config_entry.options.get(CONF_LIVE_TV, True) self._country = config_entry.options.get(CONF_COUNTRY, CONST_DEFAULT) if self._country != CONST_DEFAULT: self._country = self._convertCountry(alpha_3=self._country) self._output_programme_image = config_entry.options.get( CONF_OUTPUT_PROGRAMME_IMAGE, True ) self._epg_cache_len = config_entry.options.get( CONF_EPG_CACHE_LEN, CONST_DEFAULT_EPGCACHELEN ) self._channelDisplay = [] self._channel_list = [] async def async_step_init(self, user_input=None): """Set up the option flow.""" self._remote = self.hass.data[DOMAIN][self._config_entry.entry_id][SKYQREMOTE] s = set(KNOWN_COUNTRIES[country] for country in KNOWN_COUNTRIES) countryNames = [] for alpha3 in s: countryName = self._convertCountry(alpha_3=alpha3) countryNames.append(countryName) self._country_list = [CONST_DEFAULT] + sorted(countryNames) if self._remote.deviceSetup: channelData = await 
self.hass.async_add_executor_job( self._remote.getChannelList ) self._channel_list = channelData.channels for channel in self._channel_list: self._channelDisplay.append( CHANNEL_DISPLAY.format(channel.channelno, channel.channelname) ) self._channel_sources_display = [] for channel in self._channel_sources: try: channelData = next( c for c in self._channel_list if c.channelname == channel ) self._channel_sources_display.append( CHANNEL_DISPLAY.format( channelData.channelno, channelData.channelname ) ) except StopIteration: pass return await self.async_step_user() return await self.async_step_retry() async def async_step_user(self, user_input=None): """Handle a flow initialized by the user.""" errors = {} if user_input: self._channel_sources_display = user_input[CHANNEL_SOURCES_DISPLAY] user_input.pop(CHANNEL_SOURCES_DISPLAY) if len(self._channel_sources_display) > 0: channelitems = [] for channel in self._channel_sources_display: channelData = next( c for c in self._channel_list if channel == CHANNEL_DISPLAY.format(c.channelno, c.channelname) ) channelitems.append(channelData) if SORT_CHANNELS: channelnosorted = sorted(channelitems, key=attrgetter("channelno")) channelsorted = sorted( channelnosorted, key=attrgetter("channeltype"), reverse=True ) channel_sources = [] for c in channelsorted: channel_sources.append(c.channelname) else: channel_sources = [] for c in channelitems: channel_sources.append(c.channelname) user_input[CONF_CHANNEL_SOURCES] = channel_sources self._gen_switch = user_input.get(CONF_GEN_SWITCH) self._live_tv = user_input.get(CONF_LIVE_TV) self._output_programme_image = user_input.get(CONF_OUTPUT_PROGRAMME_IMAGE) self._room = user_input.get(CONF_ROOM) self._volume_entity = user_input.get(CONF_VOLUME_ENTITY) self._country = user_input.get(CONF_COUNTRY) if self._country == CONST_DEFAULT: user_input.pop(CONF_COUNTRY) else: user_input[CONF_COUNTRY] = self._convertCountry(name=self._country) self._epg_cache_len = user_input.get(CONF_EPG_CACHE_LEN) try: 
self._sources = user_input.get(CONF_SOURCES) if self._sources: user_input[CONF_SOURCES] = convert_sources_JSON( sources_json=self._sources ) for source in user_input[CONF_SOURCES]: self._validate_commands(source) return self.async_create_entry(title="", data=user_input) except json.decoder.JSONDecodeError: errors["base"] = "invalid_sources" except InvalidCommand: errors["base"] = "invalid_command" return self.async_show_form( step_id="user", description_placeholders={CONF_NAME: self._name}, data_schema=vol.Schema( { vol.Optional( CHANNEL_SOURCES_DISPLAY, default=self._channel_sources_display ): cv.multi_select(self._channelDisplay), vol.Optional( CONF_OUTPUT_PROGRAMME_IMAGE, default=self._output_programme_image, ): bool, vol.Optional(CONF_LIVE_TV, default=self._live_tv): bool, vol.Optional(CONF_GEN_SWITCH, default=self._gen_switch): bool, vol.Optional( CONF_ROOM, description={"suggested_value": self._room} ): str, vol.Optional(CONF_COUNTRY, default=self._country): vol.In( self._country_list ), vol.Optional( CONF_VOLUME_ENTITY, description={"suggested_value": self._volume_entity}, ): str, vol.Optional( CONF_EPG_CACHE_LEN, default=self._epg_cache_len ): vol.In(LIST_EPGCACHELEN), vol.Optional( CONF_SOURCES, description={"suggested_value": self._sources} ): str, } ), errors=errors, ) async def async_step_retry(self, user_input=None): """Handle a failed connection.""" errors = {} errors["base"] = "cannot_connect" return self.async_show_form( step_id="retry", data_schema=vol.Schema({}), errors=errors, ) def _convertCountry(self, alpha_3=None, name=None): if name: return pycountry.countries.get(name=name).alpha_3 if alpha_3: return pycountry.countries.get(alpha_3=alpha_3).name def _validate_commands(self, source): commands = source[1].split(",") for command in commands: if command not in SkyQRemote.commands: raise InvalidCommand() class CannotConnect(exceptions.HomeAssistantError): """Error to indicate we cannot connect.""" class 
InvalidCommand(exceptions.HomeAssistantError): """Error to indicate we cannot connect."""
36.431438
88
0.597815
1,133
10,893
5.425419
0.185349
0.038067
0.034163
0.030747
0.228729
0.109647
0.076948
0.056613
0.027005
0.027005
0
0.0019
0.323602
10,893
298
89
36.553691
0.832383
0.028
0
0.177215
0
0
0.017202
0
0
0
0
0
0
1
0.025316
false
0.004219
0.067511
0
0.168776
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
cf1e1fc048aed029497d762bdbe8c8befabdb682
2,045
py
Python
tradssat/out/soilni.py
shreyayadav/traDSSAT
cc9650f896910c0d0a7a382aff36bef89aba70f2
[ "MIT" ]
null
null
null
tradssat/out/soilni.py
shreyayadav/traDSSAT
cc9650f896910c0d0a7a382aff36bef89aba70f2
[ "MIT" ]
null
null
null
tradssat/out/soilni.py
shreyayadav/traDSSAT
cc9650f896910c0d0a7a382aff36bef89aba70f2
[ "MIT" ]
null
null
null
from tradssat.tmpl.output import OutFile from tradssat.tmpl.var import FloatVar, IntegerVar class SoilNiOut(OutFile): """ Reader for DSSAT soil nitrogen (SOILNI.OUT) files. """ filename = 'SoilNi.Out' def _get_var_info(self): return vars_ vars_ = { IntegerVar('YEAR', 4, info='Year'), IntegerVar('DOY', 3, info='Day of year starting on Jan 1.'), IntegerVar('DAS', 5, info='Day after start'), IntegerVar('NAPC', 5, info='Cumulative inorganic N applied, kg/ha'), IntegerVar('NI#M', 5, info='N application numbers'), FloatVar('NIAD', 7, 1, info='Inorganic N in soil, kg/ha'), FloatVar('NITD', 6, 1, info='Amount of total NO3, kg/ha'), FloatVar('NHTD', 6, 1, info='Amount of total NH4, kg/ha'), FloatVar('NI1D', 7, 2, info='NO3 at 0-5 cm soil depth, ppm'), FloatVar('NI2D', 7, 2, info='NO3 at 5-15 cm soil depth, ppm'), FloatVar('NI3D', 7, 2, info='NO3 at 15-30 cm soil depth, ppm'), FloatVar('NI4D', 7, 2, info='NO3 at 30-45 cm soil depth, ppm'), FloatVar('NI5D', 7, 2, info='NO3 at 45-60 cm soil depth, ppm'), FloatVar('NI6D', 7, 2, info='NO3 at 60-90 cm soil depth, ppm'), FloatVar('NI7D', 7, 2, info='NO3 at 90-110 cm soil depth, ppm'), FloatVar('NH1D', 7, 2, info='NH4 at 0-5 cm soil depth, ppm'), FloatVar('NH2D', 7, 2, info='NH4 at 5-15 cm soil depth, ppm'), FloatVar('NH3D', 7, 2, info='NH4 at 15-30 cm soil depth, ppm'), FloatVar('NH4D', 7, 2, info='NH4 at 30-45 cm soil depth, ppm'), FloatVar('NH5D', 7, 2, info='NH4 at 45-60 cm soil depth, ppm'), FloatVar('NH6D', 7, 2, info='NH4 at 60-90 cm soil depth, ppm'), FloatVar('NH7D', 7, 2, info='NH4 at 90-110 cm soil depth, ppm'), FloatVar('NMNC', 7, 0, info=''), FloatVar('NITC', 7, 0, info=''), FloatVar('NDNC', 7, 0, info=''), FloatVar('NIMC', 7, 0, info=''), FloatVar('AMLC', 7, 0, info=''), FloatVar('NNMNC', 7, 0, info=''), FloatVar('NUCM', 7, 0, info='N uptake, kg/ha'), FloatVar('NLCC', 7, 0, info='Cumulative N leached, kg/ha'), }
43.510638
72
0.604401
340
2,045
3.620588
0.285294
0.022746
0.068237
0.15922
0.448416
0.34606
0.315191
0.315191
0
0
0
0.084611
0.202445
2,045
46
73
44.456522
0.670141
0.02445
0
0
0
0
0.39717
0
0
0
0
0
0
1
0.026316
false
0
0.052632
0.026316
0.157895
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
cf1edbd7a30a852f3ca1224c69d6e47997c186c3
4,888
py
Python
project/scripts/run-cooja.py
nfi/multitrace
7a043f4c3f580ca87c39f23337322b98594f3a51
[ "BSD-3-Clause" ]
4
2021-12-20T12:25:56.000Z
2022-03-23T20:39:16.000Z
project/scripts/run-cooja.py
nfi/multitrace
7a043f4c3f580ca87c39f23337322b98594f3a51
[ "BSD-3-Clause" ]
null
null
null
project/scripts/run-cooja.py
nfi/multitrace
7a043f4c3f580ca87c39f23337322b98594f3a51
[ "BSD-3-Clause" ]
null
null
null
#!/usr/bin/env python3 import argparse import sys import os import time import traceback import subprocess from subprocess import PIPE, STDOUT, CalledProcessError # Find path to this script SELF_PATH = os.path.dirname(os.path.abspath(__file__)) # Find path to Contiki-NG relative to this script CONTIKI_PATH = os.path.dirname(os.path.dirname(SELF_PATH)) cooja_jar = os.path.normpath(os.path.join(CONTIKI_PATH, "tools", "cooja", "dist", "cooja.jar")) cooja_output = 'COOJA.testlog' cooja_log = 'COOJA.log' ####################################################### # Run a child process and get its output def _run_command(command): try: proc = subprocess.run(command, stdout=PIPE, stderr=STDOUT, shell=True, universal_newlines=True) return proc.returncode, proc.stdout if proc.stdout else '' except CalledProcessError as e: print(f"Command failed: {e}", file=sys.stderr) return e.returncode, e.stdout if e.stdout else '' except (OSError, Exception) as e: traceback.print_exc() return -1, str(e) def _remove_file(filename): try: os.remove(filename) except FileNotFoundError: pass ############################################################# # Run a single instance of Cooja on a given simulation script def run_simulation(cooja_file, output_path=None): # Remove any old simulation logs _remove_file(cooja_output) _remove_file(cooja_log) target_basename = cooja_file if target_basename.endswith('.csc.gz'): target_basename = target_basename[:-7] elif target_basename.endswith('.csc'): target_basename = target_basename[:-4] simulation_id = str(round(time.time() * 1000)) if output_path is not None: target_basename = os.path.join(output_path, target_basename) target_basename += '-dt-' + simulation_id target_basename_fail = target_basename + '-fail' target_output = target_basename + '/cooja.testlog' target_log_output = target_basename + '/cooja.log' # filename = os.path.join(SELF_PATH, cooja_file) command = (f"java -Djava.awt.headless=true -jar {cooja_jar} -nogui={cooja_file} 
-contiki={CONTIKI_PATH}" f" -datatrace={target_basename}") sys.stdout.write(f" Running Cooja:\n {command}\n") start_time = time.perf_counter_ns() (return_code, output) = _run_command(command) end_time = time.perf_counter_ns() with open(cooja_log, 'a') as f: f.write(f'\nSimulation execution time: {end_time - start_time} ns.\n') if not os.path.isdir(target_basename): os.mkdir(target_basename) has_cooja_output = os.path.isfile(cooja_output) if has_cooja_output: os.rename(cooja_output, target_output) os.rename(cooja_log, target_log_output) if return_code != 0 or not has_cooja_output: print(f"Failed, ret code={return_code}, output:", file=sys.stderr) print("-----", file=sys.stderr) print(output, file=sys.stderr, end='') print("-----", file=sys.stderr) if not has_cooja_output: print("No Cooja simulation script output!", file=sys.stderr) os.rename(target_basename, target_basename_fail) return False print(" Checking for output...") is_done = False with open(target_output, "r") as f: for line in f.readlines(): line = line.strip() if line == "TEST OK": is_done = True continue if not is_done: print(" test failed.") os.rename(target_basename, target_basename_fail) return False print(f" test done in {round((end_time - start_time) / 1000000)} milliseconds.") return True ####################################################### # Run the application def main(parser=None): if not os.access(cooja_jar, os.R_OK): sys.exit(f'The file "{cooja_jar}" does not exist, did you build Cooja?') if not parser: parser = argparse.ArgumentParser() parser.add_argument('-o', dest='output_path') parser.add_argument('input', nargs='+') try: conopts = parser.parse_args(sys.argv[1:]) except Exception as e: sys.exit(f"Illegal arguments: {e}") if conopts.output_path and not os.path.isdir(conopts.output_path): os.mkdir(conopts.output_path) for simulation_file in conopts.input: if not os.access(simulation_file, os.R_OK): print(f'Can not read simulation script "{simulation_file}"', file=sys.stderr) sys.exit(1) 
print(f'Running simulation "{simulation_file}"') if not run_simulation(simulation_file, conopts.output_path): sys.exit(f'Failed to run simulation "{simulation_file}"') print('Done. No more simulation files specified.') ####################################################### if __name__ == '__main__': main()
33.479452
108
0.63748
635
4,888
4.711811
0.264567
0.098262
0.030414
0.046791
0.081551
0.052807
0.037433
0.037433
0.037433
0.037433
0
0.004639
0.206219
4,888
145
109
33.710345
0.766495
0.059534
0
0.088235
0
0.009804
0.18336
0.01719
0
0
0
0
0
1
0.039216
false
0.009804
0.068627
0
0.166667
0.127451
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
cf1f3cd2308c871fbca4d806dda3a4b0a43ddbe0
711
py
Python
Recent Excel Documents.lbaction/Contents/Scripts/default.py
nriley/LBOfficeMRU
e2df583cdb32a066f3ab002d4182fa40759839a6
[ "Apache-2.0" ]
13
2016-08-21T12:18:42.000Z
2022-02-01T22:03:45.000Z
Recent Excel Documents.lbaction/Contents/Scripts/default.py
nriley/LBOfficeMRU
e2df583cdb32a066f3ab002d4182fa40759839a6
[ "Apache-2.0" ]
1
2017-02-11T10:46:12.000Z
2017-03-31T04:20:01.000Z
Recent Excel Documents.lbaction/Contents/Scripts/default.py
nriley/LBOfficeMRU
e2df583cdb32a066f3ab002d4182fa40759839a6
[ "Apache-2.0" ]
null
null
null
#!/usr/bin/env python3 import json, operator import mruservice, mruuserdata APP_NAME = 'Excel' APP_BUNDLE_ID = 'com.microsoft.Excel' APP_URL_PREFIX = 'ms-excel:ofe|u|' EXTENSION_TO_ICON_NAME = dict( slk='XLS8', dif='XLS8', ods='ODS', xls='XLS8', xlsx='XLSX', xltx='XLTX', xlsm='XLSM', xltm='XLTM', xlsb='XLSB', xlam='XLAM', xlw='XLW8', xla='XLA8', xlb='XLB8', xlt='XLT', xld='XLD5', xlm='XLM4', xll='XLL', csv='CSV', txt='TEXT', xml='XMLS', tlb='OTLB', _='TEXT') items = mruuserdata.items_for_app(APP_NAME) items += mruservice.items_for_app(APP_NAME, APP_BUNDLE_ID, APP_URL_PREFIX, EXTENSION_TO_ICON_NAME) items.sort(key=operator.itemgetter('Timestamp'), reverse=True) print(json.dumps(items))
35.55
98
0.703235
110
711
4.345455
0.590909
0.043933
0.046025
0.079498
0.075314
0
0
0
0
0
0
0.014063
0.099859
711
19
99
37.421053
0.732813
0.029536
0
0
0
0
0.191582
0
0
0
0
0
0
1
0
false
0
0.153846
0
0.153846
0.076923
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
cf1f9a3ecc8549d804dcf2f5aef38297dc7945b8
2,458
py
Python
3sum_medium.py
victorsemenov1980/LeetCodeDailyFun
f66273a9868ede5e2337f586e21eaf9e771b9b48
[ "MIT" ]
null
null
null
3sum_medium.py
victorsemenov1980/LeetCodeDailyFun
f66273a9868ede5e2337f586e21eaf9e771b9b48
[ "MIT" ]
null
null
null
3sum_medium.py
victorsemenov1980/LeetCodeDailyFun
f66273a9868ede5e2337f586e21eaf9e771b9b48
[ "MIT" ]
null
null
null
#!/usr/bin/env python3 # -*- coding: utf-8 -*- """ Created on Sat May 22 12:03:16 2021 @author: user """ ''' Given an integer array nums, return all the triplets [nums[i], nums[j], nums[k]] such that i != j, i != k, and j != k, and nums[i] + nums[j] + nums[k] == 0. Notice that the solution set must not contain duplicate triplets. Example 1: Input: nums = [-1,0,1,2,-1,-4] Output: [[-1,-1,2],[-1,0,1]] Example 2: Input: nums = [] Output: [] Example 3: Input: nums = [0] Output: [] Constraints: 0 <= nums.length <= 3000 -105 <= nums[i] <= 105 Accepted 1,304,501 Submissions 4,576,232 ''' ''' Slow brutforce ''' class Solution: def threeSum(self, nums): if len(nums)<3: return [] else: out=[] import itertools indices=[x for x in range(0,len(nums))] combs=list(itertools.combinations(indices, 3)) for i in combs: summ=[] for j in i: summ.append(nums[j]) if sum(summ)==0 and sorted(summ) not in out: out.append(sorted(summ)) return out y=Solution() nums = [-1,0,1,2,-1,-4] print(y.threeSum(nums)) nums = [0,0,0] print(y.threeSum(nums)) # nums = [0] # print(y.threeSum(nums)) ''' Faster ''' class Solution: def threeSum(self, nums): if len(nums)<3: return [] else: out=[] indices = {} nums=sorted(nums) for key ,value in enumerate(nums): indices[value]=key for first_ind,first_num in enumerate(nums): if first_num>0:#no reason to continue break else: for second_ind,second_num in enumerate(nums[first_ind+1:]): zero=-(first_num+second_num) if zero in indices.keys() and indices[zero]>first_ind+second_ind+1: temp=sorted([zero,first_num,second_num]) if temp not in out: out.append(temp) return out y=Solution() nums = [-1,0,1,2,-1,-4] print(y.threeSum(nums)) # nums = [0,0,0] # print(y.threeSum(nums)) # nums = [0] # print(y.threeSum(nums))
22.550459
156
0.47559
311
2,458
3.720257
0.321543
0.031115
0.072602
0.093345
0.361279
0.331893
0.266206
0.257563
0.257563
0.257563
0
0.055814
0.387714
2,458
108
157
22.759259
0.712957
0.091131
0
0.488889
0
0
0
0
0
0
0
0
0
1
0.044444
false
0
0.022222
0
0.2
0.066667
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
cf2003bb501336b11e7299124a046e9392cff5a7
799
py
Python
trader/lib/definitions.py
ajhenri/trading-simulator
90738710d50a89be553910bc5d1e0fff80e3c6a1
[ "MIT" ]
null
null
null
trader/lib/definitions.py
ajhenri/trading-simulator
90738710d50a89be553910bc5d1e0fff80e3c6a1
[ "MIT" ]
4
2020-10-12T22:49:02.000Z
2022-02-27T10:23:56.000Z
trader/lib/definitions.py
ajhenri/trading-simulator
90738710d50a89be553910bc5d1e0fff80e3c6a1
[ "MIT" ]
null
null
null
class ResponseErrors:
    """Canonical user-facing error messages for API responses.

    Grouped by subject; "DNE" abbreviates "does not exist".
    """

    # Request-level errors.
    DEFAULT = 'An unexpected error occurred while processing this request'
    INVALID_JSON = 'Unable to parse JSON from the request body'
    INVALID_LOGIN = 'The username/password you specified is invalid'

    # User / account errors.
    USER_DNE = 'User does not exist'
    ACCOUNT_DNE = 'Account does not exist'
    ACCOUNT_NO_ACCESS = 'Invalid account'
    ACCOUNT_EXISTS = 'Account already exists for this user'
    ACCOUNT_INVALID_ACTION = 'Invalid action for account'
    ACCOUNT_INSUFFICIENTFUNDS = 'Insufficient funds'
    # Snake_case alias for the constant above, matching the naming style
    # of the rest of the class; the original spelling is kept so existing
    # callers keep working.
    ACCOUNT_INSUFFICIENT_FUNDS = ACCOUNT_INSUFFICIENTFUNDS

    # Stock errors.
    STOCK_DNE = 'Stock does not exist'
    STOCK_EXISTS = 'Stock already exists'
    STOCK_DATA_UNAVAILABLE = 'Stock data is currently unavailable'

    # Trade errors.
    NOT_ENOUGH_FUNDS = 'Not enough funds to make this trade'
    TOO_MANY_SHARES = 'Shares passed is greater than what is owned'
53.266667
74
0.749687
106
799
5.481132
0.518868
0.036145
0.061962
0.065404
0
0
0
0
0
0
0
0
0.195244
799
15
75
53.266667
0.903577
0
0
0
0
0
0.54375
0
0
0
0
0
0
1
0
false
0.133333
0
0
1
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
1
0
0
1
0
0
2
cf20204aba78a60c893f6561c24e36c3ce30077f
651
py
Python
tests/test_loss.py
MartinXPN/abcde
13192c5f7dfb32a461b9205aed4b0b21e79d8285
[ "MIT" ]
4
2021-01-20T09:15:37.000Z
2022-03-03T13:58:18.000Z
tests/test_loss.py
MartinXPN/abcde
13192c5f7dfb32a461b9205aed4b0b21e79d8285
[ "MIT" ]
null
null
null
tests/test_loss.py
MartinXPN/abcde
13192c5f7dfb32a461b9205aed4b0b21e79d8285
[ "MIT" ]
null
null
null
from unittest import TestCase

from torch import Tensor

from abcde.loss import PairwiseRankingCrossEntropyLoss


class TestPairwiseRankingLoss(TestCase):
    def test_simple_case(self):
        """Loss on a fixed 3-node example must match the reference value."""
        criterion = PairwiseRankingCrossEntropyLoss()
        predictions = Tensor([[0.5], [0.7], [3]])
        targets = Tensor([[0.2], [1], [2]])
        # The same five (source, target) pairs repeated three times.
        sources = Tensor([0, 1, 2, 2, 1] * 3).long()
        destinations = Tensor([1, 0, 0, 1, 2] * 3).long()
        res = criterion(pred_betweenness=predictions,
                        target_betweenness=targets,
                        src_ids=sources,
                        targ_ids=destinations)
        # This number is taken from the tensorflow implementation
        self.assertAlmostEqual(res, 0.636405362070762)
40.6875
110
0.623656
91
651
4.395604
0.428571
0.035
0.045
0.03
0.075
0.075
0.075
0.075
0.075
0.075
0
0.109344
0.227343
651
15
111
43.4
0.685885
0.084485
0
0
0
0
0
0
0
0
0
0
0.1
1
0.1
false
0
0.3
0
0.5
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
cf21edee740920f2ebaec338a4fef6781808e844
1,088
py
Python
datasets/mvda_peas_raw/__init__.py
ryuzakyl/data-bloodhound
ae0413e748e55a0d2dbae35bbe96a672f313a64b
[ "Apache-2.0" ]
3
2019-03-18T03:22:06.000Z
2021-04-06T07:53:51.000Z
datasets/mvda_peas_raw/__init__.py
ryuzakyl/data-bloodhound
ae0413e748e55a0d2dbae35bbe96a672f313a64b
[ "Apache-2.0" ]
null
null
null
datasets/mvda_peas_raw/__init__.py
ryuzakyl/data-bloodhound
ae0413e748e55a0d2dbae35bbe96a672f313a64b
[ "Apache-2.0" ]
2
2020-10-05T08:22:25.000Z
2020-10-05T08:24:02.000Z
#!/usr/bin/env # -*- coding: utf-8 -*- # Copyright (C) Victor M. Mendiola Lau - All Rights Reserved # Unauthorized copying of this file, via any medium is strictly prohibited # Proprietary and confidential # Written by Victor M. Mendiola Lau <ryuzakyl@gmail.com>, January 2017 import os import scipy.io as sio import utils.datasets as utils # --------------------------------------------------------------- # data set paths __data_set_path = "{}/data/peasraw-dataset.mat".format(os.path.split(__file__)[0]) __pickle_path = "{}/cache/mvda_peas_raw.pickle".format(os.path.split(__file__)[0]) # --------------------------------------------------------------- # TODO: Add docstring with usage examples (see 'uv_fuel' data set) @utils.load_data_from_pickle(__pickle_path) def load_mvda_peas_raw(): # loading matlab data set raw_data = sio.loadmat(__data_set_path) features_labels = raw_data['var_labels_all'] data = raw_data['data_all'] samples_labels = list(range(1, data.shape[0] + 1)) return utils.build_data_set(data, samples_labels, features_labels)
28.631579
82
0.649816
146
1,088
4.547945
0.575342
0.063253
0.045181
0.054217
0.066265
0.066265
0
0
0
0
0
0.010582
0.131434
1,088
37
83
29.405405
0.692063
0.456801
0
0
0
0
0.134483
0.096552
0
0
0
0.027027
0
1
0.083333
false
0
0.25
0
0.416667
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
1
0
0
0
0
0
0
0
0
0
0
1
cf2226ec55301df910a7704bea76af8d31c1db00
472
py
Python
AULAS/Caixa de entrada/selectionSortEu.py
junioralecrim/Python
a7f716e0e2b3d17ea20c76987788831e102c4765
[ "MIT" ]
null
null
null
AULAS/Caixa de entrada/selectionSortEu.py
junioralecrim/Python
a7f716e0e2b3d17ea20c76987788831e102c4765
[ "MIT" ]
null
null
null
AULAS/Caixa de entrada/selectionSortEu.py
junioralecrim/Python
a7f716e0e2b3d17ea20c76987788831e102c4765
[ "MIT" ]
null
null
null
# Selection sort.
#
# Step by step (from the original Portuguese notes):
#   1. Find the smallest item of the unsorted part of the list.
#   2. Swap it into the next position; repeat for the remainder.
#
# The original script only located the global minimum and then printed
# the (unchanged) list n times -- it never actually sorted anything.


def selection_sort(values):
    """Sort *values* in place with selection sort and return it.

    For each position i, find the index of the smallest element in
    values[i:] and swap it into position i.  O(n^2) comparisons,
    O(1) extra space.  Handles empty and single-element lists.
    """
    n = len(values)
    for i in range(n):
        smallest = i
        for j in range(i + 1, n):
            if values[j] < values[smallest]:
                smallest = j
        # Swap the smallest remaining element into position i.
        values[i], values[smallest] = values[smallest], values[i]
    return values


lista = [7, 5, 1, 3, 8]
print(selection_sort(lista))
17.481481
59
0.552966
73
472
3.60274
0.575342
0.091255
0.060837
0
0
0
0
0
0
0
0
0.037736
0.326271
472
26
60
18.153846
0.783019
0.557203
0
0
0
0
0
0
0
0
0
0
0
0
null
null
0
0
null
null
0.125
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
1
0
0
0
0
0
0
0
0
1
cf248937a1f0ddbbed80fc10763c1ab5067ed373
380
py
Python
tests/test_ffwd_send.py
smstone/ffwd-client-python
9a904ea9eecea3ab0a9c90160910bba2600d48f0
[ "Apache-2.0" ]
null
null
null
tests/test_ffwd_send.py
smstone/ffwd-client-python
9a904ea9eecea3ab0a9c90160910bba2600d48f0
[ "Apache-2.0" ]
null
null
null
tests/test_ffwd_send.py
smstone/ffwd-client-python
9a904ea9eecea3ab0a9c90160910bba2600d48f0
[ "Apache-2.0" ]
null
null
null
import argparse
import unittest

from ffwd.ffwd_send import tag_type


class TestFFWDSend(unittest.TestCase):
    """Unit tests for the ``tag_type`` argparse value parser."""

    def test_tag_type(self):
        # assertEquals is a deprecated alias removed in Python 3.12;
        # use assertEqual.
        self.assertEqual(('hello', 'world'), tag_type("hello:world"))
        # Only the first ':' splits; the remainder stays in the value.
        self.assertEqual(('hello', 'world:two'), tag_type("hello:world:two"))
        # A tag with no ':' separator is rejected.
        with self.assertRaises(argparse.ArgumentTypeError):
            tag_type('hello')
27.142857
78
0.694737
46
380
5.586957
0.456522
0.136187
0.140078
0.202335
0
0
0
0
0
0
0
0
0.171053
380
13
79
29.230769
0.815873
0
0
0
0
0
0.144737
0
0
0
0
0
0.333333
1
0.111111
false
0
0.333333
0
0.555556
0
0
0
0
null
0
0
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
1
0
0
0
0
2
cf252db5b4b95b5dc23031fed96cc1a89f8cd949
2,475
py
Python
openGaussBase/testcase/SECURITY/MODIFY/Opengauss_Function_Security_Mgt_Manamegent_Case0001.py
opengauss-mirror/Yat
aef107a8304b94e5d99b4f1f36eb46755eb8919e
[ "MulanPSL-1.0" ]
null
null
null
openGaussBase/testcase/SECURITY/MODIFY/Opengauss_Function_Security_Mgt_Manamegent_Case0001.py
opengauss-mirror/Yat
aef107a8304b94e5d99b4f1f36eb46755eb8919e
[ "MulanPSL-1.0" ]
null
null
null
openGaussBase/testcase/SECURITY/MODIFY/Opengauss_Function_Security_Mgt_Manamegent_Case0001.py
opengauss-mirror/Yat
aef107a8304b94e5d99b4f1f36eb46755eb8919e
[ "MulanPSL-1.0" ]
null
null
null
""" Copyright (c) 2022 Huawei Technologies Co.,Ltd. openGauss is licensed under Mulan PSL v2. You can use this software according to the terms and conditions of the Mulan PSL v2. You may obtain a copy of Mulan PSL v2 at: http://license.coscl.org.cn/MulanPSL2 THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE. See the Mulan PSL v2 for more details. """ """ Case Type : 防篡改 Case Name : 验证历史记录数可配置 Description : 1.查询参数password_reuse_max值 2.修改password_reuse_max参数值为3 3.恢复默认值 Expect : 1.显示默认值0 2.显示设置后的值3 3.默认值恢复成功 History : """ import os import unittest from testcase.utils.Constant import Constant from testcase.utils.Common import Common from testcase.utils.CommonSH import CommonSH from testcase.utils.Logger import Logger class ModifyCase(unittest.TestCase): def setUp(self): self.logger = Logger() self.logger.info(f'-----{os.path.basename(__file__)} start-----') self.primary_sh = CommonSH('PrimaryDbUser') self.Constant = Constant() self.common = Common() self.default_value = self.common.show_param('password_reuse_max') def test_security(self): text = '----step1:查询参数password_reuse_max值; expect:默认值0----' self.logger.info(text) show_para = self.default_value self.logger.info(show_para) self.assertEqual("0", show_para, "执行失败:" + text) text = '----step2:修改password_reuse_max参数值为3 expect:成功----' self.logger.info(text) sql_cmd = self.primary_sh.execut_db_sql(f''' alter system set password_reuse_max to 3; select pg_sleep(2); show password_reuse_max;''') self.logger.info(sql_cmd) self.assertEqual("3", sql_cmd.split("\n")[-2].strip(), "执行失败:" + text) def tearDown(self): text = '----step3:恢复默认值 expect:成功----' self.logger.info(text) sql_cmd = self.primary_sh.execut_db_sql(f''' alter system set password_reuse_max to {self.default_value}; select pg_sleep(2); show password_reuse_max;''') self.logger.info(sql_cmd) self.assertEqual("0", 
sql_cmd.split("\n")[-2].strip(), "执行失败:" + text) self.logger.info(f'-----{os.path.basename(__file__)} end-----')
33
84
0.645657
327
2,475
4.737003
0.431193
0.058102
0.072305
0.034861
0.280181
0.280181
0.280181
0.280181
0.204003
0.204003
0
0.016367
0.234747
2,475
74
85
33.445946
0.801478
0.205253
0
0.325
0
0
0.304571
0.089143
0
0
0
0
0.075
1
0.075
false
0.175
0.15
0
0.25
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
1
0
0
0
0
0
1
cf259671523b5fd8d5e44da5a9826f127e7fbc84
162
py
Python
big-meme/src/model/meme.py
neovasili/101_serverless_workshop
a005ab4af620c3c1a522aab8d201378ea7840ab5
[ "MIT" ]
4
2019-11-13T17:58:15.000Z
2020-03-12T12:24:10.000Z
big-meme/src/model/meme.py
neovasili/101_serverless_workshop
a005ab4af620c3c1a522aab8d201378ea7840ab5
[ "MIT" ]
null
null
null
big-meme/src/model/meme.py
neovasili/101_serverless_workshop
a005ab4af620c3c1a522aab8d201378ea7840ab5
[ "MIT" ]
null
null
null
class Meme(object):
    """Simple value holder for a meme: a name, an image and its themes."""

    def __init__(self, name, image, themes):
        # Name-mangled ("private") attributes mirroring the constructor
        # arguments; there are no accessors on this class.
        self.__name = name
        self.__image = image
        self.__themes = themes
20.25
46
0.574074
18
162
4.611111
0.5
0.192771
0
0
0
0
0
0
0
0
0
0
0.333333
162
8
47
20.25
0.768519
0
0
0
0
0
0
0
0
0
0
0
0
1
0.2
false
0
0
0
0.4
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
2
cf27715d55b617221f21406b94ab34e0ac04baac
5,981
py
Python
lib/surface/compute/instance_groups/managed/abandon_instances.py
eyalev/gcloud
421ee63a0a6d90a097e8530d53a6df5b905a0205
[ "Apache-2.0" ]
null
null
null
lib/surface/compute/instance_groups/managed/abandon_instances.py
eyalev/gcloud
421ee63a0a6d90a097e8530d53a6df5b905a0205
[ "Apache-2.0" ]
null
null
null
lib/surface/compute/instance_groups/managed/abandon_instances.py
eyalev/gcloud
421ee63a0a6d90a097e8530d53a6df5b905a0205
[ "Apache-2.0" ]
2
2020-11-04T03:08:21.000Z
2020-11-05T08:14:41.000Z
# Copyright 2015 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Command for abandoning instances owned by a managed instance group."""
from googlecloudsdk.api_lib.compute import base_classes
from googlecloudsdk.api_lib.compute import instance_groups_utils
from googlecloudsdk.calliope import arg_parsers
from googlecloudsdk.calliope import base
from googlecloudsdk.command_lib.compute import flags


def _AddArgs(parser, multizonal):
  """Adds args.

  Registers the positional group name and the required --instances list.
  When multizonal is True, --region and --zone are added as a mutually
  exclusive pair (no default scope); otherwise only --zone is added.
  """
  parser.add_argument('name',
                      help='The managed instance group name.')
  parser.add_argument(
      '--instances',
      type=arg_parsers.ArgList(min_length=1),
      action=arg_parsers.FloatingListValuesCatcher(),
      metavar='INSTANCE',
      required=True,
      help='Names of instances to abandon.')
  if multizonal:
    scope_parser = parser.add_mutually_exclusive_group()
    flags.AddRegionFlag(
        scope_parser,
        resource_type='instance group',
        operation_type='abandon instances',
        explanation=flags.REGION_PROPERTY_EXPLANATION_NO_DEFAULT)
    flags.AddZoneFlag(
        scope_parser,
        resource_type='instance group manager',
        operation_type='abandon instances',
        explanation=flags.ZONE_PROPERTY_EXPLANATION_NO_DEFAULT)
  else:
    flags.AddZoneFlag(
        parser,
        resource_type='instance group manager',
        operation_type='abandon instances')


@base.ReleaseTracks(base.ReleaseTrack.GA, base.ReleaseTrack.BETA)
class AbandonInstances(base_classes.BaseAsyncMutator):
  """Abandon instances owned by a managed instance group."""

  @staticmethod
  def Args(parser):
    # GA/BETA track: zonal groups only.
    _AddArgs(parser=parser, multizonal=False)

  @property
  def method(self):
    # Name of the API method invoked on the service.
    return 'AbandonInstances'

  @property
  def service(self):
    return self.compute.instanceGroupManagers

  @property
  def resource_type(self):
    return 'instanceGroupManagers'

  def CreateRequests(self, args):
    """Builds the (method, request) tuple list for the abandon call."""
    zone_ref = self.CreateZonalReference(args.name, args.zone)
    instance_refs = self.CreateZonalReferences(
        args.instances, zone_ref.zone, resource_type='instances')
    # The API expects instance URLs, not bare names.
    instances = [instance_ref.SelfLink() for instance_ref in instance_refs]
    return [(self.method,
             self.messages.ComputeInstanceGroupManagersAbandonInstancesRequest(
                 instanceGroupManager=zone_ref.Name(),
                 instanceGroupManagersAbandonInstancesRequest=(
                     self.messages.InstanceGroupManagersAbandonInstancesRequest(
                         instances=instances,
                     )
                 ),
                 project=self.project,
                 zone=zone_ref.zone,
             ),),]


@base.ReleaseTracks(base.ReleaseTrack.ALPHA)
class AbandonInstancesAlpha(base_classes.BaseAsyncMutator,
                            instance_groups_utils.InstancesReferenceMixin):
  """Abandon instances owned by a managed instance group."""

  @staticmethod
  def Args(parser):
    # ALPHA track additionally supports regional (multi-zone) groups.
    _AddArgs(parser=parser, multizonal=True)

  @property
  def method(self):
    return 'AbandonInstances'

  @property
  def service(self):
    return self.compute.instanceGroupManagers

  @property
  def resource_type(self):
    return 'instanceGroupManagers'

  def CreateRequests(self, args):
    """Builds the (service, method, request) tuple for zonal or regional groups."""
    errors = []
    group_ref = instance_groups_utils.CreateInstanceGroupReference(
        scope_prompter=self, compute=self.compute, resources=self.resources,
        name=args.name, region=args.region, zone=args.zone)
    # NOTE(review): *errors* is passed to CreateInstanceReferences but never
    # inspected here -- presumably the mixin reports them; confirm.
    instances = self.CreateInstanceReferences(
        group_ref, args.instances, errors)
    # Zonal and regional groups use different services and request types.
    if group_ref.Collection() == 'compute.instanceGroupManagers':
      service = self.compute.instanceGroupManagers
      request = (
          self.messages.
          ComputeInstanceGroupManagersAbandonInstancesRequest(
              instanceGroupManager=group_ref.Name(),
              instanceGroupManagersAbandonInstancesRequest=(
                  self.messages.InstanceGroupManagersAbandonInstancesRequest(
                      instances=instances,
                  )
              ),
              project=self.project,
              zone=group_ref.zone,
          ))
    else:
      service = self.compute.regionInstanceGroupManagers
      request = (
          self.messages.
          ComputeRegionInstanceGroupManagersAbandonInstancesRequest(
              instanceGroupManager=group_ref.Name(),
              regionInstanceGroupManagersAbandonInstancesRequest=(
                  self.messages.
                  RegionInstanceGroupManagersAbandonInstancesRequest(
                      instances=instances,
                  )
              ),
              project=self.project,
              region=group_ref.region,
          ))
    return [(service, self.method, request)]


AbandonInstances.detailed_help = {
    'brief': 'Abandon instances owned by a managed instance group.',
    'DESCRIPTION': """
        *{command}* abandons one or more instances from a managed instance
group, thereby reducing the targetSize of the group. Once instances have been
abandoned, the currentSize of the group is automatically reduced as well to
reflect the change.

Abandoning an instance does not delete the underlying virtual machine
instances, but just removes the instances from the instance group. If you
would like the delete the underlying instances, use the delete-instances
command instead.
""",
}
AbandonInstancesAlpha.detailed_help = AbandonInstances.detailed_help
35.390533
80
0.69587
590
5,981
6.947458
0.325424
0.031715
0.029275
0.025616
0.312515
0.303733
0.260795
0.251769
0.241034
0.241034
0
0.00195
0.22839
5,981
168
81
35.60119
0.886241
0.126066
0
0.434109
0
0
0.165383
0.01367
0
0
0
0
0
1
0.085271
false
0
0.03876
0.046512
0.20155
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
cf289c374ee47f4952cddf28f571e3c1c464ba43
1,185
py
Python
day:22/isBinaryTreeSymmetric.py
hawaijar/FireLeetcode
e981e96f6a38a3b08e9b7ef59aec65f6e0e5728a
[ "MIT" ]
1
2020-10-21T12:28:23.000Z
2020-10-21T12:28:23.000Z
day:22/isBinaryTreeSymmetric.py
hawaijar/FireLeetcode
e981e96f6a38a3b08e9b7ef59aec65f6e0e5728a
[ "MIT" ]
null
null
null
day:22/isBinaryTreeSymmetric.py
hawaijar/FireLeetcode
e981e96f6a38a3b08e9b7ef59aec65f6e0e5728a
[ "MIT" ]
1
2020-10-21T12:28:24.000Z
2020-10-21T12:28:24.000Z
# Definition for a binary tree node.
# class TreeNode:
#     def __init__(self, val=0, left=None, right=None):
#         self.val = val
#         self.left = left
#         self.right = right


class Solution:
    # 'TreeNode' is a forward reference (the class above is commented out);
    # quoting it avoids a NameError when this module is loaded standalone.
    def isSymmetric(self, root: 'TreeNode') -> bool:
        """Return True iff the tree is a mirror of itself around its root.

        Breadth-first level check: record each level's values left to
        right with 'null' marking missing children; the tree is symmetric
        exactly when every such level reads as a palindrome.
        """
        # Base cases: an empty tree or a lone root is symmetric.
        if root is None:
            return True
        if root.left is None and root.right is None:
            return True
        queue = [root]
        while queue:
            level = []       # values on this level, 'null' for holes
            next_queue = []  # children of this level, None included
            for node in queue:
                if node is None:
                    level.append('null')
                else:
                    level.append(node.val)
                    next_queue.append(node.left)
                    next_queue.append(node.right)
            if self.isPalindrome(level) is False:
                return False
            queue = next_queue
        return True

    def isPalindrome(self, list):
        """Return True iff *list* reads the same forwards and backwards."""
        # Parameter name kept for interface compatibility, although it
        # shadows the builtin within this method.
        return list == list[::-1]
28.902439
66
0.420253
131
1,185
3.770992
0.335878
0.048583
0.048583
0.064777
0
0
0
0
0
0
0
0.012422
0.45654
1,185
40
67
29.625
0.754658
0.161181
0
0.206897
0
0
0.004057
0
0
0
0
0
0
1
0.068966
false
0
0
0
0.206897
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0