hexsha
string
size
int64
ext
string
lang
string
max_stars_repo_path
string
max_stars_repo_name
string
max_stars_repo_head_hexsha
string
max_stars_repo_licenses
list
max_stars_count
int64
max_stars_repo_stars_event_min_datetime
string
max_stars_repo_stars_event_max_datetime
string
max_issues_repo_path
string
max_issues_repo_name
string
max_issues_repo_head_hexsha
string
max_issues_repo_licenses
list
max_issues_count
int64
max_issues_repo_issues_event_min_datetime
string
max_issues_repo_issues_event_max_datetime
string
max_forks_repo_path
string
max_forks_repo_name
string
max_forks_repo_head_hexsha
string
max_forks_repo_licenses
list
max_forks_count
int64
max_forks_repo_forks_event_min_datetime
string
max_forks_repo_forks_event_max_datetime
string
content
string
avg_line_length
float64
max_line_length
int64
alphanum_fraction
float64
qsc_code_num_words_quality_signal
int64
qsc_code_num_chars_quality_signal
float64
qsc_code_mean_word_length_quality_signal
float64
qsc_code_frac_words_unique_quality_signal
float64
qsc_code_frac_chars_top_2grams_quality_signal
float64
qsc_code_frac_chars_top_3grams_quality_signal
float64
qsc_code_frac_chars_top_4grams_quality_signal
float64
qsc_code_frac_chars_dupe_5grams_quality_signal
float64
qsc_code_frac_chars_dupe_6grams_quality_signal
float64
qsc_code_frac_chars_dupe_7grams_quality_signal
float64
qsc_code_frac_chars_dupe_8grams_quality_signal
float64
qsc_code_frac_chars_dupe_9grams_quality_signal
float64
qsc_code_frac_chars_dupe_10grams_quality_signal
float64
qsc_code_frac_chars_replacement_symbols_quality_signal
float64
qsc_code_frac_chars_digital_quality_signal
float64
qsc_code_frac_chars_whitespace_quality_signal
float64
qsc_code_size_file_byte_quality_signal
float64
qsc_code_num_lines_quality_signal
float64
qsc_code_num_chars_line_max_quality_signal
float64
qsc_code_num_chars_line_mean_quality_signal
float64
qsc_code_frac_chars_alphabet_quality_signal
float64
qsc_code_frac_chars_comments_quality_signal
float64
qsc_code_cate_xml_start_quality_signal
float64
qsc_code_frac_lines_dupe_lines_quality_signal
float64
qsc_code_cate_autogen_quality_signal
float64
qsc_code_frac_lines_long_string_quality_signal
float64
qsc_code_frac_chars_string_length_quality_signal
float64
qsc_code_frac_chars_long_word_length_quality_signal
float64
qsc_code_frac_lines_string_concat_quality_signal
float64
qsc_code_cate_encoded_data_quality_signal
float64
qsc_code_frac_chars_hex_words_quality_signal
float64
qsc_code_frac_lines_prompt_comments_quality_signal
float64
qsc_code_frac_lines_assert_quality_signal
float64
qsc_codepython_cate_ast_quality_signal
float64
qsc_codepython_frac_lines_func_ratio_quality_signal
float64
qsc_codepython_cate_var_zero_quality_signal
bool
qsc_codepython_frac_lines_pass_quality_signal
float64
qsc_codepython_frac_lines_import_quality_signal
float64
qsc_codepython_frac_lines_simplefunc_quality_signal
float64
qsc_codepython_score_lines_no_logic_quality_signal
float64
qsc_codepython_frac_lines_print_quality_signal
float64
qsc_code_num_words
int64
qsc_code_num_chars
int64
qsc_code_mean_word_length
int64
qsc_code_frac_words_unique
null
qsc_code_frac_chars_top_2grams
int64
qsc_code_frac_chars_top_3grams
int64
qsc_code_frac_chars_top_4grams
int64
qsc_code_frac_chars_dupe_5grams
int64
qsc_code_frac_chars_dupe_6grams
int64
qsc_code_frac_chars_dupe_7grams
int64
qsc_code_frac_chars_dupe_8grams
int64
qsc_code_frac_chars_dupe_9grams
int64
qsc_code_frac_chars_dupe_10grams
int64
qsc_code_frac_chars_replacement_symbols
int64
qsc_code_frac_chars_digital
int64
qsc_code_frac_chars_whitespace
int64
qsc_code_size_file_byte
int64
qsc_code_num_lines
int64
qsc_code_num_chars_line_max
int64
qsc_code_num_chars_line_mean
int64
qsc_code_frac_chars_alphabet
int64
qsc_code_frac_chars_comments
int64
qsc_code_cate_xml_start
int64
qsc_code_frac_lines_dupe_lines
int64
qsc_code_cate_autogen
int64
qsc_code_frac_lines_long_string
int64
qsc_code_frac_chars_string_length
int64
qsc_code_frac_chars_long_word_length
int64
qsc_code_frac_lines_string_concat
null
qsc_code_cate_encoded_data
int64
qsc_code_frac_chars_hex_words
int64
qsc_code_frac_lines_prompt_comments
int64
qsc_code_frac_lines_assert
int64
qsc_codepython_cate_ast
int64
qsc_codepython_frac_lines_func_ratio
int64
qsc_codepython_cate_var_zero
int64
qsc_codepython_frac_lines_pass
int64
qsc_codepython_frac_lines_import
int64
qsc_codepython_frac_lines_simplefunc
int64
qsc_codepython_score_lines_no_logic
int64
qsc_codepython_frac_lines_print
int64
effective
string
hits
int64
4dd256c4f089e9ad67a06acbe8f272e2a293bfe0
94
py
Python
release/stubs.min/Tekla/Structures/ModelInternal_parts/dotPolymeshValidateInvalidInfo_t.py
htlcnn/ironpython-stubs
780d829e2104b2789d5f4d6f32b0ec9f2930ca03
[ "MIT" ]
182
2017-06-27T02:26:15.000Z
2022-03-30T18:53:43.000Z
release/stubs.min/Tekla/Structures/ModelInternal_parts/dotPolymeshValidateInvalidInfo_t.py
htlcnn/ironpython-stubs
780d829e2104b2789d5f4d6f32b0ec9f2930ca03
[ "MIT" ]
28
2017-06-27T13:38:23.000Z
2022-03-15T11:19:44.000Z
release/stubs.min/Tekla/Structures/ModelInternal_parts/dotPolymeshValidateInvalidInfo_t.py
htlcnn/ironpython-stubs
780d829e2104b2789d5f4d6f32b0ec9f2930ca03
[ "MIT" ]
67
2017-06-28T09:43:59.000Z
2022-03-20T21:17:10.000Z
class dotPolymeshValidateInvalidInfo_t(object): # no doc ClientId=None nInvalidFaces=None
15.666667
47
0.819149
10
94
7.6
0.9
0
0
0
0
0
0
0
0
0
0
0
0.117021
94
5
48
18.8
0.915663
0.06383
0
0
0
0
0
0
0
0
0
0
0
1
0
false
0
0
0
1
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
1
0
0
3
4de41394c9e4f0e77736d64e08a2a321e52d6fba
2,276
py
Python
test/test_data_processing_agreements_api.py
My-Data-My-Consent/python-sdk
414640bcda6350e6f5e74e42442737eb8d5b7447
[ "Apache-2.0" ]
null
null
null
test/test_data_processing_agreements_api.py
My-Data-My-Consent/python-sdk
414640bcda6350e6f5e74e42442737eb8d5b7447
[ "Apache-2.0" ]
5
2021-12-19T10:29:43.000Z
2022-03-31T22:15:37.000Z
test/test_data_processing_agreements_api.py
mydatamyconsent/python-sdk
414640bcda6350e6f5e74e42442737eb8d5b7447
[ "Apache-2.0" ]
null
null
null
""" My Data My Consent - Developer API Unleashing the power of data consent by establishing trust. The Platform Core Developer API defines a set of capabilities that can be used to request, issue, manage and update data, documents and credentials by organizations. The API can be used to request, manage and update Decentralised Identifiers, Financial Data, Health Data issue Documents, Credentials directly or using OpenID Connect flows, and verify Messages signed with DIDs and much more. # noqa: E501 The version of the OpenAPI document: v1 Contact: support@mydatamyconsent.com Generated by: https://openapi-generator.tech """ import unittest import com.mydatamyconsent from com.mydatamyconsent.api.data_processing_agreements_api import DataProcessingAgreementsApi # noqa: E501 class TestDataProcessingAgreementsApi(unittest.TestCase): """DataProcessingAgreementsApi unit test stubs""" def setUp(self): self.api = DataProcessingAgreementsApi() # noqa: E501 def tearDown(self): pass def test_v1_data_agreements_get(self): """Test case for v1_data_agreements_get Get all data processing agreements. # noqa: E501 """ pass def test_v1_data_agreements_id_delete(self): """Test case for v1_data_agreements_id_delete Delete a data processing agreement. This will not delete a published or a agreement in use with consents. # noqa: E501 """ pass def test_v1_data_agreements_id_get(self): """Test case for v1_data_agreements_id_get Get data processing agreement by Id. # noqa: E501 """ pass def test_v1_data_agreements_id_put(self): """Test case for v1_data_agreements_id_put Update a data processing agreement. # noqa: E501 """ pass def test_v1_data_agreements_id_terminate_put(self): """Test case for v1_data_agreements_id_terminate_put Terminate a data processing agreement. # noqa: E501 """ pass def test_v1_data_agreements_post(self): """Test case for v1_data_agreements_post Create a data processing agreement. # noqa: E501 """ pass if __name__ == '__main__': unittest.main()
31.611111
469
0.703427
292
2,276
5.267123
0.342466
0.046814
0.124837
0.093628
0.373862
0.338752
0.307542
0.263979
0.197009
0.076723
0
0.022989
0.235501
2,276
71
470
32.056338
0.86092
0.579525
0
0.318182
0
0
0.010114
0
0
0
0
0
0
1
0.363636
false
0.318182
0.136364
0
0.545455
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
1
0
1
0
0
1
0
0
3
4de5fa0a4d2f2388e56a4b71e084e4c4d1f90880
145
py
Python
tmrwppk/__init__.py
hobakill/tmrwppk
956cf034b93ba92f67c1bd173bbd6da4416f011e
[ "MIT" ]
null
null
null
tmrwppk/__init__.py
hobakill/tmrwppk
956cf034b93ba92f67c1bd173bbd6da4416f011e
[ "MIT" ]
1
2021-11-15T17:49:13.000Z
2021-11-15T17:49:13.000Z
tmrwppk/__init__.py
hobakill/tmrwppk
956cf034b93ba92f67c1bd173bbd6da4416f011e
[ "MIT" ]
null
null
null
# -*- coding: utf-8 -*- """Top-level package for tmrwppk.""" __author__ = """Nick Hobart""" __email__ = 'nick@hobart.io' __version__ = '0.0.6'
18.125
36
0.62069
19
145
4.105263
0.842105
0.25641
0
0
0
0
0
0
0
0
0
0.032258
0.144828
145
7
37
20.714286
0.596774
0.365517
0
0
0
0
0.348837
0
0
0
0
0
0
1
0
false
0
0
0
0
0
1
0
0
null
1
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
3
4df9601e57fc2bd477bc589b9111b33d0c20824d
1,318
py
Python
grr/test/grr_response_test/api_regression_test_generate.py
tsehori/grr
048506f22f74642bfe61749069a45ddf496fdab3
[ "Apache-2.0" ]
1
2021-07-24T17:22:50.000Z
2021-07-24T17:22:50.000Z
grr/test/grr_response_test/api_regression_test_generate.py
tsehori/grr
048506f22f74642bfe61749069a45ddf496fdab3
[ "Apache-2.0" ]
44
2021-05-14T22:49:24.000Z
2022-03-13T21:54:02.000Z
grr/test/grr_response_test/api_regression_test_generate.py
tsehori/grr
048506f22f74642bfe61749069a45ddf496fdab3
[ "Apache-2.0" ]
2
2022-02-25T08:34:51.000Z
2022-03-16T17:29:44.000Z
#!/usr/bin/env python """Program that generates golden regression data.""" from __future__ import absolute_import from __future__ import division from __future__ import unicode_literals from absl import app from grr_response_server.gui import api_regression_test_lib # pylint: disable=unused-import from grr_response_server.gui.api_plugins import artifact_regression_test from grr_response_server.gui.api_plugins import client_regression_test from grr_response_server.gui.api_plugins import config_regression_test from grr_response_server.gui.api_plugins import cron_regression_test from grr_response_server.gui.api_plugins import flow_regression_test from grr_response_server.gui.api_plugins import hunt_regression_test from grr_response_server.gui.api_plugins import output_plugin_regression_test from grr_response_server.gui.api_plugins import reflection_regression_test from grr_response_server.gui.api_plugins import stats_regression_test from grr_response_server.gui.api_plugins import user_regression_test from grr_response_server.gui.api_plugins import vfs_regression_test # pylint: enable=unused-import def main(argv): """Entry function.""" api_regression_test_lib.main(argv) def DistEntry(): """The main entry point for packages.""" app.run(main) if __name__ == "__main__": app.run(main)
33.794872
77
0.852807
196
1,318
5.295918
0.285714
0.175337
0.17341
0.242775
0.581888
0.558767
0.558767
0.558767
0.520231
0.520231
0
0
0.088771
1,318
38
78
34.684211
0.86428
0.134294
0
0.090909
1
0
0.007124
0
0
0
0
0
0
1
0.090909
false
0
0.727273
0
0.818182
0
0
0
0
null
0
0
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
null
0
0
0
0
0
0
0
0
1
0
0
0
0
3
151cbd3eae8197b33fcc0379a280059a09b0bfc2
4,763
py
Python
docs/code/p3/ntumc.py
bond-lab/Language-and-the-Computer
58d808cdbe6873256f1d0fa091ebc5d909f211bb
[ "CC-BY-4.0" ]
null
null
null
docs/code/p3/ntumc.py
bond-lab/Language-and-the-Computer
58d808cdbe6873256f1d0fa091ebc5d909f211bb
[ "CC-BY-4.0" ]
null
null
null
docs/code/p3/ntumc.py
bond-lab/Language-and-the-Computer
58d808cdbe6873256f1d0fa091ebc5d909f211bb
[ "CC-BY-4.0" ]
null
null
null
from collections import defaultdict as dd import sqlite3 import json corpusdb='/home/bond/dbs/2020-hg2002/final/eng.db' #corpusdb='eng.good.db' conn = sqlite3.connect(corpusdb) c = conn.cursor() def sents(did=440): """Return the sentences for a given docid (default 440)""" stype = dd(str) c.execute("""SELECT sid, sent, comment FROM sent WHERE docid=? ORDER BY sid""", (did,)) sents = list() for (sid, sent, comment) in c: sid = int(sid) if sid in (10000,): stype[sid]='h1' elif sid in (10001, 10006, 10008, 10010, 10011, 10017, 10018, 10021, 10027, 10028, 10029, 10034, 10037, 10038, 10040, 10041, 10045, 10053, 10054, 10060, 10064, 10065, 10066, 10068, 10074, 10080, 10084, 10087, 10090, 10091, 10096, 10097, 10098, 10104, 10105, 10109, 10110, 10111, 10112, 10114, 10118, 10120, 10122, 10123, 10125, 10127, 10128, 10129, 10131, 10133, 10155, 10157, 10159, 10160, 10162, 10165, 10171, 10172, 10173, 10174, 10175, 10176, 10177, 10178, 10180, 10181, 10182, 10190, 10192, 10193, 10195, 10196, 10198, 10199, 10201, 10202, 10206, 10209, 10211, 10212, 10214, 10216, 10219, 10223, 10224, 10225, 10226, 10227, 10228, 10229, 10230, 10231, 10232, 10233, 10237, 10241, 10242, 10243, 10244, 10246, 10250, 10251, 10252, 10253, 10257, 10258, 10259, 10260, 10261, 10264, 10271, 10272, 10275, 10278, 10280, 10290, 10296, 10297, 10299, 10300, 10301, 10302, 10304, 10306, 10307, 10311, 10315, 10317, 10318, 10319, 10321, 10325, 10330, 10331, 10333, 10335, 10337, 10341, 10343, 10345, 10351, 10358, 10359, 10360, 10361, 10362, 10363, 10365, 10371, 10372, 10373, 10376, 10378, 10381, 10382, 10383, 10384, 10386, 10389, 10390, 10391, 10393, 10395, 10396, 10398, 10400, 10402, 10406, 10410, 10412, 10413, 10415, 10419, 10421, 10422, 10423, 10425, 10426, 10427, 10428, 10431, 10432, 10434, 10435, 10438, 10439, 10440, 10441, 10442, 10443, 10444, 10445, 10446, 10447, 10451, 10456, 10458, 10459, 10460, 10461, 10462, 10464, 10466, 10467, 10468, 10470, 10471, 10472, 10477, 10478, 10481, 10482, 10483, 10484, 10486, 10487, 
10489, 10491, 10497, 10499, 10500, 10502, 10504, 10505, 10508, 10510, 10516, 10517, 10519, 10520, 10523, 10524, 10527, 10534, 10540, 10542, 10545, 10549, 10550, 10553, 10556, 10563, 10565, 10567, 10571, 10572, 10575, 10589, 10595, 10596): stype[sid]='p' sents.append((sid, sent, stype[sid], comment)) return sents def tagged_words(did=440): """Return the sentences for a given docid (default 440) list of (sid, wid, word, pos, lemma, cid, clemma, tag, comment)""" ### get concept word link c.execute("""SELECT cwl.sid, cwl.wid, cwl.cid FROM (SELECT sid FROM sent WHERE docid=? ORDER BY sid) as sent JOIN cwl on sent.sid=cwl.sid""", (did,)) clink = dd(lambda: dd(list)) for (sid, wid, cid) in c: clink[sid][wid].append(cid) c.execute("""SELECT sent.sid, cid, clemma, tag, comment FROM (SELECT sid FROM sent WHERE docid=? ORDER BY sid) as sent JOIN concept on sent.sid=concept.sid""", (did,)) concept = dd(lambda: dd()) for (sid, cid, clemma, tag, comment) in c: if tag and tag not in ['x', 'w', 'e']: ### check 'w' and 'e' later ##print(sid, cid, clemma, tag, comment) concept[sid][cid] = (clemma, tag, comment) c.execute("""SELECT sent.sid, cid, score FROM (SELECT sid FROM sent WHERE docid=? ORDER BY sid) as sent JOIN sentiment on sent.sid=sentiment.sid""", (did,)) sentiment = dd(lambda: dd()) for sid, cid, score in c: sentiment[sid][cid] = score c.execute("""SELECT word.sid, wid, word, lemma, pos, word.comment FROM (SELECT sid FROM sent WHERE docid=? 
ORDER BY sid) as sent JOIN word on sent.sid=word.sid ORDER by word.sid, wid""", (did,)) words = list() for (sid, wid, word, lemma, pos, comment) in c: ##print (sid, wid, word, lemma, pos, comment) ok_clemma = '' ok_cid = -1 ok_tag = '' ok_sentiment = 0.0 for cid in clink[sid][wid]: if cid in concept[sid]: (clemma, tag, comment) = concept[sid][cid] ok_clemma = clemma ok_cid = cid ok_tag = tag if cid in sentiment[sid]: ok_sentiment = sentiment[sid][cid] / 100 words.append((sid, wid, word, lemma, pos, ok_cid, ok_clemma, ok_tag, ok_sentiment, comment)) return words data = sents() with open('sents.json', 'w') as outfile: json.dump(data, outfile, indent=2) data = tagged_words() with open('words.json', 'w') as outfile: json.dump(data, outfile, indent=2)
51.215054
1,771
0.620827
676
4,763
4.35355
0.505917
0.018349
0.03262
0.030581
0.237513
0.20931
0.143391
0.133877
0.133877
0.133877
0
0.355543
0.240605
4,763
92
1,772
51.771739
0.458114
0.066765
0
0.138889
0
0
0.170136
0.013801
0
0
0
0
0
1
0.027778
false
0
0.041667
0
0.097222
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
1
0
0
0
1
0
1
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
3
1274beb2be25373cf963408f49826fcb43903ab4
109
py
Python
turtle/static/game/level1/prompt1.py
ramimanna/turtle-playground
6d06a10bb06ea2cf5148022c5098cf172352f800
[ "MIT" ]
null
null
null
turtle/static/game/level1/prompt1.py
ramimanna/turtle-playground
6d06a10bb06ea2cf5148022c5098cf172352f800
[ "MIT" ]
null
null
null
turtle/static/game/level1/prompt1.py
ramimanna/turtle-playground
6d06a10bb06ea2cf5148022c5098cf172352f800
[ "MIT" ]
null
null
null
#The game has started! #Level 1 #You are Player 1 #You have a clone of turtle called p1 def play_turn(p1):
18.166667
37
0.724771
22
109
3.545455
0.863636
0.102564
0
0
0
0
0
0
0
0
0
0.045977
0.201835
109
6
38
18.166667
0.850575
0.733945
0
0
0
0
0
0
0
0
0
0
0
0
null
null
0
0
null
null
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
1
0
0
0
0
0
0
0
0
3
1296eeb7b61297fcb047fa8ca955edd95fb5dc10
22
py
Python
src/tempra/config.py
jasondraether/tempra
832b319960ac3ec312dc240b6c7b1101d6bb5b78
[ "MIT" ]
null
null
null
src/tempra/config.py
jasondraether/tempra
832b319960ac3ec312dc240b6c7b1101d6bb5b78
[ "MIT" ]
null
null
null
src/tempra/config.py
jasondraether/tempra
832b319960ac3ec312dc240b6c7b1101d6bb5b78
[ "MIT" ]
null
null
null
supported_units = [ ]
7.333333
19
0.681818
2
22
7
1
0
0
0
0
0
0
0
0
0
0
0
0.181818
22
3
20
7.333333
0.777778
0
0
0
0
0
0
0
0
0
0
0
0
1
0
false
0
0
0
0
0
1
1
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
3
12b4f485b973b0f35c403bc70b9c444e4fd56889
70
py
Python
setup.py
biomedical-cybernetics/pypsis
987cea5e2895a9744a266787b7f7f5b702e128ca
[ "MIT" ]
2
2021-08-03T09:38:33.000Z
2021-11-12T21:23:00.000Z
setup.py
biomedical-cybernetics/pypsis
987cea5e2895a9744a266787b7f7f5b702e128ca
[ "MIT" ]
null
null
null
setup.py
biomedical-cybernetics/pypsis
987cea5e2895a9744a266787b7f7f5b702e128ca
[ "MIT" ]
null
null
null
from setuptools import setup setup( use_scm_version=True, )
11.666667
29
0.7
9
70
5.222222
0.888889
0
0
0
0
0
0
0
0
0
0
0
0.242857
70
5
30
14
0.886792
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
0.25
0
0.25
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
0
0
0
0
0
3
12f42f92075c6af3d1295415f2dd35955d9e28fb
509
py
Python
6 kyu - Multiples of 3 or 5.py
ricardopizzimenti/Codewars
bbebc3f62642e22109d6a5ac0cdd638bca02e6ca
[ "MIT" ]
null
null
null
6 kyu - Multiples of 3 or 5.py
ricardopizzimenti/Codewars
bbebc3f62642e22109d6a5ac0cdd638bca02e6ca
[ "MIT" ]
null
null
null
6 kyu - Multiples of 3 or 5.py
ricardopizzimenti/Codewars
bbebc3f62642e22109d6a5ac0cdd638bca02e6ca
[ "MIT" ]
null
null
null
""" If we list all the natural numbers below 10 that are multiples of 3 or 5, we get 3, 5, 6 and 9. The sum of these multiples is 23. Finish the solution so that it returns the sum of all the multiples of 3 or 5 below the number passed in. Note: If the number is a multiple of both 3 and 5, only count it once. Also, if a number is negative, return 0(for languages that do have them) """ def solution(number): return sum([i for i in range(3, number) if i % 3 == 0 or i % 5 == 0]) print(solution(10))
31.8125
105
0.699411
105
509
3.390476
0.495238
0.033708
0.067416
0.078652
0.08427
0
0
0
0
0
0
0.055696
0.223969
509
16
106
31.8125
0.84557
0.748527
0
0
0
0
0
0
0
0
0
0
0
1
0.333333
false
0
0
0.333333
0.666667
0.333333
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
1
0
0
0
1
1
0
0
3
12f4f51baaa7ab46639b05a15fb7e30c04ec5672
714
py
Python
saturpy/datasets/load_datasets.py
sciencedt/saturpy
7edd20988ee550b4441053bbb6aae90dfbd80fb5
[ "Apache-2.0" ]
1
2020-05-30T16:12:43.000Z
2020-05-30T16:12:43.000Z
saturpy/datasets/load_datasets.py
sciencedt/saturpy
7edd20988ee550b4441053bbb6aae90dfbd80fb5
[ "Apache-2.0" ]
null
null
null
saturpy/datasets/load_datasets.py
sciencedt/saturpy
7edd20988ee550b4441053bbb6aae90dfbd80fb5
[ "Apache-2.0" ]
null
null
null
""" loads different data sets """ import os import pandas as pd def load_iris(): """ load iris data sets sepal_length, sepal_width, petal_length, petal_width, species 150x4 0=setosa, 1=versicolor, 2=virginica :return: """ return pd.read_csv(get_file_path('data/iris.csv')) def get_file_path(file_to_load): """ append the path file path into data path :param file_to_load: :return: """ # TODO manage proper data set loading return os.path.join(os.getcwd(), 'saturpy/datasets', file_to_load) class DataSet: """ load dataset into class """ def __init__(self): self.data = [] self.feature = [] self.targets = []
19.297297
70
0.627451
96
714
4.458333
0.520833
0.056075
0.070093
0
0
0
0
0
0
0
0
0.013183
0.256303
714
36
71
19.833333
0.792844
0.406162
0
0
0
0
0.083573
0
0
0
0
0.027778
0
1
0.272727
false
0
0.181818
0
0.727273
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
1
0
0
1
0
0
0
0
1
0
0
3
12f5884ef2b975c043d06c56a5071664e3878103
634
py
Python
filters/gaussian_filter.py
neodyme60/raypy
a898ba46e07ba299fcb1729dca45abdc1e944f83
[ "MIT" ]
1
2018-05-15T04:00:17.000Z
2018-05-15T04:00:17.000Z
filters/gaussian_filter.py
neodyme60/raypy
a898ba46e07ba299fcb1729dca45abdc1e944f83
[ "MIT" ]
null
null
null
filters/gaussian_filter.py
neodyme60/raypy
a898ba46e07ba299fcb1729dca45abdc1e944f83
[ "MIT" ]
null
null
null
import math from core.filter import Filter class GaussianFilter(Filter): def __init__(self, width: float, height: float, alpha: float): super().__init__(width, height) self.alpha = alpha self.exp_x = math.exp(-self.alpha * self.width * self.width) self.exp_y = math.exp(-self.alpha * self.height * self.height) def evaluate(self, x: float, y: float) -> float: return self.get_gaussian(x, self.exp_x) * self.get_gaussian(y, self.exp_y) def get_gaussian(self, d: float, exp_v: float) -> float: return max(0.0, float(math.exp(-self.alpha * d * d) - exp_v))
37.294118
83
0.634069
94
634
4.095745
0.276596
0.093506
0.085714
0.124675
0.103896
0
0
0
0
0
0
0.004098
0.230284
634
17
84
37.294118
0.784836
0
0
0
0
0
0
0
0
0
0
0
0
1
0.25
false
0
0.166667
0.166667
0.666667
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
1
0
0
0
1
1
0
0
3
420392830975d2ff85e7a0d8f9ae1d1b2d0319e5
452
py
Python
ecommercejockey/premier/migrations/0008_relevancy_and_notes.py
anniethiessen/dieselr-ecommerce
9268b72553845a4650cdfe7c88b398db3cf92258
[ "MIT" ]
null
null
null
ecommercejockey/premier/migrations/0008_relevancy_and_notes.py
anniethiessen/dieselr-ecommerce
9268b72553845a4650cdfe7c88b398db3cf92258
[ "MIT" ]
11
2020-06-06T00:04:26.000Z
2022-03-12T00:57:41.000Z
ecommercejockey/premier/migrations/0008_relevancy_and_notes.py
anniethiessen/ecommerce-jockey
9268b72553845a4650cdfe7c88b398db3cf92258
[ "MIT" ]
null
null
null
# Generated by Django 2.2.5 on 2019-11-26 00:53 from django.db import migrations class Migration(migrations.Migration): dependencies = [ ('premier', '0007_premierproduct_notes'), ] operations = [ migrations.RemoveField( model_name='premiermanufacturer', name='notes', ), migrations.RemoveField( model_name='premierproduct', name='notes', ), ]
20.545455
49
0.579646
41
452
6.292683
0.658537
0.162791
0.20155
0.232558
0
0
0
0
0
0
0
0.061093
0.311947
452
21
50
21.52381
0.768489
0.099558
0
0.4
1
0
0.185185
0.061728
0
0
0
0
0
1
0
false
0
0.066667
0
0.266667
0
0
0
0
null
0
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
3
420aa866c4ab52c748c5e15dd211bc9255a49b8a
509
py
Python
qtpy/tests/test_qtwebsockets.py
n-elie/qtpy
24fbb72216f18621735cce76022d8eb512468d85
[ "MIT" ]
632
2015-05-31T09:55:21.000Z
2022-03-24T13:37:01.000Z
qtpy/tests/test_qtwebsockets.py
n-elie/qtpy
24fbb72216f18621735cce76022d8eb512468d85
[ "MIT" ]
259
2015-05-04T16:33:45.000Z
2022-03-24T19:48:27.000Z
qtpy/tests/test_qtwebsockets.py
n-elie/qtpy
24fbb72216f18621735cce76022d8eb512468d85
[ "MIT" ]
140
2015-05-04T16:34:13.000Z
2022-03-30T21:05:06.000Z
import pytest from qtpy import PYQT5, PYSIDE2 @pytest.mark.skipif(not (PYQT5 or PYSIDE2), reason="Only available in Qt5 bindings") def test_qtwebsockets(): """Test the qtpy.QtWebSockets namespace""" from qtpy import QtWebSockets assert QtWebSockets.QMaskGenerator is not None assert QtWebSockets.QWebSocket is not None assert QtWebSockets.QWebSocketCorsAuthenticator is not None assert QtWebSockets.QWebSocketProtocol is not None assert QtWebSockets.QWebSocketServer is not None
36.357143
84
0.787819
62
509
6.451613
0.467742
0.225
0.1125
0.15
0.27
0
0
0
0
0
0
0.011682
0.159136
509
13
85
39.153846
0.922897
0.070727
0
0
0
0
0.06424
0
0
0
0
0
0.5
1
0.1
true
0
0.3
0
0.4
0
0
0
0
null
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
1
0
0
1
0
0
0
0
0
0
3
422de53b0a59b1eecbe77b4474e1dac61e350a6c
7,342
py
Python
tests/main/test_views.py
uk-gov-mirror/alphagov.digitalmarketplace-antivirus-api
99876531d4edc4a8c3a8eeb72a57ef3db788847c
[ "MIT" ]
1
2021-10-12T00:23:37.000Z
2021-10-12T00:23:37.000Z
tests/main/test_views.py
uk-gov-mirror/alphagov.digitalmarketplace-antivirus-api
99876531d4edc4a8c3a8eeb72a57ef3db788847c
[ "MIT" ]
22
2018-09-11T09:44:57.000Z
2021-06-02T14:46:37.000Z
tests/main/test_views.py
uk-gov-mirror/alphagov.digitalmarketplace-antivirus-api
99876531d4edc4a8c3a8eeb72a57ef3db788847c
[ "MIT" ]
3
2019-08-29T13:57:58.000Z
2021-04-10T18:07:00.000Z
from functools import partial import json import mock import boto3 import botocore import pytest from ..helpers import BaseApplicationTest @pytest.mark.parametrize("method", ("PUT", "POST",)) class TestScanS3Object(BaseApplicationTest): @mock.patch("app.main.views.scan.scan_and_tag_s3_object", autospec=True) def test_missing_json_keys(self, mock_scan_and_tag_s3_object, method): client = self.get_authorized_client() res = client.open( "/scan/s3-object", method=method, data=json.dumps({ "bucketName": "defense-duriner", "objectVersionId": "abcdef54321wxyz", }), content_type="application/json", ) assert res.status_code == 400 assert res.content_type == "application/json" assert json.loads(res.get_data()) == { "error": "Expected top-level JSON keys: ['objectKey']", } assert mock_scan_and_tag_s3_object.called is False @mock.patch("app.main.views.scan.scan_and_tag_s3_object", autospec=True) def test_json_not_obj(self, mock_scan_and_tag_s3_object, method): client = self.get_authorized_client() res = client.open( "/scan/s3-object", method=method, data=json.dumps(54321), content_type="application/json", ) assert res.status_code == 400 assert res.content_type == "application/json" assert json.loads(res.get_data()) == { "error": "Expected content to be JSON object", } assert mock_scan_and_tag_s3_object.called is False @mock.patch("app.main.views.scan.scan_and_tag_s3_object", autospec=True) def test_nonexistent_bucket(self, mock_scan_and_tag_s3_object, s3_mock, method): client = self.get_authorized_client() res = client.open( "/scan/s3-object", method=method, data=json.dumps({ "bucketName": "defense-duriner", "objectKey": "sublime-mason.odt", "objectVersionId": "abcdef54321wxyz", }), content_type="application/json", ) assert res.status_code == 400 assert res.content_type == "application/json" assert json.loads(res.get_data()) == { "error": "Bucket 'defense-duriner' not found", } assert mock_scan_and_tag_s3_object.called is False 
@mock.patch("app.main.views.scan.scan_and_tag_s3_object", autospec=True) def test_nonexistent_object(self, mock_scan_and_tag_s3_object, empty_bucket, method): client = self.get_authorized_client() res = client.open( "/scan/s3-object", method=method, data=json.dumps({ "bucketName": empty_bucket.name, "objectKey": "sublime-mason.odt", "objectVersionId": "abcdef54321wxyz", }), content_type="application/json", ) assert res.status_code == 400 assert res.content_type == "application/json" assert json.loads(res.get_data()) == { "error": "Object with key 'sublime-mason.odt' and version 'abcdef54321wxyz' not found in bucket 'spade'", } assert mock_scan_and_tag_s3_object.called is False @mock.patch("app.main.views.scan.scan_and_tag_s3_object", autospec=True) def test_nonexistent_version(self, mock_scan_and_tag_s3_object, bucket_with_file, method): bucket, objver = bucket_with_file client = self.get_authorized_client() res = client.open( "/scan/s3-object", method=method, data=json.dumps({ "bucketName": bucket.name, "objectKey": objver.Object().key, "objectVersionId": "abcdef54321wxyz", }), content_type="application/json", ) assert res.status_code == 400 assert res.content_type == "application/json" assert json.loads(res.get_data()) == { "error": ( "Object with key 'sandman/+4321 billy-winks☾.pdf' and version 'abcdef54321wxyz' not found in " "bucket 'spade'" ), } assert mock_scan_and_tag_s3_object.called is False @mock.patch("app.main.views.scan.scan_and_tag_s3_object", autospec=True) def test_bucket_forbidden(self, mock_scan_and_tag_s3_object, bucket_with_file, method): bucket, objver = bucket_with_file unmocked_boto3_client = boto3.client with mock.patch("boto3.client", autospec=True) as mock_boto_client: # these tests are run against the aws-mocking library moto so that we can be more certain about exact # behaviours of boto given certain service responses (consider exact contents of exception objects). 
# currently the only capacity moto seems to have around policing permissions is rejecting anonymous access # so that's what we'll have to use to generate our 403s. wrap the target's boto3.client to inject this # option. mock_boto_client.side_effect = partial( unmocked_boto3_client, config=botocore.client.Config(signature_version=botocore.UNSIGNED), ) client = self.get_authorized_client() res = client.open( "/scan/s3-object", method=method, data=json.dumps({ "bucketName": bucket.name, "objectKey": objver.Object().key, "objectVersionId": objver.id, }), content_type="application/json", ) assert res.status_code == 400 assert res.content_type == "application/json" assert json.loads(res.get_data()) == { "error": "Access to key 'sandman/+4321 billy-winks☾.pdf' version '0' in bucket 'spade' forbidden", } assert mock_scan_and_tag_s3_object.called is False @mock.patch("app.main.views.scan.scan_and_tag_s3_object", autospec=True) def test_correct_passthrough(self, mock_scan_and_tag_s3_object, bucket_with_file, method): bucket, objver = bucket_with_file client = self.get_authorized_client() mock_scan_and_tag_s3_object.return_value = {"delectable": "swig"}, True, {"gurgling": "noise"} res = client.open( "/scan/s3-object", method=method, data=json.dumps({ "bucketName": bucket.name, "objectKey": objver.Object().key, "objectVersionId": objver.id, }), content_type="application/json", ) assert res.status_code == 200 assert res.content_type == "application/json" assert json.loads(res.get_data()) == { "existingAvStatus": {"delectable": "swig"}, "avStatusApplied": True, "newAvStatus": {"gurgling": "noise"}, } assert mock_scan_and_tag_s3_object.call_args_list == [ mock.call( s3_client=mock.ANY, s3_bucket_name=bucket.name, s3_object_key=objver.Object().key, s3_object_version=objver.id, ) ]
38.239583
118
0.601199
830
7,342
5.084337
0.198795
0.058768
0.052133
0.062559
0.718957
0.718957
0.713744
0.682227
0.682227
0.682227
0
0.020998
0.292972
7,342
191
119
38.439791
0.791562
0.056116
0
0.622642
0
0.018868
0.215049
0.042461
0
0
0
0
0.176101
1
0.044025
false
0.006289
0.044025
0
0.09434
0
0
0
0
null
0
0
0
0
1
1
0
0
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
3
4236fff67417156f2d217307257982e1c5e264af
1,160
py
Python
fsuipc_airspaces/fs_position.py
tjensen/fsuipc-airspaces
40100e1b88548350445818a7debd6f26d94f92ad
[ "MIT" ]
null
null
null
fsuipc_airspaces/fs_position.py
tjensen/fsuipc-airspaces
40100e1b88548350445818a7debd6f26d94f92ad
[ "MIT" ]
null
null
null
fsuipc_airspaces/fs_position.py
tjensen/fsuipc-airspaces
40100e1b88548350445818a7debd6f26d94f92ad
[ "MIT" ]
null
null
null
from typing import List, Tuple, Union, cast

from fsuipc_airspaces import offsets
from fsuipc_airspaces import position


class FSPosition:
    """Decodes raw FSUIPC offset data into a `position.Position`."""

    def data_specification(self) -> List[Tuple[int, Union[str, int]]]:
        """Return the (offset, format-code) pairs to read from FSUIPC."""
        spec: List[Tuple[int, Union[str, int]]] = [
            (offsets.TRANSPONDER, "H"),
            (offsets.FS_LATITUDE, "l"),
            (offsets.FS_LONGITUDE, "l"),
            (offsets.FS_ALTITUDE, "l"),
        ]
        return spec

    def process_data(self, data: List[Union[int, float, bytes]]) -> position.Position:
        """Convert one batch of raw offset values (in spec order) to a Position."""
        transponder_raw, latitude_raw, longitude_raw, altitude_raw = data[:4]
        return position.Position(
            transponder=self._transponder(cast(int, transponder_raw)),
            latitude=self._latitude(cast(int, latitude_raw)),
            longitude=self._longitude(cast(int, longitude_raw)),
            altitude=self._altitude(cast(int, altitude_raw)),
        )

    def _transponder(self, value: int) -> int:
        # The transponder code is BCD-encoded: reading the hex digits as a
        # decimal number yields the displayed squawk code.
        return int(f"{value:x}")

    def _latitude(self, value: int) -> float:
        # Scale factor per the FSUIPC offset documentation for FS latitude.
        return float(value) * 90 / (10001750 * 65536 * 65536)

    def _longitude(self, value: int) -> float:
        # Full circle (360 degrees) spread over a 64-bit fraction.
        return float(value) * 360 / (65536 * 65536 * 65536 * 65536)

    def _altitude(self, value: int) -> float:
        # Metres (32.32 fixed point) converted to feet.
        return float(value) * 3.28084 / (65536 * 65536)
33.142857
86
0.599138
139
1,160
4.892086
0.302158
0.073529
0.064706
0.066176
0.127941
0.127941
0.127941
0
0
0
0
0.073341
0.259483
1,160
34
87
34.117647
0.718277
0
0
0
0
0
0.009483
0
0
0
0
0
0
1
0.230769
false
0
0.115385
0.230769
0.615385
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
1
0
0
0
1
1
0
0
3
4244b9b66f009ddd053a97d70ee30af86aeb4742
223
py
Python
projects/convai/__init__.py
ricsinaruto/ParlAI
733b627ae456d6b11a2fc4624088a781bc6c1d03
[ "MIT" ]
24
2019-09-16T00:10:54.000Z
2021-09-08T19:31:51.000Z
projects/convai/__init__.py
ricsinaruto/ParlAI
733b627ae456d6b11a2fc4624088a781bc6c1d03
[ "MIT" ]
3
2021-03-11T06:04:15.000Z
2021-08-31T15:44:42.000Z
projects/convai/__init__.py
ricsinaruto/ParlAI
733b627ae456d6b11a2fc4624088a781bc6c1d03
[ "MIT" ]
7
2019-09-16T02:37:31.000Z
2021-09-01T06:06:17.000Z
#!/usr/bin/env python3 # Copyright (c) 2017-present, Moscow Institute of Physics and Technology. # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree.
31.857143
73
0.766816
36
223
4.75
0.805556
0.116959
0
0
0
0
0
0
0
0
0
0.027027
0.170404
223
6
74
37.166667
0.897297
0.955157
0
null
0
null
0
0
null
0
0
0
null
1
null
true
0
0
null
null
null
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
1
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
0
0
0
0
0
3
424fcee9e1151328a057c64bf4def4bf5a641ff4
869
py
Python
manga_py/providers/leviatanscans_com.py
paulolimac/manga-py
3d180846750a4e770b5024eb8cd15629362875b1
[ "MIT" ]
1
2020-11-19T00:40:49.000Z
2020-11-19T00:40:49.000Z
manga_py/providers/leviatanscans_com.py
paulolimac/manga-py
3d180846750a4e770b5024eb8cd15629362875b1
[ "MIT" ]
null
null
null
manga_py/providers/leviatanscans_com.py
paulolimac/manga-py
3d180846750a4e770b5024eb8cd15629362875b1
[ "MIT" ]
null
null
null
from manga_py.provider import Provider
from .helpers.std import Std


class LeviatanScansCom(Provider, Std):
    """Provider for leviatanscans.com built on the manga_py framework."""

    def get_chapter_index(self) -> str:
        """Derive a chapter index such as '12-3' from the chapter URL."""
        match = self.re.search(r'.+/(\d+/\d+)', self.chapter)
        return match.group(1).replace('/', '-')

    def get_main_content(self):
        """Fetch the manga's landing page HTML."""
        return self.http_get(self.get_url())

    def get_manga_name(self) -> str:
        """Read the manga title from the page header element."""
        return self.text_content(self.content, '.text-highlight')

    def get_chapters(self):
        """Collect the chapter link elements from the listing."""
        return self._elements('.list a.item-author')

    def get_files(self):
        """Extract the page-image list embedded as a JS array in the chapter page."""
        page_html = self.http_get(self.chapter)
        raw_json = self.re.search(r'chapterPages\s*=\s*(\[.+?\])', page_html).group(1)
        return self.json.loads(raw_json)

    def get_cover(self) -> str:
        """Pull the cover image URL from the card's background style."""
        card = self._elements('.media-comic-card .media-content')[0]
        return self.parse_background(card)


main = LeviatanScansCom
28.966667
87
0.653625
117
869
4.709402
0.42735
0.065336
0.047187
0.061706
0
0
0
0
0
0
0
0.004261
0.189873
869
29
88
29.965517
0.778409
0
0
0
0
0
0.124281
0.032221
0
0
0
0
0
1
0.315789
false
0
0.105263
0.210526
0.789474
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
1
0
0
0
1
1
0
0
3
4264444d701adc2d74f9b53f2207f25bdfcbd983
249
py
Python
Curso em Video/Desafio_031.py
tonmarcondes/UNIVESP
a66a623d4811e8f3f9e2999f09e38a4470035ae2
[ "MIT" ]
null
null
null
Curso em Video/Desafio_031.py
tonmarcondes/UNIVESP
a66a623d4811e8f3f9e2999f09e38a4470035ae2
[ "MIT" ]
null
null
null
Curso em Video/Desafio_031.py
tonmarcondes/UNIVESP
a66a623d4811e8f3f9e2999f09e38a4470035ae2
[ "MIT" ]
null
null
null
def fare(distance: float) -> float:
    """Return the ticket price in R$: 0.50/km up to 200 km, 0.45/km beyond.

    Extracted from the original duplicated if/else bodies so the pricing
    rule lives in one place and can be tested.
    """
    rate = 0.5 if distance <= 200 else 0.45
    return distance * rate


def main() -> None:
    """Prompt for a travel distance and print the ticket price."""
    # float() replaces the original eval(): eval on raw user input executes
    # arbitrary code and is unsafe; float() accepts the expected numeric input.
    viagem = float(input('\nQue distância pretende viajar?\n\t'))
    # NOTE(review): the leading "O O" is preserved from the original message;
    # it looks like a typo ("O preço") — confirm before changing user-facing text.
    print('\nO O preço de sua passagem é de R${:.2f}\n'.format(fare(viagem)))


if __name__ == '__main__':
    main()
35.571429
79
0.618474
45
249
3.422222
0.577778
0.090909
0.103896
0.168831
0.597403
0.597403
0.597403
0.597403
0.597403
0.597403
0
0.049751
0.192771
249
6
80
41.5
0.716418
0
0
0
0
0
0.502058
0
0
0
0
0
0
1
0
false
0.4
0
0
0
0.4
0
0
0
null
0
0
1
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
1
0
0
0
0
0
3
427602869edf5c902e31a21af5a2ee3c9c1b7de4
137
py
Python
6.py
IuryBRIGNOLI/tarefas181021
e0fc761dc9f79f80dcc48e2a9f677eb04643b0cd
[ "MIT" ]
null
null
null
6.py
IuryBRIGNOLI/tarefas181021
e0fc761dc9f79f80dcc48e2a9f677eb04643b0cd
[ "MIT" ]
null
null
null
6.py
IuryBRIGNOLI/tarefas181021
e0fc761dc9f79f80dcc48e2a9f677eb04643b0cd
[ "MIT" ]
null
null
null
def pa_terms(first: int, ratio: int, count: int = 10) -> list:
    """Return the first `count` terms of the arithmetic progression (PA).

    Generalizes the original hard-coded 10-iteration print loop into a pure,
    testable helper; `count` defaults to 10 to match the original behavior.
    """
    return [first + i * ratio for i in range(count)]


def main() -> None:
    """Read the common difference and first term, then print 10 terms."""
    rz = int(input('Digite a razao da PA: '))
    n = int(input('Digite o primeiro número da PA: '))
    for term in pa_terms(n, rz):
        print(term)


# Guard so importing this module does not block on input().
if __name__ == '__main__':
    main()
27.4
50
0.59854
26
137
3.153846
0.692308
0.195122
0.341463
0
0
0
0
0
0
0
0
0.019048
0.233577
137
5
51
27.4
0.761905
0
0
0
0
0
0.391304
0
0
0
0
0
0
1
0
false
0
0
0
0
0.2
1
0
0
null
0
1
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
3
427d1cfcd647c877e98f2d2a16bc667e18079e5d
359
py
Python
pyta/candlesticks/new_price_lines.py
gslinger/pyta
d69cde971d43ef813c0bb96ce97bc7b4a22baf42
[ "MIT" ]
1
2021-10-10T08:00:21.000Z
2021-10-10T08:00:21.000Z
pyta/candlesticks/new_price_lines.py
gslinger/pyta
d69cde971d43ef813c0bb96ce97bc7b4a22baf42
[ "MIT" ]
null
null
null
pyta/candlesticks/new_price_lines.py
gslinger/pyta
d69cde971d43ef813c0bb96ce97bc7b4a22baf42
[ "MIT" ]
null
null
null
import pandas as pd


def new_price_lines(h: pd.Series, n: int = 8) -> pd.Series:
    """Compute the n New Price Lines indicator over a series of highs.

    n New Price Lines: n consecutively higher highs. Usual n values are
    8, 10, 12, 13. Theoretically a bearish reversal signal, but weak
    performance. (Docstring resolves the original `# TODO docstring` and
    absorbs the dead module-level string that followed the function.)

    Args:
        h: Series of highs.
        n: Number of consecutive increases required (window size is n + 1).

    Returns:
        Float series: 1.0 where the trailing n + 1 highs are monotonically
        non-decreasing, 0.0 otherwise, NaN for the first n positions.
    """
    # rolling(...).apply already returns a Series; the original's extra
    # pd.Series(...) wrapper was a redundant copy and has been removed.
    return h.rolling(n + 1).apply(lambda window: window.is_monotonic_increasing)
27.615385
91
0.707521
59
359
4.237288
0.694915
0.128
0.104
0
0
0
0
0
0
0
0
0.030405
0.175487
359
13
92
27.615385
0.814189
0.038997
0
0
0
0
0
0
0
0
0
0.076923
0
1
0.25
false
0
0.25
0
0.75
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
1
0
0
1
0
0
0
0
1
0
0
3
c41dd4c0742ed1890f2fd8e68288d217c1171d2e
10,500
py
Python
tetrimino.py
vinc-r/Tetris
f5afe1d8420e26e2c565c20c60207245f1b12b8c
[ "MIT" ]
null
null
null
tetrimino.py
vinc-r/Tetris
f5afe1d8420e26e2c565c20c60207245f1b12b8c
[ "MIT" ]
1
2020-08-24T14:41:40.000Z
2020-09-09T10:59:15.000Z
tetrimino.py
vinc-r/Tetris
f5afe1d8420e26e2c565c20c60207245f1b12b8c
[ "MIT" ]
null
null
null
import pygame
import random

from cube import Cube
from constants import MARGIN, CUBE_SIZE, NEXT_TETRIS_POS, TIME_BETWEEN_MOVE, TETRIMINO_ID, NEXT_TETRIS_CUBES_POS


def convert_pixel_positions(positions_pixels):
    """Convert a list of (x, y) pixel coordinates to grid coordinates."""
    return [convert_pixel_position(pos) for pos in positions_pixels]


def convert_pixel_position(positions_pixels):
    """Convert one (x, y) pixel coordinate pair to (col, row) grid indices."""
    return int((positions_pixels[0] - MARGIN) / CUBE_SIZE), int((positions_pixels[1] - MARGIN) / CUBE_SIZE)


# translation_dict to spin tetrimino who can hold in 3*3 square (L, J, S, T, Z)
# Each entry: position label -> (dx cubes, dy cubes, new position label).
SPIN_3X3 = {
    "top_left": (2, 0, "top_right"),
    "top": (1, 1, "right"),
    "top_right": (0, 2, "bottom_right"),
    "right": (-1, 1, "bottom"),
    "bottom_right": (-2, 0, "bottom_left"),
    "bottom": (-1, -1, "left"),
    "bottom_left": (0, -2, "top_left"),
    "left": (1, -1, "top")
}

# translation_dict to spin tetrimino who can hold in 4*4 square => only for I
# ex : line_1_col_0 will be change to line_0_col_2 after spin
SPIN_4X4 = {
    "l0_c1": (2, 1, "l1_c3"),
    "l0_c2": (1, 2, "l2_c3"),
    "l1_c3": (-1, 2, "l3_c2"),
    "l2_c3": (-2, 1, "l3_c1"),
    "l3_c2": (-2, -1, "l2_c0"),
    "l3_c1": (-1, -2, "l1_c0"),
    "l2_c0": (1, -2, "l0_c1"),
    "l1_c0": (2, -1, "l0_c2"),
    "l1_c1": (1, 0, "l1_c2"),
    "l1_c2": (0, 1, "l2_c2"),
    "l2_c2": (-1, 0, "l2_c1"),
    "l2_c1": (0, -1, "l1_c1")
}


class Tetrimino:
    """Base class for a falling piece: owns its cubes, clocks, and movement.

    Grid cells hold "e" for empty; any other value blocks movement.
    """

    # NOTE(review): this default is evaluated ONCE at class-definition time,
    # so every no-argument Tetrimino() gets the SAME shape for the whole run.
    # Likely intended shape=None with random.choice inside the body — confirm.
    def __init__(self, shape=random.choice(TETRIMINO_ID)):
        self.shape = shape
        # initialize moving clocks
        self.clock_down = pygame.time.Clock()
        self.time_last_move_down = 0  # init 0 sec (not moving yet)
        self.clock_left = pygame.time.Clock()
        self.time_last_move_left = TIME_BETWEEN_MOVE  # init able to move
        self.clock_right = pygame.time.Clock()
        self.time_last_move_right = TIME_BETWEEN_MOVE  # init able to move
        # initialize empty cubes
        self.cubes = []
        self.spin_state = 0

    def move_down(self, grid):
        """Move the piece one cell down.

        Returns True on success; when blocked, returns the current grid
        positions list instead of False — drop_tetrimino() relies on this
        truthy-but-not-bool return to know where the piece landed.
        """
        positions_pixels = [(cube.rect.x, cube.rect.y) for cube in self.cubes]
        positions = convert_pixel_positions(positions_pixels)
        for pos in positions:
            if grid[pos[1]+1][pos[0]] != "e":
                return positions
        for cube in self.cubes:
            cube.rect.y += CUBE_SIZE
        self.time_last_move_down = 0
        self.clock_down.tick()
        return True

    def move_left(self, grid):
        """Move one cell left; returns False when blocked, True on success."""
        positions_pixels = [(cube.rect.x, cube.rect.y) for cube in self.cubes]
        positions = convert_pixel_positions(positions_pixels)
        for pos in positions:
            if grid[pos[1]][pos[0]-1] != "e":
                return False
        for cube in self.cubes:
            cube.rect.x -= CUBE_SIZE
        self.time_last_move_left = 0
        self.clock_left.tick()
        return True

    def move_right(self, grid):
        """Move one cell right; returns False when blocked, True on success."""
        positions_pixels = [(cube.rect.x, cube.rect.y) for cube in self.cubes]
        positions = convert_pixel_positions(positions_pixels)
        for pos in positions:
            if grid[pos[1]][pos[0]+1] != "e":
                return False
        for cube in self.cubes:
            cube.rect.x += CUBE_SIZE
        self.time_last_move_right = 0
        self.clock_right.tick()
        return True

    def drop_tetrimino(self, grid):
        """Hard drop: move down until blocked; return the landing positions."""
        positions = self.move_down(grid)
        # move_down returns True while falling and a positions list once
        # blocked, so the bool check distinguishes the two cases.
        while type(positions) == bool and positions:
            positions = self.move_down(grid)
        return positions

    def spin_tetrimino_3X3(self, grid):
        """Rotate a 3x3-bounded piece around its "center" cube if all target cells are free."""
        movement_allowed = True
        for cube in self.cubes:
            if cube.pos != "center":
                future_pos = convert_pixel_position((
                    cube.rect.x + CUBE_SIZE * SPIN_3X3[cube.pos][0],
                    cube.rect.y + CUBE_SIZE * SPIN_3X3[cube.pos][1]
                ))
                if grid[future_pos[1]][future_pos[0]] != "e":
                    movement_allowed = False
                    break
        if movement_allowed:
            for cube in self.cubes:
                if cube.pos != "center":
                    cube.rect.x += CUBE_SIZE * SPIN_3X3[cube.pos][0]
                    cube.rect.y += CUBE_SIZE * SPIN_3X3[cube.pos][1]
                    cube.pos = SPIN_3X3[cube.pos][2]
            print("SPIN")

    def is_over_other_cube(self, grid):
        """Return True if any of this piece's cubes occupies a non-empty grid cell."""
        for cube in self.cubes:
            pos = convert_pixel_position((cube.rect.x, cube.rect.y))
            if grid[pos[1]][pos[0]] != "e":
                return True
        return False


class NextTetrimino:
    """Preview piece drawn in the "next" box, positioned via NEXT_TETRIS_CUBES_POS."""

    # NOTE(review): same evaluated-once default as Tetrimino.__init__ — the
    # no-argument preview shape is fixed for the process lifetime; confirm intent.
    def __init__(self, shape=random.choice(TETRIMINO_ID)):
        assert shape in TETRIMINO_ID
        self.shape = shape
        self.cubes = [
            Cube(type=shape,
                 x=NEXT_TETRIS_POS[0] + CUBE_SIZE * NEXT_TETRIS_CUBES_POS[shape][0][0],
                 y=NEXT_TETRIS_POS[1] + CUBE_SIZE * NEXT_TETRIS_CUBES_POS[shape][0][1]),
            Cube(type=shape,
                 x=NEXT_TETRIS_POS[0] + CUBE_SIZE * NEXT_TETRIS_CUBES_POS[shape][1][0],
                 y=NEXT_TETRIS_POS[1] + CUBE_SIZE * NEXT_TETRIS_CUBES_POS[shape][1][1]),
            Cube(type=shape,
                 x=NEXT_TETRIS_POS[0] + CUBE_SIZE * NEXT_TETRIS_CUBES_POS[shape][2][0],
                 y=NEXT_TETRIS_POS[1] + CUBE_SIZE * NEXT_TETRIS_CUBES_POS[shape][2][1]),
            Cube(type=shape,
                 x=NEXT_TETRIS_POS[0] + CUBE_SIZE * NEXT_TETRIS_CUBES_POS[shape][3][0],
                 y=NEXT_TETRIS_POS[1] + CUBE_SIZE * NEXT_TETRIS_CUBES_POS[shape][3][1])
        ]


class O(Tetrimino):
    """The 2x2 square piece."""

    def __init__(self):
        super().__init__(shape="O")
        self.cubes = [
            Cube(type=self.shape, x=MARGIN + CUBE_SIZE * 5, y=MARGIN + CUBE_SIZE * 1),
            Cube(type=self.shape, x=MARGIN + CUBE_SIZE * 6, y=MARGIN + CUBE_SIZE * 1),
            Cube(type=self.shape, x=MARGIN + CUBE_SIZE * 5, y=MARGIN + CUBE_SIZE * 2),
            Cube(type=self.shape, x=MARGIN + CUBE_SIZE * 6, y=MARGIN + CUBE_SIZE * 2)
        ]

    def spin_tetrimino(self, grid):
        # no spin for Tetrimino O (always the same position)
        pass


class I(Tetrimino):
    """The 1x4 bar piece; the only piece using the 4x4 spin table."""

    def __init__(self):
        super().__init__(shape="I")
        self.cubes = [
            Cube(type=self.shape, x=MARGIN + CUBE_SIZE * 4, y=MARGIN + CUBE_SIZE * 2, pos="l1_c0"),
            Cube(type=self.shape, x=MARGIN + CUBE_SIZE * 5, y=MARGIN + CUBE_SIZE * 2, pos="l1_c1"),
            Cube(type=self.shape, x=MARGIN + CUBE_SIZE * 6, y=MARGIN + CUBE_SIZE * 2, pos="l1_c2"),
            Cube(type=self.shape, x=MARGIN + CUBE_SIZE * 7, y=MARGIN + CUBE_SIZE * 2, pos="l1_c3")
        ]

    def spin_tetrimino(self, grid):
        """Rotate within the 4x4 bounding box if all target cells are free."""
        movement_allowed = True
        for cube in self.cubes:
            future_pos = convert_pixel_position((
                cube.rect.x + CUBE_SIZE * SPIN_4X4[cube.pos][0],
                cube.rect.y + CUBE_SIZE * SPIN_4X4[cube.pos][1]
            ))
            if grid[future_pos[1]][future_pos[0]] != "e":
                movement_allowed = False
                break
        if movement_allowed:
            for cube in self.cubes:
                cube.rect.x += CUBE_SIZE * SPIN_4X4[cube.pos][0]
                cube.rect.y += CUBE_SIZE * SPIN_4X4[cube.pos][1]
                cube.pos = SPIN_4X4[cube.pos][2]
            self.spin_state = (self.spin_state + 1) % 4
            print("SPIN")


class J(Tetrimino):
    """The J piece."""

    def __init__(self):
        super().__init__(shape="J")
        self.cubes = [
            Cube(type=self.shape, x=MARGIN + CUBE_SIZE * 4, y=MARGIN + CUBE_SIZE * 1, pos="top_left"),
            Cube(type=self.shape, x=MARGIN + CUBE_SIZE * 4, y=MARGIN + CUBE_SIZE * 2, pos="left"),
            Cube(type=self.shape, x=MARGIN + CUBE_SIZE * 5, y=MARGIN + CUBE_SIZE * 2, pos="center"),
            Cube(type=self.shape, x=MARGIN + CUBE_SIZE * 6, y=MARGIN + CUBE_SIZE * 2, pos="right")
        ]

    def spin_tetrimino(self, grid):
        self.spin_tetrimino_3X3(grid)


class S(Tetrimino):
    """The S piece."""

    def __init__(self):
        super().__init__(shape="S")
        self.cubes = [
            Cube(type=self.shape, x=MARGIN + CUBE_SIZE * 4, y=MARGIN + CUBE_SIZE * 2, pos="left"),
            Cube(type=self.shape, x=MARGIN + CUBE_SIZE * 5, y=MARGIN + CUBE_SIZE * 2, pos="center"),
            Cube(type=self.shape, x=MARGIN + CUBE_SIZE * 5, y=MARGIN + CUBE_SIZE * 1, pos="top"),
            Cube(type=self.shape, x=MARGIN + CUBE_SIZE * 6, y=MARGIN + CUBE_SIZE * 1, pos="top_right")
        ]

    def spin_tetrimino(self, grid):
        self.spin_tetrimino_3X3(grid)


class T(Tetrimino):
    """The T piece."""

    def __init__(self):
        super().__init__(shape="T")
        self.cubes = [
            Cube(type=self.shape, x=MARGIN + CUBE_SIZE * 4, y=MARGIN + CUBE_SIZE * 2, pos="left"),
            Cube(type=self.shape, x=MARGIN + CUBE_SIZE * 5, y=MARGIN + CUBE_SIZE * 2, pos="center"),
            Cube(type=self.shape, x=MARGIN + CUBE_SIZE * 5, y=MARGIN + CUBE_SIZE * 1, pos="top"),
            Cube(type=self.shape, x=MARGIN + CUBE_SIZE * 6, y=MARGIN + CUBE_SIZE * 2, pos="right")
        ]

    def spin_tetrimino(self, grid):
        self.spin_tetrimino_3X3(grid)


class Z(Tetrimino):
    """The Z piece."""

    def __init__(self):
        super().__init__(shape="Z")
        self.cubes = [
            Cube(type=self.shape, x=MARGIN + CUBE_SIZE * 4, y=MARGIN + CUBE_SIZE * 1, pos="top_left"),
            Cube(type=self.shape, x=MARGIN + CUBE_SIZE * 5, y=MARGIN + CUBE_SIZE * 1, pos="top"),
            Cube(type=self.shape, x=MARGIN + CUBE_SIZE * 5, y=MARGIN + CUBE_SIZE * 2, pos="center"),
            Cube(type=self.shape, x=MARGIN + CUBE_SIZE * 6, y=MARGIN + CUBE_SIZE * 2, pos="right")
        ]

    def spin_tetrimino(self, grid):
        self.spin_tetrimino_3X3(grid)


class L(Tetrimino):
    """The L piece."""

    def __init__(self):
        super().__init__(shape="L")
        self.cubes = [
            Cube(type=self.shape, x=MARGIN + CUBE_SIZE * 4, y=MARGIN + CUBE_SIZE * 2, pos="left"),
            Cube(type=self.shape, x=MARGIN + CUBE_SIZE * 5, y=MARGIN + CUBE_SIZE * 2, pos="center"),
            Cube(type=self.shape, x=MARGIN + CUBE_SIZE * 6, y=MARGIN + CUBE_SIZE * 2, pos="right"),
            Cube(type=self.shape, x=MARGIN + CUBE_SIZE * 6, y=MARGIN + CUBE_SIZE * 1, pos="top_right")
        ]

    def spin_tetrimino(self, grid):
        self.spin_tetrimino_3X3(grid)
39.033457
113
0.564762
1,475
10,500
3.781017
0.081356
0.111888
0.148108
0.085351
0.789313
0.742155
0.728707
0.649453
0.62184
0.601399
0
0.034699
0.300095
10,500
269
114
39.033457
0.72418
0.03581
0
0.464455
0
0
0.040418
0
0
0
0
0
0.004739
1
0.113744
false
0.004739
0.018957
0.009479
0.227488
0.009479
0
0
0
null
0
0
0
0
1
1
0
0
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
3
c422fdf80d7c357a3fd3ccc97e09ec30dc957c3f
1,094
py
Python
app/util.py
KingsleyBell/anna_website
280ab439ca2351ba204bb6117748d537af03e8b1
[ "MIT" ]
null
null
null
app/util.py
KingsleyBell/anna_website
280ab439ca2351ba204bb6117748d537af03e8b1
[ "MIT" ]
null
null
null
app/util.py
KingsleyBell/anna_website
280ab439ca2351ba204bb6117748d537af03e8b1
[ "MIT" ]
null
null
null
from datetime import datetime
import json
import os

from flask import Flask

application = Flask(__name__)


@application.context_processor
def inject_now():
    """Expose the current UTC timestamp (ISO-like) to all templates."""
    return {'now': datetime.utcnow().strftime('%Y-%m-%dT%H:%M:%S')}


@application.context_processor
def inject_year():
    """Expose the current UTC year to all templates."""
    return {'year': datetime.utcnow().year}


# Paths of the JSON "databases" inside the Flask static folder.
db_path = os.path.join(application.static_folder, 'db/db.json')
about_path = os.path.join(application.static_folder, 'db/about.json')
contact_path = os.path.join(application.static_folder, 'db/contact.json')


def get_section_by_id(db, section_id):
    """Return the first section in `db` whose 'id' equals `section_id`."""
    matching = [section for section in db if section['id'] == section_id]
    return matching[0]


def update_db_file(db_file_path, new_db):
    """Overwrite the JSON database file at `db_file_path` with `new_db`."""
    with open(db_file_path, 'w') as out_file:
        out_file.write(json.dumps(new_db))


def get_image_from_db(db, section_id, image_id):
    """Return the image entry with `image_id` inside the given section."""
    section = get_section_by_id(db, section_id)
    matching = [img for img in section["images"] if img["id"] == image_id]
    return matching[0]


def delete_image_file(filename):
    """Remove an uploaded image file from the static uploads folder."""
    upload_folder = os.path.join(application.static_folder, 'images/uploads')
    os.remove(os.path.join(upload_folder, filename))
26.682927
77
0.731261
175
1,094
4.32
0.308571
0.039683
0.066138
0.111111
0.375661
0.280423
0.236772
0.236772
0
0
0
0.002101
0.129799
1,094
41
78
26.682927
0.792017
0
0
0.08
0
0
0.079452
0
0
0
0
0
0
1
0.24
false
0
0.16
0.12
0.56
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
1
0
0
0
1
1
0
0
3
c44447289911936b07650070dbe523a0c2ff9b67
72
py
Python
venv/Lib/site-packages/qtconsole/_version.py
BoxicaLion/BasicMathFormulas
4d9782f2c0c75ecccf4c0ea995f324f93e4fb6e2
[ "MIT" ]
15
2020-06-29T08:33:39.000Z
2022-02-12T00:28:51.000Z
venv/Lib/site-packages/qtconsole/_version.py
BoxicaLion/BasicMathFormulas
4d9782f2c0c75ecccf4c0ea995f324f93e4fb6e2
[ "MIT" ]
10
2019-12-28T21:31:19.000Z
2020-04-12T20:01:58.000Z
venv/Lib/site-packages/qtconsole/_version.py
BoxicaLion/BasicMathFormulas
4d9782f2c0c75ecccf4c0ea995f324f93e4fb6e2
[ "MIT" ]
11
2020-06-29T08:40:24.000Z
2022-02-24T17:39:16.000Z
version_info = (4, 7, 3) __version__ = '.'.join(map(str, version_info))
24
46
0.666667
11
72
3.818182
0.727273
0.52381
0
0
0
0
0
0
0
0
0
0.047619
0.125
72
2
47
36
0.619048
0
0
0
0
0
0.013889
0
0
0
0
0
0
1
0
false
0
0
0
0
0
1
0
0
null
1
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
3
c450b542724c1b5dd04f4ee473712ef9b05fcac4
228
py
Python
otp/movement/CMover.py
LittleNed/toontown-stride
1252a8f9a8816c1810106006d09c8bdfe6ad1e57
[ "Apache-2.0" ]
3
2020-01-02T08:43:36.000Z
2020-07-05T08:59:02.000Z
otp/movement/CMover.py
NoraTT/Historical-Commits-Project-Altis-Source
fe88e6d07edf418f7de6ad5b3d9ecb3d0d285179
[ "Apache-2.0" ]
null
null
null
otp/movement/CMover.py
NoraTT/Historical-Commits-Project-Altis-Source
fe88e6d07edf418f7de6ad5b3d9ecb3d0d285179
[ "Apache-2.0" ]
4
2019-06-20T23:45:23.000Z
2020-10-14T20:30:15.000Z
from pandac.PandaModules import *


class CMover:
    """Holds a node path together with forward and rotation speed settings."""

    def __init__(self, objNodePath, fwdSpeed=1, rotSpeed=1):
        # The Panda3D NodePath this mover controls.
        self.objNodePath = objNodePath
        # Speed settings; presumably linear units/sec and degrees/sec — TODO confirm.
        self.fwdSpeed = fwdSpeed
        self.rotSpeed = rotSpeed
25.333333
60
0.653509
23
228
6.304348
0.565217
0.206897
0
0
0
0
0
0
0
0
0
0.012048
0.27193
228
9
61
25.333333
0.861446
0
0
0
0
0
0
0
0
0
0
0
0
1
0.166667
false
0
0.166667
0
0.5
0
1
0
0
null
1
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
3
c4542186007e38403036d11fefef210290a04de7
182
py
Python
lightautoml/text/__init__.py
alexeytomashyov/LightAutoML
41e8e10d430344dbdb5e39dd48342653ee31988c
[ "Apache-2.0" ]
1
2021-09-03T21:37:58.000Z
2021-09-03T21:37:58.000Z
lightautoml/text/__init__.py
alexeytomashyov/LightAutoML
41e8e10d430344dbdb5e39dd48342653ee31988c
[ "Apache-2.0" ]
null
null
null
lightautoml/text/__init__.py
alexeytomashyov/LightAutoML
41e8e10d430344dbdb5e39dd48342653ee31988c
[ "Apache-2.0" ]
1
2021-12-08T13:52:45.000Z
2021-12-08T13:52:45.000Z
"""Provides an internal interface for working with text features.""" __all__ = ['tokenizer', 'dl_transformers', 'sentence_pooling', 'weighted_average_transformer', 'embed_dataset']
45.5
111
0.774725
20
182
6.6
1
0
0
0
0
0
0
0
0
0
0
0
0.093407
182
3
112
60.666667
0.8
0.340659
0
0
0
0
0.710526
0.245614
0
0
0
0
0
1
0
false
0
0
0
0
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
1
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
3
c458429badd223bc17b8658102393d5f43df019f
1,333
py
Python
json_config.py
Gorg212/PyFlappy
869724411de8245804079f0d6353a0ea3f6a4a74
[ "MIT" ]
19
2021-03-30T20:42:30.000Z
2022-01-12T18:41:50.000Z
json_config.py
OmerFI/PyFlappy
e34101e130a784848da97c8ae4d8f15137d4a969
[ "MIT" ]
null
null
null
json_config.py
OmerFI/PyFlappy
e34101e130a784848da97c8ae4d8f15137d4a969
[ "MIT" ]
1
2021-04-16T09:43:32.000Z
2021-04-16T09:43:32.000Z
import json

# Localised UI resources keyed by language code ('en', 'tr').
# Each language provides TEXTS (winner banner + credit lines, where None is a
# blank line placeholder) and IMAGES (per-language asset filenames).
language_dict = {'en': {'TEXTS': {'WINNER_TEXT': 'YOU WON!',
                                  'LINE_TEXTS': [None,
                                                 'Coding:',
                                                 'Ömer Furkan İşleyen',
                                                 'Design:',
                                                 'Ömer Furkan İşleyen',
                                                 'Icons made by Freepik from www.flaticon.com',
                                                 'Background vector created by freepik - www.freepik.com',
                                                 'Music:',
                                                 '"beeps-18 1.wav" by Greencouch ( https://freesound.org/people/Greencouch/ )',
                                                 'licensed under CCBY 3.0']},
                        'IMAGES': {'PLAY_BUTTON': 'play_button_en.png',
                                   'CONTRIBUTORS_BUTTON': 'contributors_button_en.png',
                                   'INFO_SCREEN': 'info_screen_en.jpg'}},
                 'tr': {'TEXTS': {'WINNER_TEXT': 'KAZANDIN!',
                                  'LINE_TEXTS': [None,
                                                 'Kodlama:',
                                                 'Ömer Furkan İşleyen',
                                                 'Tasarım:',
                                                 'Ömer Furkan İşleyen',
                                                 'İkonlar www.flaticon.com adresinden Freepik tarafından yapılmıştır',
                                                 'Arka plan vektörü freepik tarafından oluşturulmuştur - www.freepik.com',
                                                 'Müzik:',
                                                 'Greencouch tarafından "beeps-18 1.wav" ( https://freesound.org/people/Greencouch/ )',
                                                 'CCBY 3.0 altında lisanslıdır']},
                        'IMAGES': {'PLAY_BUTTON': 'play_button_tr.png',
                                   'CONTRIBUTORS_BUTTON': 'contributors_button_tr.png',
                                   'INFO_SCREEN': 'info_screen_tr.jpg'}}}

# Default game configuration written on first run.
config_dict = {'LEVEL': 1, 'FIRST_OPENING': True, 'LANGUAGE': 'en'}

# Dump both dictionaries as pretty-printed JSON files in the working directory.
with open("language.json", "w") as f:
    json.dump(language_dict, f, indent=4)

with open("config.json", "w") as f:
    json.dump(config_dict, f, indent=4)
102.538462
568
0.694674
189
1,333
4.783069
0.428571
0.011062
0.048673
0.053097
0.382743
0.035398
0
0
0
0
0
0.011111
0.122281
1,333
12
569
111.083333
0.757265
0
0
0
0
0.25
0.685087
0.039364
0
0
0
0
0
1
0
false
0
0.125
0
0.125
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
1
1
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
3
c45859cfffc6c3681a9b0416586a3f2b4bd04acb
149
py
Python
utils/__init__.py
davidhwyllie/findNeighbour4
d42e10711e59e93ebf0e798fbb1598929f662c9c
[ "MIT" ]
null
null
null
utils/__init__.py
davidhwyllie/findNeighbour4
d42e10711e59e93ebf0e798fbb1598929f662c9c
[ "MIT" ]
14
2021-11-26T14:43:25.000Z
2022-03-22T00:39:17.000Z
utils/__init__.py
davidhwyllie/findNeighbour4
d42e10711e59e93ebf0e798fbb1598929f662c9c
[ "MIT" ]
null
null
null
from pathlib import Path

# Repository root: two directory levels above this file.
BASE_PATH = Path(__file__).resolve().parents[1]

# Default test configuration shipped under <root>/config/.
DEFAULT_CONFIG_FILE = BASE_PATH.joinpath("config", "default_test_config.json")
29.8
71
0.791946
21
149
5.142857
0.571429
0.148148
0
0
0
0
0
0
0
0
0
0
0.100671
149
4
72
37.25
0.80597
0
0
0
0
0
0.201342
0.161074
0
0
0
0
0
1
0
false
0
0.333333
0
0.333333
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
1
0
0
0
0
3
c458e039c403c716fcc473700f32a7112a32e95b
63
py
Python
learn_python/test.py
Milon34/Python_Learning
b24efbba4751ed51758bf6b907e71f384415a9d5
[ "MIT" ]
null
null
null
learn_python/test.py
Milon34/Python_Learning
b24efbba4751ed51758bf6b907e71f384415a9d5
[ "MIT" ]
null
null
null
learn_python/test.py
Milon34/Python_Learning
b24efbba4751ed51758bf6b907e71f384415a9d5
[ "MIT" ]
null
null
null
def record(s, d):
    """Print `s` followed by its prior count when seen before, then count it.

    Fixes the original's crash: with a fresh dict, `d[s] += 1` raised
    KeyError on the first occurrence; `d.get` initializes the count safely.
    """
    if s in d:
        print(s + str(d[s]))
    d[s] = d.get(s, 0) + 1


def main():
    """Read one line and record it against a fresh occurrence map."""
    d = {}
    record(input(), d)


# Guard so importing this module does not block on input().
if __name__ == "__main__":
    main()
10.5
22
0.428571
15
63
1.8
0.533333
0.148148
0
0
0
0
0
0
0
0
0
0.022222
0.285714
63
6
23
10.5
0.577778
0
0
0
0
0
0
0
0
0
0
0
0
1
0
false
0
0
0
0
0.2
1
0
1
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
3
c47cb95b9c4eb047a59c2577ead1ada49e550413
200
py
Python
contests_atcoder/abc177/e_testcase.py
takelifetime/competitive-programming
e7cf8ef923ccefad39a1727ca94c610d650fcb76
[ "BSD-2-Clause" ]
null
null
null
contests_atcoder/abc177/e_testcase.py
takelifetime/competitive-programming
e7cf8ef923ccefad39a1727ca94c610d650fcb76
[ "BSD-2-Clause" ]
1
2021-01-02T06:36:51.000Z
2021-01-02T06:36:51.000Z
contests_atcoder/abc177/e_testcase.py
takelifetime/competitive-programming
e7cf8ef923ccefad39a1727ca94c610d650fcb76
[ "BSD-2-Clause" ]
null
null
null
import random


def write_testcase(path="test.txt", n=100000):
    """Write a random test case file: first line `n`, then n ints in [1, n].

    Replaces the original's three separate open() calls — two of which were
    never closed (leaked file handles) — with one `with` block producing the
    identical file content. `path` and `n` are parameterized with defaults
    matching the original behavior.
    """
    values = [random.randint(1, n) for _ in range(n)]
    with open(path, "w") as f:
        print(n, file=f)
        print(*values, file=f)


# Guard so importing this module does not overwrite test.txt.
if __name__ == "__main__":
    write_testcase()
16.666667
45
0.59
36
200
3.25
0.583333
0.205128
0.282051
0.25641
0.273504
0
0
0
0
0
0
0.041916
0.165
200
12
46
16.666667
0.658683
0
0
0
0
0
0.134328
0
0
0
0
0
0
1
0
false
0
0.142857
0
0.142857
0.285714
0
0
0
null
1
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
3
c47ffc4ca90d2a5de11409f4ac1ccaa4da64439e
303
py
Python
env/lib/python3.8/site-packages/plotly/io/base_renderers.py
acrucetta/Chicago_COVI_WebApp
a37c9f492a20dcd625f8647067394617988de913
[ "MIT", "Unlicense" ]
11,750
2015-10-12T07:03:39.000Z
2022-03-31T20:43:15.000Z
env/lib/python3.8/site-packages/plotly/io/base_renderers.py
acrucetta/Chicago_COVI_WebApp
a37c9f492a20dcd625f8647067394617988de913
[ "MIT", "Unlicense" ]
2,951
2015-10-12T00:41:25.000Z
2022-03-31T22:19:26.000Z
env/lib/python3.8/site-packages/plotly/io/base_renderers.py
acrucetta/Chicago_COVI_WebApp
a37c9f492a20dcd625f8647067394617988de913
[ "MIT", "Unlicense" ]
2,623
2015-10-15T14:40:27.000Z
2022-03-28T16:05:50.000Z
# Re-export shim: exposes the renderer classes from the private
# `_base_renderers` module under the public `plotly.io.base_renderers` path.
from ._base_renderers import (
    MimetypeRenderer,
    PlotlyRenderer,
    JsonRenderer,
    ImageRenderer,
    PngRenderer,
    SvgRenderer,
    PdfRenderer,
    JpegRenderer,
    HtmlRenderer,
    ColabRenderer,
    KaggleRenderer,
    NotebookRenderer,
    ExternalRenderer,
    BrowserRenderer,
)
17.823529
30
0.69637
18
303
11.611111
1
0
0
0
0
0
0
0
0
0
0
0
0.247525
303
16
31
18.9375
0.916667
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
0.0625
0
0.0625
0
1
0
1
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
0
0
0
0
0
3
c4873fd9674166e386ffe1608fbd5884aa234c8e
40
py
Python
Language Skills/Python/Unit 05 Lists & Dictionaries/02 A Day at the Supermarket/Shopping Trip/10-Shopping at the market.py
rhyep/Python_tutorials
f5c8a64b91802b005dfe7dd9035f8d8daae8c3e3
[ "MIT" ]
346
2016-02-22T20:21:10.000Z
2022-01-27T20:55:53.000Z
Language Skills/Python/Unit 5/2-A Day at the Supermarket/Shopping Trip/10-Shopping at the market.py
vpstudios/Codecademy-Exercise-Answers
ebd0ee8197a8001465636f52c69592ea6745aa0c
[ "MIT" ]
55
2016-04-07T13:58:44.000Z
2020-06-25T12:20:24.000Z
Language Skills/Python/Unit 5/2-A Day at the Supermarket/Shopping Trip/10-Shopping at the market.py
vpstudios/Codecademy-Exercise-Answers
ebd0ee8197a8001465636f52c69592ea6745aa0c
[ "MIT" ]
477
2016-02-21T06:17:02.000Z
2021-12-22T10:08:01.000Z
# Shopping list for the supermarket exercise.
groceries = ["banana", "orange", "apple"]
20
39
0.65
4
40
6.5
1
0
0
0
0
0
0
0
0
0
0
0
0.075
40
1
40
40
0.702703
0
0
0
0
0
0.425
0
0
0
0
0
0
1
0
false
0
0
0
0
0
1
1
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
3
670a5f9d771f1732c047d97f9cf5219b6b5f5643
310
py
Python
array_compare.py
NathanKr/numpy-playground
5f36f7471fd7d182a8cbb6097f5d7332991bc7de
[ "MIT" ]
null
null
null
array_compare.py
NathanKr/numpy-playground
5f36f7471fd7d182a8cbb6097f5d7332991bc7de
[ "MIT" ]
null
null
null
array_compare.py
NathanKr/numpy-playground
5f36f7471fd7d182a8cbb6097f5d7332991bc7de
[ "MIT" ]
null
null
null
import numpy as np

# Small demo of the two ways to compare NumPy arrays.
ar1 = np.array([1, 2, 3])
ar2 = np.array([4, 5, 6])
ar3 = np.array([1, 2, 3])

# `==` compares element-wise and yields a boolean array.
for other in (ar1, ar3, ar2):
    print(ar1 == other)

# np.array_equal compares whole arrays and yields a single bool.
for other in (ar1, ar3, ar2):
    print(np.array_equal(ar1, other))
20.666667
50
0.670968
58
310
3.534483
0.362069
0.204878
0.17561
0.24878
0.390244
0
0
0
0
0
0
0.089888
0.13871
310
15
51
20.666667
0.677903
0.2
0
0
0
0
0
0
0
0
0
0
0
1
0
false
0
0.1
0
0.1
0.6
0
0
0
null
1
0
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
1
0
3
67269b726ec85ac0110308ac4c71fb589e74a716
226
py
Python
maas/app.py
ThalesSathler/maas-power
094d5c68fe0c962185a0cbac1672d4b70a1dd42a
[ "MIT" ]
null
null
null
maas/app.py
ThalesSathler/maas-power
094d5c68fe0c962185a0cbac1672d4b70a1dd42a
[ "MIT" ]
4
2019-06-13T19:44:12.000Z
2021-06-01T23:55:14.000Z
maas/app.py
ThalesSathler/maas-power
094d5c68fe0c962185a0cbac1672d4b70a1dd42a
[ "MIT" ]
1
2019-06-18T19:50:55.000Z
2019-06-18T19:50:55.000Z
from aiohttp import web
from asyncworker import App, RouteTypes

# asyncworker application; the four positional arguments are passed as
# empty strings plus 1 — their meaning is defined by asyncworker's App
# signature, not visible here (TODO confirm against asyncworker docs).
app = App("", "", "", 1)


@app.route(["/"], type=RouteTypes.HTTP, methods=["POST"])
async def operation(request: web.Request) -> web.Response:
    """Handle POST / by returning an empty JSON object."""
    return web.json_response({})
22.6
57
0.681416
29
226
5.275862
0.689655
0
0
0
0
0
0
0
0
0
0
0.005128
0.137168
226
9
58
25.111111
0.779487
0
0
0
0
0
0.022124
0
0
0
0
0
0
1
0
false
0
0.333333
0
0.5
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
1
0
0
0
0
3
672c30f63ccc8cf75fdbbfda0dbb890a64dc6cc9
112
py
Python
examples/python/example_1.py
dom-lis/tulip
b9b5645b4233befc79729eeb3ba6f9b3ecb14553
[ "MIT" ]
2
2021-07-27T19:33:39.000Z
2021-12-23T19:45:30.000Z
examples/python/example_1.py
dom-lis/tulip
b9b5645b4233befc79729eeb3ba6f9b3ecb14553
[ "MIT" ]
null
null
null
examples/python/example_1.py
dom-lis/tulip
b9b5645b4233befc79729eeb3ba6f9b3ecb14553
[ "MIT" ]
null
null
null
from _cmn import flush_line

# Echo loop: forward each input line to flush_line until the Escape-key
# marker line 'key:Esc' is read.
while True:
    i = input()
    if i == 'key:Esc':
        break
    flush_line(i)
14
27
0.571429
17
112
3.588235
0.764706
0.295082
0
0
0
0
0
0
0
0
0
0
0.321429
112
7
28
16
0.802632
0
0
0
0
0
0.0625
0
0
0
0
0
0
1
0
false
0
0.166667
0
0.166667
0
1
0
0
null
1
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
3
6744ce1abaa78bbe19a5d2085f00b1afe7d0a214
487
py
Python
models/layers/common/sentence.py
20000607-lxc/BERT-NER-Pytorch-master
47f2e1291ab53674986eb91abdb72693eafe9b61
[ "MIT" ]
null
null
null
models/layers/common/sentence.py
20000607-lxc/BERT-NER-Pytorch-master
47f2e1291ab53674986eb91abdb72693eafe9b61
[ "MIT" ]
null
null
null
models/layers/common/sentence.py
20000607-lxc/BERT-NER-Pytorch-master
47f2e1291ab53674986eb91abdb72693eafe9b61
[ "MIT" ]
null
null
null
#
# @author: Allan
#

from typing import List, Optional


class Sentence:
    """A tokenized sentence with optional per-token annotations.

    Fixes the original annotations: `heads`, `dep_labels` and `pos_tags`
    defaulted to None but were annotated as plain List[...] — they are
    Optional. Behavior and call signature are unchanged.
    """

    def __init__(self, words: List[str],
                 heads: Optional[List[int]] = None,
                 dep_labels: Optional[List[str]] = None,
                 pos_tags: Optional[List[str]] = None):
        # Tokens of the sentence.
        self.words = words
        # Dependency head index per token, when provided.
        self.heads = heads
        # Dependency relation label per token, when provided.
        self.dep_labels = dep_labels
        # Part-of-speech tag per token, when provided.
        self.pos_tags = pos_tags

    def __len__(self) -> int:
        """Number of tokens in the sentence."""
        return len(self.words)
16.233333
120
0.595483
63
487
4.253968
0.460317
0.100746
0.08209
0
0
0
0
0
0
0
0
0
0.264887
487
30
121
16.233333
0.748603
0.240246
0
0
0
0
0
0
0
0
0
0
0
1
0.222222
false
0
0.111111
0.111111
0.555556
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
1
0
0
0
1
1
0
0
3
67593d4a846b76ae2244cc058e2ede2fec48a12d
372
py
Python
mmdet/ops/psroi_align_rotated/setup.py
TJUsym/TJU_Advanced_CV_Homework
2d85943390e9ba53b80988e0ab8d50aef0cd17da
[ "Apache-2.0" ]
643
2020-01-28T11:29:49.000Z
2022-03-31T08:28:32.000Z
mmdet/ops/psroi_align_rotated/setup.py
TJUsym/TJU_Advanced_CV_Homework
2d85943390e9ba53b80988e0ab8d50aef0cd17da
[ "Apache-2.0" ]
148
2021-03-18T09:44:02.000Z
2022-03-31T06:01:39.000Z
mmdet/ops/psroi_align_rotated/setup.py
TJUsym/TJU_Advanced_CV_Homework
2d85943390e9ba53b80988e0ab8d50aef0cd17da
[ "Apache-2.0" ]
197
2020-01-29T09:58:27.000Z
2022-03-25T12:08:56.000Z
from setuptools import setup from torch.utils.cpp_extension import BuildExtension, CUDAExtension setup( name='psroi_align_rotated_cuda', ext_modules=[ CUDAExtension('psroi_align_rotated_cuda', [ 'src/psroi_align_rotated_cuda.cpp', 'src/psroi_align_rotated_kernel.cu', ]), ], cmdclass={'build_ext': BuildExtension})
28.615385
67
0.693548
41
372
5.926829
0.536585
0.164609
0.279835
0.259259
0
0
0
0
0
0
0
0
0.206989
372
12
68
31
0.823729
0
0
0
0
0
0.327957
0.303763
0
0
0
0
0
1
0
true
0
0.181818
0
0.181818
0
0
0
0
null
0
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
0
0
0
0
0
3
675ff1661e5caf39a727dd71ad91cb58081a049b
912
py
Python
src/geocat/comp/errors.py
erogluorhan/geocat-comp
eb42aec750e42303e6e67d664d54ace8b0cae126
[ "Apache-2.0" ]
76
2019-09-20T20:13:45.000Z
2022-03-30T22:50:13.000Z
src/geocat/comp/errors.py
erogluorhan/geocat-comp
eb42aec750e42303e6e67d664d54ace8b0cae126
[ "Apache-2.0" ]
154
2019-07-24T20:02:27.000Z
2022-03-29T20:32:20.000Z
src/geocat/comp/errors.py
erogluorhan/geocat-comp
eb42aec750e42303e6e67d664d54ace8b0cae126
[ "Apache-2.0" ]
34
2019-07-18T20:02:38.000Z
2022-03-31T13:40:22.000Z
class Error(Exception): """Base class for exceptions in this module.""" pass class AttributeError(Error): """Exception raised when the arguments of GeoCAT-comp functions argument has a mismatch of attributes with other arguments.""" pass class ChunkError(Error): """Exception raised when a Dask array is chunked in a way that is incompatible with an f2py function.""" pass class CoordinateError(Error): """Exception raised when a GeoCAT-comp function is passed a NumPy array as an argument without a required coordinate array being passed separately.""" pass class DimensionError(Error): """Exception raised when the arguments of GeoCAT-comp functions argument has a mismatch of the necessary dimensionality.""" pass class MetaError(Error): """Exception raised when the support for the retention of metadata is not supported.""" pass
26.823529
79
0.723684
120
912
5.5
0.458333
0.127273
0.151515
0.181818
0.356061
0.239394
0.239394
0.239394
0.239394
0.239394
0
0.001383
0.207237
912
33
80
27.636364
0.91148
0.664474
0
0.5
0
0
0
0
0
0
0
0
0
1
0
true
0.5
0
0
0.5
0
0
0
0
null
0
0
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
1
0
0
0
0
0
3
6760f252df0f29983ee33f9395f868e86520ec50
176
py
Python
1044 - Multiplos.py
le16bits/URI---Python
9d22ae74f008104bc9c3c0e2d5f8cd59303bc1db
[ "Apache-2.0" ]
null
null
null
1044 - Multiplos.py
le16bits/URI---Python
9d22ae74f008104bc9c3c0e2d5f8cd59303bc1db
[ "Apache-2.0" ]
null
null
null
1044 - Multiplos.py
le16bits/URI---Python
9d22ae74f008104bc9c3c0e2d5f8cd59303bc1db
[ "Apache-2.0" ]
null
null
null
vl=input().split() A=int(vl[0]) B=int(vl[1]) if A==B: print("Nao sao Multiplos") elif A%B==0 or B%A==0: print("Sao Multiplos") else: print("Nao sao Multiplos")
16
31
0.585227
34
176
3.029412
0.470588
0.349515
0.213592
0.38835
0
0
0
0
0
0
0
0.028169
0.193182
176
10
32
17.6
0.697183
0
0
0.222222
0
0
0.267045
0
0
0
0
0
0
1
0
false
0
0
0
0
0.333333
0
0
0
null
1
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
3
676175ab9dc8bd9abb907c2abe2280ac8fb6f4c9
24,098
py
Python
aleph/tests/test_entities_api.py
tolgatasci/aleph
fe5f16f40bc6e7b5882e6efec98d043882534675
[ "MIT" ]
1,213
2017-03-15T08:10:52.000Z
2022-03-29T13:57:44.000Z
aleph/tests/test_entities_api.py
tolgatasci/aleph
fe5f16f40bc6e7b5882e6efec98d043882534675
[ "MIT" ]
1,374
2017-03-14T18:23:10.000Z
2022-03-31T18:42:20.000Z
aleph/tests/test_entities_api.py
malteos/qaleph
b1ab64e9595ac20358bb122cfa919502d10d1dd4
[ "MIT" ]
217
2017-03-17T12:04:22.000Z
2022-03-20T11:17:16.000Z
import json import datetime import logging from pprint import pformat from followthemoney.types import registry from aleph.core import db, settings from aleph.index.entities import index_entity from aleph.views.util import validate from aleph.tests.util import TestCase, get_caption, JSON log = logging.getLogger(__name__) class EntitiesApiTestCase(TestCase): def setUp(self): super(EntitiesApiTestCase, self).setUp() self.rolex = self.create_user(foreign_id="user_3") self.col = self.create_collection() book = { "schema": "PlainText", "properties": { "name": "The Book", "fileName": "book.txt", }, } self.book = self.create_entity(book, self.col) self.book_id = self.col.ns.sign(self.book.id) self.data = { "schema": "LegalEntity", "properties": { "name": "Winnie the Pooh", "country": "pa", "proof": self.book_id, "incorporationDate": datetime.datetime( 1926, 12, 24 ).isoformat(), # noqa }, } self.ent = self.create_entity(self.data, self.col) self.id = self.col.ns.sign(self.ent.id) self.data2 = { "schema": "LegalEntity", "properties": { "name": "Tom & Jerry", "country": "pa", "proof": self.book_id, "incorporationDate": datetime.datetime(1940, 2, 10).isoformat(), # noqa }, } self.ent2 = self.create_entity(self.data2, self.col) self.id2 = self.col.ns.sign(self.ent2.id) db.session.commit() self.col_id = str(self.col.id) index_entity(self.book) index_entity(self.ent) index_entity(self.ent2) def test_index(self): url = "/api/2/entities?filter:schemata=Thing" res = self.client.get(url + "&facet=collection_id") assert res.status_code == 200, res assert res.json["total"] == 0, res.json assert len(res.json["facets"]["collection_id"]["values"]) == 0, res.json settings.REQUIRE_LOGGED_IN = True res = self.client.get(url) assert res.status_code == 403, res settings.REQUIRE_LOGGED_IN = False _, headers = self.login(is_admin=True) res = self.client.get(url + "&facet=collection_id", headers=headers) assert res.status_code == 200, res assert res.json["total"] == 3, res.json assert 
len(res.json["facets"]["collection_id"]["values"]) == 1, res.json col0 = res.json["facets"]["collection_id"]["values"][0] assert col0["id"] == self.col_id, res.json assert col0["label"] == self.col.label, res.json assert len(res.json["facets"]) == 1, res.json res = self.client.get(url + "&facet=countries", headers=headers) assert len(res.json["facets"]) == 1, res.json assert "values" in res.json["facets"]["countries"], res.json validate(res.json["results"][0], "Entity") def test_export(self): self.load_fixtures() url = "/api/2/search/export?filter:schemata=Thing&q=pakistan" res = self.client.post(url) assert res.status_code == 403, res _, headers = self.login(is_admin=True) res = self.client.post(url, headers=headers) assert res.status_code == 202, res def test_view(self): url = "/api/2/entities/%s" % self.id res = self.client.get(url) assert res.status_code == 403, res _, headers = self.login(is_admin=True) res = self.client.get(url, headers=headers) assert res.status_code == 200, res assert "LegalEntity" in res.json["schema"], res.json assert "Winnie" in get_caption(res.json), res.json validate(res.json, "Entity") def test_update(self): _, headers = self.login(is_admin=True) url = "/api/2/entities/%s" % self.id res = self.client.get(url, headers=headers) assert res.status_code == 200, res data = res.json data["properties"]["name"] = ["Winne the little Shit"] res = self.client.post( url, data=json.dumps(data), headers=headers, content_type=JSON ) assert res.status_code == 200, res.json validate(res.json, "Entity") assert "little" in get_caption(res.json), res.json data["properties"].pop("name", None) res = self.client.post( url + "?validate=true", data=json.dumps(data), headers=headers, content_type=JSON, ) assert res.status_code == 400, res.json def test_create(self): _, headers = self.login(is_admin=True) url = "/api/2/entities" data = { "schema": "RealEstate", "collection_id": self.col_id, "properties": { "name": "Our house", "summary": "In the middle of our 
street", }, } res = self.client.post( url, data=json.dumps(data), headers=headers, content_type=JSON ) assert res.status_code == 200, res.json assert "middle" in res.json["properties"]["summary"][0], res.json validate(res.json, "Entity") def test_create_collection_object(self): _, headers = self.login(is_admin=True) url = "/api/2/entities" data = { "schema": "RealEstate", "collection": {"id": self.col_id, "label": "blaaa"}, "properties": { "name": "Our house", "summary": "In the middle of our street", }, } res = self.client.post( url, data=json.dumps(data), headers=headers, content_type=JSON ) assert res.status_code == 200, res.json assert res.json["collection"]["id"] == self.col_id, res.json validate(res.json, "Entity") def test_create_nested(self): _, headers = self.login(is_admin=True) url = "/api/2/entities" data = { "schema": "Person", "collection_id": self.col_id, "properties": { "name": "Osama bin Laden", "alias": ["Usama bin Laden", "Osama bin Ladin"], "address": "Home, Netherlands", }, } res = self.client.post( url, data=json.dumps(data), headers=headers, content_type=JSON ) assert res.status_code == 200, res.json assert 2 == len(res.json["properties"].get("alias", [])), res.json def test_remove_nested(self): _, headers = self.login(is_admin=True) url = "/api/2/entities" data = { "schema": "Person", "collection_id": self.col_id, "properties": { "name": "Osama bin Laden", "alias": ["Usama bin Laden", "Osama bin Ladin"], }, } res = self.client.post( url, data=json.dumps(data), headers=headers, content_type=JSON ) assert res.status_code == 200, (res.status_code, res.json) data = res.json data["properties"]["alias"].pop() assert 1 == len(data["properties"]["alias"]), data url = "/api/2/entities/%s" % data["id"] res = self.client.post( url, data=json.dumps(data), headers=headers, content_type=JSON ) assert res.status_code == 200, (res.status_code, res.json) assert 1 == len(res.json["properties"].get("alias")), res.json def test_delete_entity(self): _, headers = 
self.login(is_admin=True) url = "/api/2/entities" data = { "schema": "Person", "properties": { "name": "Osama bin Laden", }, "collection_id": self.col_id, } res = self.client.post( url, data=json.dumps(data), headers=headers, content_type=JSON ) assert res.status_code == 200, (res.status_code, res.json) data = res.json url = "/api/2/entities/%s" % data["id"] res = self.client.delete(url, headers=headers) assert res.status_code == 204, res.status_code res = self.client.get(url, headers=headers) assert res.status_code == 404, res.status_code def test_similar_entity(self): _, headers = self.login(is_admin=True) url = "/api/2/entities" data = { "schema": "Person", "collection_id": self.col_id, "properties": {"name": "Osama bin Laden"}, } res = self.client.post( url, data=json.dumps(data), headers=headers, content_type=JSON ) data = { "schema": "Person", "collection_id": self.col_id, "properties": {"name": "Osama bin Laden"}, } obj = self.client.post( url, data=json.dumps(data), headers=headers, content_type=JSON ) url = "/api/2/entities/%s/similar" % obj.json["id"] similar = self.client.get(url, headers=headers) assert similar.status_code == 200, (similar.status_code, similar.json) text = similar.data.decode("utf-8") assert obj.json["id"] not in text, obj.id assert obj.json["id"] not in text, obj.id data = similar.json assert len(data["results"]) == 1, data assert "Laden" in get_caption(data["results"][0]["entity"]), data assert b"Pooh" not in res.data, res.data validate(data["results"][0], "Entity") def test_match(self): _, headers = self.login(is_admin=True) data = { "schema": "Person", "collection_id": self.col_id, "properties": { "name": "Osama bin Laden", }, } res = self.client.post( "/api/2/entities", data=json.dumps(data), headers=headers, content_type=JSON, ) data = { "schema": "Person", "properties": { "name": "Osama bin Laden", }, } matches = self.client.post( "/api/2/match", data=json.dumps(data), headers=headers, content_type=JSON, ) assert 
matches.status_code == 200, (matches.status_code, matches.json) data = matches.json assert len(data["results"]) == 1, data assert "Laden" in get_caption(data["results"][0]), data assert b"Pooh" not in res.data, res.data validate(data["results"][0], "Entity") def test_entity_tags(self): _, headers = self.login(is_admin=True) url = "/api/2/entities" data = { "schema": "Person", "collection_id": self.col_id, "properties": { "name": "Blaaaa blubb", "phone": ["+491769817271", "+491769817999"], }, } resa = self.client.post( url, data=json.dumps(data), headers=headers, content_type=JSON ) data = { "schema": "Person", "collection_id": self.col_id, "properties": { "name": "Nobody Man", "phone": ["+491769817271", "+491769817777"], }, } resa = self.client.post( url, data=json.dumps(data), headers=headers, content_type=JSON ) url = "/api/2/entities/%s/tags" % resa.json["id"] res = self.client.get(url, headers=headers) assert res.status_code == 200, (res.status_code, res.json) results = res.json["results"] assert len(results) == 1, results assert results[0]["value"] == "+491769817271", results validate(res.json["results"][0], "EntityTag") def test_undelete(self): _, headers = self.login(is_admin=True) url = "/api/2/entities" data = { "schema": "Person", "properties": { "name": "Mr. Mango", }, "collection_id": self.col_id, } res = self.client.post( url, data=json.dumps(data), headers=headers, content_type=JSON ) assert res.status_code == 200, (res.status_code, res.json) id1 = res.json["id"] url = "/api/2/entities/%s" % id1 res = self.client.delete(url, headers=headers) assert res.status_code == 204, res.status_code res = self.client.get(url, headers=headers) assert res.status_code == 404, res.status_code # test undelete with property update url = "/api/2/entities/%s" % id1 data = { "schema": "Person", "properties": { "name": "Mr. 
Mango", "status": "ripe", }, "collection_id": self.col_id, } res = self.client.post( url, data=json.dumps(data), headers=headers, content_type=JSON ) assert res.status_code == 200, res.status_code validate(res.json, "Entity") assert res.json["properties"]["name"] == ["Mr. Mango"], res.json assert res.json["properties"]["status"] == ["ripe"], res.json url = "/api/2/entities/%s" % id1 res = self.client.get(url, headers=headers) assert res.status_code == 200, res.status_code assert res.json["properties"]["name"] == ["Mr. Mango"], res.json assert res.json["properties"]["status"] == ["ripe"], res.json # Test undelete existing entity url = "/api/2/entities/%s" % id1 data = { "schema": "Person", "properties": { "name": "Mr. Mango", "status": "ripe", "email": "mango@mango.yum", }, } res = self.client.post( url, data=json.dumps(data), headers=headers, content_type=JSON ) assert res.status_code == 200, res.status_code validate(res.json, "Entity") assert res.json["properties"]["name"] == ["Mr. Mango"], res.json assert res.json["properties"]["status"] == ["ripe"], res.json assert res.json["properties"]["email"] == ["mango@mango.yum"], res.json # test create entity with undelete id2 = "randomid" url = "/api/2/entities/%s" % id2 data = { "schema": "Person", "properties": { "name": "Mr. Banana", }, } res = self.client.post( url, data=json.dumps(data), headers=headers, content_type=JSON ) assert res.status_code == 404, res.status_code data["collection_id"] = self.col_id res = self.client.post( url, data=json.dumps(data), headers=headers, content_type=JSON ) assert res.status_code == 200, res.status_code validate(res.json, "Entity") assert res.json["id"] != id2, res.json assert res.json["properties"]["name"] == ["Mr. 
Banana"], res.json def test_recursive_delete(self): _, headers = self.login(is_admin=True) url = "/api/2/entities" headers["Content-Type"] = JSON data1 = json.dumps( { "schema": "Person", "properties": { "name": "Osama bin Laden", }, "collection_id": self.col_id, } ) res1 = self.client.post(url, data=data1, headers=headers) id1 = res1.json["id"] data2 = json.dumps( { "schema": "Organization", "properties": { "name": "Al-Qaeda", }, "collection_id": self.col_id, } ) res2 = self.client.post(url, data=data2, headers=headers) id2 = res2.json["id"] data3 = json.dumps( { "schema": "Membership", "properties": {"member": id1, "organization": id2}, "collection_id": self.col_id, } ) res3 = self.client.post(url, data=data3, headers=headers) id3 = res3.json["id"] # Deleting a thing, deletes associated edge. url = "/api/2/entities/%s" % id1 res = self.client.delete(url, headers=headers) assert res.status_code == 204, res.status_code url = "/api/2/entities/%s" % id3 res = self.client.get(url, headers=headers) assert res.status_code == 404, res.status_code url = "/api/2/entities/%s" % id2 res = self.client.get(url, headers=headers) assert res.status_code == 200, res.status_code # undelete url = "/api/2/entities/%s" % id1 res = self.client.post(url, data=data1, headers=headers) assert res.json["id"] == id1, (res.json["id"], id1) url = "/api/2/entities/%s" % id3 self.client.post(url, data=data3, headers=headers) url = "/api/2/entities/%s" % id1 res = self.client.get(url, headers=headers) assert res.status_code == 200, res.status_code url = "/api/2/entities/%s" % id3 res = self.client.get(url, headers=headers) assert res.status_code == 200, res.status_code # Deleting a edge, should not delete associated things url = "/api/2/entities/%s" % id3 res = self.client.delete(url, headers=headers) assert res.status_code == 204, res.status_code url = "/api/2/entities/%s" % id1 res = self.client.get(url, headers=headers) assert res.status_code == 200, res.status_code url = "/api/2/entities/%s" 
% id2 res = self.client.get(url, headers=headers) assert res.status_code == 200, res.status_code def test_sort_by_date(self): _, headers = self.login(is_admin=True) url = "/api/2/entities?filter:schemata=Thing&sort=dates%3Aasc" res = self.client.get(url, headers=headers, content_type=JSON) assert res.json["results"][0]["id"] == self.ent.id, res.json assert res.json["results"][1]["id"] == self.ent2.id, res.json assert res.json["results"][2]["id"] == self.book.id, res.json url = "/api/2/entities?filter:schemata=Thing&sort=dates%3Adesc" res = self.client.get(url, headers=headers, content_type=JSON) assert res.json["results"][0]["id"] == self.ent2.id, res.json assert res.json["results"][1]["id"] == self.ent.id, res.json assert res.json["results"][2]["id"] == self.book.id, res.json def test_expand(self): _, headers = self.login(is_admin=True) url = "/api/2/entities" data = { "schema": "Person", "collection_id": self.col_id, "properties": { "name": "Osama bin Laden", "email": ["osama@al-qaeda.org", "o@laden.me"], "status": "dead", "nationality": "sa", }, } person1 = self.client.post( url, data=json.dumps(data), headers=headers, content_type=JSON ) data = { "schema": "Passport", "collection_id": self.col_id, "properties": { "passportNumber": "A1B2C3", "holder": person1.json["id"], }, } passport = self.client.post( url, data=json.dumps(data), headers=headers, content_type=JSON ) col2 = self.create_collection() data = { "schema": "Person", "collection_id": str(col2.id), "properties": { "name": "John Doe", "email": "osama@al-qaeda.org", }, } person1_in_other_collection = self.client.post( # noqa url, data=json.dumps(data), headers=headers, content_type=JSON ) data = { "schema": "Person", "collection_id": self.col_id, "properties": { "name": "Undercover Osama", "email": "osama@al-qaeda.org", }, } self.client.post( url, data=json.dumps(data), headers=headers, content_type=JSON, ) data = { "schema": "Person", "collection_id": self.col_id, "properties": { "name": "John Doe", 
"email": ["osama@al-qaeda.org", "john@doe.me"], "nationality": "sa", }, } self.client.post( url, data=json.dumps(data), headers=headers, content_type=JSON, ) data = { "schema": "Person", "collection_id": self.col_id, "properties": { "name": "Dead Guy 1", "status": "dead", }, } self.client.post( url, data=json.dumps(data), headers=headers, content_type=JSON, ) data = { "schema": "Company", "collection_id": self.col_id, "properties": { "name": "Al-Qaeda", }, } company1 = self.client.post( url, data=json.dumps(data), headers=headers, content_type=JSON ) data = { "schema": "Ownership", "collection_id": self.col_id, "properties": { "owner": person1.json["id"], "asset": company1.json["id"], }, } self.client.post( url, data=json.dumps(data), headers=headers, content_type=JSON, ) url = "/api/2/entities/%s/expand?limit=100" % (person1.json["id"]) res = self.client.get(url, headers=headers) assert res.status_code == 200, (res.status_code, res.json) validate(res.json, "EntityExpand") assert res.json["total"] == 2, pformat(res.json) results = res.json["results"] assert len(results) == 2, pformat(results) for res in results: prop = res["property"] assert prop in ( "identificiation", "ownershipOwner", ), results if prop == "ownershipOwner": assert res["count"] == 1 assert len(res["entities"]) == 2 for nested in res["entities"]: assert nested["schema"] in ("Ownership", "Company"), nested if prop == "identificiation": assert res["count"] == 1 assert res["entities"][0]["schema"] == "Passport", res url = "/api/2/entities/%s/expand" % (company1.json["id"]) res = self.client.get(url, headers=headers) assert res.status_code == 200, (res.status_code, res.json) validate(res.json, "EntityExpand") assert res.json["total"] == 1, pformat(res.json) results = res.json["results"] assert len(results) == 1, pformat(results) for res in results: prop = res["property"] assert prop == "ownershipAsset", prop assert res["count"] == 1 # assert res['entities'][0]['name'] == 'Osama bin Laden' url = 
"/api/2/entities/%s/expand" url = url % passport.json["id"] res = self.client.get(url, headers=headers) assert res.status_code == 200, (res.status_code, res.json) validate(res.json, "EntityExpand") assert res.json["total"] == 1, pformat(res.json) results = res.json["results"] assert len(results) == 1, pformat(results) for res in results: prop = res["property"] assert prop == "holder", prop assert res["count"] == 1, pformat(res) assert len(res["entities"]) == 1, pformat(res)
36.6231
88
0.523612
2,692
24,098
4.598068
0.090639
0.057683
0.066166
0.058329
0.768299
0.732025
0.702375
0.681047
0.648166
0.616174
0
0.02151
0.330567
24,098
657
89
36.678843
0.745785
0.011287
0
0.565574
0
0
0.164644
0.013983
0
0
0
0
0.163934
1
0.027869
false
0.009836
0.014754
0
0.044262
0.001639
0
0
0
null
0
0
0
0
1
1
0
0
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
3
6777bcc8808e02de19da88637c788f622e6661ba
942
py
Python
routines/convert.py
meteostat/routines
8867b96a3fcb254ebcc9623933a76dac44157b70
[ "MIT" ]
7
2020-07-02T09:49:06.000Z
2021-05-24T11:46:00.000Z
routines/convert.py
meteostat/routines
8867b96a3fcb254ebcc9623933a76dac44157b70
[ "MIT" ]
16
2021-03-29T19:45:01.000Z
2021-11-14T11:39:12.000Z
routines/convert.py
meteostat/routines
8867b96a3fcb254ebcc9623933a76dac44157b70
[ "MIT" ]
1
2021-04-06T20:58:42.000Z
2021-04-06T20:58:42.000Z
import math from numpy import isnan # Convert Kelvin to Celsius def kelvin_to_celsius(value): return value - 273.15 if value is not None else None # Convert m/s to km/h def ms_to_kmh(value): return value * 3.6 if value is not None else None # Get relative humidity from temperature and dew point def temp_dwpt_to_rhum(row: dict): return 100 * (math.exp((17.625 * row['dwpt']) / (243.04 + row['dwpt'])) / math.exp((17.625 * row['temp']) / ( 243.04 + row['temp']))) if row['temp'] is not None and row['dwpt'] is not None else None def pres_to_msl(row: dict, altitude: int = None, temp: str = 'tavg'): try: return None if isnan(row['pres']) or isnan(row[temp]) or isnan(altitude) or altitude is None or row['pres'] == - \ 999 else round(row['pres'] * math.pow((1 - ((0.0065 * altitude) / (row[temp] + 0.0065 * altitude + 273.15))), -5.257), 1) except BaseException: return None
31.4
133
0.636943
157
942
3.764331
0.401274
0.059222
0.060914
0.06599
0.160745
0.081218
0.081218
0
0
0
0
0.073171
0.216561
942
29
134
32.482759
0.727642
0.104034
0
0
0
0
0.047619
0
0
0
0
0
0
1
0.266667
false
0
0.133333
0.2
0.733333
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
1
0
0
0
1
1
0
0
3
677f4d9d18bd055d71526f163855ffb374bed024
192
py
Python
dla/linalg/__init__.py
mp4096/dla
a3a1ae1c86abe62e321ca2d2fba89d30c39ba6ef
[ "BSD-3-Clause" ]
null
null
null
dla/linalg/__init__.py
mp4096/dla
a3a1ae1c86abe62e321ca2d2fba89d30c39ba6ef
[ "BSD-3-Clause" ]
null
null
null
dla/linalg/__init__.py
mp4096/dla
a3a1ae1c86abe62e321ca2d2fba89d30c39ba6ef
[ "BSD-3-Clause" ]
null
null
null
"""Linear algebra submodule.""" from .linalg_helpers import ( log_rel_error, modified_gram_schmidt, online_variance, projector_onto_kernel, ) from .arnoldi import arnoldi
19.2
31
0.729167
22
192
6
0.863636
0
0
0
0
0
0
0
0
0
0
0
0.192708
192
9
32
21.333333
0.851613
0.130208
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
0.285714
0
0.285714
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
0
0
0
0
0
3
6788dc31d002251d2bdedb01bbb76e6a60cd9f4f
3,993
py
Python
src/python/dxpy/templating/templates/python/parallelized/src/code.py
scalavision/dx-toolkit
e4a2361b3bb6424d7e1ad2bcfc65d649b02d0496
[ "Apache-2.0" ]
null
null
null
src/python/dxpy/templating/templates/python/parallelized/src/code.py
scalavision/dx-toolkit
e4a2361b3bb6424d7e1ad2bcfc65d649b02d0496
[ "Apache-2.0" ]
null
null
null
src/python/dxpy/templating/templates/python/parallelized/src/code.py
scalavision/dx-toolkit
e4a2361b3bb6424d7e1ad2bcfc65d649b02d0496
[ "Apache-2.0" ]
null
null
null
#!/usr/bin/env python # DX_APP_WIZARD_NAME DX_APP_WIZARD_VERSION # Generated by dx-app-wizard. # # Parallelized execution pattern: Your app will generate multiple jobs # to perform some computation in parallel, followed by a final # "postprocess" stage that will perform any additional computations as # necessary. # # See https://wiki.dnanexus.com/Developer-Portal for documentation and # tutorials on how to modify this file. # # DNAnexus Python Bindings (dxpy) documentation: # http://autodoc.dnanexus.com/bindings/python/current/ import os import dxpy @dxpy.entry_point("postprocess") def postprocess(process_outputs): # Change the following to process whatever input this stage # receives. You may also want to copy and paste the logic to download # and upload files here as well if this stage receives file input # and/or makes file output. for output in process_outputs: pass return { "answer": "placeholder value" } @dxpy.entry_point("process") def process(input1): # Change the following to process whatever input this stage # receives. You may also want to copy and paste the logic to download # and upload files here as well if this stage receives file input # and/or makes file output. print input1 return { "output": "placeholder value" } @dxpy.entry_point("main") def main(DX_APP_WIZARD_INPUT_SIGNATURE): DX_APP_WIZARD_INITIALIZE_INPUTDX_APP_WIZARD_DOWNLOAD_ANY_FILES # Split your work into parallel tasks. As an example, the # following generates 10 subjobs running with the same dummy # input. subjobs = [] for i in range(10): subjob_input = { "input1": True } subjobs.append(dxpy.new_dxjob(subjob_input, "process")) # The following line creates the job that will perform the # "postprocess" step of your app. We've given it an input field # that is a list of job-based object references created from the # "process" jobs we just created. Assuming those jobs have an # output field called "output", these values will be passed to the # "postprocess" job. 
Because these values are not ready until the # "process" jobs finish, the "postprocess" job WILL NOT RUN until # all job-based object references have been resolved (i.e. the # jobs they reference have finished running). # # If you do not plan to have the "process" jobs create output that # the "postprocess" job will require, then you can explicitly list # the dependencies to wait for those jobs to finish by setting the # "depends_on" field to the list of subjobs to wait for (it # accepts either dxpy handlers or string IDs in the list). We've # included this parameter in the line below as well for # completeness, though it is unnecessary if you are providing # job-based object references in the input that refer to the same # set of jobs. postprocess_job = dxpy.new_dxjob(fn_input={ "process_outputs": [subjob.get_output_ref("output") for subjob in subjobs] }, fn_name="postprocess", depends_on=subjobs) DX_APP_WIZARD_UPLOAD_ANY_FILES # If you would like to include any of the output fields from the # postprocess_job as the output of your app, you should return it # here using a job-based object reference. If the output field in # the postprocess function is called "answer", you can pass that # on here as follows: # # return { "app_output_field": postprocess_job.get_output_ref("answer"), ...} # # Tip: you can include in your output at this point any open # objects (such as files) which will be closed by a job that # finishes later. The system will check to make sure that the # output object is closed and will attempt to clone it out as # output into the parent container only after all subjobs have # finished. output = {} DX_APP_WIZARD_OUTPUT return output dxpy.run()
40.744898
125
0.710744
590
3,993
4.725424
0.361017
0.025825
0.027618
0.025825
0.147059
0.125538
0.125538
0.125538
0.125538
0.125538
0
0.002262
0.225144
3,993
97
126
41.164948
0.898836
0.695467
0
0
1
0
0.097582
0
0
0
0
0.010309
0
0
null
null
0.038462
0.076923
null
null
0.038462
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
null
0
0
1
0
1
0
0
0
0
0
0
0
0
3
678e6092da45b70e2932e611ab78149a0cd4ccec
397
py
Python
amplpy/tests/__main__.py
sbebo/amplpy
ac69a79ce40d5ace93801f89ce6fe8310628cfd7
[ "BSD-3-Clause" ]
47
2017-08-11T16:38:26.000Z
2022-03-24T08:37:40.000Z
amplpy/tests/__main__.py
Seanpm2001-Python/amplpy
26c04134b6c4135a541d54c7873d9b2933df039a
[ "BSD-3-Clause" ]
41
2017-08-05T00:54:27.000Z
2022-03-08T21:56:19.000Z
amplpy/tests/__main__.py
Seanpm2001-Python/amplpy
26c04134b6c4135a541d54c7873d9b2933df039a
[ "BSD-3-Clause" ]
22
2017-08-05T00:38:43.000Z
2022-02-02T20:22:10.000Z
#!/usr/bin/env python # -*- coding: utf-8 -*- from __future__ import absolute_import import unittest from .test_ampl import TestAMPL from .test_entities import TestEntities from .test_iterators import TestIterators from .test_dataframe import TestDataFrame from .test_environment import TestEnvironment from .test_properties import TestProperties if __name__ == '__main__': unittest.main()
24.8125
45
0.806045
49
397
6.142857
0.571429
0.159468
0
0
0
0
0
0
0
0
0
0.002874
0.123426
397
15
46
26.466667
0.862069
0.105793
0
0
0
0
0.022663
0
0
0
0
0
0
1
0
true
0
0.8
0
0.8
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
3
678fb7c903ddd38cbbb4c928ca2edac7cde206ea
217
py
Python
Grupo2_encriptacionCesar/mensajeLista.py
IEEERASDigitalesJaveriana/encriptacion_Cesar_Git
f0f55f97448186b814d0e8567d2f432e0eeed563
[ "CC0-1.0" ]
null
null
null
Grupo2_encriptacionCesar/mensajeLista.py
IEEERASDigitalesJaveriana/encriptacion_Cesar_Git
f0f55f97448186b814d0e8567d2f432e0eeed563
[ "CC0-1.0" ]
null
null
null
Grupo2_encriptacionCesar/mensajeLista.py
IEEERASDigitalesJaveriana/encriptacion_Cesar_Git
f0f55f97448186b814d0e8567d2f432e0eeed563
[ "CC0-1.0" ]
null
null
null
# función para pasar el mensaje que esta en string a lista # parametros de entrada: mensaje - mensaje a encriptar en string def mensaje_a_lista(mensaje): # funcion 2 return # se devuelve la lista con el mensaje
54.25
67
0.760369
35
217
4.657143
0.657143
0.110429
0
0
0
0
0
0
0
0
0
0.005747
0.198157
217
4
68
54.25
0.931034
0.774194
0
0
0
0
0
0
0
0
0
0
0
1
0.5
false
0
0
0.5
1
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
1
0
0
0
1
0
0
0
3
6793e230f767b7e5c1aabf7061d535301bd46934
443
py
Python
main/pythonDev/TestModels/MiniExamples/miniExamplesFileList.py
eapcivil/EXUDYN
52bddc8c258cda07e51373f68e1198b66c701d03
[ "BSD-3-Clause-Open-MPI" ]
1
2020-10-06T08:06:25.000Z
2020-10-06T08:06:25.000Z
main/pythonDev/TestModels/MiniExamples/miniExamplesFileList.py
eapcivil/EXUDYN
52bddc8c258cda07e51373f68e1198b66c701d03
[ "BSD-3-Clause-Open-MPI" ]
null
null
null
main/pythonDev/TestModels/MiniExamples/miniExamplesFileList.py
eapcivil/EXUDYN
52bddc8c258cda07e51373f68e1198b66c701d03
[ "BSD-3-Clause-Open-MPI" ]
null
null
null
# Registry of the mini-example scripts, one entry per file name.
miniExamplesFileList = [
    'ObjectMassPoint.py',
    'ObjectMassPoint2D.py',
    'ObjectMass1D.py',
    'ObjectRotationalMass1D.py',
    'ObjectRigidBody2D.py',
    'ObjectGenericODE2.py',
    'ObjectConnectorSpringDamper.py',
    'ObjectConnectorCartesianSpringDamper.py',
    'ObjectConnectorCoordinateSpringDamper.py',
    'ObjectConnectorDistance.py',
    'ObjectConnectorCoordinate.py',
    'MarkerSuperElementPosition.py',
]
27.6875
58
0.823928
36
443
10.138889
0.666667
0
0
0
0
0
0
0
0
0
0
0.012019
0.060948
443
15
59
29.533333
0.865385
0.128668
0
0
0
0
0.807292
0.565104
0
0
0
0
0
1
0
false
0
0
0
0
0
0
0
1
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
1
1
null
0
0
0
0
0
0
0
0
0
0
0
0
0
3
67d9a95c2072b4cc77b4f10acc8a33a853df5a15
86
py
Python
tests/src/compound/while.py
lindlind/python-interpreter
ffcb38627dc128dddb04e769d0bff6466365271a
[ "MIT" ]
null
null
null
tests/src/compound/while.py
lindlind/python-interpreter
ffcb38627dc128dddb04e769d0bff6466365271a
[ "MIT" ]
null
null
null
tests/src/compound/while.py
lindlind/python-interpreter
ffcb38627dc128dddb04e769d0bff6466365271a
[ "MIT" ]
null
null
null
# Grow ``length`` until the prefix s[:length] covers the whole string.
s = "abacaba"
length = 1
while s[:length] != s:
    length += 1
# Bare comparison, evaluated and discarded.  Kept as-is: this file is an
# interpreter test fixture and the expression statement itself may be
# what is under test.
length == 7
12.285714
23
0.569767
14
86
3.5
0.5
0.285714
0
0
0
0
0
0
0
0
0
0.063492
0.267442
86
6
24
14.333333
0.714286
0
0
0
0
0
0.081395
0
0
0
0
0
0
1
0
false
0
0
0
0
0
1
0
0
null
1
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
3
67fec778e4939d81260492b3bd308c4ee5e70a70
89
py
Python
Video 28/Tuplas I.py
Khahory/curso_de_python_yacklyon-trabajo-py
df5ae0d88485124db37a55d69f701ff0f8dfa08b
[ "Apache-2.0" ]
null
null
null
Video 28/Tuplas I.py
Khahory/curso_de_python_yacklyon-trabajo-py
df5ae0d88485124db37a55d69f701ff0f8dfa08b
[ "Apache-2.0" ]
null
null
null
Video 28/Tuplas I.py
Khahory/curso_de_python_yacklyon-trabajo-py
df5ae0d88485124db37a55d69f701ff0f8dfa08b
[ "Apache-2.0" ]
null
null
null
# Tuples: swap two values without a temporary variable.
a = 10
b = 5
print(a)
a, b = b, a  # tuple unpacking exchanges the two values
print(a)
11.125
43
0.617978
19
89
2.894737
0.578947
0.218182
0
0
0
0
0
0
0
0
0
0.042857
0.213483
89
8
43
11.125
0.742857
0.438202
0
0.4
0
0
0
0
0
0
0
0
0
1
0
false
0
0
0
0
0.4
1
0
0
null
1
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
3
db333eb94172e54f58ebf5b2613a0b7f7ec3e67d
894
py
Python
Task/Floyds-triangle/Python/floyds-triangle-1.py
LaudateCorpus1/RosettaCodeData
9ad63ea473a958506c041077f1d810c0c7c8c18d
[ "Info-ZIP" ]
5
2021-01-29T20:08:05.000Z
2022-03-22T06:16:05.000Z
Task/Floyds-triangle/Python/floyds-triangle-1.py
seanwallawalla-forks/RosettaCodeData
9ad63ea473a958506c041077f1d810c0c7c8c18d
[ "Info-ZIP" ]
null
null
null
Task/Floyds-triangle/Python/floyds-triangle-1.py
seanwallawalla-forks/RosettaCodeData
9ad63ea473a958506c041077f1d810c0c7c8c18d
[ "Info-ZIP" ]
1
2021-04-13T04:19:31.000Z
2021-04-13T04:19:31.000Z
# NOTE(review): this is a captured Python REPL transcript (doctest-style
# session), not an importable module.  The original line breaks and the
# right-aligned column spacing of the printed triangles were lost in
# extraction, so the session is preserved verbatim below rather than
# reformatted (doctest output is whitespace-sensitive).
>>> def floyd(rowcount=5): rows = [[1]] while len(rows) < rowcount: n = rows[-1][-1] + 1 rows.append(list(range(n, n + len(rows[-1]) + 1))) return rows >>> floyd() [[1], [2, 3], [4, 5, 6], [7, 8, 9, 10], [11, 12, 13, 14, 15]] >>> def pfloyd(rows=[[1], [2, 3], [4, 5, 6], [7, 8, 9, 10]]): colspace = [len(str(n)) for n in rows[-1]] for row in rows: print( ' '.join('%*i' % space_n for space_n in zip(colspace, row))) >>> pfloyd() 1 2 3 4 5 6 7 8 9 10 >>> pfloyd(floyd(5)) 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 >>> pfloyd(floyd(14)) 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 >>>
20.790698
69
0.549217
218
894
2.243119
0.582569
0.051125
0.030675
0.0409
0.173824
0.173824
0.173824
0.173824
0.173824
0.173824
0
0.444094
0.289709
894
42
70
21.285714
0.325984
0
0
0.282051
0
0
0.004474
0
0
0
0
0
0
0
null
null
0
0
null
null
0.025641
0
0
0
null
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
1
0
0
0
0
0
0
0
null
0
0
0
0
1
0
0
0
0
0
0
0
0
3
db39b507155c05bda16f33fa8a3e79c11eb29fa9
294
py
Python
tools/server/judger/Strategy/settings.example.py
ypm1999/KitJudge
8d399b38318dd6f6eec27185d26bb5146e8ce025
[ "MIT" ]
1
2018-01-17T08:24:03.000Z
2018-01-17T08:24:03.000Z
tools/server/judger/Strategy/settings.example.py
ypm1999/KitJudge
8d399b38318dd6f6eec27185d26bb5146e8ce025
[ "MIT" ]
null
null
null
tools/server/judger/Strategy/settings.example.py
ypm1999/KitJudge
8d399b38318dd6f6eec27185d26bb5146e8ce025
[ "MIT" ]
3
2018-09-23T09:08:01.000Z
2018-10-13T02:02:03.000Z
# Example judger settings — copy to settings.py and replace the '***'
# placeholders with real values before running.

# Report message queue (RabbitMQ).
kitReportMQHost = '***'
kitReportMQPort = 5672
kitReportMQUsername = '***'
kitReportMQPassword = '***'
kitReportMQQueueName = '***'
kitReportMQHeartBeat = 20

# Git server access.
kitGitHost = '***'
kitGitUser = '***'

# MySQL database.
kitDBPort = 3306
kitDBHost = '***'
kitDBName = '***'
kitDBUsername = '***'
kitDBPassword = '***'
19.6
28
0.64966
16
294
11.9375
1
0
0
0
0
0
0
0
0
0
0
0.03937
0.136054
294
15
29
19.6
0.712598
0
0
0
0
0
0.101695
0
0
0
0
0
0
1
0
false
0.153846
0
0
0
0
1
0
1
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
1
0
0
0
0
0
3
e1d1d2b33661087644872350c56461f1cb268c39
4,638
py
Python
src/m7_summing.py
CSSE120StartingCode/AccumulatorsAndFunctionsWithParameters
f48f71118a317782909873b5220ad79cd67d5449
[ "MIT" ]
null
null
null
src/m7_summing.py
CSSE120StartingCode/AccumulatorsAndFunctionsWithParameters
f48f71118a317782909873b5220ad79cd67d5449
[ "MIT" ]
null
null
null
src/m7_summing.py
CSSE120StartingCode/AccumulatorsAndFunctionsWithParameters
f48f71118a317782909873b5220ad79cd67d5449
[ "MIT" ]
155
2017-09-04T20:11:32.000Z
2020-06-26T07:21:33.000Z
""" This module lets you practice the ACCUMULATOR pattern in its simplest classic forms: SUMMING: total = total + number Authors: David Mutchler, Vibha Alangar, Dave Fisher, Matt Boutell, Mark Hays, Mohammed Noureddine, Sana Ebrahimi, Sriram Mohan, their colleagues and PUT_YOUR_NAME_HERE. """ # TODO: 1. PUT YOUR NAME IN THE ABOVE LINE. ############################################################################### # TODO: 2. Read the following, then change its _TODO_ to DONE. # Throughout these exercises, you must use RANGE statements. # At this point of the course, you are restricted to the SINGLE-ARGUMENT # form of RANGE statements, like this: # range(blah): # There is a MULTIPLE-ARGUMENT form of RANGE statements (e.g. range(a, b)) # but you are NOT permitted to use the MULTIPLE-ARGUMENT form yet, # for pedagogical reasons. ############################################################################### def main(): """ Calls the TEST functions in this module. """ run_test_sum_cosines() run_test_sum_square_roots() def run_test_sum_cosines(): """ Tests the sum_cosines function. """ # ------------------------------------------------------------------------- # TODO: 3. Implement this function. # It TESTS the sum_cosines function defined below. # Include at least ** 3 ** tests. # ___ # Use the same 4-step process as in implementing previous # TEST functions, including the same way to print expected/actual. # ------------------------------------------------------------------------- print() print("--------------------------------------------------") print("Testing the sum_cosines function:") print("--------------------------------------------------") def sum_cosines(n): """ What comes in: A non-negative integer n. What goes out: Returns the sum of the cosines of the integers 0, 1, 2, 3, ... n, inclusive, for the given n. Side effects: None. Example: If n is 3, this function returns cos(0) + cos(1) + cos(2) + cos(3) which is about 0.13416. 
Type hints: :type n: int :rtype: float """ # ------------------------------------------------------------------------- # TODO: 4. Implement and test this function. # Note that you should write its TEST function first (above). # That is called TEST-FIRST DEVELOPMENT (TFD). # ___ # No fair running the code of sum_cosines to GENERATE # test cases; that would defeat the purpose of TESTING! # ------------------------------------------------------------------------- def run_test_sum_square_roots(): """ Tests the sum_square_roots function. """ # ------------------------------------------------------------------------- # TODO: 5. Implement this function. # It TESTS the sum_square_roots function defined below. # Include at least ** 3 ** tests. # ___ # Use the same 4-step process as in implementing previous # TEST functions, including the same way to print expected/actual. # ------------------------------------------------------------------------- print() print("--------------------------------------------------") print("Testing the sum_square_roots function:") print("--------------------------------------------------") def sum_square_roots(n): """ What comes in: A non-negative integer n. What goes out: Returns the sum of the square roots of the integers 2, 4, 6, 8, ... 2n inclusive, for the given n. So if n is 7, the last term of the sum is the square root of 14 (not 7). Side effects: None. Example: If n is 5, this function returns sqrt(2) + sqrt(4) + sqrt(6) + sqrt(8) + sqrt(10), which is about 11.854408. Type hints: :type n: int :rtype: float """ # ------------------------------------------------------------------------- # TODO: 6. Implement and test this function. # Note that you should write its TEST function first (above). # That is called TEST-FIRST DEVELOPMENT (TFD). # ___ # No fair running the code of sum_square_roots to GENERATE # test cases; that would defeat the purpose of TESTING! 
# ------------------------------------------------------------------------- # ----------------------------------------------------------------------------- # Calls main to start the ball rolling. # ----------------------------------------------------------------------------- main()
40.684211
79
0.48232
503
4,638
4.355865
0.349901
0.024646
0.044728
0.028754
0.570972
0.482428
0.463715
0.408033
0.379735
0.379735
0
0.013469
0.21561
4,638
113
80
41.044248
0.588785
0.757223
0
0.375
0
0
0.355414
0.254777
0
0
0
0.026549
0
1
0.3125
false
0
0
0
0.3125
0.5
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
1
0
0
1
0
0
0
0
0
1
0
3
e1d51b9262e223b9ce3d9fe3da8ad2f56bc24ef4
1,246
py
Python
src/accent_analyser/core/cluster_rules.py
stefantaubert/eng2ipa-accent-transformer
d620c70b06c83119402e255085046747ade87444
[ "MIT" ]
null
null
null
src/accent_analyser/core/cluster_rules.py
stefantaubert/eng2ipa-accent-transformer
d620c70b06c83119402e255085046747ade87444
[ "MIT" ]
null
null
null
src/accent_analyser/core/cluster_rules.py
stefantaubert/eng2ipa-accent-transformer
d620c70b06c83119402e255085046747ade87444
[ "MIT" ]
null
null
null
from collections import OrderedDict
from typing import Any, List, Optional
from typing import OrderedDict as OrderedDictType
from typing import Set, Tuple

# NOTE(review): several imports (OrderedDict, Optional, Set, RuleType,
# rule_to_str, rules_to_str, DataFrame) are unused below — possibly staged
# for the unfinished implementations in this module.
from accent_analyser.core.rule_detection import (PhonemeOccurrences,
                                                 PhoneOccurrences, Rule,
                                                 RuleType, WordEntry,
                                                 WordRules, rule_to_str,
                                                 rules_to_str)
from accent_analyser.core.rule_stats import (get_rule_occurrences,
                                             word_rules_to_rules_dict)
from ordered_set import OrderedSet
from pandas import DataFrame


def get_fingerprint(speaker_word_rules: OrderedDictType[WordEntry, WordRules],
                    speaker_phone_occurrences: PhoneOccurrences,
                    speaker_phoneme_occurrences: PhonemeOccurrences,
                    all_rules: OrderedSet[Rule]) -> Any:
    """Derive a per-speaker fingerprint from the speaker's word rules.

    NOTE(review): this function computes ``speaker_rule_occurrences`` but
    has no return statement, so it currently returns None despite the
    ``-> Any`` annotation; ``speaker_phoneme_occurrences`` and
    ``all_rules`` are also unused.  Looks unfinished — confirm intent.
    """
    # Flatten the per-word rule mapping, then count how often each rule
    # fires given the speaker's phone occurrences.
    speaker_words_to_rules = word_rules_to_rules_dict(speaker_word_rules)
    speaker_rule_occurrences = get_rule_occurrences(speaker_words_to_rules,
                                                   speaker_phone_occurrences)


def compare_two_fingerprints(fingerprint1: Any, fingerprint2: Any) -> float:
    # NOTE(review): unimplemented stub — returns None, not the declared float.
    pass


def cluster_fingerprints(fingerprints: List[Any]) -> float:
    # NOTE(review): unimplemented stub — returns None, not the declared float.
    pass
44.5
209
0.684591
129
1,246
6.271318
0.379845
0.044499
0.059333
0.054388
0.113721
0
0
0
0
0
0
0.002193
0.268058
1,246
27
210
46.148148
0.884868
0
0
0.1
0
0
0
0
0
0
0
0
0
1
0.15
false
0.1
0.4
0
0.55
0.15
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
1
1
0
1
0
0
3
e1d6efe1bc9857177bfacc3a5504aa76163912e9
7,588
py
Python
client/python/modeldb/tests/utils.py
miyamotok0105/modeldb
6b2b7fb598d90be733e4b68efae3165c24efe2d1
[ "MIT" ]
1
2018-08-23T01:15:43.000Z
2018-08-23T01:15:43.000Z
client/python/modeldb/tests/utils.py
miyamotok0105/modeldb
6b2b7fb598d90be733e4b68efae3165c24efe2d1
[ "MIT" ]
1
2018-08-20T17:37:22.000Z
2018-08-20T17:37:22.000Z
client/python/modeldb/tests/utils.py
miyamotok0105/modeldb
6b2b7fb598d90be733e4b68efae3165c24efe2d1
[ "MIT" ]
null
null
null
# Shared assertion helpers for the ModelDB client test suite.
#
# ``validate_*`` functions assert that a struct exposes the expected
# attributes; ``is_equal_*`` functions assert two structs match field by
# field.  ``tester`` is always the calling unittest.TestCase, whose
# assert* methods are used for reporting.


def _assert_has_attrs(tester, obj, attr_names):
    # Helper: assert that obj exposes every attribute named in attr_names.
    for attr_name in attr_names:
        tester.assertTrue(hasattr(obj, attr_name))


def validate_fit_event_struct(fitEvent, tester):
    # Structural check for a FitEvent.
    _assert_has_attrs(tester, fitEvent, [
        'df', 'spec', 'model', 'featureColumns', 'predictionColumns',
        'labelColumns', 'experimentRunId'])
    # Bug fix: the original ``assertTrue(type(...), 'int')`` always passed,
    # because assertTrue's second argument is only a failure message.
    tester.assertIsInstance(fitEvent.experimentRunId, int)


def validate_project_struct(project, tester):
    _assert_has_attrs(tester, project, ['id', 'name', 'author', 'description'])


def validate_experiment_struct(experiment, tester):
    _assert_has_attrs(tester, experiment, [
        'projectId', 'description', 'id', 'isDefault', 'name'])


def validate_experiment_run_struct(experiment_run, tester):
    _assert_has_attrs(tester, experiment_run, [
        'id', 'experimentId', 'description'])


def validate_transformer_spec_struct(spec, tester):
    _assert_has_attrs(tester, spec, [
        'id', 'transformerType', 'hyperparameters', 'tag'])


def validate_transform_event_struct(transformEvent, tester):
    _assert_has_attrs(tester, transformEvent, [
        'oldDataFrame', 'newDataFrame', 'transformer', 'inputColumns',
        'outputColumns', 'experimentRunId'])
    # Same assertTrue-misuse fix as in validate_fit_event_struct.
    tester.assertIsInstance(transformEvent.experimentRunId, int)


def validate_dataframe_struct(dataframe, tester):
    _assert_has_attrs(tester, dataframe, ['numRows', 'tag', 'id', 'schema'])


def validate_transformer_struct(transformer, tester):
    _assert_has_attrs(tester, transformer, ['id', 'transformerType', 'tag'])


def validate_pipeline_event_struct(pipelineEvent, tester):
    _assert_has_attrs(tester, pipelineEvent, [
        'pipelineFit', 'transformStages', 'fitStages', 'experimentRunId'])


def validate_pipeline_fit_stages(fitStages, tester):
    # Stages must be numbered 0..n-1 and each carry a valid FitEvent.
    for count, stage in enumerate(fitStages):
        _assert_has_attrs(tester, stage, ['fe', 'stageNumber'])
        tester.assertEqual(stage.stageNumber, count)
        validate_fit_event_struct(stage.fe, tester)


def validate_pipeline_transform_stages(transformStages, tester):
    # Stages must be numbered 0..n-1 and each carry a valid TransformEvent.
    for count, stage in enumerate(transformStages):
        _assert_has_attrs(tester, stage, ['te', 'stageNumber'])
        tester.assertEqual(stage.stageNumber, count)
        validate_transform_event_struct(stage.te, tester)


def validate_random_split_event_struct(random_splitEvent, tester):
    _assert_has_attrs(tester, random_splitEvent, [
        'oldDataFrame', 'weights', 'seed', 'splitDataFrames',
        'experimentRunId'])


def validate_metric_event_struct(metric_event, tester):
    _assert_has_attrs(tester, metric_event, [
        'df', 'model', 'metricType', 'metricValue', 'labelCol',
        'predictionCol', 'experimentRunId'])


def validate_grid_search_cv_event(gridcvEvent, tester):
    _assert_has_attrs(tester, gridcvEvent, [
        'numFolds', 'bestFit', 'crossValidations', 'experimentRunId'])


def validate_cross_validate_event(cvEvent, tester):
    _assert_has_attrs(tester, cvEvent, [
        'df', 'spec', 'seed', 'evaluator', 'labelColumns',
        'predictionColumns', 'featureColumns', 'folds', 'experimentRunId'])


def validate_cross_validation_fold(cvFold, tester):
    _assert_has_attrs(tester, cvFold, [
        'model', 'validationDf', 'trainingDf', 'score'])


def is_equal_dataframe(dataframe1, dataframe2, tester):
    # Field-by-field dataframe comparison, including the column schema.
    tester.assertEqual(dataframe1.numRows, dataframe2.numRows)
    tester.assertEqual(dataframe1.tag, dataframe2.tag)
    tester.assertEqual(dataframe1.id, dataframe2.id)
    tester.assertEqual(len(dataframe1.schema), len(dataframe2.schema))
    # Lengths are equal (asserted above), so zip covers every column pair.
    for column1, column2 in zip(dataframe1.schema, dataframe2.schema):
        tester.assertEqual(column1.name, column2.name)
        tester.assertEqual(column1.type, column2.type)


def is_equal_transformer_spec(spec1, spec2, tester):
    tester.assertEqual(spec1.id, spec2.id)
    tester.assertEqual(spec1.transformerType, spec2.transformerType)
    tester.assertEqual(spec1.tag, spec2.tag)
    tester.assertEqual(len(spec1.hyperparameters), len(spec2.hyperparameters))
    # Order-insensitive: each hyperparameter must appear in the other spec.
    for hyperparameter in spec1.hyperparameters:
        tester.assertTrue(hyperparameter in spec2.hyperparameters)


def is_equal_transformer(model1, model2, tester):
    tester.assertEqual(model1.id, model2.id)
    tester.assertEqual(model1.transformerType, model2.transformerType)
    tester.assertEqual(model1.tag, model2.tag)


def is_equal_project(project1, project2, tester):
    tester.assertEqual(project1.id, project2.id)
    tester.assertEqual(project1.name, project2.name)
    tester.assertEqual(project1.author, project2.author)
    tester.assertEqual(project1.description, project2.description)


def is_equal_experiment(experiment1, experiment2, tester):
    tester.assertEqual(experiment1.id, experiment2.id)
    tester.assertEqual(experiment1.projectId, experiment2.projectId)
    tester.assertEqual(experiment1.name, experiment2.name)
    tester.assertEqual(experiment1.description, experiment2.description)
    tester.assertEqual(experiment1.isDefault, experiment2.isDefault)


def is_equal_experiment_run(expRun1, expRun2, tester):
    tester.assertEqual(expRun1.id, expRun2.id)
    tester.assertEqual(expRun1.experimentId, expRun2.experimentId)
    tester.assertEqual(expRun1.description, expRun2.description)
41.23913
78
0.758171
775
7,588
7.314839
0.132903
0.2145
0.296172
0.071618
0.172694
0.037749
0.029988
0.029988
0.029988
0.029988
0
0.010503
0.121639
7,588
183
79
41.464481
0.84006
0.001581
0
0.072993
0
0
0.087008
0
0
0
0
0
0.751825
1
0.160584
false
0
0
0
0.160584
0
0
0
0
null
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
1
0
0
0
0
0
0
0
0
0
3
e1d712ea1cef3a4765c0a11f911c5e58c8c76619
552
py
Python
src/core/engine/pipeline_control.py
KorawichSCG/data-engine
2a76679f113b525d7205812ee02c6b432082aa83
[ "MIT" ]
null
null
null
src/core/engine/pipeline_control.py
KorawichSCG/data-engine
2a76679f113b525d7205812ee02c6b432082aa83
[ "MIT" ]
null
null
null
src/core/engine/pipeline_control.py
KorawichSCG/data-engine
2a76679f113b525d7205812ee02c6b432082aa83
[ "MIT" ]
null
null
null
from typing import Any, Dict, List


class Node:
    """A named pipeline node together with its process mapping."""

    def __init__(self, ps_node_name: str, ps_node_processes: Dict[str, Any]):
        # Keep both the display name and the raw process mapping.
        self.ps_node_name = ps_node_name
        self.ps_node_processes = ps_node_processes

    @property
    def processes(self) -> List:
        """Names of this node's processes, in mapping order."""
        return [process_name for process_name in self.ps_node_processes]

    def __str__(self):
        return f'{type(self).__name__}({self.ps_node_name})'


class Pipeline:
    """
    config
    ------
    <pipeline-name>:
        type: datasets.pipelines.Pipeline
        nodes:
    """
23
77
0.625
69
552
4.536232
0.362319
0.153355
0.159744
0.134185
0
0
0
0
0
0
0
0
0.259058
552
24
78
23
0.765281
0.164855
0
0
0
0
0.106977
0.106977
0
0
0
0
0
1
0.272727
false
0
0.090909
0.181818
0.727273
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
1
0
0
0
1
1
0
0
3
c00d09934f69e606b222076dd823ed1ae695a4af
306
py
Python
mishapp_api/default_config.py
gudeg-united/mishapp-api-hackathon
282c59173b41fdc24959f38a6b1699756ecf2780
[ "BSD-3-Clause" ]
null
null
null
mishapp_api/default_config.py
gudeg-united/mishapp-api-hackathon
282c59173b41fdc24959f38a6b1699756ecf2780
[ "BSD-3-Clause" ]
1
2015-01-30T23:40:10.000Z
2015-01-31T03:52:51.000Z
mishapp_api/default_config.py
gudeg-united/mishapp-api-hackathon
282c59173b41fdc24959f38a6b1699756ecf2780
[ "BSD-3-Clause" ]
null
null
null
"""Default configuration values for the mishapp API."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals

# MongoDB connection settings (username/password are empty placeholders).
MONGODB_DB = "mishapp"
MONGODB_HOST = "localhost"
MONGODB_PORT = 27017
MONGODB_USERNAME = ""
MONGODB_PASSWORD = ""

# PubNub API keys (empty placeholders).
PUBNUB_PUB_KEY = ""
PUBNUB_SUB_KEY = ""
21.857143
39
0.810458
38
306
5.789474
0.578947
0.181818
0.290909
0
0
0
0
0
0
0
0
0.018727
0.127451
306
13
40
23.538462
0.805243
0
0
0
0
0
0.052288
0
0
0
0
0
0
1
0
false
0.090909
0.363636
0
0.363636
0.090909
0
0
0
null
0
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
1
1
0
0
0
0
3
c04585e4b1af1366e6e02e09c081b0d1c253a4fc
85
py
Python
src/systa/experimental/decorators/__init__.py
ipronald/systa
0c46c99b9d04cea23d32ec34dd3f8ddadd320625
[ "MIT" ]
9
2021-07-21T01:08:54.000Z
2022-02-21T01:34:31.000Z
src/systa/experimental/decorators/__init__.py
ipronald/systa
0c46c99b9d04cea23d32ec34dd3f8ddadd320625
[ "MIT" ]
28
2019-04-11T14:21:46.000Z
2021-08-09T19:10:02.000Z
src/systa/experimental/decorators/__init__.py
ipronald/systa
0c46c99b9d04cea23d32ec34dd3f8ddadd320625
[ "MIT" ]
1
2021-07-21T15:24:26.000Z
2021-07-21T15:24:26.000Z
from collections import defaultdict

# Experimental decorator bookkeeping, populated by code elsewhere in the
# package (not visible from this module).
# func_stack: mapping used as a registry of decorated functions; exact
# key/value semantics are not shown here — TODO confirm against callers.
func_stack = {}
# func_ranges: groups values per key; defaultdict(list) means the first
# access to a new key silently creates an empty bucket.
func_ranges = defaultdict(list)
17
35
0.8
10
85
6.6
0.8
0
0
0
0
0
0
0
0
0
0
0
0.129412
85
4
36
21.25
0.891892
0
0
0
0
0
0
0
0
0
0
0
0
1
0
false
0
0.333333
0
0.333333
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
1
0
0
0
0
3
c0494f81b4db3ac91785fad6c0b44f8e62c5b90e
918
py
Python
terminus/builders/abstract_city_builder.py
ekumenlabs/terminus
ce6dcdc797011155e8fd52d40d910bdaf9bfe397
[ "Apache-2.0" ]
52
2016-10-12T17:54:10.000Z
2021-09-29T04:06:24.000Z
terminus/builders/abstract_city_builder.py
ekumenlabs/terminus
ce6dcdc797011155e8fd52d40d910bdaf9bfe397
[ "Apache-2.0" ]
216
2016-09-14T18:51:36.000Z
2019-01-31T05:57:57.000Z
terminus/builders/abstract_city_builder.py
ekumenlabs/terminus
ce6dcdc797011155e8fd52d40d910bdaf9bfe397
[ "Apache-2.0" ]
28
2016-09-21T17:55:23.000Z
2021-09-29T04:06:25.000Z
""" Copyright (C) 2017 Open Source Robotics Foundation Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. """ class AbstractCityBuilder(object): def name(self): return self.__class__.__name__ def get_city(self): city = self._buid_city() city.put_metadata('builder', self) return city def required_licence(self): return None def _buid_city(self): raise NotImplementedError()
27.818182
72
0.729847
130
918
5.038462
0.638462
0.091603
0.039695
0.048855
0
0
0
0
0
0
0
0.010944
0.203704
918
32
73
28.6875
0.885089
0.626362
0
0
0
0
0.020896
0
0
0
0
0
0
1
0.363636
false
0
0
0.181818
0.727273
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
1
0
0
0
1
1
0
0
3
c05d1a8a0b43b5a0cab74f8a8db1a85dde6fc669
196
py
Python
src/sampler/__init__.py
tristanbrown/sampler
063c4d69698d05798dce9e37ac6463bf2d227023
[ "MIT" ]
null
null
null
src/sampler/__init__.py
tristanbrown/sampler
063c4d69698d05798dce9e37ac6463bf2d227023
[ "MIT" ]
null
null
null
src/sampler/__init__.py
tristanbrown/sampler
063c4d69698d05798dce9e37ac6463bf2d227023
[ "MIT" ]
null
null
null
"""Sampler package.""" import os from .constraints import Constraint from .gibbs import Gibbs example_dir = os.path.abspath( os.path.join(os.path.dirname(__file__), "../../tests/examples"))
21.777778
68
0.719388
26
196
5.230769
0.653846
0.132353
0
0
0
0
0
0
0
0
0
0
0.117347
196
8
69
24.5
0.786127
0.081633
0
0
0
0
0.114943
0
0
0
0
0
0
1
0
false
0
0.6
0
0.6
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
1
0
0
0
0
3
fbe53c234b149c2d3910c8b1621e3da7dbc8d1ae
923
py
Python
olivertwist/rules/no_orphaned_models.py
octoenergy/oliver-twist
7496208d9de4c21cd9e0d553f24bf07612ddc720
[ "Apache-2.0" ]
37
2020-12-17T13:32:12.000Z
2022-03-16T07:19:56.000Z
olivertwist/rules/no_orphaned_models.py
Norina-Sun/oliver-twist
5bb9b2cddc097d89d4a3eff78c63036682dd19f8
[ "Apache-2.0" ]
28
2020-12-17T16:20:14.000Z
2022-01-21T09:00:15.000Z
olivertwist/rules/no_orphaned_models.py
octoenergy/oliver-twist
7496208d9de4c21cd9e0d553f24bf07612ddc720
[ "Apache-2.0" ]
2
2021-08-09T17:07:23.000Z
2021-11-05T14:37:18.000Z
# -*- coding: utf-8 -*- """Models should have resolvable dependencies. Copyright (C) 2020, Auto Trader UK Created 16. Dec 2020 12:53 """ from typing import List, Tuple from olivertwist.manifest import Manifest, Node from olivertwist.ruleengine.rule import rule from olivertwist.rules.utils import partition @rule(id="no-orphaned-models", name="No orphaned models allowed") def no_orphaned_models(manifest: Manifest) -> Tuple[List[Node], List[Node]]: """ return [ node for node in dbt_manifest_file['nodes'].values() if is_staging(node) or is_mart(node) if not node['depends_on']['nodes'] ] """ def is_orphan(node: Node) -> bool: dependencies = list(manifest.graph.predecessors(node.id)) return (node.is_staging or node.is_mart) and len(dependencies) < 1 passes, failures = partition(is_orphan, manifest.nodes()) return list(passes), list(failures)
29.774194
76
0.695558
125
923
5.048
0.496
0.071315
0.07607
0
0
0
0
0
0
0
0
0.021164
0.180932
923
30
77
30.766667
0.813492
0.300108
0
0
0
0
0.072488
0
0
0
0
0
0
1
0.181818
false
0.181818
0.363636
0
0.727273
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
1
1
0
1
0
0
3
fbe6803d878ac6edd9f87270f44065dfb7b265d4
2,996
py
Python
axelrod/tests/unit/test_backstabber.py
lipingzhu/Zero-determinant
6e30aa72358d5dfc3975abe433d0d13cc3a750a1
[ "MIT" ]
null
null
null
axelrod/tests/unit/test_backstabber.py
lipingzhu/Zero-determinant
6e30aa72358d5dfc3975abe433d0d13cc3a750a1
[ "MIT" ]
null
null
null
axelrod/tests/unit/test_backstabber.py
lipingzhu/Zero-determinant
6e30aa72358d5dfc3975abe433d0d13cc3a750a1
[ "MIT" ]
null
null
null
import axelrod

from .test_player import TestPlayer

# Shorthand for the two possible actions: Cooperate and Defect.
C, D = axelrod.Actions.C, axelrod.Actions.D


class TestBackStabber(TestPlayer):
    # Tests for the BackStabber strategy.

    name = "BackStabber"
    player = axelrod.BackStabber
    expected_classifier = {
        'memory_depth': float('inf'),
        'stochastic': False,
        'makes_use_of': set(['length']),
        'inspects_source': False,
        'manipulates_source': False,
        'manipulates_state': False
    }

    def test_strategy(self):
        """
        Forgives the first 3 defections but on the fourth
        will defect forever. Defects after the 198th round unconditionally.
        """
        self.first_play_test(C)
        # Forgives three defections
        self.responses_test([C], [D], [C], tournament_length=200)
        self.responses_test([C, C], [D, D], [C], tournament_length=200)
        self.responses_test([C, C, C], [D, D, D], [C], tournament_length=200)
        self.responses_test([C, C, C, C], [D, D, D, D], [D],
                            tournament_length=200)
        # Defects on rounds 199, and 200 no matter what
        self.responses_test([C] * 197, [C] * 197, [C, D, D],
                            tournament_length=200)
        self.responses_test([C] * 198, [C] * 198, [D, D, D],
                            tournament_length=200)
        # But only if the tournament is known
        # (tournament_length=-1 marks the length as unknown to the player).
        self.responses_test([C] * 198, [C] * 198, [C, C, C],
                            tournament_length=-1)


class TestDoubleCrosser(TestPlayer):
    # Tests for the DoubleCrosser strategy.

    name = "DoubleCrosser"
    player = axelrod.DoubleCrosser
    expected_classifier = {
        'memory_depth': float('inf'),
        'stochastic': False,
        'makes_use_of': set(['length']),
        'inspects_source': False,
        'manipulates_source': False,
        'manipulates_state': False
    }

    def test_strategy(self):
        """
        Forgives the first 3 defections but on the fourth
        will defect forever. If the opponent did not defect
        in the first 6 rounds the player will cooperate until
        the 180th round. Defects after the 198th round unconditionally.
        """
        self.first_play_test(C)
        # Forgives three defections
        self.responses_test([C], [D], [C], tournament_length=200)
        self.responses_test([C, C], [D, D], [C], tournament_length=200)
        self.responses_test([C, C, C], [D, D, D], [C], tournament_length=200)
        self.responses_test([C, C, C, C], [D, D, D, D], [D],
                            tournament_length=200)
        # If opponent did not defect in the first six rounds, cooperate until
        # round 180
        self.responses_test([C] * 6, [C] * 6, [C] * 174,
                            tournament_length=200)
        self.responses_test([C] * 12, [C] * 6 + [D] + [C] * 5, [C] * 160,
                            tournament_length=200)
        # Defects on rounds 199, and 200 no matter what
        self.responses_test([C] * 198, [C] * 198, [D, D, D],
                            tournament_length=200)
35.247059
79
0.570761
368
2,996
4.513587
0.225543
0.022878
0.143287
0.151716
0.760987
0.759783
0.759783
0.674895
0.674895
0.674895
0
0.049495
0.305407
2,996
84
80
35.666667
0.748679
0.198932
0
0.68
0
0
0.090752
0
0
0
0
0
0
1
0.04
false
0
0.04
0
0.24
0
0
0
0
null
0
0
0
0
1
1
0
0
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
3
2210aad9eb79f1baba29974c71cb3685a956bd65
213
py
Python
2022/Design-Patters-2/Factory/connection_factory.py
Muramatsu2602/python-study
c81eb5d2c343817bc29b2763dcdcabed0f6a42c6
[ "MIT" ]
null
null
null
2022/Design-Patters-2/Factory/connection_factory.py
Muramatsu2602/python-study
c81eb5d2c343817bc29b2763dcdcabed0f6a42c6
[ "MIT" ]
null
null
null
2022/Design-Patters-2/Factory/connection_factory.py
Muramatsu2602/python-study
c81eb5d2c343817bc29b2763dcdcabed0f6a42c6
[ "MIT" ]
null
null
null
import MySQLdb


class Connection_factory(object):
    """Factory producing MySQL connections to the local 'alura' database."""

    def get_connection(self):
        # Connection parameters are hard-coded for the local dev setup.
        return MySQLdb.connect(
            host="localhost",
            user="root",
            passwd='',
            db='alura')
19.363636
65
0.643192
22
213
6.136364
0.818182
0
0
0
0
0
0
0
0
0
0
0
0.239437
213
10
66
21.3
0.833333
0
0
0
0
0
0.084507
0
0
0
0
0
0
1
0.166667
false
0.166667
0.166667
0
0.666667
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
1
0
0
1
0
0
3
222bc91f83b2cc7ba1d37ad3fb6f6abbcf8920e5
2,149
py
Python
src/sql/ast/crate.py
chicco785/ngsi-timeseries-api
6f8ca2e277960bc9549e3fd79350732e459a345f
[ "MIT" ]
25
2017-07-14T15:16:10.000Z
2020-10-23T19:04:11.000Z
src/sql/ast/crate.py
chicco785/ngsi-timeseries-api
6f8ca2e277960bc9549e3fd79350732e459a345f
[ "MIT" ]
346
2017-07-14T09:13:29.000Z
2021-02-02T11:54:03.000Z
src/sql/ast/crate.py
chicco785/ngsi-timeseries-api
6f8ca2e277960bc9549e3fd79350732e459a345f
[ "MIT" ]
76
2017-08-24T12:11:47.000Z
2021-02-02T05:28:43.000Z
from enum import Enum from geocoding.slf import SlfGeometry, SlfPoint, encode_as_wkt from .terms import * class GeoMatchType(Enum): DISJOINT = 'disjoint' INTERSECTS = 'intersects' WITHIN = 'within' def geo_shape_term(geometry: SlfGeometry) -> str: geo_shape = encode_as_wkt(geometry) return lit(geo_shape).eval() class GeoMatchTerm(Term): def __init__(self, column_name: str, match_type: GeoMatchType, geometry: SlfGeometry): self.column_name = column_name self.match_type = match_type self.geometry = geometry def eval(self): return 'match ({}, {}) using {}'.format( self.column_name, geo_shape_term(self.geometry), self.match_type.value ) class GeoEqualTerm(Term): def __init__(self, column_name: str, geometry: SlfGeometry): self.column_name = column_name self.geometry = geometry def eval(self): geo_shape = geo_shape_term(self.geometry) return 'match ({}, {}) using {} and within({}, {})'.format( self.column_name, geo_shape, GeoMatchType.WITHIN.value, geo_shape, self.column_name ) class GeoDistanceTerm(Term): def __init__(self, column_name: str, point_from: SlfPoint): self.column_name = column_name self.point_from = point_from def eval(self): return 'distance({}, {})'.format( self.column_name, geo_shape_term(self.point_from)) def intersects(column: str, geometry: SlfGeometry) -> GeoMatchTerm: return GeoMatchTerm(column, GeoMatchType.INTERSECTS, geometry) def disjoint(column: str, geometry: SlfGeometry) -> GeoMatchTerm: return GeoMatchTerm(column, GeoMatchType.DISJOINT, geometry) def within(column: str, geometry: SlfGeometry) -> GeoMatchTerm: return GeoMatchTerm(column, GeoMatchType.WITHIN, geometry) def distance(column: str, point_from: SlfPoint): return GeoDistanceTerm(column, point_from) def equals(column: str, geometry: SlfGeometry) -> GeoEqualTerm: return GeoEqualTerm(column, geometry)
26.8625
67
0.662634
236
2,149
5.813559
0.173729
0.094752
0.102041
0.081633
0.454811
0.431487
0.348397
0.287172
0.166181
0
0
0
0.236389
2,149
79
68
27.202532
0.836076
0
0
0.226415
0
0
0.04886
0
0
0
0
0
0
1
0.226415
false
0
0.056604
0.132075
0.584906
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
1
0
0
0
1
1
0
0
3
222dc6c0a7b30c650fe23ae86cbbdbc32be34838
11,963
py
Python
PupilDilationAnalyzer.py
peitek/eyetracking-analyses
3258b8bc7ff4bd8ca952ade900dc83cf06add170
[ "MIT" ]
null
null
null
PupilDilationAnalyzer.py
peitek/eyetracking-analyses
3258b8bc7ff4bd8ca952ade900dc83cf06add170
[ "MIT" ]
null
null
null
PupilDilationAnalyzer.py
peitek/eyetracking-analyses
3258b8bc7ff4bd8ca952ade900dc83cf06add170
[ "MIT" ]
null
null
null
from os.path import join import math import numpy import _pickle as pickle from config import PATH_DATA_PREPROCESSED, PATH_DATA_OUTPUT, PARTICIPANTS # TODO remove required response log file # TODO remove required physio log file # TODO generalize code better (e.g., conditions) # TODO clean up code # TODO comment/document functions def analyze_pupil_dilation_movement_for_all_participants(participants): average_dilation_gaze_pos_over_time = [] for participant in participants: pupil_dilation_over_time = {} pupil_dilation_over_time = analyze_each_frame_movement(participants, participant, pupil_dilation_over_time) print("\n#### ANALYSIS FOR PARTICIPANT") for timestamp in sorted(pupil_dilation_over_time): data = pupil_dilation_over_time[timestamp] average_dilation_gaze_pos_over_time.append({ "participant": participant, "timestamp": timestamp, "average_dilation": math.floor(numpy.mean([item['pupil_dilation'] for item in data])), "gaze_x": math.floor(numpy.mean([item['gaze_x'] for item in data])), "gaze_y": math.floor(numpy.mean([item['gaze_y'] for item in data])), }) # write objects to file as giant csv output_file_path = join(PATH_DATA_OUTPUT, "pupil_dilation_gaze_pos_time_sec.csv") with open(output_file_path, 'w') as output_file: file_write = output_file.write file_write("time;gaze_x;gaze_y;pupil_dilation;subject\n") for line in average_dilation_gaze_pos_over_time: file_write(str(line["timestamp"]) + ";" + str(line["gaze_x"]) + ";"+ str(line["gaze_y"]) + ";"+ str(line["average_dilation"]) + ";" + line["participant"] + "\n") def analyze_pupil_dilation_for_all_participants(participants): average_dilation_per_condition = [] average_dilation_per_snippet_time_participant = [] average_dilation_per_snippet_time = [] average_dilation_per_snippet_raw = {} average_dilation_per_snippet_processed = [] average_dilation_over_time = [] pupil_dilation_per_snippet = {} for participant in participants: pupil_dilation_per_condition = {} pupil_dilation_per_snippet_participant = {} 
pupil_dilation_over_time = {} [pupil_dilation_over_time, pupil_dilation_per_condition, pupil_dilation_per_snippet_participant, pupil_dilation_per_snippet] = analyze_each_frame(participants, participant, pupil_dilation_over_time, pupil_dilation_per_condition, pupil_dilation_per_snippet_participant, pupil_dilation_per_snippet) print("\n#### ANALYSIS FOR PARTICIPANT") for timestamp in sorted(pupil_dilation_over_time): data = pupil_dilation_over_time[timestamp] average_dilation_over_time.append({ "participant": participant, "timestamp": timestamp, "average_dilation": math.floor(numpy.mean(data)) }) for condition in sorted(pupil_dilation_per_condition): for timestamp in sorted(pupil_dilation_per_condition[condition]): data = pupil_dilation_per_condition[condition][timestamp] average_dilation_per_condition.append({ "participant": participant, "condition": condition, "timestamp": timestamp, "average_dilation": math.floor(numpy.mean(data)) }) for condition in sorted(pupil_dilation_per_snippet_participant): for snippet in sorted(pupil_dilation_per_snippet_participant[condition]): for timestamp in sorted(pupil_dilation_per_snippet_participant[condition][snippet]): data = pupil_dilation_per_snippet_participant[condition][snippet][timestamp] average_dilation_per_snippet_time_participant.append({ "participant": participant, "condition": condition, "snippet": snippet, "timestamp": timestamp, "average_dilation": math.floor(numpy.mean(data)) }) for condition in sorted(pupil_dilation_per_snippet): for snippet in sorted(pupil_dilation_per_snippet[condition]): for timestamp in sorted(pupil_dilation_per_snippet[condition][snippet]): data = pupil_dilation_per_snippet[condition][snippet][timestamp] average = math.floor(numpy.mean(data)) average_dilation_per_snippet_time.append({ "condition": condition, "snippet": snippet, "timestamp": timestamp, "average_dilation": average }) if snippet not in average_dilation_per_snippet_raw: average_dilation_per_snippet_raw[snippet] = [] 
average_dilation_per_snippet_raw[snippet].append({ "condition": condition, "snippet": snippet, "average_dilation": average }) for condition in sorted(pupil_dilation_per_snippet): for snippet in sorted(pupil_dilation_per_snippet[condition]): for timestamp in sorted(pupil_dilation_per_snippet[condition][snippet]): data = pupil_dilation_per_snippet[condition][snippet][timestamp] average = math.floor(numpy.mean(data)) average_dilation_per_snippet_processed.append({ "condition": condition, "snippet": snippet, "timestamp": timestamp, "average_dilation": average }) print("\n#### WRITE RESULTS TO CSV") write_dilation_over_time(average_dilation_over_time) write_dilation_per_condition(average_dilation_per_condition) write_dilation_per_snippet_participant(average_dilation_per_snippet_time_participant) write_dilation_per_snippet(average_dilation_per_snippet_time) print("-> saving file: done!") def write_dilation_over_time(average_dilation_over_time): # write objects to file as giant csv output_file_path = join(PATH_DATA_OUTPUT, "pupil_dilation_time_sec.csv") with open(output_file_path, 'w') as output_file: file_write = output_file.write file_write("time;pupil_dilation;subject\n") for line in average_dilation_over_time: file_write(str(line["timestamp"]) + ";" + str(line["average_dilation"]) + ";" + line["participant"] + "\n") def write_dilation_per_condition(average_dilation_per_condition): # write objects to file as giant csv output_file_path = join(PATH_DATA_OUTPUT, "pupil_dilation_condition.csv") with open(output_file_path, 'w') as output_file: file_write = output_file.write file_write("condition;time;pupil_dilation;subject\n") for line in average_dilation_per_condition: file_write(line["condition"] + ";" + str(line["timestamp"]) + ";" + str(line["average_dilation"]) + ";" + line["participant"] + "\n") def write_dilation_per_snippet(average_dilation_per_snippet): # write objects to file as giant csv output_file_path = join(PATH_DATA_OUTPUT, "pupil_dilation_snippet.csv") with 
open(output_file_path, 'w') as output_file: file_write = output_file.write file_write("condition;snippet;time;pupil_dilation;subject\n") for line in average_dilation_per_snippet: file_write(line["condition"] + ";" + line["snippet"] + ";" + str(line["timestamp"]) + ";" + str(line["average_dilation"]) + ";average\n") def write_dilation_per_snippet_participant(average_dilation_per_snippet_participant): # write objects to file as giant csv output_file_path = join(PATH_DATA_OUTPUT, "pupil_dilation_snippet_participant.csv") with open(output_file_path, 'w') as output_file: file_write = output_file.write file_write("condition;snippet;time;pupil_dilation;subject\n") for line in average_dilation_per_snippet_participant: file_write(line["condition"] + ";" + line["snippet"] + ";" + str(line["timestamp"]) + ";" + str(line["average_dilation"]) + ";" + line["participant"] + "\n") def analyze_each_frame_movement(participants, participant_id, pupil_dilation_over_time): with open(join(PATH_DATA_PREPROCESSED, "full", participant_id + "_pupil_data_raw.pkl"), 'rb') as input: eyetracking_data = pickle.load(input) print("\n## " + participant_id) for et_frame in eyetracking_data: # over time grouped_time = math.floor(et_frame["timestamp"]/100) if grouped_time not in pupil_dilation_over_time: pupil_dilation_over_time[grouped_time] = [] pupil_dilation_over_time[grouped_time].append({ "pupil_dilation": math.floor(float(et_frame["pupil_dilation"])), "gaze_x": math.floor(float(et_frame["gaze_x"])), "gaze_y": math.floor(float(et_frame["gaze_y"])), }) return pupil_dilation_over_time def analyze_each_frame(participants, participant_id, pupil_dilation_over_time, pupil_dilation_per_condition, pupil_dilation_per_snippet_participant, pupil_dilation_per_snippet): with open(join(PATH_DATA_PREPROCESSED, "full", participant_id + "_pupil_data_raw.pkl"), 'rb') as input: eyetracking_data = pickle.load(input) print("\n## " + participant_id) trial_category = None for et_frame in eyetracking_data: #print("-> 
found frame for " + str(et_frame["snippet"]) + " after " + str(et_frame["frames"]) + " frames, pupil dilation: " + et_frame["pupil_dilation"]) if trial_category != et_frame["trial_category"]: #print("switched to " + et_frame["trial_category"] + " after " + str(et_frame["timestamp"])) trial_category = et_frame["trial_category"] # per condition grouped_frame_10 = math.floor(et_frame["frames"]/10) if trial_category not in pupil_dilation_per_condition: pupil_dilation_per_condition[trial_category] = {} if grouped_frame_10 not in pupil_dilation_per_condition[trial_category]: pupil_dilation_per_condition[trial_category][grouped_frame_10] = [] pupil_dilation_per_condition[trial_category][grouped_frame_10].append(math.floor(float(et_frame["pupil_dilation"]))) # per snippet set_snippet_to_data(et_frame, pupil_dilation_per_snippet_participant, trial_category) set_snippet_to_data(et_frame, pupil_dilation_per_snippet, trial_category) # over time grouped_time = math.floor(et_frame["timestamp"]/100) if grouped_time not in pupil_dilation_over_time: pupil_dilation_over_time[grouped_time] = [] pupil_dilation_over_time[grouped_time].append(math.floor(float(et_frame["pupil_dilation"]))) return [pupil_dilation_over_time, pupil_dilation_per_condition, pupil_dilation_per_snippet_participant, pupil_dilation_per_snippet] def set_snippet_to_data(et_frame, pupil_dilation_dict, trial_category): grouped_frame_100 = math.floor(et_frame["frames"] / 100) if trial_category not in pupil_dilation_dict: pupil_dilation_dict[trial_category] = {} snippet = et_frame["snippet"] if snippet not in pupil_dilation_dict[trial_category]: pupil_dilation_dict[trial_category][snippet] = {} if grouped_frame_100 not in pupil_dilation_dict[trial_category][snippet]: pupil_dilation_dict[trial_category][snippet][grouped_frame_100] = [] pupil_dilation_dict[trial_category][snippet][grouped_frame_100].append(math.floor(float(et_frame["pupil_dilation"]))) if __name__ == "__main__": 
analyze_pupil_dilation_for_all_participants(PARTICIPANTS)
46.368217
304
0.679177
1,399
11,963
5.396712
0.075054
0.148079
0.107285
0.076159
0.884503
0.829536
0.717483
0.658543
0.601722
0.49351
0
0.003344
0.225111
11,963
257
305
46.548638
0.811111
0.053248
0
0.448864
0
0
0.113096
0.031833
0
0
0
0.003891
0
1
0.051136
false
0
0.028409
0
0.090909
0.034091
0
0
0
null
0
0
0
1
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
3
223e3392d634ac4989017aa2a17ebd531e74d154
953
py
Python
ida_plugins/ui/ida_70/sample_plugin_dialog.py
CrackerCat/reverse_engineering_tools
393a26246a4c6150bce9c4a8c80c338d1653058a
[ "MIT" ]
69
2020-08-19T15:10:30.000Z
2022-03-17T08:41:36.000Z
ida_plugins/ui/ida_70/sample_plugin_dialog.py
CrackerCat/reverse_engineering_tools
393a26246a4c6150bce9c4a8c80c338d1653058a
[ "MIT" ]
null
null
null
ida_plugins/ui/ida_70/sample_plugin_dialog.py
CrackerCat/reverse_engineering_tools
393a26246a4c6150bce9c4a8c80c338d1653058a
[ "MIT" ]
15
2020-01-20T04:47:17.000Z
2022-02-25T06:32:01.000Z
""" Sample dialog box UI for IDA 7.0 plugins which lets the user choose between several options """ from idaapi import * def dummy1(): print('inside dummy1\n') def dummy2(): print('inside dummy2\n') options = [ ('First option', dummy1), ('Second option', dummy2), ] def main(): title = 'My title' class MainMenu(Choose): def __init__(self): Choose.__init__(self, title=title, cols=[["Option", 10]], flags=0x11) def OnClose(self): pass def OnGetLine(self, n): return [options[n][0]] def OnGetSize(self): return len(options) def OnSelectLine(self, n): f = options[n][1] f() pass menu = MainMenu() menu.Show(modal=True) class MyPlugin(plugin_t): flags = PLUGIN_FIX comment = 'My Comment' help = 'My Help' wanted_name = 'My Plugin Name' wanted_hotkey = 'Alt-1' def init(self): return PLUGIN_KEEP def run(self, arg): main() def term(self): pass def PLUGIN_ENTRY(): return MyPlugin()
14.661538
91
0.656873
136
953
4.5
0.5
0.039216
0.035948
0
0
0
0
0
0
0
0
0.02097
0.19937
953
64
92
14.890625
0.781127
0.095488
0
0.075
0
0
0.122951
0
0
0
0.004684
0
0
1
0.3
false
0.075
0.025
0.1
0.6
0.05
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
1
0
1
0
0
1
0
0
3
2262370847bb13eeb66c044712636a9b4fa8dab3
65
py
Python
sqlfocus/__init__.py
XuHg-zjcn/sqlfocus
9d3bacbe82564f7e8414cf252c45f9cc4b9b91d4
[ "MIT" ]
1
2021-03-18T23:05:21.000Z
2021-03-18T23:05:21.000Z
sqlfocus/__init__.py
XuHg-zjcn/sqlfocus
9d3bacbe82564f7e8414cf252c45f9cc4b9b91d4
[ "MIT" ]
1
2021-11-02T18:43:45.000Z
2021-11-02T18:43:45.000Z
sqlfocus/__init__.py
XuHg-zjcn/sqlfocus
9d3bacbe82564f7e8414cf252c45f9cc4b9b91d4
[ "MIT" ]
1
2021-11-02T17:27:40.000Z
2021-11-02T17:27:40.000Z
from .table import SQLTable, SQLTableBase __version__ = "0.3.4"
16.25
41
0.753846
9
65
5
1
0
0
0
0
0
0
0
0
0
0
0.053571
0.138462
65
3
42
21.666667
0.75
0
0
0
0
0
0.076923
0
0
0
0
0
0
1
0
false
0
0.5
0
0.5
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
1
0
0
0
0
3
97db3aba9685655dca6d7d7db801e90842b4323e
2,335
py
Python
d4rl/gym_minigrid/fourroom_controller.py
chappers/d4rl
b838da60b51c98c1d673a81657f58a44ccf5d3fe
[ "Apache-2.0" ]
552
2020-04-20T01:07:02.000Z
2022-03-31T16:47:39.000Z
d4rl/gym_minigrid/fourroom_controller.py
chappers/d4rl
b838da60b51c98c1d673a81657f58a44ccf5d3fe
[ "Apache-2.0" ]
103
2020-04-20T14:18:32.000Z
2022-03-30T14:33:45.000Z
d4rl/gym_minigrid/fourroom_controller.py
chappers/d4rl
b838da60b51c98c1d673a81657f58a44ccf5d3fe
[ "Apache-2.0" ]
135
2020-04-21T16:57:52.000Z
2022-03-30T14:29:55.000Z
import numpy as np import random from d4rl.pointmaze import q_iteration from d4rl.pointmaze.gridcraft import grid_env from d4rl.pointmaze.gridcraft import grid_spec MAZE = \ "###################\\"+\ "#OOOOOOOO#OOOOOOOO#\\"+\ "#OOOOOOOO#OOOOOOOO#\\"+\ "#OOOOOOOOOOOOOOOOO#\\"+\ "#OOOOOOOO#OOOOOOOO#\\"+\ "#OOOOOOOO#OOOOOOOO#\\"+\ "#OOOOOOOO#OOOOOOOO#\\"+\ "#OOOOOOOO#OOOOOOOO#\\"+\ "#OOOOOOOO#OOOOOOOO#\\"+\ "####O#########O####\\"+\ "#OOOOOOOO#OOOOOOOO#\\"+\ "#OOOOOOOO#OOOOOOOO#\\"+\ "#OOOOOOOO#OOOOOOOO#\\"+\ "#OOOOOOOO#OOOOOOOO#\\"+\ "#OOOOOOOO#OOOOOOOO#\\"+\ "#OOOOOOOO#OOOOOOOO#\\"+\ "#OOOOOOOOOOOOOOOOO#\\"+\ "#OOOOOOOO#OOOOOOOO#\\"+\ "###################\\" # NLUDR -> RDLU TRANSLATE_DIRECTION = { 0: None, 1: 3,#3, 2: 1,#1, 3: 2,#2, 4: 0,#0, } RIGHT = 1 LEFT = 0 FORWARD = 2 class FourRoomController(object): def __init__(self): self.env = grid_env.GridEnv(grid_spec.spec_from_string(MAZE)) self.reset_locations = list(zip(*np.where(self.env.gs.spec == grid_spec.EMPTY))) def sample_target(self): return random.choice(self.reset_locations) def set_target(self, target): self.target = target self.env.gs[target] = grid_spec.REWARD self.q_values = q_iteration.q_iteration(env=self.env, num_itrs=32, discount=0.99) self.env.gs[target] = grid_spec.EMPTY def get_action(self, pos, orientation): if tuple(pos) == tuple(self.target): done = True else: done = False env_pos_idx = self.env.gs.xy_to_idx(pos) qvalues = self.q_values[env_pos_idx] direction = TRANSLATE_DIRECTION[np.argmax(qvalues)] #tgt_pos, _ = self.env.step_stateless(env_pos_idx, np.argmax(qvalues)) #tgt_pos = self.env.gs.idx_to_xy(tgt_pos) #print('\tcmd_dir:', direction, np.argmax(qvalues), qvalues, tgt_pos) #infos = {} #infos['tgt_pos'] = tgt_pos if orientation == direction or direction == None: return FORWARD, done else: return get_turn(orientation, direction), done #RDLU TURN_DIRS = [ [None, RIGHT, RIGHT, LEFT], #R [LEFT, None, RIGHT, RIGHT], #D [RIGHT, LEFT, None, RIGHT], #L [RIGHT, RIGHT, LEFT, None], #U ] def 
get_turn(ori, tgt_ori): return TURN_DIRS[ori][tgt_ori]
27.470588
89
0.608137
291
2,335
4.704467
0.302406
0.280497
0.350621
0.39737
0.315559
0.315559
0.229365
0.128561
0.128561
0.128561
0
0.01291
0.203854
2,335
84
90
27.797619
0.723507
0.10364
0
0.272727
0
0
0.192104
0.192104
0
0
0
0
0
1
0.075758
false
0
0.075758
0.030303
0.227273
0
0
0
0
null
1
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
3
97ec9f21d8b2ed72072ccf1d17aba6db890c777a
370
py
Python
backend/student/admin.py
Cian747/student-motivation
49be6b57a44dec7f0c436e3bc6a8546ccd44152c
[ "MIT" ]
null
null
null
backend/student/admin.py
Cian747/student-motivation
49be6b57a44dec7f0c436e3bc6a8546ccd44152c
[ "MIT" ]
null
null
null
backend/student/admin.py
Cian747/student-motivation
49be6b57a44dec7f0c436e3bc6a8546ccd44152c
[ "MIT" ]
null
null
null
from django.contrib import admin from .models import Category, StudentUser,Motivation,Review,WishList,Profile,ReviewThread # Register your models here. admin.site.register(StudentUser) admin.site.register(Profile) admin.site.register(Motivation) admin.site.register(ReviewThread) admin.site.register(Category) admin.site.register(Review) admin.site.register(WishList)
28.461538
89
0.835135
47
370
6.574468
0.361702
0.203884
0.385113
0
0
0
0
0
0
0
0
0
0.062162
370
12
90
30.833333
0.89049
0.07027
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
0.222222
0
0.222222
0
0
0
0
null
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
0
0
0
0
0
3
97f5f38e60f2de2ce7a9c04870086f71ab006e74
584
py
Python
nnutil2/util/__init__.py
aroig/nnutil2
1fc77df351d4eee1166688e25a94287a5cfa27c4
[ "BSD-3-Clause" ]
null
null
null
nnutil2/util/__init__.py
aroig/nnutil2
1fc77df351d4eee1166688e25a94287a5cfa27c4
[ "BSD-3-Clause" ]
3
2020-11-13T18:33:29.000Z
2021-08-25T15:55:57.000Z
nnutil2/util/__init__.py
aroig/nnutil2
1fc77df351d4eee1166688e25a94287a5cfa27c4
[ "BSD-3-Clause" ]
null
null
null
#!/usr/bin/env python # -*- coding: utf-8 -*- # # nnutil2 - Tensorflow utilities for training neural networks # Copyright (c) 2019, Abdó Roig-Maranges <abdo.roig@gmail.com> # # This file is part of 'nnutil2'. # # This file may be modified and distributed under the terms of the 3-clause BSD # license. See the LICENSE file for details. from .tensorboard import * from .shape import * from .interpolate_shape import * from .kwargs_for import * from .numpy_json_encoder import * from .cached_property import * from .interleave import * from .uninterleave import * from .timer import *
27.809524
79
0.744863
84
584
5.119048
0.666667
0.186047
0.069767
0
0
0
0
0
0
0
0
0.016293
0.159247
584
20
80
29.2
0.85947
0.541096
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
3
3f06b008405b791e952feebf85177fb35611a404
250
py
Python
src/tcal.py
gpu0/gemm
3aa50005e3f380ab89b29dd7793c1f52c2ce6b23
[ "MIT" ]
null
null
null
src/tcal.py
gpu0/gemm
3aa50005e3f380ab89b29dd7793c1f52c2ce6b23
[ "MIT" ]
null
null
null
src/tcal.py
gpu0/gemm
3aa50005e3f380ab89b29dd7793c1f52c2ce6b23
[ "MIT" ]
null
null
null
""" Calculate different thread patterns """ tx = list(range(0, 256)) #print("sAtx", map(lambda x: (x%2) * 512 + x/2, tx)) print("gmStoreCtx", map(lambda x: (x%16)*2 + (x/16)*16*8, tx)) print("", map(lambda x: (x%16)*2 + (x/16)*16*8 + 32 + 1, tx))
22.727273
62
0.568
47
250
3.021277
0.446809
0.084507
0.211268
0.232394
0.28169
0.28169
0.28169
0.28169
0.28169
0.28169
0
0.133333
0.16
250
10
63
25
0.542857
0.348
0
0
0
0
0.064935
0
0
0
0
0
0
1
0
false
0
0
0
0
0.666667
0
0
0
null
0
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
1
0
3
3f2deb68c60ebfb66dff7097b04e2e38d5650535
644
py
Python
python/tests/test_multi-bracket-validation.py
everydaytimmy/data-structures-and-algorithms
eb8166c79c5cccfd20ad591c496c81dab82e297d
[ "MIT" ]
null
null
null
python/tests/test_multi-bracket-validation.py
everydaytimmy/data-structures-and-algorithms
eb8166c79c5cccfd20ad591c496c81dab82e297d
[ "MIT" ]
10
2021-04-14T20:08:53.000Z
2021-05-25T23:59:45.000Z
python/tests/test_multi-bracket-validation.py
everydaytimmy/data-structures-and-algorithms
eb8166c79c5cccfd20ad591c496c81dab82e297d
[ "MIT" ]
null
null
null
from code_challenges.multi_bracket_validation.multi_bracket_validation import (multi_bracket_validation) def test_one(): string = "{}{Code}[Fellows](())" actual = multi_bracket_validation(string) expected = True assert actual == expected def test_two(): string = "{{}}}}" actual = multi_bracket_validation(string) expected = False assert actual == expected def test_three(): string = "(){}[]" actual = multi_bracket_validation(string) expected = True assert actual == expected def test_four(): string = "{" actual = multi_bracket_validation(string) expected = False assert actual == expected
24.769231
104
0.703416
71
644
6.112676
0.28169
0.193548
0.354839
0.258065
0.702765
0.686636
0.686636
0.672811
0.672811
0.672811
0
0
0.180124
644
25
105
25.76
0.82197
0
0
0.571429
0
0
0.052795
0.032609
0
0
0
0
0.190476
1
0.190476
false
0
0.047619
0
0.238095
0
0
0
0
null
0
1
1
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
3
3f51f1f166df3051d1744f7177ada944d75fd4af
3,212
py
Python
test/record/parser/test_response_whois_ua_ua_uanic_property_contacts_multiple.py
huyphan/pyyawhois
77fb2f73a9c67989f1d41d98f37037406a69d136
[ "MIT" ]
null
null
null
test/record/parser/test_response_whois_ua_ua_uanic_property_contacts_multiple.py
huyphan/pyyawhois
77fb2f73a9c67989f1d41d98f37037406a69d136
[ "MIT" ]
null
null
null
test/record/parser/test_response_whois_ua_ua_uanic_property_contacts_multiple.py
huyphan/pyyawhois
77fb2f73a9c67989f1d41d98f37037406a69d136
[ "MIT" ]
null
null
null
# This file is autogenerated. Do not edit it manually. # If you want change the content of this file, edit # # spec/fixtures/responses/whois.ua/ua/uanic/property_contacts_multiple # # and regenerate the tests with the following script # # $ scripts/generate_tests.py # from nose.tools import * from dateutil.parser import parse as time_parse import yawhois class TestWhoisUaUaUanicPropertyContactsMultiple(object): def setUp(self): fixture_path = "spec/fixtures/responses/whois.ua/ua/uanic/property_contacts_multiple.txt" host = "whois.ua" part = yawhois.record.Part(open(fixture_path, "r").read(), host) self.record = yawhois.record.Record(None, [part]) def test_technical_contacts(self): eq_(self.record.technical_contacts.__class__.__name__, 'list') eq_(len(self.record.technical_contacts), 2) eq_(self.record.technical_contacts[0].__class__.__name__, 'Contact') eq_(self.record.technical_contacts[0].type, yawhois.record.Contact.TYPE_TECHNICAL) eq_(self.record.technical_contacts[0].id, "KG780-UANIC") eq_(self.record.technical_contacts[0].name, None) eq_(self.record.technical_contacts[0].organization, "Kyivstar GSM") eq_(self.record.technical_contacts[0].address, "Chervonozoryanyi Av., 51") eq_(self.record.technical_contacts[0].city, "KYIV") eq_(self.record.technical_contacts[0].zip, "03110") eq_(self.record.technical_contacts[0].state, None) eq_(self.record.technical_contacts[0].country, "UA") eq_(self.record.technical_contacts[0].country_code, None) eq_(self.record.technical_contacts[0].phone, "+380 (67) 2372213") eq_(self.record.technical_contacts[0].fax, "+380 (44) 2473954") eq_(self.record.technical_contacts[0].email, "dnsmaster@kyivstar.net") eq_(self.record.technical_contacts[0].created_on, None) eq_(self.record.technical_contacts[0].updated_on, time_parse('2008-09-02 12:52:47')) eq_(self.record.technical_contacts[1].__class__.__name__, 'Contact') eq_(self.record.technical_contacts[1].type, yawhois.record.Contact.TYPE_TECHNICAL) 
eq_(self.record.technical_contacts[1].id, "EIC-UANIC") eq_(self.record.technical_contacts[1].name, None) eq_(self.record.technical_contacts[1].organization, "\"ElVisti Information Center\", LLC\nООО \"Информационный центр \"Электронные вести\"\nТОВ \"ІЦ ЕЛВІСТІ\"") eq_(self.record.technical_contacts[1].address, "а/с, 151") eq_(self.record.technical_contacts[1].city, "КИЇВ") eq_(self.record.technical_contacts[1].zip, "03037") eq_(self.record.technical_contacts[1].state, None) eq_(self.record.technical_contacts[1].country, "UA") eq_(self.record.technical_contacts[1].country_code, None) eq_(self.record.technical_contacts[1].phone, "+38044 239-90-91") eq_(self.record.technical_contacts[1].fax, None) eq_(self.record.technical_contacts[1].email, "hostmaster@visti.net") eq_(self.record.technical_contacts[1].created_on, None) eq_(self.record.technical_contacts[1].updated_on, time_parse('2011-12-15 11:33:14'))
54.440678
168
0.707347
429
3,212
5.034965
0.314685
0.162037
0.299074
0.425
0.620833
0.607407
0.362037
0.237037
0.115741
0.115741
0
0.042373
0.155044
3,212
58
169
55.37931
0.7535
0.079078
0
0
1
0
0.12114
0.031897
0
0
0
0
0
1
0.045455
false
0
0.068182
0
0.136364
0
0
0
0
null
0
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
3
3f58b705036bdccebde3019424f2e2e4b2126bd1
93
py
Python
python/1011.py
LourdesOshiroIgarashi/uri-begginner
05f7993dfde2c7cc49e5b74907dee6297c82f447
[ "MIT" ]
3
2021-05-17T05:39:08.000Z
2021-05-23T05:14:54.000Z
python/1011.py
LourdesOshiroIgarashi/uri-beginner
05f7993dfde2c7cc49e5b74907dee6297c82f447
[ "MIT" ]
null
null
null
python/1011.py
LourdesOshiroIgarashi/uri-beginner
05f7993dfde2c7cc49e5b74907dee6297c82f447
[ "MIT" ]
null
null
null
r = int(input()) pi = 3.14159 v = (4/3) *pi * (r ** 3) print("VOLUME = {0:.3f}".format(v))
13.285714
35
0.483871
18
93
2.5
0.722222
0
0
0
0
0
0
0
0
0
0
0.148649
0.204301
93
6
36
15.5
0.459459
0
0
0
0
0
0.172043
0
0
0
0
0
0
1
0
false
0
0
0
0
0.25
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
1
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
3
58bb2340b8eca1239c0703eb82cd1335bf3581b3
278
py
Python
1-lsh/Fred.py
lukaselmer/ethz-data-mining
cb4215c202efc37f3626a25c8301a4ac36813493
[ "MIT" ]
2
2015-01-24T18:22:33.000Z
2019-08-14T06:30:58.000Z
1-lsh/Fred.py
lukaselmer/ethz-data-mining
cb4215c202efc37f3626a25c8301a4ac36813493
[ "MIT" ]
null
null
null
1-lsh/Fred.py
lukaselmer/ethz-data-mining
cb4215c202efc37f3626a25c8301a4ac36813493
[ "MIT" ]
2
2016-01-15T21:12:32.000Z
2019-08-14T06:30:59.000Z
#!/usr/bin/env python import sys import numpy as np from numpy.core.multiarray import dtype def test(str): key,video_id,shingles = str.split(',') return key,video_id, shingles if __name__ == "__main__": str = "0:195043920039,0,[123 1 34 45]" test(str)
16.352941
46
0.665468
43
278
4.069767
0.72093
0.08
0.114286
0.205714
0
0
0
0
0
0
0
0.099548
0.205036
278
17
47
16.352941
0.692308
0.071942
0
0
0
0
0.166667
0.081395
0
0
0
0
0
1
0.111111
false
0
0.333333
0
0.555556
0
0
0
0
null
0
0
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
1
0
1
0
0
3
58c06e58e5690fd8fee5c78d6212da3969ff7259
2,462
py
Python
tests/Composition/test_Composition__species_molar_densities.py
kamilazdybal/multipy
ebdcddb63bfb1cd647ca99bbf9002b04a9b50ed9
[ "MIT" ]
null
null
null
tests/Composition/test_Composition__species_molar_densities.py
kamilazdybal/multipy
ebdcddb63bfb1cd647ca99bbf9002b04a9b50ed9
[ "MIT" ]
null
null
null
tests/Composition/test_Composition__species_molar_densities.py
kamilazdybal/multipy
ebdcddb63bfb1cd647ca99bbf9002b04a9b50ed9
[ "MIT" ]
null
null
null
import unittest import numpy as np import multipy ################################################################################ ################################################################################ #### #### Class: Composition #### ################################################################################ ################################################################################ class Composition(unittest.TestCase): def test_Composition__species_molar_densities__allowed_calls(self): species_mole_fractions = np.random.rand(100,10) T = 300 p = 10000 try: comp = multipy.Composition() c = comp.species_molar_densities(species_mole_fractions, T, p) (n_observations, n_species) = np.shape(c) self.assertTrue(n_observations == 100) self.assertTrue(n_species == 10) except Exception: self.assertTrue(False) species_mole_fractions = np.random.rand(100,1) try: comp = multipy.Composition() c = comp.species_molar_densities(species_mole_fractions, T, p) (n_observations, n_species) = np.shape(c) self.assertTrue(n_observations == 100) self.assertTrue(n_species == 1) except Exception: self.assertTrue(False) ################################################################################ ################################################################################ def test_Composition__species_molar_densities__not_allowed_calls(self): species_mole_fractions = np.random.rand(100,1).ravel() T = 300 p = 10000 comp = multipy.Composition() with self.assertRaises(ValueError): c = comp.species_molar_densities(species_mole_fractions, T, p) with self.assertRaises(ValueError): c = comp.species_molar_densities(1, T, p) with self.assertRaises(ValueError): c = comp.species_molar_densities([1,2,3], T, p) ################################################################################ ################################################################################ def test_Composition__species_molar_densities__computation(self): pass 
################################################################################ ################################################################################
35.681159
80
0.42567
190
2,462
5.231579
0.247368
0.096579
0.169014
0.085513
0.830986
0.762575
0.644869
0.644869
0.607646
0.577465
0
0.02099
0.187246
2,462
68
81
36.205882
0.475762
0.007311
0
0.605263
0
0
0
0
0
0
0
0
0.236842
1
0.078947
false
0.026316
0.078947
0
0.184211
0
0
0
0
null
0
0
0
1
1
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
3
58c8ae18db0fa9424e9c7d3ffcb0dfa8924bde01
2,972
py
Python
thirdparty/bintrees-2.0.7/issues/004_rbtree_copy.py
anonymouscode1/djxperf
b6073a761753aa7a6247f2618977ca3a2633e78a
[ "MIT" ]
50
2019-08-30T13:20:19.000Z
2022-02-12T16:25:38.000Z
thirdparty/bintrees-2.0.7/issues/004_rbtree_copy.py
anonymouscode1/djxperf
b6073a761753aa7a6247f2618977ca3a2633e78a
[ "MIT" ]
5
2019-09-02T17:42:07.000Z
2020-07-17T09:30:47.000Z
thirdparty/bintrees-2.0.7/issues/004_rbtree_copy.py
anonymouscode1/djxperf
b6073a761753aa7a6247f2618977ca3a2633e78a
[ "MIT" ]
4
2020-03-30T02:57:51.000Z
2021-08-24T09:20:51.000Z
''' Created on Apr 11, 2013 @author: matthijssnel ''' from bintrees import FastRBTree def print_node(key, value): print("Key: {}; Value:{}".format(key, value)) def populate(tree): tree[20.5] = tree.get(20.5, 0) + 644 tree[17.35] = tree.get(17.35, 0) + 32 tree[19.5] = tree.get(19.5, 0) + 440 tree[20.0] = tree.get(20.0, 0) + 73 tree[18.5] = tree.get(18.5, 0) + 1500 tree[20.8] = tree.get(20.8, 0) + 330 tree[21.0] = tree.get(21.0, 0) + 450 tree[19.25] = tree.get(19.25, 0) + 137 tree[18.7] = tree.get(18.7, 0) + 740 tree[20.12] = tree.get(20.12, 0) + 500 tree[19.85] = tree.get(19.85, 0) + 300 del tree[17.35] tree[18.5] = 1662 tree[17.23] = tree.get(17.23, 0) + 4594 tree[16.6] = tree.get(16.6, 0) + 2000 tree[16.62] = tree.get(16.62, 0) + 2000 tree[16.66] = tree.get(16.66, 0) + 2000 tree[16.68] = tree.get(16.68, 0) + 2000 tree[16.61] = tree.get(16.61, 0) + 2000 tree[16.64] = tree.get(16.64, 0) + 2000 tree[16.67] = tree.get(16.67, 0) + 2000 tree[16.57] = tree.get(16.57, 0) + 600 tree[16.58] = tree.get(16.58, 0) + 600 tree[16.59] = tree.get(16.59, 0) + 600 del tree[16.68] tree[16.59] = 2600 tree[16.59] = 2000 tree[16.56] = tree.get(16.56, 0) + 600 tree[16.59] = 2800 del tree[16.67] tree[16.58] = 2600 tree[16.56] = 5796 tree[16.56] = 600 tree[16.57] = 2600 tree[16.56] = 1400 tree[16.55] = tree.get(16.55, 0) + 5196 tree[16.53] = tree.get(16.53, 0) + 548 tree[16.55] = 5829 tree[16.54] = tree.get(16.54, 0) + 657 tree[16.56] = 1964 tree[16.58] = 3119 tree[16.6] = 2691 tree[16.57] = 3245 tree[16.59] = 3385 tree[16.58] = 3919 tree[16.6] = 3491 tree[16.57] = 4045 del tree[16.66] tree[16.56] = 2764 tree[16.55] = 6629 tree[16.58] = 3319 tree[16.55] = 7229 tree[16.55] = 2033 tree[16.55] = 2833 tree[16.54] = 1457 tree[16.54] = 6653 tree[16.54] = 5996 tree[16.62] = 2492 del tree[16.53] tree[16.61] = 2708 tree[16.54] = 5196 tree[16.55] = 2033 tree[16.62] = 2000 tree[16.54] = 5801 tree[16.62] = 2800 tree[16.61] = 3508 tree[16.58] = 3687 tree[16.61] = 2800 tree[16.53] = tree.get(16.53, 0) + 
522 tree[16.55] = 2833 tree[16.54] = 6601 tree[16.54] = 1405 tree[16.53] = 5718 tree[16.6] = 2800 tree[16.52] = tree.get(16.52, 0) + 537 tree[16.58] = 3319 tree[16.56] = 3133 tree.copy() del tree[16.52] tree[16.6] = 3471 tree[16.6] = 2800 tree[16.52] = tree.get(16.52, 0) + 655 tree[16.54] = 2905 tree[16.57] = 3445 del tree[16.64] tree[16.54] = 3705 tree[16.53] = 6518 tree[16.59] = 2800 tree[16.51] = tree.get(16.51, 0) + 523 clone = tree.copy() print("\nOriginal Tree:") tree.foreach(print_node) print("\nClone Tree:") clone.foreach(print_node) tree = FastRBTree() populate(tree)
26.070175
49
0.543069
550
2,972
2.929091
0.209091
0.275605
0.100559
0.047796
0.14401
0.129112
0.088144
0.063315
0.038485
0.038485
0
0.357727
0.259758
2,972
113
50
26.300885
0.374545
0.015478
0
0.1
0
0
0.01577
0
0
0
0
0
0
1
0.02
false
0
0.01
0
0.03
0.06
0
0
0
null
1
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
1
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
3
58ce8f1d01d4eaf5ace86e4a20ffc665535d002f
560
py
Python
oo_trees/random_forest.py
asross/decision_trees
8cbdc7a90474abbeddcfccdaadea0e67d67aec86
[ "Apache-2.0" ]
1
2021-09-18T08:42:05.000Z
2021-09-18T08:42:05.000Z
oo_trees/random_forest.py
asross/decision_trees
8cbdc7a90474abbeddcfccdaadea0e67d67aec86
[ "Apache-2.0" ]
null
null
null
oo_trees/random_forest.py
asross/decision_trees
8cbdc7a90474abbeddcfccdaadea0e67d67aec86
[ "Apache-2.0" ]
null
null
null
from collections import Counter from .classifier import * from .decision_tree import * class RandomForest(Classifier): def __init__(self, dataset, tree_class=DecisionTree, n_trees=10, examples_per_tree=None): self.trees = [tree_class(dataset.bootstrap(examples_per_tree)) for _i in range(n_trees)] def vote_on(self, x): # TODO: we could return early as soon as we have a definite plurality return Counter(tree.classify(x) for tree in self.trees) def classify(self, x): return self.vote_on(x).most_common(1)[0][0]
37.333333
96
0.721429
85
560
4.552941
0.517647
0.046512
0.077519
0
0
0
0
0
0
0
0
0.010917
0.182143
560
14
97
40
0.834061
0.119643
0
0
0
0
0
0
0
0
0
0.071429
0
1
0.3
false
0
0.3
0.2
0.9
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
1
0
0
1
0
0
0
1
0
0
0
3
58e50e64efc6807b86e58a2ee032564035ca41f9
1,511
py
Python
hwtLib/abstract/frame_utils/join/state_trans_info.py
Nic30/hwtLib
52fd28023c4a25f64da17bb4d7c3089d5c7348f4
[ "MIT" ]
24
2017-02-23T10:00:50.000Z
2022-01-28T12:20:21.000Z
hwtLib/abstract/frame_utils/join/state_trans_info.py
Nic30/hwtLib
52fd28023c4a25f64da17bb4d7c3089d5c7348f4
[ "MIT" ]
32
2017-04-28T10:29:34.000Z
2021-04-27T09:16:43.000Z
hwtLib/abstract/frame_utils/join/state_trans_info.py
Nic30/hwtLib
52fd28023c4a25f64da17bb4d7c3089d5c7348f4
[ "MIT" ]
8
2019-09-19T03:34:36.000Z
2022-01-21T06:56:58.000Z
from typing import Dict, Tuple, Optional, List class StateTransInfo(): """ :ivar ~.label: tuple(frame id, word id) :ivar ~.outputs: list of tuples (input index, input time, input byte index) :ivar ~.last_per_input: last flags for each input if last=1 the the input word is end of the actual frame (None = don't care value) """ def __init__(self, label, word_bytes, input_cnt): self.label = label self.outputs: List[Optional[Tuple[int, int, int]]] = [None for _ in range(word_bytes)] self.last_per_input: List[Optional[int]] = [None for _ in range(input_cnt)] def get_state_i(self) -> int: """ :return: source state index for this state transition, min input index used when this state transition can happen """ return min([x[0] for x in self.outputs if x is not None]) def get_next_substate(self, sub_states: Dict[Tuple[int, int], "StateTransInfo"]) -> Optional["StateTransInfo"]: return sub_states.get((self.label[0], self.label[1] + 1), None) def __eq__(self, other): return self.label == other.label def set_output(self, out_B_i, in_i, time, in_B_i, B_from_last_input_word): v = (in_i, time, in_B_i, B_from_last_input_word) assert self.outputs[out_B_i] is None, ( self, out_B_i, self.outputs[out_B_i], v) self.outputs[out_B_i] = v def __repr__(self): return f"<{self.__class__.__name__:s} {self.label:s} o:{self.outputs}>"
38.74359
121
0.647253
235
1,511
3.902128
0.32766
0.015267
0.027263
0.049073
0.154853
0.100327
0.06325
0.06325
0.06325
0.06325
0
0.00431
0.232296
1,511
38
122
39.763158
0.786207
0.244209
0
0
0
0.052632
0.081952
0.025783
0
0
0
0
0.052632
1
0.315789
false
0
0.052632
0.157895
0.631579
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
1
0
0
0
1
1
0
0
3
450d03eaaddfd4bc726e8564701c18dcb566c7ca
255
py
Python
rdmo/system_integration/admin.py
hkrock/rdmo
80bbd3b5749f48a918e9aa4549a96479bf665b93
[ "Apache-2.0" ]
null
null
null
rdmo/system_integration/admin.py
hkrock/rdmo
80bbd3b5749f48a918e9aa4549a96479bf665b93
[ "Apache-2.0" ]
null
null
null
rdmo/system_integration/admin.py
hkrock/rdmo
80bbd3b5749f48a918e9aa4549a96479bf665b93
[ "Apache-2.0" ]
null
null
null
from django.contrib import admin # Register your models here. from .models import Catalog2ExternalDatamodel class cat2extAdmin(admin.ModelAdmin): list_display = ('catalog', 'datamodel') admin.site.register(Catalog2ExternalDatamodel, cat2extAdmin)
23.181818
60
0.803922
26
255
7.846154
0.692308
0
0
0
0
0
0
0
0
0
0
0.017699
0.113725
255
10
61
25.5
0.884956
0.101961
0
0
0
0
0.070485
0
0
0
0
0
0
1
0
false
0
0.4
0
0.8
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
1
0
1
0
0
3
451bd739afa44d6a1d3b989db4d8349587cffb65
32
py
Python
mi/dataset/driver/metbk_a/dcl/__init__.py
rmanoni/mi-dataset
c1012a0cd8f2ea075e008cdd1ab291ed54f44d43
[ "BSD-2-Clause" ]
1
2015-05-10T01:08:44.000Z
2015-05-10T01:08:44.000Z
mi/dataset/driver/metbk_a/dcl/__init__.py
rmanoni/mi-dataset
c1012a0cd8f2ea075e008cdd1ab291ed54f44d43
[ "BSD-2-Clause" ]
33
2017-04-25T19:53:45.000Z
2022-03-18T17:42:18.000Z
mi/dataset/driver/metbk_a/dcl/__init__.py
rmanoni/mi-dataset
c1012a0cd8f2ea075e008cdd1ab291ed54f44d43
[ "BSD-2-Clause" ]
31
2015-03-04T01:01:09.000Z
2020-10-28T14:42:12.000Z
__author__ = 'Ronald Ronquillo'
16
31
0.78125
3
32
7
1
0
0
0
0
0
0
0
0
0
0
0
0.125
32
1
32
32
0.75
0
0
0
0
0
0.5
0
0
0
0
0
0
1
0
false
0
0
0
0
0
1
1
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
3
45272c7e669eb4be33e525ade7b1038eebc2884b
73
py
Python
flush-redis.py
marshyski/pydatadc-2016
752c8f547909bac6e20d7dcdacd1b0892afd3914
[ "Apache-2.0" ]
15
2016-10-09T14:51:37.000Z
2019-05-21T15:04:58.000Z
flush-redis.py
marshyski/pydatadc-2016
752c8f547909bac6e20d7dcdacd1b0892afd3914
[ "Apache-2.0" ]
1
2017-07-17T16:18:20.000Z
2019-01-18T11:37:18.000Z
flush-redis.py
marshyski/pydatadc-2016
752c8f547909bac6e20d7dcdacd1b0892afd3914
[ "Apache-2.0" ]
7
2016-10-09T14:51:43.000Z
2022-01-28T20:23:46.000Z
import redis db = redis.StrictRedis('localhost', 6379, 1) db.flushall()
14.6
44
0.726027
10
73
5.3
0.8
0
0
0
0
0
0
0
0
0
0
0.078125
0.123288
73
4
45
18.25
0.75
0
0
0
0
0
0.123288
0
0
0
0
0
0
1
0
false
0
0.333333
0
0.333333
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
1
0
0
0
0
3
188e87bd17292b6b1a30c44117134e3109be6475
913
py
Python
test_server.py
TomerZeitune/submarines
c18415f8ec1694c83171aaf2d0e34dacdc32585c
[ "Apache-2.0" ]
null
null
null
test_server.py
TomerZeitune/submarines
c18415f8ec1694c83171aaf2d0e34dacdc32585c
[ "Apache-2.0" ]
null
null
null
test_server.py
TomerZeitune/submarines
c18415f8ec1694c83171aaf2d0e34dacdc32585c
[ "Apache-2.0" ]
null
null
null
from src.server.server import Server from src.game.game import Game from src.core.constants import SUB, SEA def main(): table = [[SUB, SUB, SUB, SEA, SEA, SEA, SEA, SEA, SEA, SEA], [SEA, SEA, SEA, SEA, SEA, SEA, SEA, SEA, SEA, SEA], [SUB, SEA, SEA, SUB, SEA, SUB, SUB, SUB, SUB, SEA], [SUB, SEA, SEA, SUB, SEA, SEA, SEA, SEA, SEA, SEA], [SUB, SEA, SEA, SUB, SEA, SEA, SEA, SEA, SEA, SEA], [SUB, SEA, SEA, SEA, SEA, SEA, SEA, SEA, SEA, SEA], [SUB, SEA, SEA, SEA, SEA, SEA, SEA, SEA, SEA, SEA], [SEA, SEA, SEA, SUB, SUB, SUB, SUB, SEA, SEA, SUB], [SEA, SEA, SEA, SEA, SEA, SEA, SEA, SEA, SEA, SUB], [SEA, SEA, SEA, SEA, SEA, SEA, SEA, SEA, SEA, SUB]] server = Server(5555) server.begin() Game(server, table, True).play() server.end() if __name__ == "__main__": main()
33.814815
64
0.526835
138
913
3.427536
0.137681
0.824524
1.027484
1.192389
0.634249
0.621564
0.58351
0.58351
0.58351
0.58351
0
0.006289
0.303395
913
26
65
35.115385
0.737421
0
0
0.2
0
0
0.008772
0
0
0
0
0
0
1
0.05
false
0
0.15
0
0.2
0
0
0
0
null
1
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
3
18980ec3bb7d9eab7917861f35b2a826f7fbd3e2
2,776
py
Python
migrator/UCPAccess.py
reallyinsane/docker2artifactory
eb184982d65971c43f57a1a873fbeebf049799b4
[ "Apache-2.0" ]
27
2018-06-14T16:16:33.000Z
2022-02-26T09:02:30.000Z
migrator/UCPAccess.py
reallyinsane/docker2artifactory
eb184982d65971c43f57a1a873fbeebf049799b4
[ "Apache-2.0" ]
14
2018-11-19T23:08:11.000Z
2021-05-14T11:36:39.000Z
migrator/UCPAccess.py
reallyinsane/docker2artifactory
eb184982d65971c43f57a1a873fbeebf049799b4
[ "Apache-2.0" ]
15
2018-06-11T10:01:47.000Z
2021-07-09T06:53:59.000Z
from DockerEEHTTPAccess import DockerEEHTTPAccess import urllib import logging ''' Simple API for accessing UCP resources Supports: ''' class UCPAccess(DockerEEHTTPAccess): def __init__(self, url, username=None, password=None, ignore_cert=False, exlog=False): super(UCPAccess, self).__init__(url, username, password, ignore_cert, exlog) self.log = logging.getLogger(__name__) ''' Test connection with UCP ''' def test_connection(self): return bool(super(UCPAccess, self).get_call_wrapper('/id/')) ''' Gets the list of all organizations @return None if there was an error, else the a list of available organizations ''' def get_organizations(self): return super(UCPAccess, self).get_with_pagination('accounts/', 'accounts', 'name', self.__get_organizations_page_handler) def __get_organizations_page_handler(self, result, page_results): for account in page_results: if account['isOrg'] == True: result.append(account['name']) ''' Gets the list of all users @return None if there was an error, else the a list of available team of a given organization ''' def get_users(self): return super(UCPAccess, self).get_with_pagination('accounts/', 'accounts', 'name', self.__get_users_page_handler) def __get_users_page_handler(self, result, page_results): for account in page_results: if account['isOrg'] == False and account['isActive'] == True: result.append(account['name']) ''' Get the list of all teams of a given organizations @return None if there was an error, else the a list of available team of a given organization ''' def get_teams(self, organization): org_encoded = urllib.quote_plus(organization) return super(UCPAccess, self).get_with_pagination("accounts/" + org_encoded + "/teams/", 'teams', 'name', self.__get_teams_page_handler) def __get_teams_page_handler(self, result, page_results): for team in page_results: result.append(team['name']) ''' Get the list of members of a given team @return None if there was an error, else the a list of available members of a given team ''' def 
get_members(self, organization, team): org_encoded = urllib.quote_plus(organization) team_encoded = urllib.quote_plus(team) return super(UCPAccess, self).get_with_pagination("accounts/" + org_encoded + "/teams/" + team + "/members/", 'members', 'member.id', self.__get_members_page_handler) def __get_members_page_handler(self, result, page_results): for member in page_results: result.append(member['member']['name'])
39.657143
174
0.675432
353
2,776
5.070822
0.215297
0.035196
0.060335
0.058659
0.582123
0.469274
0.427933
0.388827
0.388827
0.388827
0
0
0.223703
2,776
69
175
40.231884
0.830626
0
0
0.176471
0
0
0.074219
0
0
0
0
0
0
1
0.294118
false
0.058824
0.088235
0.088235
0.558824
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
1
0
1
0
0
1
0
0
3
18a65a9ae487e42679d7d964705c84cf62f6acfb
241
py
Python
tests/__init__.py
ktshen/transport-linebot
22e285dd9a148835592dfca62b1ca80b531ed4dd
[ "MIT" ]
null
null
null
tests/__init__.py
ktshen/transport-linebot
22e285dd9a148835592dfca62b1ca80b531ed4dd
[ "MIT" ]
null
null
null
tests/__init__.py
ktshen/transport-linebot
22e285dd9a148835592dfca62b1ca80b531ed4dd
[ "MIT" ]
null
null
null
import os from dotenv import load_dotenv # Load envirnoment variables dotenv_path = os.path.join(os.path.dirname(os.path.dirname(os.path.realpath(__file__))), '.env') if os.path.exists(dotenv_path): load_dotenv(dotenv_path=dotenv_path)
30.125
96
0.784232
38
241
4.710526
0.394737
0.167598
0.145251
0.167598
0.178771
0
0
0
0
0
0
0
0.091286
241
7
97
34.428571
0.817352
0.107884
0
0
0
0
0.018779
0
0
0
0
0
0
1
0
false
0
0.4
0
0.4
0
0
0
0
null
0
0
1
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
1
0
0
0
0
3
18f841e676d7442e36ddbd04e74ddf216581716d
192
py
Python
2.4.py
mertcanaltin/python-final
25257ed78cb460ff943724638f71135bf3ca2db4
[ "MIT" ]
null
null
null
2.4.py
mertcanaltin/python-final
25257ed78cb460ff943724638f71135bf3ca2db4
[ "MIT" ]
null
null
null
2.4.py
mertcanaltin/python-final
25257ed78cb460ff943724638f71135bf3ca2db4
[ "MIT" ]
null
null
null
from math import * d1 = [] d2 = [] d3 = [] for i in range (11): d1 += [i**2] for j in range (14,26): d2 += [sqrt(j)] for k in range (11): d3 += [(d1[k],d2[k])] print d3
12
24
0.453125
35
192
2.485714
0.514286
0.241379
0.206897
0
0
0
0
0
0
0
0
0.138462
0.322917
192
15
25
12.8
0.530769
0
0
0
0
0
0
0
0
0
0
0
0
0
null
null
0
0.090909
null
null
0.090909
0
0
0
null
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
1
0
0
0
0
0
0
0
0
3
18fb14498629438c68c53f3e784845f09713cfbd
57
py
Python
python/testData/inspections/PyUnusedLocalInspection/singleUnderscore.py
jnthn/intellij-community
8fa7c8a3ace62400c838e0d5926a7be106aa8557
[ "Apache-2.0" ]
2
2019-04-28T07:48:50.000Z
2020-12-11T14:18:08.000Z
python/testData/inspections/PyUnusedLocalInspection/singleUnderscore.py
Cyril-lamirand/intellij-community
60ab6c61b82fc761dd68363eca7d9d69663cfa39
[ "Apache-2.0" ]
173
2018-07-05T13:59:39.000Z
2018-08-09T01:12:03.000Z
python/testData/inspections/PyUnusedLocalInspection/singleUnderscore.py
Cyril-lamirand/intellij-community
60ab6c61b82fc761dd68363eca7d9d69663cfa39
[ "Apache-2.0" ]
2
2020-03-15T08:57:37.000Z
2020-04-07T04:48:14.000Z
def foo(): l = [42 for _ in xrange(100)] print(l)
19
33
0.526316
10
57
2.9
0.9
0
0
0
0
0
0
0
0
0
0
0.125
0.298246
57
3
34
19
0.6
0
0
0
0
0
0
0
0
0
0
0
0
1
0.333333
false
0
0
0
0.333333
0.333333
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
1
0
0
0
0
0
0
0
3
7a106907cb1692a85b7e7d5f52d930926c17dca8
752
py
Python
kipi/checksum.py
hildogjr/kicad-getlibs
67d073a2e451855ff8181e6cdd0c88de08add10e
[ "MIT" ]
2
2018-04-05T20:05:22.000Z
2018-07-08T23:40:42.000Z
kipi/checksum.py
hildogjr/kicad-getlibs
67d073a2e451855ff8181e6cdd0c88de08add10e
[ "MIT" ]
3
2018-04-05T19:27:56.000Z
2019-06-26T19:18:08.000Z
kipi/checksum.py
hildogjr/kicad-getlibs
67d073a2e451855ff8181e6cdd0c88de08add10e
[ "MIT" ]
4
2018-04-05T17:56:36.000Z
2019-06-26T18:04:18.000Z
import hashlib def hash_bytestr_iter(bytesiter, hasher, ashexstr=False): for block in bytesiter: hasher.update(block) return hasher.hexdigest() if ashexstr else hasher.digest() def file_as_blockiter(afile, blocksize=65536): with afile: block = afile.read(blocksize) while len(block) > 0: yield block block = afile.read(blocksize) def get_md5_hash (fname): return hash_bytestr_iter(file_as_blockiter(open(fname, 'rb')), hashlib.md5(), True) def get_sha256_hash (fname): return hash_bytestr_iter(file_as_blockiter(open(fname, 'rb')), hashlib.sha256(), True) def get_sha256_hash_by_handle (fhandle): return hash_bytestr_iter(file_as_blockiter(fhandle), hashlib.sha256(), True)
31.333333
90
0.714096
102
752
5.029412
0.401961
0.08577
0.116959
0.122807
0.385965
0.315789
0.315789
0.245614
0.245614
0.245614
0
0.032415
0.179521
752
23
91
32.695652
0.799028
0
0
0.117647
0
0
0.005319
0
0
0
0
0
0
1
0.294118
false
0
0.058824
0.176471
0.588235
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
1
0
0
0
1
1
0
0
3
7a17495789b3d6b0b8ad5f3fe40d25c16753943d
284
py
Python
examples/lexicon_learner/get_hubert_lookup_table.py
jonojace/fairseq
ce287a3ca25fb26e65ae4d12614bbf174371eaa9
[ "MIT" ]
null
null
null
examples/lexicon_learner/get_hubert_lookup_table.py
jonojace/fairseq
ce287a3ca25fb26e65ae4d12614bbf174371eaa9
[ "MIT" ]
null
null
null
examples/lexicon_learner/get_hubert_lookup_table.py
jonojace/fairseq
ce287a3ca25fb26e65ae4d12614bbf174371eaa9
[ "MIT" ]
null
null
null
#from dan lyth import joblib kmeans_model_path = '../../fairseq/examples/textless_nlp/gslm/speech2unit/pretrained_models/hubert/km100/hubert_km100.bin' kmeans_model = joblib.load(open(kmeans_model_path, "rb")) # this is just a sklearn model centroids = kmeans_model.cluster_centers_
40.571429
122
0.809859
41
284
5.341463
0.731707
0.200913
0.136986
0
0
0
0
0
0
0
0
0.02682
0.080986
284
6
123
47.333333
0.812261
0.147887
0
0
0
0
0.425
0.416667
0
0
0
0
0
1
0
false
0
0.25
0
0.25
0
0
0
0
null
1
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
1
null
0
0
0
0
0
0
0
0
0
0
0
0
0
3
e1313a7271455914ef157d05dcdeea3dcdb8507f
119
py
Python
ldapkg/__init__.py
MSSfusiqi/LDA
e23e5682e4a5d51f01f36e225d305b6645f88568
[ "MIT" ]
1
2021-11-19T03:40:29.000Z
2021-11-19T03:40:29.000Z
ldapkg/__init__.py
MSSfusiqi/LDA
e23e5682e4a5d51f01f36e225d305b6645f88568
[ "MIT" ]
null
null
null
ldapkg/__init__.py
MSSfusiqi/LDA
e23e5682e4a5d51f01f36e225d305b6645f88568
[ "MIT" ]
null
null
null
#!/usr/bin/env python3 # -*- coding: utf-8 -*- """ Created on Mon Apr 29 16:51:16 2019 @author: linyizi """
8.5
35
0.554622
18
119
3.666667
0.944444
0
0
0
0
0
0
0
0
0
0
0.155556
0.243697
119
13
36
9.153846
0.577778
0.815126
0
null
0
null
0
0
null
0
0
0
null
1
null
true
0
0
null
null
null
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
0
0
0
0
0
3
e1468d99ac818a6e67f218ca7a52aea9dae60a7e
3,040
py
Python
source/pkgsrc/lang/python27/patches/patch-Lib_ctypes_test_test__parameters.py
Scottx86-64/dotfiles-1
51004b1e2b032664cce6b553d2052757c286087d
[ "Unlicense" ]
1
2021-11-20T22:46:39.000Z
2021-11-20T22:46:39.000Z
source/pkgsrc/lang/python27/patches/patch-Lib_ctypes_test_test__parameters.py
Scottx86-64/dotfiles-1
51004b1e2b032664cce6b553d2052757c286087d
[ "Unlicense" ]
null
null
null
source/pkgsrc/lang/python27/patches/patch-Lib_ctypes_test_test__parameters.py
Scottx86-64/dotfiles-1
51004b1e2b032664cce6b553d2052757c286087d
[ "Unlicense" ]
null
null
null
$NetBSD: patch-Lib_ctypes_test_test__parameters.py,v 1.1.2.2 2021/10/13 21:04:01 tm Exp $ Fix CVE-2021-3177: Replace snprintf with Python unicode formatting in ctypes param reprs Via Fedora: https://src.fedoraproject.org/rpms/python2.7/blob/rawhide/f/00357-CVE-2021-3177.patch --- Lib/ctypes/test/test_parameters.py.orig 2020-04-19 21:13:39.000000000 +0000 +++ Lib/ctypes/test/test_parameters.py @@ -206,6 +206,49 @@ class SimpleTypesTestCase(unittest.TestC with self.assertRaises(ZeroDivisionError): WorseStruct().__setstate__({}, b'foo') + def test_parameter_repr(self): + from ctypes import ( + c_bool, + c_char, + c_wchar, + c_byte, + c_ubyte, + c_short, + c_ushort, + c_int, + c_uint, + c_long, + c_ulong, + c_longlong, + c_ulonglong, + c_float, + c_double, + c_longdouble, + c_char_p, + c_wchar_p, + c_void_p, + ) + self.assertRegexpMatches(repr(c_bool.from_param(True)), r"^<cparam '\?' at 0x[A-Fa-f0-9]+>$") + self.assertEqual(repr(c_char.from_param('a')), "<cparam 'c' ('a')>") + self.assertRegexpMatches(repr(c_wchar.from_param('a')), r"^<cparam 'u' at 0x[A-Fa-f0-9]+>$") + self.assertEqual(repr(c_byte.from_param(98)), "<cparam 'b' (98)>") + self.assertEqual(repr(c_ubyte.from_param(98)), "<cparam 'B' (98)>") + self.assertEqual(repr(c_short.from_param(511)), "<cparam 'h' (511)>") + self.assertEqual(repr(c_ushort.from_param(511)), "<cparam 'H' (511)>") + self.assertRegexpMatches(repr(c_int.from_param(20000)), r"^<cparam '[li]' \(20000\)>$") + self.assertRegexpMatches(repr(c_uint.from_param(20000)), r"^<cparam '[LI]' \(20000\)>$") + self.assertRegexpMatches(repr(c_long.from_param(20000)), r"^<cparam '[li]' \(20000\)>$") + self.assertRegexpMatches(repr(c_ulong.from_param(20000)), r"^<cparam '[LI]' \(20000\)>$") + self.assertRegexpMatches(repr(c_longlong.from_param(20000)), r"^<cparam '[liq]' \(20000\)>$") + self.assertRegexpMatches(repr(c_ulonglong.from_param(20000)), r"^<cparam '[LIQ]' \(20000\)>$") + self.assertEqual(repr(c_float.from_param(1.5)), "<cparam 'f' (1.5)>") + 
self.assertEqual(repr(c_double.from_param(1.5)), "<cparam 'd' (1.5)>") + self.assertEqual(repr(c_double.from_param(1e300)), "<cparam 'd' (1e+300)>") + self.assertRegexpMatches(repr(c_longdouble.from_param(1.5)), r"^<cparam ('d' \(1.5\)|'g' at 0x[A-Fa-f0-9]+)>$") + self.assertRegexpMatches(repr(c_char_p.from_param(b'hihi')), "^<cparam 'z' \(0x[A-Fa-f0-9]+\)>$") + self.assertRegexpMatches(repr(c_wchar_p.from_param('hihi')), "^<cparam 'Z' \(0x[A-Fa-f0-9]+\)>$") + self.assertRegexpMatches(repr(c_void_p.from_param(0x12)), r"^<cparam 'P' \(0x0*12\)>$") + ################################################################ if __name__ == '__main__':
51.525424
120
0.587171
407
3,040
4.174447
0.292383
0.058858
0.1907
0.197763
0.513243
0.462036
0.444968
0.373161
0.334314
0.26957
0
0.081357
0.195395
3,040
58
121
52.413793
0.613246
0
0
0
0
0.018868
0.178763
0
0
0
0.002352
0
0.396226
0
null
null
0
0.018868
null
null
0.018868
0
0
0
null
0
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
1
0
0
0
0
0
0
0
0
3
e16eb74e271113e181d2e95523f7b87dae1ca729
511
py
Python
pyopenproject/business/previewing_service.py
webu/pyopenproject
40b2cb9fe0fa3f89bc0fe2a3be323422d9ecf966
[ "MIT" ]
5
2021-02-25T15:54:28.000Z
2021-04-22T15:43:36.000Z
pyopenproject/business/previewing_service.py
webu/pyopenproject
40b2cb9fe0fa3f89bc0fe2a3be323422d9ecf966
[ "MIT" ]
7
2021-03-15T16:26:23.000Z
2022-03-16T13:45:18.000Z
pyopenproject/business/previewing_service.py
webu/pyopenproject
40b2cb9fe0fa3f89bc0fe2a3be323422d9ecf966
[ "MIT" ]
6
2021-06-18T18:59:11.000Z
2022-03-27T04:58:52.000Z
from abc import ABCMeta, abstractmethod from pyopenproject.business.abstract_service import AbstractService class PreviewingService(AbstractService): """ Class PreviewingService, service for previewing endpoint """ __metaclass__ = ABCMeta def __init__(self, connection): super().__init__(connection) @abstractmethod def from_markdown(self, text, context=None): raise NotImplementedError @abstractmethod def from_plain(self, text): raise NotImplementedError
24.333333
74
0.749511
48
511
7.666667
0.583333
0.108696
0.201087
0
0
0
0
0
0
0
0
0
0.181996
511
20
75
25.55
0.880383
0.109589
0
0.2
0
0
0
0
0
0
0
0
0
1
0.3
false
0
0.2
0
0.7
0
0
0
0
null
0
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
1
0
0
0
0
1
0
0
3
e17019d523feb69235892aaaf0b612b24589ea3c
158
py
Python
mysite/users/router.py
2021fallCMPUT404/group-cmput404-project
985b76dc6c554caf77e7cf5788355cca22a26e74
[ "Apache-2.0" ]
2
2021-12-06T06:42:41.000Z
2022-03-29T21:40:14.000Z
mysite/users/router.py
2021fallCMPUT404/group-cmput404-project
985b76dc6c554caf77e7cf5788355cca22a26e74
[ "Apache-2.0" ]
7
2021-10-29T20:31:44.000Z
2021-12-05T06:55:58.000Z
mysite/users/router.py
2021fallCMPUT404/group-cmput404-project
985b76dc6c554caf77e7cf5788355cca22a26e74
[ "Apache-2.0" ]
null
null
null
from . import userviewsets from rest_framework import routers router = routers.DefaultRouter() router.register('user', userviewsets, base_name ='user_api')
26.333333
60
0.797468
19
158
6.473684
0.684211
0
0
0
0
0
0
0
0
0
0
0
0.107595
158
6
60
26.333333
0.87234
0
0
0
0
0
0.075949
0
0
0
0
0
0
1
0
false
0
0.5
0
0.5
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
1
0
0
0
0
3
e1799e9361652655eaf0bc3c23fc3cb3a66c1f66
5,747
py
Python
build/ARM/python/m5/internal/param_RubyCache.py
Jakgn/gem5_test
0ba7cc5213cf513cf205af7fc995cf679ebc1a3f
[ "BSD-3-Clause" ]
null
null
null
build/ARM/python/m5/internal/param_RubyCache.py
Jakgn/gem5_test
0ba7cc5213cf513cf205af7fc995cf679ebc1a3f
[ "BSD-3-Clause" ]
null
null
null
build/ARM/python/m5/internal/param_RubyCache.py
Jakgn/gem5_test
0ba7cc5213cf513cf205af7fc995cf679ebc1a3f
[ "BSD-3-Clause" ]
null
null
null
# This file was automatically generated by SWIG (http://www.swig.org). # Version 2.0.11 # # Do not make changes to this file unless you know what you are doing--modify # the SWIG interface file instead. from sys import version_info if version_info >= (2,6,0): def swig_import_helper(): from os.path import dirname import imp fp = None try: fp, pathname, description = imp.find_module('_param_RubyCache', [dirname(__file__)]) except ImportError: import _param_RubyCache return _param_RubyCache if fp is not None: try: _mod = imp.load_module('_param_RubyCache', fp, pathname, description) finally: fp.close() return _mod _param_RubyCache = swig_import_helper() del swig_import_helper else: import _param_RubyCache del version_info try: _swig_property = property except NameError: pass # Python < 2.2 doesn't have 'property'. def _swig_setattr_nondynamic(self,class_type,name,value,static=1): if (name == "thisown"): return self.this.own(value) if (name == "this"): if type(value).__name__ == 'SwigPyObject': self.__dict__[name] = value return method = class_type.__swig_setmethods__.get(name,None) if method: return method(self,value) if (not static): self.__dict__[name] = value else: raise AttributeError("You cannot add attributes to %s" % self) def _swig_setattr(self,class_type,name,value): return _swig_setattr_nondynamic(self,class_type,name,value,0) def _swig_getattr(self,class_type,name): if (name == "thisown"): return self.this.own() method = class_type.__swig_getmethods__.get(name,None) if method: return method(self) raise AttributeError(name) def _swig_repr(self): try: strthis = "proxy of " + self.this.__repr__() except: strthis = "" return "<%s.%s; %s >" % (self.__class__.__module__, self.__class__.__name__, strthis,) try: _object = object _newclass = 1 except AttributeError: class _object : pass _newclass = 0 def _swig_setattr_nondynamic_method(set): def set_attr(self,name,value): if (name == "thisown"): return self.this.own(value) if hasattr(self,name) or (name == "this"): 
set(self,name,value) else: raise AttributeError("You cannot add attributes to %s" % self) return set_attr import m5.internal.param_ReplacementPolicy import m5.internal.param_SimObject import m5.internal.drain import m5.internal.serialize import m5.internal.param_RubySystem import m5.internal.param_SimpleMemory import m5.internal.param_AbstractMemory import m5.internal.param_MemObject import m5.internal.param_ClockedObject import m5.internal.param_ClockDomain import m5.internal.enum_PwrState import m5.internal.param_PowerModel import m5.internal.PowerModelState_vector import m5.internal.param_PowerModelState import m5.internal.param_SubSystem import m5.internal.param_ThermalDomain class CacheMemory(m5.internal.param_SimObject.SimObject): thisown = _swig_property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag') def __init__(self, *args, **kwargs): raise AttributeError("No constructor defined - class is abstract") __repr__ = _swig_repr CacheMemory_swigregister = _param_RubyCache.CacheMemory_swigregister CacheMemory_swigregister(CacheMemory) class RubyCacheParams(m5.internal.param_SimObject.SimObjectParams): thisown = _swig_property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag') __repr__ = _swig_repr def create(self): return _param_RubyCache.RubyCacheParams_create(self) assoc = _swig_property(_param_RubyCache.RubyCacheParams_assoc_get, _param_RubyCache.RubyCacheParams_assoc_set) block_size = _swig_property(_param_RubyCache.RubyCacheParams_block_size_get, _param_RubyCache.RubyCacheParams_block_size_set) dataAccessLatency = _swig_property(_param_RubyCache.RubyCacheParams_dataAccessLatency_get, _param_RubyCache.RubyCacheParams_dataAccessLatency_set) dataArrayBanks = _swig_property(_param_RubyCache.RubyCacheParams_dataArrayBanks_get, _param_RubyCache.RubyCacheParams_dataArrayBanks_set) is_icache = _swig_property(_param_RubyCache.RubyCacheParams_is_icache_get, _param_RubyCache.RubyCacheParams_is_icache_set) 
replacement_policy = _swig_property(_param_RubyCache.RubyCacheParams_replacement_policy_get, _param_RubyCache.RubyCacheParams_replacement_policy_set) resourceStalls = _swig_property(_param_RubyCache.RubyCacheParams_resourceStalls_get, _param_RubyCache.RubyCacheParams_resourceStalls_set) ruby_system = _swig_property(_param_RubyCache.RubyCacheParams_ruby_system_get, _param_RubyCache.RubyCacheParams_ruby_system_set) size = _swig_property(_param_RubyCache.RubyCacheParams_size_get, _param_RubyCache.RubyCacheParams_size_set) start_index_bit = _swig_property(_param_RubyCache.RubyCacheParams_start_index_bit_get, _param_RubyCache.RubyCacheParams_start_index_bit_set) tagAccessLatency = _swig_property(_param_RubyCache.RubyCacheParams_tagAccessLatency_get, _param_RubyCache.RubyCacheParams_tagAccessLatency_set) tagArrayBanks = _swig_property(_param_RubyCache.RubyCacheParams_tagArrayBanks_get, _param_RubyCache.RubyCacheParams_tagArrayBanks_set) def __init__(self): this = _param_RubyCache.new_RubyCacheParams() try: self.this.append(this) except: self.this = this __swig_destroy__ = _param_RubyCache.delete_RubyCacheParams __del__ = lambda self : None; RubyCacheParams_swigregister = _param_RubyCache.RubyCacheParams_swigregister RubyCacheParams_swigregister(RubyCacheParams)
43.537879
153
0.768227
696
5,747
5.902299
0.227011
0.119279
0.183544
0.061344
0.327167
0.169912
0.127556
0.120253
0.082278
0.064265
0
0.00636
0.151905
5,747
131
154
43.870229
0.836479
0.040021
0
0.174312
1
0
0.042855
0
0
0
0
0
0
1
0.091743
false
0.018349
0.229358
0.018349
0.568807
0
0
0
0
null
0
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
null
0
0
0
0
0
0
0
0
0
0
1
0
0
3
e183a7415b7112304f8241876902f1dcf0f31a54
1,559
py
Python
fixture/signup.py
Sorail/python_training_mantis
45e1843c4cf7a2585c12d945e5c9bd305d7de20c
[ "MIT" ]
null
null
null
fixture/signup.py
Sorail/python_training_mantis
45e1843c4cf7a2585c12d945e5c9bd305d7de20c
[ "MIT" ]
null
null
null
fixture/signup.py
Sorail/python_training_mantis
45e1843c4cf7a2585c12d945e5c9bd305d7de20c
[ "MIT" ]
null
null
null
import re
import quopri


class SignupHelper:
    """Drives the MantisBT account-signup flow through the app fixture's
    Selenium WebDriver and mail helper."""

    def __init__(self, app):
        self.app = app

    def _fill(self, field, text):
        """Click the input named *field*, wipe it, and type *text* into it.

        Each action re-locates the element by name, mirroring the original
        per-step lookups.
        """
        wd = self.app.wd
        wd.find_element_by_name(field).click()
        wd.find_element_by_name(field).clear()
        wd.find_element_by_name(field).send_keys(text)

    def new_user(self, username, email, password):
        """Register *username*/*email* on the signup page, fetch the
        registration e-mail, and complete the confirmation form."""
        wd = self.app.wd
        wd.get(self.app.config['web']['baseUrl'] + "/signup_page.php")
        self._fill("username", username)
        self._fill("email", email)
        wd.find_element_by_xpath("//input[@type='submit']").click()
        # Pull the confirmation mail and follow the link it contains.
        mail = self.app.mail.get_mail(username, password, '[MantisBT] Account registration')
        wd.get(self.extract_confirmation_url(mail))
        self._fill("realname", username)
        self._fill("password", password)
        self._fill("password_confirm", password)
        wd.find_element_by_xpath("//button[@type='submit']").click()

    def extract_confirmation_url(self, text):
        """Decode a quoted-printable message body and return the first
        http URL found in it."""
        decoded = quopri.decodestring(text).decode('utf-8')
        return re.search("http://.*", decoded).group(0)
38.975
92
0.670943
210
1,559
4.647619
0.261905
0.104508
0.226434
0.26127
0.585041
0.559426
0.300205
0.071721
0
0
0
0.001564
0.179602
1,559
39
93
39.974359
0.761532
0
0
0
0
0
0.162284
0.030148
0
0
0
0
0
1
0.096774
false
0.258065
0.064516
0
0.225806
0
0
0
0
null
0
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
1
0
0
0
0
0
3
e184ddb7a473b6b8f017e54ede29da58c774a841
727
py
Python
Python3/0036-Valid-Sudoku/soln-2.py
wyaadarsh/LeetCode-Solutions
3719f5cb059eefd66b83eb8ae990652f4b7fd124
[ "MIT" ]
5
2020-07-24T17:48:59.000Z
2020-12-21T05:56:00.000Z
Python3/0036-Valid-Sudoku/soln-2.py
zhangyaqi1989/LeetCode-Solutions
2655a1ffc8678ad1de6c24295071308a18c5dc6e
[ "MIT" ]
null
null
null
Python3/0036-Valid-Sudoku/soln-2.py
zhangyaqi1989/LeetCode-Solutions
2655a1ffc8678ad1de6c24295071308a18c5dc6e
[ "MIT" ]
2
2020-07-24T17:49:01.000Z
2020-08-31T19:57:35.000Z
class Solution:
    def isValidSudoku(self, board):
        """
        :type board: List[List[str]]
        :rtype: bool

        A board is valid when no row, column, or 3x3 sub-box contains a
        repeated digit ('.' cells are ignored).
        """
        def valid(sub):
            # A unit is valid when its filled-in digits are all distinct.
            nums = [item for item in sub if item.isdigit()]
            return len(set(nums)) == len(nums)

        def check_row():
            return all(valid(row) for row in board)

        def check_col():
            # zip(*board) transposes the grid.
            return all(valid(col) for col in zip(*board))

        def check_sub():
            # (r, c) ranges over the top-left corner of each 3x3 sub-box.
            # Fix: the original called itertools.product without importing
            # itertools, which raised NameError at runtime.
            return all(
                valid([board[r + dr][c + dc]
                       for dr in range(3) for dc in range(3)])
                for r in (0, 3, 6) for c in (0, 3, 6)
            )

        return check_row() and check_col() and check_sub()
34.619048
75
0.489684
95
727
3.684211
0.421053
0.068571
0.12
0.062857
0
0
0
0
0
0
0
0.013514
0.389271
727
21
76
34.619048
0.774775
0.056396
0
0
0
0
0
0
0
0
0
0
0
1
0.357143
false
0
0
0.214286
0.785714
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
1
0
0
0
1
1
0
0
3
e18907b6c62867758856da35f47ec15db42e5c92
1,233
py
Python
src/python/zquantum/core/circuits/symbolic/translations.py
alexjuda2/z-quantum-core
c258100dbd091f0b22495b77b36399426ae9abac
[ "Apache-2.0" ]
24
2020-04-15T17:36:59.000Z
2022-01-25T05:02:14.000Z
src/python/zquantum/core/circuits/symbolic/translations.py
alexjuda2/z-quantum-core
c258100dbd091f0b22495b77b36399426ae9abac
[ "Apache-2.0" ]
177
2020-04-23T15:19:59.000Z
2022-03-30T18:06:17.000Z
src/python/zquantum/core/circuits/symbolic/translations.py
alexjuda2/z-quantum-core
c258100dbd091f0b22495b77b36399426ae9abac
[ "Apache-2.0" ]
19
2020-06-24T10:56:02.000Z
2021-09-30T13:02:21.000Z
"""Utilities related to translation of symbolic expressions.""" from functools import singledispatch from numbers import Number from typing import Iterable, Tuple, Union from .expressions import Expression, ExpressionDialect, FunctionCall, Symbol @singledispatch def translate_expression( expression: Union[Expression, Tuple[Expression, ...]], dialect: ExpressionDialect ): pass @translate_expression.register def translate_number(number: Number, dialect: ExpressionDialect): return dialect.number_factory(number) @translate_expression.register def translate_symbol(symbol: Symbol, dialect: ExpressionDialect): return dialect.symbol_factory(symbol) @translate_expression.register def translate_function_call(function_call: FunctionCall, dialect: ExpressionDialect): if function_call.name not in dialect.known_functions: raise ValueError(f"Function {function_call.name} is unknown in this dialect.") return dialect.known_functions[function_call.name]( *translate_tuple(function_call.args, dialect) ) def translate_tuple(expression_tuple: Iterable[Expression], dialect: ExpressionDialect): return tuple(translate_expression(element, dialect) for element in expression_tuple)
32.447368
88
0.801298
136
1,233
7.102941
0.330882
0.074534
0.083851
0.093168
0.121118
0
0
0
0
0
0
0
0.122466
1,233
37
89
33.324324
0.892791
0.046229
0
0.125
0
0
0.048718
0
0
0
0
0
0
1
0.208333
false
0.041667
0.166667
0.125
0.541667
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
1
0
0
0
1
1
0
0
3
e19378cb98560578fdc69fe5ee4a62131dacdd8e
751
py
Python
simple_hash_list.py
ftsmchl/hash_pointers_example
9219cc1e4a10286003f0ca2248f931045dc3a72b
[ "MIT" ]
null
null
null
simple_hash_list.py
ftsmchl/hash_pointers_example
9219cc1e4a10286003f0ca2248f931045dc3a72b
[ "MIT" ]
null
null
null
simple_hash_list.py
ftsmchl/hash_pointers_example
9219cc1e4a10286003f0ca2248f931045dc3a72b
[ "MIT" ]
null
null
null
#!/usr/bin/python
import hashlib


class ListNode:
    """A list whose elements are linked by hash pointers: every appended
    node records the SHA-256 digest of the previous node's string form
    (empty string for the first node)."""

    def __init__(self, name):
        self.list = []   # Node objects, oldest first
        self.name = name

    def __str__(self):
        # Fix: materialize the map — under Python 3, str(map(...)) would
        # print the map object itself instead of the elements.
        return str(list(map(str, self.list)))

    def __repr__(self):
        return str(self)

    def __len__(self):
        return len(self.list)

    def _hash(self, s):
        """Return the hex SHA-256 digest of string *s*."""
        # Fix: hashlib.sha256 requires bytes under Python 3; the original
        # passed str and crashed with TypeError.
        return hashlib.sha256(s.encode("utf-8")).hexdigest()

    def append(self, msg):
        """Append *msg*, chaining the new node to the current tail via the
        tail's hash ("" when the list is empty)."""
        if len(self) == 0:
            prev_hash = ""
        else:
            prev_hash = self._hash(str(self.list[-1]))
        self.list.append(Node(msg, prev_hash))


class Node:
    """A (message, previous-node-hash) pair."""

    def __init__(self, msg, prev):
        self.msg = str(msg)
        self.prev = prev

    def __str__(self):
        return "(%s, %s)" % (self.msg, self.prev)
19.25641
49
0.51265
96
751
3.739583
0.322917
0.111421
0.061281
0.089136
0
0
0
0
0
0
0
0.010204
0.347537
751
38
50
19.763158
0.722449
0.021305
0
0.076923
0
0
0.010899
0
0
0
0
0
0
1
0.307692
false
0
0.038462
0.192308
0.615385
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
1
0
0
0
1
1
0
0
3
e1a595bfee0149e3eeae133eef740b48588517fd
939
py
Python
Examples/EQ/compare_chebyshev.py
apohl79/AudioTK
05ac241b0bc6a8f841d93257b4d81e5961b1f627
[ "BSD-3-Clause" ]
10
2018-05-17T15:29:05.000Z
2021-12-19T22:26:08.000Z
Examples/EQ/compare_chebyshev.py
apohl79/AudioTK
05ac241b0bc6a8f841d93257b4d81e5961b1f627
[ "BSD-3-Clause" ]
null
null
null
Examples/EQ/compare_chebyshev.py
apohl79/AudioTK
05ac241b0bc6a8f841d93257b4d81e5961b1f627
[ "BSD-3-Clause" ]
2
2020-04-21T13:43:57.000Z
2020-04-28T19:10:14.000Z
#!/usr/bin/env python
"""Compare SciPy's Chebyshev type II band-stop design against hard-coded
reference coefficients by plotting both frequency responses."""
from scipy import signal
import numpy as np
import matplotlib.pyplot as plt

# 5th-order Chebyshev II band-stop, 3 dB stop-band attenuation parameter,
# 200 Hz..1000 Hz band; frequencies are normalized to the Nyquist
# frequency (24000 Hz, i.e. a 48 kHz sample rate).
b, a = signal.cheby2(5, 3, (200./24000, 1000./24000), btype="bandstop")
# Fix: the original used the Python 2 print statement ("print b, a"),
# which is a SyntaxError under Python 3.
print(b, a)

# Reference numerator coefficients (highest-order first).
myb = (0.970480608569324, -9.674889407346342, 43.43258097823774,
       -115.6217271672294, 202.12989516929048, -242.47268036304317,
       202.12989516929048, -115.62172716722938, 43.43258097823773,
       -9.674889407346342, 0.9704806085693239)
# Reference denominator coefficients, written lowest-order first and
# reversed into the order signal.freqz expects.
mya = (-0.9418326116090838, 9.445529769182228, -42.657169682139696,
       114.23899007770811, -200.91212825428292, 242.46055315510847,
       -203.33552044319293, 117.01137758064448, -44.21926252097025,
       9.909462929551104, -1)[::-1]

origbutter = signal.freqz(b, a)
mybutter = signal.freqz(myb, mya)

fig = plt.figure()
plt.title('Digital filter frequency response')
ax1 = fig.add_subplot(111)
# freqz returns angular frequencies in [0, pi); rescale to Hz.
plt.loglog(origbutter[0] / np.pi * 24000, np.abs(origbutter[1]), 'b')
plt.loglog(mybutter[0] / np.pi * 24000, np.abs(mybutter[1]), 'g')
plt.show()
40.826087
225
0.745474
128
939
5.460938
0.601563
0.008584
0.014306
0.028612
0.042918
0.042918
0
0
0
0
0
0.457547
0.096912
939
22
226
42.681818
0.366745
0.021299
0
0
0
0
0.046841
0
0
0
0
0
0
0
null
null
0
0.2
null
null
0.066667
0
0
0
null
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
1
0
0
0
0
0
0
0
null
0
0
0
0
1
0
0
0
0
0
0
0
0
3
e1b7ce140799842f0e9876b00a4e50608d6cc803
410
py
Python
alphatwirl/nanoaod/Component.py
benkrikler/alphatwirl
cda7d12fec21291ea33af23234fc08be19430934
[ "BSD-3-Clause" ]
null
null
null
alphatwirl/nanoaod/Component.py
benkrikler/alphatwirl
cda7d12fec21291ea33af23234fc08be19430934
[ "BSD-3-Clause" ]
7
2018-02-26T10:32:26.000Z
2018-03-19T12:27:12.000Z
alphatwirl/nanoaod/Component.py
benkrikler/alphatwirl
cda7d12fec21291ea33af23234fc08be19430934
[ "BSD-3-Clause" ]
null
null
null
##__________________________________________________________________||
import collections

##__________________________________________________________________||
# NamedTuple describing one row of the component dataframe.
Component = collections.namedtuple(
    'Component',
    ['name', 'eventtype', 'dataset', 'era',
     'nevents', 'nfiles', 'cross_section', 'files'],
)

##__________________________________________________________________||
34.166667
70
0.856098
23
410
6.608696
0.826087
0.276316
0
0
0
0
0
0
0
0
0
0
0.090244
410
11
71
37.272727
0.407507
0.621951
0
0
0
0
0.472973
0
0
0
0
0
0
1
0
false
0
0.2
0
0.2
0
1
0
0
null
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
3
e1d0d74e64f4061117fcd3bf365823e248f9e74a
1,828
py
Python
blueapps/middleware/xss/decorators.py
jin-cc/bastion-test
9feecbe927e5446213ab25b4da4a5eca23cf6bae
[ "Apache-2.0" ]
42
2021-06-16T12:06:03.000Z
2022-03-29T13:18:00.000Z
blueapps/middleware/xss/decorators.py
jin-cc/bastion-test
9feecbe927e5446213ab25b4da4a5eca23cf6bae
[ "Apache-2.0" ]
2
2021-02-08T20:50:31.000Z
2021-06-10T23:01:54.000Z
blueapps/middleware/xss/decorators.py
wangzishuo111/bk_prometheus
c6aa16d8a547a3d00fbca317f6846ad35b1297ea
[ "MIT" ]
16
2021-07-13T01:17:57.000Z
2022-03-01T12:39:32.000Z
# -*- coding: utf-8 -*- from django.utils.decorators import available_attrs try: from functools import wraps except ImportError: from django.utils.functional import wraps # Python 2.4 fallback. # =============================================================================== # 转义装饰器 # =============================================================================== def escape_exempt(view_func): """ 转义豁免,被此装饰器修饰的action可以不进行中间件escape """ def wrapped_view(*args, **kwargs): return view_func(*args, **kwargs) wrapped_view.escape_exempt = True return wraps(view_func, assigned=available_attrs(view_func))(wrapped_view) def escape_script(view_func): """ 被此装饰器修饰的action会对GET与POST参数进行javascript escape """ def wrapped_view(*args, **kwargs): return view_func(*args, **kwargs) wrapped_view.escape_script = True return wraps(view_func, assigned=available_attrs(view_func))(wrapped_view) def escape_url(view_func): """ 被此装饰器修饰的action会对GET与POST参数进行url escape """ def wrapped_view(*args, **kwargs): return view_func(*args, **kwargs) wrapped_view.escape_url = True return wraps(view_func, assigned=available_attrs(view_func))(wrapped_view) def escape_exempt_param(*param_list, **param_list_dict): """ 此装饰器用来豁免某个view函数的某个参数 @param param_list: 参数列表 @return: """ def _escape_exempt_param(view_func): def wrapped_view(*args, **kwargs): return view_func(*args, **kwargs) if param_list_dict.get('param_list'): wrapped_view.escape_exempt_param = param_list_dict['param_list'] else: wrapped_view.escape_exempt_param = list(param_list) return wraps(view_func, assigned=available_attrs(view_func))(wrapped_view) return _escape_exempt_param
30.983051
82
0.63895
199
1,828
5.552764
0.231156
0.115837
0.076923
0.065158
0.560181
0.483258
0.483258
0.483258
0.483258
0.483258
0
0.001997
0.178337
1,828
58
83
31.517241
0.733688
0.209519
0
0.4
0
0
0.014577
0
0
0
0
0
0
1
0.3
false
0
0.133333
0.133333
0.733333
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
1
0
0
0
1
1
0
0
3
bed49e65c6596e2759173f0239db2f5b671a5efc
202
py
Python
herd/herd.py
AmirRanos/TripleSeven-Cogs
2b7a37b59fb8e82c85b9d8d44aadc8efcf2bfe8f
[ "MIT" ]
null
null
null
herd/herd.py
AmirRanos/TripleSeven-Cogs
2b7a37b59fb8e82c85b9d8d44aadc8efcf2bfe8f
[ "MIT" ]
null
null
null
herd/herd.py
AmirRanos/TripleSeven-Cogs
2b7a37b59fb8e82c85b9d8d44aadc8efcf2bfe8f
[ "MIT" ]
null
null
null
from redbot.core import commands


class Herd(commands.Cog):
    """Move people to a different channel."""

    @commands.command()
    async def herd(self, ctx):
        # NOTE(review): currently only replies with a greeting; the cog
        # docstring promises moving members between channels — this looks
        # like a placeholder implementation.
        await ctx.send("Hello world.")
20.2
45
0.658416
27
202
4.925926
0.851852
0
0
0
0
0
0
0
0
0
0
0
0.212871
202
9
46
22.444444
0.836478
0.173267
0
0
0
0
0.074534
0
0
0
0
0
0
1
0
true
0
0.2
0
0.4
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
0
0
0
0
0
3
beeb389094030d0a585c42bc0a8ebeb619d5473d
61
py
Python
aioclustermanager/tests/conftest.py
sunbit/aioclustermanager
f5a2f4ba7936a75c7748cff9f77c3bfff1a3a61d
[ "BSD-3-Clause" ]
1
2020-03-24T16:15:56.000Z
2020-03-24T16:15:56.000Z
aioclustermanager/tests/conftest.py
bloodbare/aioclustermanager
9abe7e9db7140854709c8044128e0153debe6971
[ "BSD-3-Clause" ]
8
2018-03-12T20:40:23.000Z
2018-06-05T18:35:16.000Z
aioclustermanager/tests/conftest.py
onna/aioclustermanager
9abe7e9db7140854709c8044128e0153debe6971
[ "BSD-3-Clause" ]
2
2020-05-21T17:32:23.000Z
2021-05-11T12:17:56.000Z
# Load the package's shared fixtures as a pytest plugin so every test
# module under aioclustermanager/tests can use them.
pytest_plugins = [
    'aioclustermanager.tests.fixtures'
]
12.2
38
0.721311
5
61
8.6
1
0
0
0
0
0
0
0
0
0
0
0
0.163934
61
4
39
15.25
0.843137
0
0
0
0
0
0.533333
0.533333
0
0
0
0
0
1
0
false
0
0
0
0
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
1
null
0
0
0
0
0
0
0
0
0
0
0
0
0
3