Each record below is one row of the dump and follows this schema (113 columns):

| Field | Type |
|---|---|
| `hexsha` | string |
| `size` | int64 |
| `ext` | string |
| `lang` | string |
| `max_{stars,issues,forks}_repo_path` | string |
| `max_{stars,issues,forks}_repo_name` | string |
| `max_{stars,issues,forks}_repo_head_hexsha` | string |
| `max_{stars,issues,forks}_repo_licenses` | list |
| `max_{stars,issues,forks}_count` | int64 |
| `max_stars_repo_stars_event_{min,max}_datetime` | string |
| `max_issues_repo_issues_event_{min,max}_datetime` | string |
| `max_forks_repo_forks_event_{min,max}_datetime` | string |
| `content` | string |
| `avg_line_length` | float64 |
| `max_line_length` | int64 |
| `alphanum_fraction` | float64 |
| `qsc_*_quality_signal` (41 columns, listed below) | float64, except `qsc_code_num_words_quality_signal` (int64) and `qsc_codepython_cate_var_zero_quality_signal` (bool) |
| `qsc_*` (the same 41 names without the `_quality_signal` suffix) | int64, except `qsc_code_frac_words_unique` and `qsc_code_frac_lines_string_concat` (null) |
| `effective` | string |
| `hits` | int64 |

The 41 quality-signal names, in column order: `qsc_code_num_words`, `qsc_code_num_chars`, `qsc_code_mean_word_length`, `qsc_code_frac_words_unique`, `qsc_code_frac_chars_top_2grams`, `qsc_code_frac_chars_top_3grams`, `qsc_code_frac_chars_top_4grams`, `qsc_code_frac_chars_dupe_5grams` through `qsc_code_frac_chars_dupe_10grams`, `qsc_code_frac_chars_replacement_symbols`, `qsc_code_frac_chars_digital`, `qsc_code_frac_chars_whitespace`, `qsc_code_size_file_byte`, `qsc_code_num_lines`, `qsc_code_num_chars_line_max`, `qsc_code_num_chars_line_mean`, `qsc_code_frac_chars_alphabet`, `qsc_code_frac_chars_comments`, `qsc_code_cate_xml_start`, `qsc_code_frac_lines_dupe_lines`, `qsc_code_cate_autogen`, `qsc_code_frac_lines_long_string`, `qsc_code_frac_chars_string_length`, `qsc_code_frac_chars_long_word_length`, `qsc_code_frac_lines_string_concat`, `qsc_code_cate_encoded_data`, `qsc_code_frac_chars_hex_words`, `qsc_code_frac_lines_prompt_comments`, `qsc_code_frac_lines_assert`, `qsc_codepython_cate_ast`, `qsc_codepython_frac_lines_func_ratio`, `qsc_codepython_cate_var_zero`, `qsc_codepython_frac_lines_pass`, `qsc_codepython_frac_lines_import`, `qsc_codepython_frac_lines_simplefunc`, `qsc_codepython_score_lines_no_logic`, `qsc_codepython_frac_lines_print`; the `_quality_signal` columns append that suffix to each name.

In the per-record summaries below, signal names drop the `qsc_code_`/`qsc_codepython_` prefix and the `_quality_signal` suffix, and signals not listed are 0. In every complete record the 41 unsuffixed `qsc_*` counterparts are all 0 (null for the two null-typed columns), `effective` is 1, and `hits` is 0.
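To make the schema concrete, here is a minimal consumption sketch. The file name `records.jsonl` and the JSON Lines layout are assumptions (the dump does not say how rows are serialized); it recomputes two of the stored per-file statistics from `content` and prints them next to the stored values.

```python
import json

# Assumption: one JSON object per line with the field names from the schema above.
with open("records.jsonl", encoding="utf-8") as f:
    for line in f:
        row = json.loads(line)
        lines = row["content"].splitlines()
        # Recompute avg_line_length and alphanum_fraction from the raw source
        avg_len = sum(len(l) for l in lines) / len(lines)
        alnum = sum(c.isalnum() for c in row["content"]) / len(row["content"])
        print(row["hexsha"][:12], row["lang"], row["size"],
              round(avg_len, 6), row["avg_line_length"],
              round(alnum, 6), row["alphanum_fraction"])
```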
---

**Record 1: `bookmarks/bookmarks/models.py` · repo `tom-henderson/bookmarks` · Python (`py`) · 1,416 bytes**

hexsha `08c3c73fb071c563aa6c6cb9106af9a4e78d2bdf`; head `5515bedf1008da3e97caf0ed5867bcf983b375b1`; licenses `["MIT"]`; path, repo, and head are identical across the stars/issues/forks variants. Stars: 6 (2017-01-09T22:59:31.000Z to 2022-01-06T01:40:57.000Z); issues: 30 (2016-09-13T07:30:26.000Z to 2022-02-07T22:49:03.000Z); forks: null.

```python
from __future__ import unicode_literals

from django.db import models
from django.utils import timezone
from django.dispatch import receiver
from django.conf import settings
from taggit.managers import TaggableManager

import requests

class Bookmark(models.Model):
    title = models.CharField(max_length=200, blank=True, null=True)
    description = models.TextField(blank=True, null=True)
    date_added = models.DateTimeField(default=timezone.now, blank=True)
    tags = TaggableManager(blank=True)
    private = models.BooleanField(default=False)
    url = models.URLField(max_length=500)

    def __unicode__(self):
        return "{}: {} [{}]".format(
            self.pk,
            self.title[:40],
            self.date_added
        )

# Despite the "pre_save" in its name, this handler is connected to post_save.
@receiver(models.signals.post_save, sender=Bookmark)
def bookmark_pre_save_handler(sender, instance, created, *args, **kwargs):
    # Only run for new items, not updates
    if created:
        if not hasattr(settings, 'SLACK_WEBHOOK_URL'):
            return
        payload = {
            'channel': "#bookmarks-dev",
            'username': "Bookmarks",
            'text': "<{}|{}>\n{}".format(
                instance.url,
                instance.title,
                instance.description,
            ),
            'icon_emoji': ":blue_book:",
            'unfurl_links': True
        }
        requests.post(settings.SLACK_WEBHOOK_URL, json=payload)
```
Quality signals: avg_line_length 28.897959, max_line_length 74, alphanum_fraction 0.631356, num_words 154, num_chars 1416, mean_word_length 5.649351, frac_words_unique 0.545455, frac_chars_top_2grams 0.045977, frac_chars_top_3grams 0.029885, frac_chars_top_4grams 0.03908, frac_chars_digital 0.007612, frac_chars_whitespace 0.257768, size_file_byte 1416, num_lines 48, num_chars_line_max 75, num_chars_line_mean 29.5, frac_chars_alphabet 0.820171, frac_chars_comments 0.024718, frac_chars_string_length 0.082669, cate_ast 1, frac_lines_func_ratio 0.054054, cate_var_zero false, frac_lines_import 0.189189, frac_lines_simplefunc 0.027027, score_lines_no_logic 0.486486.
---

**Record 2: `vernam cipher.py` · repo `BenMiller3/Vernam-Cipher` · Python (`py`) · 797 bytes**

hexsha `08c3ea3ed3c0d6241f479fa852ed05c431f46706`; head `19f7a447bc8080c8e275b96a85d359f4e187a4d3`; licenses `["MIT"]`; identical across variants. Stars/issues/forks counts and event datetimes: all null.

```python
"""
Vernam Cipher
Benjamin D. Miller
Takes a key, and a message
Encripts the message using the key
"""
def vernam(key,message):
message = str(message)
m = message.upper().replace(" ","") # Convert to upper case, remove whitespace
encrypt = ""
try:
key = int(key) # if the key value is not a number, then run with key = 0
except ValueError:
key = 0
for i in range(len(m)):
letter = ord(m[i])-65 # Letters now range 0-25
letter = (letter + key)%25 # Alphanumeric + key mod 25 = 0-25
letter +=65
encrypt = encrypt + chr(letter) # Concatenate message
return encrypt
""" * TEST CASES * """
vernam(9,"hello world")
vernam(14,"TEST_CASE 34!")
vernam("test","test")
Quality signals: avg_line_length 27.482759, max_line_length 91, alphanum_fraction 0.567127, num_words 106, num_chars 797, mean_word_length 4.254717, frac_words_unique 0.566038, frac_chars_top_2grams 0.026608, frac_chars_top_3grams 0.039911, frac_chars_digital 0.038321, frac_chars_whitespace 0.312422, size_file_byte 797, num_lines 28, num_chars_line_max 92, num_chars_line_mean 28.464286, frac_chars_alphabet 0.784672, frac_chars_comments 0.336261, frac_chars_string_length 0.070664, cate_ast 1, frac_lines_func_ratio 0.058824, cate_var_zero false, score_lines_no_logic 0.117647.
---

**Record 3: `utils.py` · repo `c0derabbit/talk` · Python (`py`) · 421 bytes**

hexsha `08c47e02acc3cf4c516e8edc1336ab1be1430cd8`; head `26673fde934ef51e76002ea6ddc65bdb42720865`; licenses `["MIT"]`; identical across variants. Stars: null; issues: 1 (2017-05-25T20:37:54.000Z to 2017-05-26T07:33:00.000Z); forks: null.

```python
from datetime import datetime as d

def stringify_date(date):
    try:
        return '{0}-{1}-{2}-{3}-{4}'.format(date.year, date.month, date.day, date.hour, date.minute)
    except ValueError:
        raise ValueError('Invalid date format', date)

def parse_date(date):
    try:
        return d.strptime(date, '%Y-%m-%d-%H-%M')
    except ValueError:
        raise ValueError('Could not convert string to date', date)
```
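A quick round-trip of the two helpers above (a hedged usage sketch; the timestamp value is arbitrary):

```python
from datetime import datetime

ts = datetime(2017, 5, 25, 20, 37)
s = stringify_date(ts)   # '2017-5-25-20-37' (fields are not zero-padded)
back = parse_date(s)     # strptime accepts the unpadded fields
assert back == ts
```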
Quality signals: avg_line_length 30.071429, max_line_length 100, alphanum_fraction 0.64133, num_words 61, num_chars 421, mean_word_length 4.393443, frac_words_unique 0.57377, frac_chars_top_2grams 0.089552, frac_chars_top_3grams 0.08209, frac_chars_top_4grams 0.126866, frac_chars_digital 0.015015, frac_chars_whitespace 0.209026, size_file_byte 421, num_lines 13, num_chars_line_max 101, num_chars_line_mean 32.384615, frac_chars_alphabet 0.78979, frac_lines_dupe_lines 0.363636, frac_chars_string_length 0.199525, cate_ast 1, frac_lines_func_ratio 0.181818, cate_var_zero false, frac_lines_import 0.090909, score_lines_no_logic 0.454545.
---

**Record 4: `fineract/objects/org.py` · repo `mobidevke/py-fineract` · Python (`py`) · 3,694 bytes**

hexsha `08c693a49ad9f776684155a7c2f26843f0a00070`; head `712b0c20686accd7d7e0a2356ccaf59c5fe4f7dd`; licenses `["Apache-2.0"]`; identical across variants. Stars: 7 (2019-03-11T16:17:33.000Z to 2020-10-22T21:57:51.000Z); issues: 3 (2019-11-05T20:22:16.000Z to 2019-12-11T17:09:04.000Z); forks: 2 (2020-11-19T16:00:36.000Z to 2021-11-19T09:36:13.000Z).

```python
from fineract.objects.currency import Currency
from fineract.objects.fineract_object import FineractObject
from fineract.objects.types import ChargeTimeType, ChargeAppliesTo, ChargeCalculationType, ChargePaymentMode

class Office(FineractObject):
    """
    This class represents an Office
    """
    def _init_attributes(self):
        self.id = None
        self.name = None
        self.name_decorated = None
        self.external_id = None
        self.opening_date = None
        self.hierarchy = None

    def _use_attributes(self, attributes):
        self.id = attributes.get('id', None)
        self.name = attributes.get('name', None)
        self.name_decorated = attributes.get('nameDecorated', None)
        self.external_id = attributes.get('externalId', None)
        self.opening_date = self._make_date_object(attributes.get('openingDate', None))
        self.hierarchy = attributes.get('hierarchy', None)

class Staff(FineractObject):
    """
    This class represents a Staff
    """
    def _init_attributes(self):
        self.id = None
        self.firstname = None
        self.lastname = None
        self.display_name = None
        self.office_id = None
        self.office_name = None
        self.is_loan_officer = None
        self.external_id = None
        self.is_active = None
        self.join_date = None

    def _use_attributes(self, attributes):
        self.id = attributes.get('id', None)
        self.firstname = attributes.get('firstname', None)
        self.lastname = attributes.get('lastname', None)
        self.display_name = attributes.get('displayName', None)
        self.office_id = attributes.get('officeId', None)
        self.office_name = attributes.get('officeName', None)
        self.is_loan_officer = attributes.get('isLoanOfficer', None)
        self.external_id = attributes.get('externalId', None)
        # fixed: the original assigned attributes.get('externalId') to is_active
        # and never set external_id; 'isActive' is the assumed API key here
        self.is_active = attributes.get('isActive', None)
        self.join_date = self._make_date_object(attributes.get('joiningDate', None))

class Fund(FineractObject):
    """
    This class represents a Fund
    """
    def _init_attributes(self):
        self.id = None
        self.name = None

    def _use_attributes(self, attributes):
        self.id = attributes.get('id', None)
        self.name = attributes.get('name', None)

class Charge(FineractObject):
    """
    This class represents a Charge
    """
    def _init_attributes(self):
        self.id = None
        self.name = None
        self.active = None
        self.penalty = None
        self.currency = None
        self.amount = None
        self.charge_time_type = None
        self.charge_applies_to = None
        self.charge_calculation_type = None
        self.charge_payment_mode = None

    def _use_attributes(self, attributes):
        self.id = attributes.get('id', None)
        self.name = attributes.get('name', None)
        self.active = attributes.get('active', None)
        self.penalty = attributes.get('penalty', None)
        self.currency = self._make_fineract_object(Currency, attributes.get('currency', None))
        self.amount = attributes.get('amount', None)
        self.charge_time_type = self._make_fineract_object(ChargeTimeType, attributes.get('chargeTimeType', None))
        self.charge_applies_to = self._make_fineract_object(ChargeAppliesTo, attributes.get('chargeAppliesTo', None))
        self.charge_calculation_type = self._make_fineract_object(ChargeCalculationType,
                                                                  attributes.get('chargeCalculationType', None))
        self.charge_payment_mode = self._make_fineract_object(ChargePaymentMode,
                                                              attributes.get('chargePaymentMode', None))
```
Quality signals: avg_line_length 36.574257, max_line_length 117, alphanum_fraction 0.647266, num_words 406, num_chars 3694, mean_word_length 5.704434, frac_words_unique 0.152709, frac_chars_top_2grams 0.162349, frac_chars_top_3grams 0.047496, frac_chars_top_4grams 0.036269, frac_chars_dupe_{5..10}grams 0.503022/0.294041/0.247409/0.217185/0.202073/0.202073, frac_chars_whitespace 0.252301, size_file_byte 3694, num_lines 100, num_chars_line_max 118, num_chars_line_mean 36.94, frac_chars_alphabet 0.838523, frac_chars_comments 0.032485, frac_lines_dupe_lines 0.333333, frac_chars_string_length 0.067464, frac_chars_long_word_length 0.005978, cate_ast 1, frac_lines_func_ratio 0.111111, cate_var_zero false, frac_lines_import 0.041667, score_lines_no_logic 0.208333.
---

**Record 5: `Heap/PathWithMinEffort.py` · repo `karan2808/Python-Data-Structures-and-Algorithms` · Python (`py`) · 1,736 bytes**

hexsha `08c6e61cafacb0416494f10178b2d50c3d4b7ef8`; head `a4b39ddf7297541d90dc4efcaab883f928281abd`; licenses `["MIT"]`; identical across variants. Stars: 2 (2021-01-31T03:42:01.000Z to 2021-01-31T03:43:08.000Z); issues: null; forks: 1 (2021-01-31T03:42:02.000Z, min and max).

```python
from heapq import heapify, heappop, heappush

class Solution:
    def minimumEffortPath(self, heights):
        # get the max rows and cols
        m, n = len(heights), len(heights[0])
        # make a heap to store the current min cost, x, and y
        heap = [(0, 0, 0)]
        # keep track of current cost
        currCost = 0
        # keep track of the nodes you have visited
        visited = set()
        # make a directions array
        directions = [[-1, 0], [1, 0], [0, 1], [0, -1]]
        while heap:
            # get the min cost val, x and y coordinate
            k, x, y = heappop(heap)
            # update the cost
            currCost = max(currCost, k)
            # if we reach the bottom right corner, return the cost
            if (x, y) == (m - 1, n - 1):
                return currCost
            # add current node to the visited set
            visited.add((x, y))
            # for each direction, find the new cost
            for dir_ in directions:
                xn = x + dir_[0]
                yn = y + dir_[1]
                # check boundary conditions and if the cell has been visited
                if 0 <= xn <= m - 1 and 0 <= yn <= n - 1 and (xn, yn) not in visited:
                    # get new cost
                    newc = abs(heights[x][y] - heights[xn][yn])
                    # push the new x, y location and the new cost to min heap
                    heappush(heap, (newc, xn, yn))
        # if no path, return -1
        return -1

def main():
    heights = [[1, 2, 2], [3, 8, 2], [5, 3, 5]]
    mySol = Solution()
    print("The min cost path for the grid heights = [[1,2,2],[3,8,2],[5,3,5]] is " + str(mySol.minimumEffortPath(heights)))

if __name__ == "__main__":
    main()
```
Quality signals: avg_line_length 36.166667, max_line_length 123, alphanum_fraction 0.506336, num_words 246, num_chars 1736, mean_word_length 3.528455, frac_words_unique 0.369919, frac_chars_top_2grams 0.011521, frac_chars_top_3grams 0.011521, frac_chars_top_4grams 0.02765, frac_chars_dupe_{5..10}grams 0.036866 each, frac_chars_digital 0.03814, frac_chars_whitespace 0.38076, size_file_byte 1736, num_lines 48, num_chars_line_max 124, num_chars_line_mean 36.166667, frac_chars_alphabet 0.769302, frac_chars_comments 0.289747, frac_lines_long_string 0.037037, frac_chars_string_length 0.063987, frac_chars_long_word_length 0.020509, cate_ast 1, frac_lines_func_ratio 0.074074, cate_var_zero false, frac_lines_import 0.037037, score_lines_no_logic 0.222222, frac_lines_print 0.037037.
---

**Record 6: `vagrant/catalog/StuffMart.wsgi` · repo `cpwhidden/StuffMart` · Python (`wsgi`) · 1,427 bytes**

hexsha `08c9e9c176a984ea5d15821ab3616cd2313fc432`; head `a192b8cad8942d0bfddb3af861f1e48c460e28cf`; licenses `["MIT"]`; identical across variants. Stars/issues/forks counts and event datetimes: all null.

```python
activate_this = '/var/www/html/venv/bin/activate_this.py'
execfile(activate_this, dict(__file__=activate_this))  # execfile is Python 2; on Python 3 use exec(open(activate_this).read())

import sys, os, logging
from flask_apscheduler import APScheduler

sys.path.insert(0, 'var/www/html/StuffMart/vagrant/catalog')  # note: relative path; likely intended /var/www/...
logging.basicConfig(stream=sys.stderr)

from server import flask as application
application.secret_key = 'qPHE[Cht}*kSCVango3i'
application.config['APP_DIR'] = os.path.abspath(os.path.dirname(__file__))
application.config['WHOOSH_BASE'] = 'server/whoosh'
application.config['PRODUCT_IMAGES_FOLDER'] = 'vagrant/catalog/server/static/product_images/'
application.config['JOBS'] = [
    {
        'id': 'buildNewlyAddedRSSFeed',
        'func': 'server.views:buildNewlyAddedRSSFeed',
        'trigger': 'interval',
        'seconds': (60*60)
    },
    {
        'id': 'buildNewlyAddedAtomFeed',
        'func': 'server.views:buildNewlyAddedAtomFeed',
        'trigger': 'interval',
        'seconds': (60*60)
    },
    {
        'id': 'buildNewlyAddedRSSFeedAtStartup',
        'func': 'server.views:buildNewlyAddedRSSFeed'
    },
    {
        'id': 'buildNewlyAddedAtomFeedAtStartup',
        'func': 'server.views:buildNewlyAddedAtomFeed'
    }
]
application.config['SCHEDULER_VIEWS_ENABLED'] = True
application.debug = True

scheduler = APScheduler()
scheduler.init_app(application)
scheduler.start()
```
Quality signals: avg_line_length 34.804878, max_line_length 93, alphanum_fraction 0.658725, num_words 135, num_chars 1427, mean_word_length 6.8, frac_words_unique 0.474074, frac_chars_top_2grams 0.092593, frac_chars_top_3grams 0.065359, frac_chars_top_4grams 0.08061, frac_chars_dupe_5grams 0.061002, frac_chars_dupe_6grams 0.061002, frac_chars_digital 0.008834, frac_chars_whitespace 0.206727, size_file_byte 1427, num_lines 41, num_chars_line_max 94, num_chars_line_mean 34.804878, frac_chars_alphabet 0.80212, frac_lines_dupe_lines 0.105263, frac_chars_string_length 0.377451, frac_chars_long_word_length 0.291317, cate_ast 1, cate_var_zero false, frac_lines_import 0.078947, score_lines_no_logic 0.078947.
---

**Record 7: `application/routes.py` · repo `N-A-Podgornov/CFT-MLC` · Python (`py`) · 1,759 bytes**

hexsha `08ceeff12c2a6ee62212a18498cd6880997296e3`; head `ded9267c5b8053a15bdcc67be9f83097749cfb13`; licenses `["Apache-2.0"]`; identical across variants. Stars/issues/forks counts and event datetimes: all null.

```python
import os
import shutil

from flask import render_template, redirect, url_for, request
from werkzeug.utils import secure_filename

from config import Config
from application import app
from application.model import Model

@app.route('/')
def index():
    return redirect(url_for('submit'))

def allowed_file(filename):
    return '.' in filename and \
           filename.rsplit('.', 1)[1] in Config.ALLOWED_EXTENSIONS

def file_system_preparation():
    try:
        shutil.rmtree(path=Config.UPLOAD_FOLDER)
        shutil.rmtree(path=Config.PATH_TO_SPECTROGRAM_FOLDER + Config.SPECTROGRAM_FOLDER)
    except OSError:
        print("error :: failed to clean file system")
    try:
        os.mkdir(path=Config.UPLOAD_FOLDER)
        os.mkdir(path=Config.PATH_TO_SPECTROGRAM_FOLDER + Config.SPECTROGRAM_FOLDER)
    except OSError:
        print("error :: failed to prepare file system")

@app.route('/submit', methods=['GET', 'POST'])
def submit():
    file_system_preparation()
    if request.method == 'POST':
        file = request.files['file']
        if file and allowed_file(file.filename):
            filename = secure_filename(file.filename)
            file.save(os.path.join(app.config['UPLOAD_FOLDER'], filename))
            return redirect(url_for('response', filename=filename))
    return render_template('submit.html')

@app.route('/<filename>', methods=['GET'])
def response(filename):
    in_fn, fn_ex = os.path.splitext(filename)
    out_fn_w = os.path.join(Config.PATH_TO_SPECTROGRAM_FOLDER + Config.SPECTROGRAM_FOLDER, in_fn + ".png")
    out_fn_r = os.path.join(Config.SPECTROGRAM_FOLDER, in_fn + ".png")
    Model(filename).get_spectrogram().savefig(out_fn_w)
    return render_template('response.html', spectrogram=out_fn_r)
```
Quality signals: avg_line_length 30.859649, max_line_length 106, alphanum_fraction 0.704377, num_words 230, num_chars 1759, mean_word_length 5.195652, frac_words_unique 0.286957, frac_chars_top_2grams 0.099582, frac_chars_top_3grams 0.076987, frac_chars_top_4grams 0.057741, frac_chars_dupe_{5..10}grams 0.220084/0.220084/0.189121/0.189121/0.145607/0.145607, frac_chars_digital 0.001379, frac_chars_whitespace 0.175668, size_file_byte 1759, num_lines 56, num_chars_line_max 107, num_chars_line_mean 31.410714, frac_chars_alphabet 0.822759, frac_lines_dupe_lines 0.097561, frac_chars_string_length 0.097783, cate_ast 1, frac_lines_func_ratio 0.121951, cate_var_zero false, frac_lines_import 0.170732, frac_lines_simplefunc 0.04878, score_lines_no_logic 0.414634, frac_lines_print 0.04878.
---

**Record 8: `tests/test_onnxml_imputer_converter.py` · repo `vumichien/hummingbird` · Python (`py`) · 4,095 bytes**

hexsha `08cfc63dc9bcf57b5303ab14c053f28fd612cafc`; head `8981e11ce2536167c329a5d9d20e81125a792fe4`; licenses `["MIT"]`; identical across variants. Stars: 2,772 (2020-05-04T21:03:40.000Z to 2022-03-30T11:00:03.000Z); issues: 486 (2020-05-05T00:45:44.000Z to 2022-03-15T01:02:31.000Z); forks: 232 (2019-11-02T22:06:38.000Z to 2022-03-25T07:36:17.000Z).

```python
"""
Tests onnxml Imputer converter
"""
import unittest
import warnings
import numpy as np
import torch
from sklearn.impute import SimpleImputer
from hummingbird.ml._utils import onnx_ml_tools_installed, onnx_runtime_installed, lightgbm_installed
from hummingbird.ml import convert
if onnx_runtime_installed():
import onnxruntime as ort
if onnx_ml_tools_installed():
from onnxmltools import convert_sklearn
from onnxmltools.convert.common.data_types import FloatTensorType as FloatTensorType_onnx
class TestONNXImputer(unittest.TestCase):
def _test_imputer_converter(self, model, mode="onnx"):
warnings.filterwarnings("ignore")
X = np.array([[1, 2], [np.nan, 3], [7, 6]], dtype=np.float32)
model.fit(X)
# Create ONNX-ML model
onnx_ml_model = convert_sklearn(model, initial_types=[("float_input", FloatTensorType_onnx(X.shape))])
# Get the predictions for the ONNX-ML model
session = ort.InferenceSession(onnx_ml_model.SerializeToString())
output_names = [session.get_outputs()[i].name for i in range(len(session.get_outputs()))]
inputs = {session.get_inputs()[0].name: X}
onnx_ml_pred = session.run(output_names, inputs)[0]
# Create test model by calling converter
model = convert(onnx_ml_model, mode, X)
# Get the predictions for the test model
pred = model.transform(X)
return onnx_ml_pred, pred
@unittest.skipIf(
not (onnx_ml_tools_installed() and onnx_runtime_installed()), reason="ONNXML test requires ONNX, ORT and ONNXMLTOOLS"
)
def test_onnx_imputer_const(self, rtol=1e-06, atol=1e-06):
model = SimpleImputer(strategy="constant")
onnx_ml_pred, onnx_pred = self._test_imputer_converter(model)
# Check that predicted values match
np.testing.assert_allclose(onnx_ml_pred, onnx_pred, rtol=rtol, atol=atol)
@unittest.skipIf(
not (onnx_ml_tools_installed() and onnx_runtime_installed()), reason="ONNXML test requires ONNX, ORT and ONNXMLTOOLS"
)
def test_onnx_imputer_const_nan0(self, rtol=1e-06, atol=1e-06):
model = SimpleImputer(strategy="constant", fill_value=0)
onnx_ml_pred, onnx_pred = self._test_imputer_converter(model)
# Check that predicted values match
np.testing.assert_allclose(onnx_ml_pred, onnx_pred, rtol=rtol, atol=atol)
@unittest.skipIf(
not (onnx_ml_tools_installed() and onnx_runtime_installed()), reason="ONNXML test requires ONNX, ORT and ONNXMLTOOLS"
)
def test_onnx_imputer_mean(self, rtol=1e-06, atol=1e-06):
model = SimpleImputer(strategy="mean", fill_value="nan")
onnx_ml_pred, onnx_pred = self._test_imputer_converter(model)
# Check that predicted values match
np.testing.assert_allclose(onnx_ml_pred, onnx_pred, rtol=rtol, atol=atol)
@unittest.skipIf(
not (onnx_ml_tools_installed() and onnx_runtime_installed()), reason="ONNXML test requires ONNX, ORT and ONNXMLTOOLS"
)
def test_onnx_imputer_converter_raises_rt(self):
warnings.filterwarnings("ignore")
model = SimpleImputer(strategy="mean", fill_value="nan")
X = np.array([[1, 2], [np.nan, 3], [7, 6]], dtype=np.float32)
model.fit(X)
# Create ONNX-ML model
onnx_ml_model = convert_sklearn(model, initial_types=[("float_input", FloatTensorType_onnx(X.shape))])
onnx_ml_model.graph.node[0].attribute[0].name = "".encode()
self.assertRaises(RuntimeError, convert, onnx_ml_model, "onnx", X)
@unittest.skipIf(
not (onnx_ml_tools_installed() and onnx_runtime_installed()), reason="ONNXML test requires ONNX, ORT and ONNXMLTOOLS"
)
def test_onnx_imputer_torch(self, rtol=1e-06, atol=1e-06):
model = SimpleImputer(strategy="constant")
onnx_ml_pred, onnx_pred = self._test_imputer_converter(model, mode="torch")
# Check that predicted values match
np.testing.assert_allclose(onnx_ml_pred, onnx_pred, rtol=rtol, atol=atol)
if __name__ == "__main__":
unittest.main()
Quality signals: avg_line_length 40.147059, max_line_length 125, alphanum_fraction 0.70696, num_words 547, num_chars 4095, mean_word_length 5.034735, frac_words_unique 0.20841, frac_chars_top_2grams 0.056645, frac_chars_top_3grams 0.036311, frac_chars_top_4grams 0.040668, frac_chars_dupe_{5..10}grams 0.631445/0.614742/0.614742/0.593682/0.593682/0.593682, frac_chars_digital 0.013241, frac_chars_whitespace 0.188523, size_file_byte 4095, num_lines 101, num_chars_line_max 126, num_chars_line_mean 40.544554, frac_chars_alphabet 0.815528, frac_chars_comments 0.080342, frac_lines_dupe_lines 0.439394, frac_chars_string_length 0.086133, frac_lines_assert 0.075758, cate_ast 1, frac_lines_func_ratio 0.090909, cate_var_zero false, frac_lines_import 0.151515, score_lines_no_logic 0.272727.
---

**Record 9: `rs_etl.py` · repo `jlauman/data_engineering_project_03` · Python (`py`) · 7,520 bytes**

hexsha `08d1b0407331ee4e1921fc4b74a0794639337160`; head `722c0f5226ed29c00d6b33e64da5982fe0be69e0`; licenses `["MIT"]`; identical across variants. Stars/issues/forks counts and event datetimes: all null.

```python
import configparser, os, glob, csv, json, hashlib, time
import pandas as pd
import psycopg2
from pprint import pprint

from rs_sql_queries import staging_events_insert, staging_songs_insert
from rs_sql_queries import insert_table_queries

import boto3
from botocore import UNSIGNED
from botocore.config import Config

DEND_BUCKET = 'udacity-dend'

# global lookup table
NAME_TO_GENDER = {}

def load_gender_lookup():
    """Load lookup dictionary to find gender given a name.
    """
    base_path = os.getcwd() + '/data/names'
    for root, dirs, files in os.walk(base_path):
        file_paths = glob.glob(os.path.join(root, '*.txt'))
        for file_path in file_paths:
            print('names: %s' % file_path)
            with open(file_path) as csv_file:
                csv_reader = csv.reader(csv_file, delimiter=',')
                for row in csv_reader:
                    # pprint(row)
                    NAME_TO_GENDER[row[0]] = row[1]
    # pprint(NAME_TO_GENDER)
    True  # stray no-op expression; has no effect

def get_object_paths(s3, bucket, prefix):
    """List objects in S3 bucket with given prefix.
    Uses paginator to ensure a complete list of object paths is returned.
    """
    # r1 = s3.list_objects(Bucket=DEND_BUCKET, Prefix=prefix)
    # r2 = list(map(lambda obj: obj['Key'], r1['Contents']))
    # r3 = list(filter(lambda str: str.endswith('.json'), r2))
    # s3 client does not need to be closed
    object_paths = []
    paginator = s3.get_paginator('list_objects')
    pages = paginator.paginate(Bucket=bucket, Prefix=prefix)
    for page in pages:
        # print("len(page['Contents'])=" + str(len(page['Contents'])))
        r1 = list(map(lambda obj: obj['Key'], page['Contents']))
        r2 = list(filter(lambda str: str.endswith('.json'), r1))
        object_paths.extend(r2)
    print('%s/%s total object paths = %d' % (bucket, prefix, len(object_paths)))
    time.sleep(2)
    return object_paths

def load_staging_log_data(cur, conn):
    """Load song-play event records into s_songplay_event table.
    """
    # import pdb; pdb.set_trace()
    # load log_data (events) into s_event table
    s3 = boto3.client('s3', config=Config(signature_version=UNSIGNED))
    file_paths = get_object_paths(s3, DEND_BUCKET, 'log_data')
    pprint(file_paths)
    for file_path in file_paths:
        sql = str(staging_events_insert)
        print('log_data: %s' % file_path)
        obj1 = s3.get_object(Bucket='udacity-dend', Key=file_path)
        str1 = obj1['Body'].read().decode('utf-8').strip()
        df = pd.read_json(str1, lines=True)
        df = df[df.page == 'NextSong']
        df['timestamp'] = pd.to_datetime(df['ts'], unit='ms')
        df['year'] = df['timestamp'].dt.year
        df['week'] = df['timestamp'].dt.weekofyear
        df['month'] = df['timestamp'].dt.month
        df['day'] = df['timestamp'].dt.day
        df['hour'] = df['timestamp'].dt.hour
        df['weekday'] = df['timestamp'].dt.weekday
        # pprint(df)
        for index, row in df.iterrows():
            # create a sha256 hash for event's unique id
            event_id = hashlib.sha256((str(row.userId) + ' ' + str(row.sessionId) + ' ' + row.timestamp.strftime('%Y%m%d%H%M') + ' ' + row.song).encode('utf-8')).hexdigest()
            str1 = ("(" +
                    "'" + event_id + "', " +
                    "'" + row.artist.replace("'", "''") + "', " +
                    "'" + row.auth + "', " +
                    "'" + row.firstName.replace("'", "''") + "', " +
                    "" + str(row.itemInSession) + ", " +
                    "'" + row.lastName.replace("'", "''") + "', " +
                    "'" + NAME_TO_GENDER[row.firstName] + "', " +
                    "" + str(row.length) + ", " +
                    "'" + row.level + "', " +
                    "'" + row.location.replace("'", "''") + "', " +
                    "'" + row.method + "', " +
                    "'" + row.page + "', " +
                    "'" + str(row.registration) + "', " +
                    "'" + str(row.sessionId) + "', " +
                    "'" + row.song.replace("'", "''") + "', " +
                    "'" + str(row.status) + "', " +
                    "'" + row.timestamp.strftime('%Y-%m-%d %H') + "', " +
                    "" + str(row.year) + ", " +
                    "" + str(row.week) + ", " +
                    "" + str(row.month) + ", " +
                    "" + str(row.day) + ", " +
                    "" + str(row.hour) + ", " +
                    "" + str(row.weekday) + ", " +
                    "'" + row.userAgent.replace("'", "''") + "', " +
                    "'" + str(row.userId) + "'" +
                    "),\n")
            sql += str1
        sql = ''.join(sql).strip()[:-1] + ';'
        # print(sql)
        # import pdb; pdb.set_trace()
        cur.execute(sql)
        conn.commit()

def load_staging_song_data(cur, conn):
    """Load song records into s_song staging table.
    """
    sql = str(staging_songs_insert)
    s3 = boto3.client('s3', config=Config(signature_version=UNSIGNED))
    file_paths = get_object_paths(s3, DEND_BUCKET, 'song_data')
    pprint(file_paths)
    for file_path in file_paths:
        print('song_data: %s' % file_path)
        obj1 = s3.get_object(Bucket='udacity-dend', Key=file_path)
        str1 = obj1['Body'].read().decode('utf-8').strip()
        data = json.loads(str1)
        if data['year'] == 0: data['year'] = None
        # fix link string...
        if str(data['artist_location']).startswith('<a'): data['artist_location'] = None
        # pprint(data)
        str2 = ("(" +
                "'" + data['artist_id'] + "', " +
                "" + (str(data['artist_latitude']) if not data['artist_latitude'] == None else 'null') + ", " +
                "'" + str(data['artist_location']).replace("'", "''") + "', " +
                "" + (str(data['artist_longitude']) if not data['artist_longitude'] == None else 'null') + ", " +
                "'" + str(data['artist_name']).replace("'", "''") + "', " +
                "" + str(data['duration']) + ", " +
                "" + str(data['num_songs']) + ", " +
                "'" + data['song_id'] + "', " +
                "'" + str(data['title']).replace("'", "''") + "', " +
                "" + (str(data['year']) if not data['year'] == None else 'null') + "" +
                "),\n")
        sql += str2
        # print(str2)
        # batch inserts at 8k character threshold
        if len(sql) > 8192:
            print(' 8k insert...')
            sql = ''.join(sql).strip()[:-1] + ';'
            cur.execute(sql)
            conn.commit()
            sql = str(staging_songs_insert)
    print('last insert...')
    sql = ''.join(sql).strip()[:-1] + ';'
    # print(sql)
    # import pdb; pdb.set_trace()
    cur.execute(sql)
    conn.commit()

def load_staging_tables(cur, conn):
    load_staging_song_data(cur, conn)
    load_staging_log_data(cur, conn)

def insert_tables(cur, conn):
    """Populate staging, dimension and fact tables.
    The fact table must be the last item in the query list.
    """
    for query in insert_table_queries:
        if query.strip() != "":
            pprint(query)
            cur.execute(query)
            conn.commit()

def main():
    """Run Redshift ETL for staging, dimension and fact tables.
    """
    config = configparser.ConfigParser()
    config.read('rs_dwh.cfg')
    conn = psycopg2.connect("host={} dbname={} user={} password={} port={}".format(*config['CLUSTER'].values()))
    cur = conn.cursor()
    load_gender_lookup()
    load_staging_tables(cur, conn)
    insert_tables(cur, conn)
    conn.close()

if __name__ == "__main__":
    main()
```
Quality signals: avg_line_length 38.367347, max_line_length 173, alphanum_fraction 0.529255, num_words 873, num_chars 7520, mean_word_length 4.412371, frac_words_unique 0.255441, frac_chars_top_2grams 0.021807, frac_chars_top_3grams 0.020249, frac_chars_top_4grams 0.010125, frac_chars_dupe_{5..10}grams 0.310748/0.247923/0.204309/0.153167/0.153167/0.153167, frac_chars_digital 0.01155, frac_chars_whitespace 0.28617, size_file_byte 7520, num_lines 196, num_chars_line_max 174, num_chars_line_mean 38.367347, frac_chars_alphabet 0.706036, frac_chars_comments 0.140293, frac_lines_dupe_lines 0.179856, frac_chars_string_length 0.116912, cate_ast 1, frac_lines_func_ratio 0.05036, cate_var_zero false, frac_lines_pass 0.007194, frac_lines_import 0.064748, score_lines_no_logic 0.122302, frac_lines_print 0.071942.
---

**Record 10: `src/frontend/function_transforms/pass_div_zero.py` · repo `mfeliu/gelpia` · Python (`py`) · 3,480 bytes**

hexsha `08d50632dbe42cde10ed75ee126dd035ddf3804a`; head `30c6c1030165b26bf5f84613316f6fc2ce3ebe8b`; licenses `["MIT"]`; identical across variants. Stars/issues/forks counts and event datetimes: all null.

```python
#!/usr/bin/env python3

try:
    from gelpia import bin_dir
except:
    print("gelpia not found, gaol_repl must be in your PATH\n")
    bin_dir = ""

from pass_utils import *
from output_flatten import flatten

import re
import sys
import subprocess
import os.path as path

def div_by_zero(exp, inputs, assigns, consts):
    query_proc = subprocess.Popen(path.join(bin_dir, 'gaol_repl'),
                                  stdout=subprocess.PIPE,
                                  stdin=subprocess.PIPE,
                                  universal_newlines=True,
                                  bufsize=0)
    root = exp
    bad_exp = None

    def gaol_eval(exp):
        flat_exp = flatten(exp, inputs, consts, assigns)
        query_proc.stdin.write('{}\n'.format(flat_exp))
        result = query_proc.stdout.readline()
        try:
            match = re.match("[<\[]([^,]+),([^>\]]+)[>\]]", result)
            l = float(match.group(1))
            r = float(match.group(2))
        except:
            print("Fatal error in gaol_eval")
            print("  query was: '{}'".format(flat_exp))
            print("  unable to match: '{}'".format(result))
            sys.exit(-1)
        return l, r

    def contains_zero(exp):
        l, r = gaol_eval(exp)
        return l <= 0 and 0 <= r

    def less_than_zero(exp):
        l, r = gaol_eval(exp)
        return l < 0

    def _div_by_zero(exp):
        nonlocal bad_exp
        typ = exp[0]
        if typ in {'Float', 'Integer', 'ConstantInterval',
                   'InputInterval', 'Input', 'Symbol'}:
            return False
        if typ == '/':
            retval = (contains_zero(exp[2]) or
                      _div_by_zero(exp[1]) or
                      _div_by_zero(exp[2]))
            if retval:
                bad_exp = exp
            return retval
        if typ == "powi":
            temp = False
            if less_than_zero(exp[2]):
                temp = contains_zero(exp[1])
            retval = temp or _div_by_zero(exp[1]) or _div_by_zero(exp[2])
            if retval:
                bad_exp = exp
            return retval
        if typ == "pow":
            temp = False
            e = expand(exp[2], assigns, consts)
            assert(e[0] == "Integer")
            if int(e[1]) < 0:
                temp = contains_zero(exp[1])
            retval = temp or _div_by_zero(exp[1])
            if retval:
                bad_exp = exp
            return retval
        if typ in BINOPS:
            return _div_by_zero(exp[1]) or _div_by_zero(exp[2])
        if typ in UNOPS.union({"Return"}):
            return _div_by_zero(exp[1])
        if typ in {"Variable"}:
            return _div_by_zero(assigns[exp[1]])
        if typ in {"Const"}:
            return _div_by_zero(consts[exp[1]])
        print("div_by_zero error unknown: '{}'".format(exp))
        sys.exit(-1)

    result = _div_by_zero(exp)
    query_proc.communicate()
    return (result, bad_exp)

def runmain():
    from lexed_to_parsed import parse_function
    from pass_lift_inputs_and_assigns import lift_inputs_and_assigns
    from pass_lift_consts import lift_consts
    from pass_simplify import simplify

    data = get_runmain_input()
    exp = parse_function(data)
    exp, inputs, assigns = lift_inputs_and_assigns(exp)
    exp, consts = lift_consts(exp, inputs, assigns)
    exp = simplify(exp, inputs, assigns, consts)
    has_div_zero, bad_exp = div_by_zero(exp, inputs, assigns, consts)
    print("divides by zero:")
    print(has_div_zero)
    if has_div_zero:
        print()
        print("offending exp:")
        print(bad_exp)
    print()
    print_exp(exp)
    print()
    print_inputs(inputs)
    print()
    print_assigns(assigns)
    print()
    print_consts(consts)

if __name__ == "__main__":
    try:
        runmain()
    except KeyboardInterrupt:
        print("\nGoodbye")
```
Quality signals: avg_line_length 23.355705, max_line_length 67, alphanum_fraction 0.611207, num_words 486, num_chars 3480, mean_word_length 4.13786, frac_words_unique 0.242798, frac_chars_top_2grams 0.062655, frac_chars_top_3grams 0.067131, frac_chars_top_4grams 0.071606, frac_chars_dupe_{5..10}grams 0.22178/0.204873/0.191447/0.160617/0.160617/0.14371, frac_chars_digital 0.010929, frac_chars_whitespace 0.263793, size_file_byte 3480, num_lines 148, num_chars_line_max 68, num_chars_line_mean 23.513514, frac_chars_alphabet 0.774005, frac_chars_comments 0.006034, frac_lines_dupe_lines 0.238938, frac_chars_string_length 0.093117, frac_chars_long_word_length 0.007808, frac_lines_assert 0.00885, cate_ast 1, frac_lines_func_ratio 0.053097, cate_var_zero false, frac_lines_pass 0.035398, frac_lines_import 0.097345, score_lines_no_logic 0.256637, frac_lines_print 0.168142.
---

**Record 11: `setup.py` · repo `bearroast/django-estimators` · Python (`py`) · 2,150 bytes**

hexsha `08d52a54cf446718a15b7b80b28b2ccd05586869`; head `5dd72694dab6725335214543a59104c4de504037`; licenses `["MIT"]`; identical across variants. Stars: 46 (2016-09-13T06:33:30.000Z to 2022-01-08T00:55:37.000Z); issues: 14 (2016-09-10T04:56:30.000Z to 2017-11-28T04:12:43.000Z); forks: 19 (2016-09-20T23:53:26.000Z to 2022-01-08T00:55:39.000Z).

```python
import os

from pip.req import parse_requirements
from setuptools import find_packages, setup

with open(os.path.join(os.path.dirname(__file__), 'README.rst')) as readme:
    README = readme.read()

# parse_requirements() returns generator of pip.req.InstallRequirement objects
install_reqs = parse_requirements(
    os.path.join(os.path.dirname(__file__), 'requirements.txt'), session=False)
reqs = [str(ir.req) for ir in install_reqs]

# allow setup.py to be run from any path
os.chdir(os.path.normpath(os.path.join(os.path.abspath(__file__), os.pardir)))

setup(
    name='django-estimators',
    version='0.2.1',
    packages=find_packages(),
    include_package_data=True,
    install_requires=reqs,
    license='MIT License',  # example license
    description='A django model to persist and track machine learning models',
    long_description=README,
    url='https://github.com/fridiculous/django-estimators',
    author='Simon Frid',
    author_email='simon.frid@gmail.com',
    classifiers=[
        'Environment :: Web Environment',
        'Framework :: Django',
        'Framework :: Django :: 1.9',
        'Development Status :: 3 - Alpha',
        'Intended Audience :: Developers',
        'Intended Audience :: Science/Research',
        'License :: OSI Approved :: MIT License',  # example license
        'Operating System :: OS Independent',
        'Programming Language :: Python',
        # Replace these appropriately if you are stuck on Python 2.
        'Programming Language :: Python :: 3',
        'Programming Language :: Python :: 3.4',
        'Programming Language :: Python :: 3.5',
        'Topic :: Internet :: WWW/HTTP',
        'Topic :: Internet :: WWW/HTTP :: Dynamic Content',
        'Topic :: Scientific/Engineering :: Artificial Intelligence',
        'Topic :: Software Development :: Libraries :: Python Modules',
        'Topic :: Software Development :: Version Control',
    ],
    keywords='''scikit-learn, sklearn, machine learning, artificial intelligence, ml,
ai, estimators, version, versioning, benchmark, persist, storage, track, models,
repository, evaluation, workflow'''
)
```
Quality signals: avg_line_length 40.566038, max_line_length 88, alphanum_fraction 0.670698, num_words 245, num_chars 2150, mean_word_length 5.787755, frac_words_unique 0.563265, frac_chars_top_2grams 0.029619, frac_chars_top_3grams 0.070522, frac_chars_top_4grams 0.025388, frac_chars_dupe_5grams 0.049365, frac_chars_dupe_6grams 0.038082, frac_chars_dupe_7grams 0.038082, frac_chars_digital 0.007009, frac_chars_whitespace 0.203721, size_file_byte 2150, num_lines 52, num_chars_line_max 89, num_chars_line_mean 41.346154, frac_chars_alphabet 0.821262, frac_chars_comments 0.095349, frac_chars_string_length 0.527592, frac_chars_long_word_length 0.011346, cate_ast 1, cate_var_zero false, frac_lines_import 0.068182, score_lines_no_logic 0.068182.
---

**Record 12: `ip_group.py` · repo `vectranetworks/csv-to-ip-group` · Python (`py`) · 7,517 bytes**

hexsha `08d5314ae1e6b39701c18dfc2466ee45cde74ef6`; head `f8f53f979c62c3db161fcb7fdc3b7ebb26842055`; licenses `["MIT"]`; identical across variants. Stars/issues/forks counts and event datetimes: all null.

```python
import csv
import ipaddress
import logging.handlers
import sys
import argparse

try:
    import vat.vectra as vectra
    import requests
except Exception as error:
    print('\nMissing import requirements: {}\n'.format(str(error)))
    sys.exit(0)

LOG = logging.getLogger(__name__)
INVALID_CHARS = ['~', '#', '$', '^', '+', '=', '<', '>', '?', ';']
SUB_CHAR = '_'

# Suppress Detect certificate warning
requests.packages.urllib3.disable_warnings()

def ip_subnet(subnet_string):
    """
    Called with string that represents an IP subnet with CIDR or netmask in dotted decimal format
    Validates string represents valid subnet and removes host bits
    Returns string representation of subnet in CIDR format

    :param subnet_string: string representing subnet in CIDR w.x.y.z/n or netmask w.x.y.z/aa.bb.cc.dd format
    :return: returns string representation of subnet in CIDR format
    """
    try:
        # fixed: the original discarded this result, leaving subnet unbound
        # (a NameError) whenever the string was already a clean subnet
        subnet = ipaddress.IPv4Network(subnet_string)
    except (ipaddress.AddressValueError, ipaddress.NetmaskValueError) as error:
        LOG.info('Subnet {} format error, {}'.format(subnet_string, error))
        return
    except ValueError as error:
        LOG.info('{}, removing host bits'.format(error))
        subnet = ipaddress.IPv4Network(subnet_string, strict=False)
    return str(subnet)

def sub_bad_chars(string, sub=SUB_CHAR):
    """
    Substitute unsupported characters in string representing group

    :param string: original string
    :param sub: substitution character, default defined in SUB_CHAR
    :return: returns the original string with any illegal characters substituted
    """
    for bad_char in INVALID_CHARS:
        string = string.replace(bad_char, sub)
    return string

def group_exists(group_name, brain):
    """
    Determines if group exists
    Called with initialized vectra client and name of group

    :param group_name: group name
    :param brain: initialized Vectra Client object
    :return: True if group exists, False otherwise
    """
    group_iterator = brain.get_all_groups(name=group_name)
    for item in group_iterator:
        if item.json()['count'] > 0:
            for group in item.json()['results']:
                if group['name'] == group_name:
                    return {'name': group['name'], 'id': group['id']}
    return False

def create_group(name, subnet, brain, descr=''):
    """
    Creates group and adds supplied subnet, and description if supplied

    :param name: group name
    :param subnet: CIDR subnet string
    :param brain: initialized Vectra Client object
    :param descr: group description, optional
    """
    if bool(descr):
        brain.create_group(name=name, description=descr, type='ip', members=list(subnet))
    else:
        brain.create_group(name=name, type='ip', members=list(subnet))

def update_group(grp_id, subnet, brain, descr=''):
    """
    Updates existing group with supplied subnet, and description if supplied

    :param grp_id: group ID
    :param subnet: CIDR subnet string
    :param brain: initialized Vectra Client object
    :param descr: group description, optional
    """
    if bool(descr):
        brain.update_group(group_id=grp_id, description=descr, members=subnet, append=True)
    else:
        brain.update_group(group_id=grp_id, members=subnet, append=True)

def obtain_args():
    parser = argparse.ArgumentParser(description='Supplied with name of CSV input file, creates or updates IP groups '
                                                 'with supplied subnet information. \nCSV file format: '
                                                 'group_name,subnet,description\n\n'
                                                 'Subnet can be supplied in CIDR notation e.g. \n'
                                                 'group name,10.1.1.0/24,some description\n\n'
                                                 'or as subnet and netmask separate by a comma (,) e.g.\n'
                                                 'group name,10.1.1.1.0,255.255.255.0,some description',
                                     prefix_chars='--', formatter_class=argparse.RawTextHelpFormatter,
                                     epilog='')
    parser.add_argument('brain', type=str, help='Hostname or IP of Congito Detect brain')
    parser.add_argument('token', type=str, help='API token to access Cognito Detect')
    parser.add_argument('file', type=str, help='Name of csv input file')
    parser.add_argument('--sub_char', default=False, type=str, help='Override default invalid character '
                                                                    'substitution in group names and '
                                                                    'description. Default is _\n'
                                                                    'May not be one of the following characters\n'
                                                                    '{}'.format(str(INVALID_CHARS)))
    parser.add_argument('--verbose', default=False, action='store_true', help='Verbose logging')
    return parser.parse_args()

def main():
    """
    Supplied with valid CSV file containing 3 or 4 columns of data, iterates over rows and creates or updates groups
    Supports CSV files with following format examples with or without header row

    group 1,192.168.1.0/255.255.255.0,group1 description
    group 2,10.1.1.0/24,group2 description
    """
    args = obtain_args()
    sub_char = args.sub_char if args.sub_char else SUB_CHAR
    log_level = logging.DEBUG if args.verbose else logging.INFO
    logging.basicConfig(level=log_level, format='%(asctime)s - %(name)s - %(levelname)s - %(message)s')

    if len(sys.argv) == 1:
        print('Run python3 ip_group.py -h for help.')
        sys.exit()

    file = args.file
    with open(file, newline='') as csvfile:
        vc = vectra.VectraClientV2_1(url='https://' + args.brain, token=args.token, verify=False)
        reader = csv.reader(csvfile)
        for row in reader:
            if len(row) < 3 or len(row) > 4:
                LOG.info('Invalid number of columns in row, skipping')
                continue
            if len(row) == 4:
                LOG.debug('Number of rows 4: {}'.format(len(row)))
                subnet = ip_subnet('{}/{}'.format(row[1], row[2]))
                description = sub_bad_chars(row[3], sub_char)
            elif len(row) == 3:
                LOG.debug('Number of rows 3: {}'.format(len(row)))
                subnet = ip_subnet(row[1])
                description = sub_bad_chars(row[2], sub_char)
            group_name = sub_bad_chars(row[0], sub_char)
            if subnet is not None:
                """group_obj False or {'name': 'somename', 'id':'123'}"""
                group_obj = group_exists(group_name, vc)
                if not group_obj:
                    # Group does not exist, creating
                    LOG.info('Group does not exist, creating. group:{}, subnet:{}, description:{}'.format(
                        group_name, subnet, description))
                    create_group(group_name, [str(subnet)], vc, description)
                else:
                    LOG.info('Group exists, updating. group:{}, subnet:{}, description:{}'.format(
                        group_name, subnet, description))
                    update_group(group_obj['id'], [str(subnet)], vc, description)
            else:
                LOG.info('Invalid subnet, skipping')

if __name__ == '__main__':
    main()
```
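For reference, a hedged sketch of an input file this script accepts, covering both subnet notations described in its help text (the file name and group values are invented):

```python
# Write a two-row sample CSV in the two accepted shapes.
rows = [
    "lab network,10.1.1.0/24,lab hosts",                 # CIDR notation (3 columns)
    "dmz network,192.168.10.0,255.255.255.0,dmz hosts",  # netmask notation (4 columns)
]
with open("sample.csv", "w") as f:
    f.write("\n".join(rows) + "\n")
# then: python3 ip_group.py <brain> <token> sample.csv
```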
Quality signals: avg_line_length 39.151042, max_line_length 118, alphanum_fraction 0.596648, num_words 906, num_chars 7517, mean_word_length 4.844371, frac_words_unique 0.258278, frac_chars_top_2grams 0.038961, frac_chars_top_3grams 0.01481, frac_chars_top_4grams 0.018455, frac_chars_dupe_{5..10}grams 0.248576/0.178628/0.152427/0.106175/0.052859/0.052859, frac_chars_digital 0.014773, frac_chars_whitespace 0.297592, size_file_byte 7517, num_lines 191, num_chars_line_max 119, num_chars_line_mean 39.356021, frac_chars_alphabet 0.816477, frac_chars_comments 0.214846, frac_lines_dupe_lines 0.09434, frac_lines_long_string 0.009434, frac_chars_string_length 0.196857, frac_chars_long_word_length 0.015537, cate_ast 1, frac_lines_func_ratio 0.066038, cate_var_zero false, frac_lines_import 0.075472, score_lines_no_logic 0.198113, frac_lines_print 0.018868.
---

**Record 13: `BasicSyntax/DataType.py` · repo `Fjaxzhy/top.kagurayayoi.learn.Python` · Python (`py`) · 512 bytes**

hexsha `08d5fc45e5a26919b46ae56fd9e3cb2d53ede3e7`; head `af2ad3b7da85fb0af1668d3751c0342b16d0966f`; licenses `["MIT"]`; identical across variants. Stars: null; issues: 11 (2021-03-29T08:50:16.000Z to 2021-03-31T08:46:55.000Z); forks: null.

```python
#!/usr/bin/env python3
# -*- coding: utf-8 -*-

# Python variables do not need a declared data type
# A variable must be assigned before it is used
# Variables themselves have no type; the type belongs to the object in memory

# Immutable data: Number / String / Tuple
# Mutable data: List / Dictionary / Set

# Numbers
# Integer (int)
IntNum = 100
# Floating point (float)
FloatNum = 100.10
# Boolean (bool) // True:1 False:0
BoolNum = True
# Complex number (complex)
ComplexNum = 1.00j

# String
Str = "这是字符串"  # the literal means "this is a string"
# List
List = ['a', 'b', 1, 2]
# Tuple
Tup = ('a', 'b', 1, 2)
# Set
Set = {'a', 'b', 1, 2}
# Dictionary
Dict = {'key1': 'value1', 'key2': 'value2'}
```
Quality signals: avg_line_length 14.628571, max_line_length 43, alphanum_fraction 0.59375, num_words 75, num_chars 512, mean_word_length 4.053333, frac_words_unique 0.733333, frac_chars_top_2grams 0.019737, frac_chars_top_3grams 0.029605, frac_chars_top_4grams 0.039474, frac_chars_digital 0.063452, frac_chars_whitespace 0.230469, size_file_byte 512, num_lines 34, num_chars_line_max 44, num_chars_line_mean 15.058824, frac_chars_alphabet 0.708122, frac_chars_comments 0.541016, frac_chars_string_length 0.142202, cate_ast 1, cate_var_zero false.
---

**Record 14: `rename_smpls.py` · repo `Chartiza/bulls` · Python (`py`) · 382 bytes**

hexsha `08d6edb44ef1415e69d5e8564970749ce00f431c`; head `e4e7895a37a0335572dea50f2cbaae2737b3cd5f`; licenses `["MIT"]`; identical across variants. Stars/issues/forks counts and event datetimes: all null.

```python
#!/usr/bin/python
sootv = {}

# Read the correspondence file ("sootvetstviya" is transliterated Russian for correspondences)
for l in open("filesootv"):
    data = l.strip().split("\t")
    if data[0] not in sootv:
        sootv[data[0]] = data[1]

# Read FinalReport file
for l in open('Ire30_GP'):
    data = l.strip().split("\t")
    if data[1] in sootv:
        print(data[0]+"\t"+sootv[data[1]]+"\t"+data[2]+"\t"+data[3]+"\t"+"\t"+data[4]+"\t"+data[5])
```
Quality signals: avg_line_length 23.875, max_line_length 94, alphanum_fraction 0.570681, num_words 67, num_chars 382, mean_word_length 3.238806, frac_words_unique 0.41791, frac_chars_top_2grams 0.092166, frac_chars_top_3grams 0.0553, frac_chars_top_4grams 0.092166, frac_chars_dupe_5grams 0.202765, frac_chars_dupe_6grams 0.202765, frac_chars_dupe_7grams 0.202765, frac_chars_digital 0.037975, frac_chars_whitespace 0.172775, size_file_byte 382, num_lines 16, num_chars_line_max 95, num_chars_line_mean 23.875, frac_chars_alphabet 0.648734, frac_chars_comments 0.157068, frac_lines_dupe_lines 0.222222, frac_chars_string_length 0.108197, cate_ast 1, cate_var_zero false, frac_lines_print 0.111111.
---

**Record 15: `src/custom_arch/custom_alexnet.py` · Python (`py`) · 2,619 bytes**

hexsha `08d8e05ba83fd1eb90111af5408ae91ffdf11318`; licenses `["Apache-2.0"]`. Stars variant: repo `joeyseash/PruneTrain`, head `5adb367eb90b7e1e38251f8e3a8e7eb65b167aa0`, stars 1 (2021-10-03T00:57:32.000Z, min and max). Issues and forks variants: repo `VictorSuciu/prunetrain`, head `ef84a88ef8a34f8e79de783ffdb9d3b82545dc3b`, counts null.

```python
"""
Copyright 2019 Sangkug Lym
Copyright 2019 The University of Texas at Austin
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import os
from .arch_utils import layerUtil
arch = {}
arch[0] = {'name':'conv1', 'kernel_size':11, 'stride':4, 'padding':5, 'bias':True}
arch[1] = {'name':'conv2', 'kernel_size':5, 'stride':1, 'padding':2, 'bias':True}
arch[2] = {'name':'conv3', 'kernel_size':3, 'stride':1, 'padding':1, 'bias':True}
arch[3] = {'name':'conv4', 'kernel_size':3, 'stride':1, 'padding':1, 'bias':True}
arch[4] = {'name':'conv5', 'kernel_size':3, 'stride':1, 'padding':1, 'bias':True}
arch[5] = {'name':'pool', 'kernel_size':2, 'stride':2}
arch[6] = {'name':'relu'}
arch[7] = {'name':'fc', 'out_chs':'num_classes'}
def _genDenseArchAlexNet(model, out_f_dir1, out_f_dir2, arch_name, dense_chs, chs_map, is_gating=False):
# File heading
ctx = 'import torch.nn as nn\n'
ctx += '__all__ = [\'alexnet_flat\']\n'
ctx += 'class AlexNet(nn.Module):\n'
ctx += '\tdef __init__(self, num_classes=10):\n'
ctx += '\t\tsuper(AlexNet, self).__init__()\n'
lyr = layerUtil(model, dense_chs)
# Layer definition
for idx in sorted(arch):
ctx += lyr.getLayerDef(arch[idx])
ctx += '\tdef forward(self, x):\n'
ctx += lyr.forward('conv1')
ctx += lyr.forward('relu')
ctx += lyr.forward('pool')
ctx += lyr.forward('conv2')
ctx += lyr.forward('relu')
ctx += lyr.forward('pool')
ctx += lyr.forward('conv3')
ctx += lyr.forward('relu')
ctx += lyr.forward('conv4')
ctx += lyr.forward('relu')
ctx += lyr.forward('conv5')
ctx += lyr.forward('relu')
ctx += lyr.forward('pool')
ctx += '\t\tx = x.view(x.size(0), -1)\n'
ctx += forward('fc')
ctx += '\t\treturn x\n'
# AlexNet definition
ctx += 'def alexnet_flat(**kwargs):\n'
ctx += '\tmodel = AlexNet(**kwargs)\n'
ctx += '\treturn model\n'
if not os.path.exists(out_f_dir2):
os.makedirs(out_f_dir2)
print ("[INFO] Generating a new dense architecture...")
f_out1 = open(os.path.join(out_f_dir1, 'alexnet_flat.py'),'w')
f_out1.write(ctx)
f_out2 = open(os.path.join(out_f_dir2, arch_name),'w')
f_out2.write(ctx)
Quality signals: avg_line_length 34.012987, max_line_length 104, alphanum_fraction 0.658267, num_words 408, num_chars 2619, mean_word_length 4.107843, frac_words_unique 0.375, frac_chars_top_2grams 0.050119, frac_chars_top_3grams 0.100835, frac_chars_top_4grams 0.050716, frac_chars_dupe_{5..10}grams 0.220167/0.203461/0.181981/0.146181/0.146181/0.124105, frac_chars_digital 0.028004, frac_chars_whitespace 0.154639, size_file_byte 2619, num_lines 77, num_chars_line_max 105, num_chars_line_mean 34.012987, frac_chars_alphabet 0.728997, frac_chars_comments 0.248186, frac_lines_dupe_lines 0.170213, frac_chars_string_length 0.332478, frac_chars_long_word_length 0.023602, cate_ast 1, frac_lines_func_ratio 0.021277, cate_var_zero false, frac_lines_import 0.06383, score_lines_no_logic 0.085106, frac_lines_print 0.021277.
---

**Record 16: `DIP/exercises/ex10/pca.py` · repo `apeyrard/sjtu-work` · Python (`py`) · 2,596 bytes**

hexsha `08dc052ecc3d96e2ef3efe41624a974268f5c7b0`; head `ca98fec3c83b81ed9091bdc968cb5ad8a74d1d6a`; licenses `["MIT"]`; identical across variants. Stars: 1 (2022-03-26T10:04:05.000Z, min and max); issues: null; forks: 1 (2022-03-26T10:04:06.000Z, min and max).

```python
#!/usr/bin/env python3
# -*- coding: UTF-8 -*-

import sys
import os

from PIL import Image
import numpy as np

size = None
matrix_x = None
for image in os.listdir('./washington'):
    try:
        print(image)
        with Image.open(os.path.join('./washington', image)) as im:
            imgVector = np.array(list(im.getdata()))
            imgVector = imgVector.reshape(1, imgVector.shape[0])
            try:
                matrix_x = np.vstack((matrix_x, imgVector))
            except:
                matrix_x = imgVector
    except FileNotFoundError as e:
        sys.exit("Error : file not found")

#matrix_x = np.array([[0,1,1,1],
#                     [0,0,1,0],
#                     [0,0,0,1]
#                     ])

#mean vector
K = matrix_x.shape[1]
print('K', K)
nb = matrix_x.shape[0]
print('nb', nb)
mx = np.zeros((nb, 1))
for x in range(K):
    for y in range(nb):
        mx[y] += matrix_x[y, x]
mx = mx/K

#covar matrix
cx = np.zeros((nb, nb))
for x in range(K):
    tmp = (matrix_x[:, x])
    tmp = tmp.reshape(tmp.shape[0], 1)
    cx += np.dot(tmp, tmp.T) - np.dot(mx, mx.T)
cx = cx/K

eigenvalues, eigenvectors = np.linalg.eig(cx)

# sort: keep eigenpairs ordered by decreasing eigenvalue ("tri" is French for sort)
eival = np.zeros(eigenvalues.shape)
eivec = np.zeros(eigenvectors.shape)
j = 0
for _ in range(nb):
    maxval = eigenvalues.max()
    for i in range(eigenvalues.shape[0]):
        val = eigenvalues[i]
        if val == maxval:
            eival[j] = val
            eigenvalues[i] = 0
            eivec[j] = eigenvectors[i]
            j += 1
            break

#pruning eivec
pruning = 2
eivec = eivec[:pruning, :]
print(eivec)

matrix_y = np.zeros((pruning, matrix_x.shape[1]))
for i in range(K):
    tmp = (matrix_x[:, i]).reshape(nb, 1)
    truc = np.dot(eivec, (tmp - mx))
    matrix_y[:, i] = truc.reshape(truc.shape[0])

#reconstruction
matrix_x2 = np.zeros(matrix_x.shape)
for i in range(K):
    tmp = (matrix_y[:, i])
    tmp = tmp.reshape(tmp.shape[0], 1)
    matrix_x2[:, i] = np.array((np.dot(eivec.T, tmp) + mx).reshape(nb))

def rescale(matrix):
    matrix = matrix - matrix.min()
    matrix = matrix * 255 / matrix.max()
    return matrix

data = np.vsplit(matrix_x2, 6)
for i, item in enumerate(data):
    item = list(rescale(item.reshape(item.shape[1])))
    newIm = Image.new(im.mode, im.size)
    newIm.putdata(item)
    newIm.show()
    diff = item - matrix_x[i]
    epsilon = 0.1
    print(diff)
    for j, val in enumerate(diff):
        if abs(val) < epsilon:
            diff[j] = 0
    print(diff)
    diff = rescale(diff)
    newIm = Image.new(im.mode, im.size)
    newIm.putdata(list(diff))
    newIm.show()
```
Quality signals: avg_line_length 23.6, max_line_length 68, alphanum_fraction 0.573575, num_words 393, num_chars 2596, mean_word_length 3.737913, frac_words_unique 0.249364, frac_chars_top_2grams 0.061947, frac_chars_top_3grams 0.032675, frac_chars_top_4grams 0.022464, frac_chars_dupe_{5..9}grams 0.134105/0.123213/0.110279/0.050374/0.050374, frac_chars_digital 0.022059, frac_chars_whitespace 0.266564, size_file_byte 2596, num_lines 109, num_chars_line_max 69, num_chars_line_mean 23.816514, frac_chars_alphabet 0.749475, frac_chars_comments 0.057011, frac_lines_dupe_lines 0.17284, frac_chars_string_length 0.020148, cate_ast 1, frac_lines_func_ratio 0.012346, cate_var_zero false, frac_lines_import 0.049383, score_lines_no_logic 0.074074, frac_lines_print 0.074074.
---

**Record 17: `organisations/migrate-entities/script.py` · repo `jbarnes/aws-python-script-collection` · Python (`py`) · 1,677 bytes**

hexsha `08dc36bae83be55acec0ed61f76a33d11f4bb8a1`; head `bf2accf60b8c14af89fab3a210c4df6a3b2e0ba9`; licenses `["MIT"]`; identical across variants. Stars/issues/forks counts and event datetimes: all null.

```python
import boto3
import sys
if __name__ == "__main__":
if len(sys.argv) != 2:
print("[ERROR] You must pass exactly one target-id, example target-id is ou-zhz0-prn5fmbc")
sys.exit()
else:
print("[INFO] Valid argument detected, proceeding with account migration")
destination_id = str(sys.argv[1])
# Gather source ids
with open("source_ids.txt") as f:
source_ids = f.read().splitlines()
num_ids = len(source_ids)
print("[INFO] Detected {} source id(s) to be migrated".format(num_ids))
print("[INFO] Beginning processing of source id(s)...")
# Process the source ids for migration
client = boto3.client("organizations")
for source_id in source_ids:
print("[INFO] Now attempting to move source id: {}".format(source_id))
get_parent = client.list_parents(ChildId=source_id)
parent_id = get_parent["Parents"][0]["Id"]
try:
response = client.move_account(
AccountId=source_id, SourceParentId=parent_id, DestinationParentId=destination_id
)
print(
"[INFO] Successfully moved source id: {} to target id: {}".format(
source_id, destination_id
)
)
except client.exceptions.DuplicateAccountException:
print(
"[NOTICE] Source id: {} is already migrated to target id: {}".format(
source_id, destination_id
)
)
print("[INFO] Successfully migrated required accounts.")
| 35.680851
| 103
| 0.556947
| 180
| 1,677
| 5.033333
| 0.461111
| 0.09713
| 0.046358
| 0.05298
| 0.142384
| 0.081678
| 0.081678
| 0.081678
| 0
| 0
| 0
| 0.006358
| 0.34347
| 1,677
| 46
| 104
| 36.456522
| 0.81653
| 0.0322
| 0
| 0.114286
| 0
| 0.028571
| 0.303704
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0.028571
| 0.057143
| 0
| 0.057143
| 0.228571
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
08e07a97a9f3cede768ff174381cda6e3a2e9847
| 3,823
|
py
|
Python
|
ProgrammersGuideExamples/provisioning.py
|
mrhorrible78/PyU4V
|
5b9274fd6f5f80a4a6e7aa487e348fa91f6f315c
|
[
"MIT"
] | null | null | null |
ProgrammersGuideExamples/provisioning.py
|
mrhorrible78/PyU4V
|
5b9274fd6f5f80a4a6e7aa487e348fa91f6f315c
|
[
"MIT"
] | null | null | null |
ProgrammersGuideExamples/provisioning.py
|
mrhorrible78/PyU4V
|
5b9274fd6f5f80a4a6e7aa487e348fa91f6f315c
|
[
"MIT"
] | null | null | null |
# The MIT License (MIT)
# Copyright (c) 2016 Dell Inc. or its subsidiaries.
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation
# files (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify,
# merge, publish, distribute, sublicense, and/or sell copies of
# the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
# OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
# IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
# CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
# TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
# SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
import argparse
from PyU4V import U4VConn
ru = U4VConn(u4v_version='84')
PARSER = argparse.ArgumentParser(description='This python script is a basic '
'VMAX REST recipe provisioning '
'multiple sized volume for an '
'application.\n'
'python provisioning.py -sg TEST '
'-ig initiators.txt -pg ports.txt'
' -cap 1')
RFLAGS = PARSER.add_argument_group('Required arguments')
RFLAGS.add_argument('-sg', required=True, help='Storage group name, typically '
'the application name '
'e.g. oraclefinance')
RFLAGS.add_argument('-ig', required=True, help='Filename containing initiators'
',one per line '
'e.g. 10000000c9873cae')
RFLAGS.add_argument('-pg', required=True, help='Filename containing list of '
'ports one per line, '
'e.g. FA-1D:25')
RFLAGS.add_argument('-cap', required=True, help='Capacity in GB')
# Assign parameters to command line arguments
ARGS = PARSER.parse_args()
sgname = ARGS.sg
hba_file = ARGS.ig
port_file = ARGS.pg
appname = "REST_" + sgname
sg_id = appname + "_SG"
ig_id = appname + "_IG"
pg_id = appname + "_PG"
mv_id = appname + "_MV"
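# all masking view components share the "REST_<sg>" naming prefix built above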
requested_capacity = ARGS.cap
initiator_list = ru.common.create_list_from_file(hba_file)
def provision_storage():
if headroom_check():
sg_job = ru.provisioning.create_non_empty_storagegroup(
"SRP_1", sg_id, "Diamond", "OLTP", 1, requested_capacity, "GB", True)
# showing how async functions can be worked in.
ru.common.wait_for_job("", sg_job)
print("Storage Group Created.")
ru.provisioning.create_host(ig_id, initiator_list)
print("Host Created.")
ru.provisioning.create_portgroup_from_file(port_file, pg_id)
print("Port Group Created.")
ru.provisioning.create_masking_view_existing_components(
pg_id, mv_id, sg_id, ig_id)
print("Masking View Created.")
else:
print("Headroom Check Failed, Check array Capacity Usage")
def headroom_check():
headroom_cp = ru.common.get_headroom("OLTP")[0]["headroom"][0]["headroomCapacity"]
# True when the array has enough usable capacity for the requested size
return int(requested_capacity) <= int(headroom_cp)
provision_storage()
| 43.443182
| 86
| 0.625948
| 469
| 3,823
| 4.976546
| 0.452026
| 0.037704
| 0.029135
| 0.034704
| 0.066838
| 0
| 0
| 0
| 0
| 0
| 0
| 0.011074
| 0.291394
| 3,823
| 87
| 87
| 43.942529
| 0.850498
| 0.309181
| 0
| 0.037037
| 0
| 0
| 0.230153
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.037037
| false
| 0
| 0.037037
| 0
| 0.111111
| 0.092593
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
08e17200183b1b4c4b38978e4c91346462570f54
| 8,227
|
py
|
Python
|
quickdraw-doodle-recognition/gcloud/common.py
|
yasserglez/kaggle_titanic
|
7a4857ec9a99c31eb53a91dda3ad9ecd5b647278
|
[
"MIT"
] | 2
|
2019-09-29T02:26:58.000Z
|
2020-03-06T07:38:58.000Z
|
quickdraw-doodle-recognition/gcloud/common.py
|
yasserglez/kaggle_titanic
|
7a4857ec9a99c31eb53a91dda3ad9ecd5b647278
|
[
"MIT"
] | 2
|
2018-12-17T04:32:09.000Z
|
2019-10-22T00:31:06.000Z
|
quickdraw-doodle-recognition/gcloud/common.py
|
yasserglez/kaggle
|
7a4857ec9a99c31eb53a91dda3ad9ecd5b647278
|
[
"MIT"
] | null | null | null |
import struct
import itertools
import numpy as np
from bitarray import bitarray
RANDOM_SEED = 2387613
IMAGE_SIZE = 128
BATCH_SIZE = 2048
# Assign an integer to each word to be predicted.
WORD2LABEL = {
'The Eiffel Tower': 0,
'The Great Wall of China': 1,
'The Mona Lisa': 2,
'airplane': 3,
'alarm clock': 4,
'ambulance': 5,
'angel': 6,
'animal migration': 7,
'ant': 8,
'anvil': 9,
'apple': 10,
'arm': 11,
'asparagus': 12,
'axe': 13,
'backpack': 14,
'banana': 15,
'bandage': 16,
'barn': 17,
'baseball': 19,
'baseball bat': 18,
'basket': 20,
'basketball': 21,
'bat': 22,
'bathtub': 23,
'beach': 24,
'bear': 25,
'beard': 26,
'bed': 27,
'bee': 28,
'belt': 29,
'bench': 30,
'bicycle': 31,
'binoculars': 32,
'bird': 33,
'birthday cake': 34,
'blackberry': 35,
'blueberry': 36,
'book': 37,
'boomerang': 38,
'bottlecap': 39,
'bowtie': 40,
'bracelet': 41,
'brain': 42,
'bread': 43,
'bridge': 44,
'broccoli': 45,
'broom': 46,
'bucket': 47,
'bulldozer': 48,
'bus': 49,
'bush': 50,
'butterfly': 51,
'cactus': 52,
'cake': 53,
'calculator': 54,
'calendar': 55,
'camel': 56,
'camera': 57,
'camouflage': 58,
'campfire': 59,
'candle': 60,
'cannon': 61,
'canoe': 62,
'car': 63,
'carrot': 64,
'castle': 65,
'cat': 66,
'ceiling fan': 67,
'cell phone': 68,
'cello': 69,
'chair': 70,
'chandelier': 71,
'church': 72,
'circle': 73,
'clarinet': 74,
'clock': 75,
'cloud': 76,
'coffee cup': 77,
'compass': 78,
'computer': 79,
'cookie': 80,
'cooler': 81,
'couch': 82,
'cow': 83,
'crab': 84,
'crayon': 85,
'crocodile': 86,
'crown': 87,
'cruise ship': 88,
'cup': 89,
'diamond': 90,
'dishwasher': 91,
'diving board': 92,
'dog': 93,
'dolphin': 94,
'donut': 95,
'door': 96,
'dragon': 97,
'dresser': 98,
'drill': 99,
'drums': 100,
'duck': 101,
'dumbbell': 102,
'ear': 103,
'elbow': 104,
'elephant': 105,
'envelope': 106,
'eraser': 107,
'eye': 108,
'eyeglasses': 109,
'face': 110,
'fan': 111,
'feather': 112,
'fence': 113,
'finger': 114,
'fire hydrant': 115,
'fireplace': 116,
'firetruck': 117,
'fish': 118,
'flamingo': 119,
'flashlight': 120,
'flip flops': 121,
'floor lamp': 122,
'flower': 123,
'flying saucer': 124,
'foot': 125,
'fork': 126,
'frog': 127,
'frying pan': 128,
'garden': 130,
'garden hose': 129,
'giraffe': 131,
'goatee': 132,
'golf club': 133,
'grapes': 134,
'grass': 135,
'guitar': 136,
'hamburger': 137,
'hammer': 138,
'hand': 139,
'harp': 140,
'hat': 141,
'headphones': 142,
'hedgehog': 143,
'helicopter': 144,
'helmet': 145,
'hexagon': 146,
'hockey puck': 147,
'hockey stick': 148,
'horse': 149,
'hospital': 150,
'hot air balloon': 151,
'hot dog': 152,
'hot tub': 153,
'hourglass': 154,
'house': 156,
'house plant': 155,
'hurricane': 157,
'ice cream': 158,
'jacket': 159,
'jail': 160,
'kangaroo': 161,
'key': 162,
'keyboard': 163,
'knee': 164,
'ladder': 165,
'lantern': 166,
'laptop': 167,
'leaf': 168,
'leg': 169,
'light bulb': 170,
'lighthouse': 171,
'lightning': 172,
'line': 173,
'lion': 174,
'lipstick': 175,
'lobster': 176,
'lollipop': 177,
'mailbox': 178,
'map': 179,
'marker': 180,
'matches': 181,
'megaphone': 182,
'mermaid': 183,
'microphone': 184,
'microwave': 185,
'monkey': 186,
'moon': 187,
'mosquito': 188,
'motorbike': 189,
'mountain': 190,
'mouse': 191,
'moustache': 192,
'mouth': 193,
'mug': 194,
'mushroom': 195,
'nail': 196,
'necklace': 197,
'nose': 198,
'ocean': 199,
'octagon': 200,
'octopus': 201,
'onion': 202,
'oven': 203,
'owl': 204,
'paint can': 205,
'paintbrush': 206,
'palm tree': 207,
'panda': 208,
'pants': 209,
'paper clip': 210,
'parachute': 211,
'parrot': 212,
'passport': 213,
'peanut': 214,
'pear': 215,
'peas': 216,
'pencil': 217,
'penguin': 218,
'piano': 219,
'pickup truck': 220,
'picture frame': 221,
'pig': 222,
'pillow': 223,
'pineapple': 224,
'pizza': 225,
'pliers': 226,
'police car': 227,
'pond': 228,
'pool': 229,
'popsicle': 230,
'postcard': 231,
'potato': 232,
'power outlet': 233,
'purse': 234,
'rabbit': 235,
'raccoon': 236,
'radio': 237,
'rain': 238,
'rainbow': 239,
'rake': 240,
'remote control': 241,
'rhinoceros': 242,
'river': 243,
'roller coaster': 244,
'rollerskates': 245,
'sailboat': 246,
'sandwich': 247,
'saw': 248,
'saxophone': 249,
'school bus': 250,
'scissors': 251,
'scorpion': 252,
'screwdriver': 253,
'sea turtle': 254,
'see saw': 255,
'shark': 256,
'sheep': 257,
'shoe': 258,
'shorts': 259,
'shovel': 260,
'sink': 261,
'skateboard': 262,
'skull': 263,
'skyscraper': 264,
'sleeping bag': 265,
'smiley face': 266,
'snail': 267,
'snake': 268,
'snorkel': 269,
'snowflake': 270,
'snowman': 271,
'soccer ball': 272,
'sock': 273,
'speedboat': 274,
'spider': 275,
'spoon': 276,
'spreadsheet': 277,
'square': 278,
'squiggle': 279,
'squirrel': 280,
'stairs': 281,
'star': 282,
'steak': 283,
'stereo': 284,
'stethoscope': 285,
'stitches': 286,
'stop sign': 287,
'stove': 288,
'strawberry': 289,
'streetlight': 290,
'string bean': 291,
'submarine': 292,
'suitcase': 293,
'sun': 294,
'swan': 295,
'sweater': 296,
'swing set': 297,
'sword': 298,
't-shirt': 299,
'table': 300,
'teapot': 301,
'teddy-bear': 302,
'telephone': 303,
'television': 304,
'tennis racquet': 305,
'tent': 306,
'tiger': 307,
'toaster': 308,
'toe': 309,
'toilet': 310,
'tooth': 311,
'toothbrush': 312,
'toothpaste': 313,
'tornado': 314,
'tractor': 315,
'traffic light': 316,
'train': 317,
'tree': 318,
'triangle': 319,
'trombone': 320,
'truck': 321,
'trumpet': 322,
'umbrella': 323,
'underwear': 324,
'van': 325,
'vase': 326,
'violin': 327,
'washing machine': 328,
'watermelon': 329,
'waterslide': 330,
'whale': 331,
'wheel': 332,
'windmill': 333,
'wine bottle': 334,
'wine glass': 335,
'wristwatch': 336,
'yoga': 337,
'zebra': 338,
'zigzag': 339,
}
LABEL2WORD = dict((v, k) for k, v in WORD2LABEL.items())
def pack_example(image, label, fout):
image_as_bits = bitarray(image.flatten().tolist())
fout.write(image_as_bits.tobytes())
fout.write(struct.pack('H', label))
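# on-disk record layout: IMAGE_SIZE*IMAGE_SIZE bits (one bit per pixel) followed by an unsigned 16-bit label ('H')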
def unpack_example(fin):
image_size = IMAGE_SIZE * IMAGE_SIZE // 8 # bytes
image_as_bits = bitarray()
image_as_bits.fromfile(fin, image_size)
image_as_bytes = np.frombuffer(image_as_bits.tobytes(), count=image_size, dtype=np.uint8)
image = np.unpackbits(image_as_bytes).astype(np.float32).reshape(IMAGE_SIZE, IMAGE_SIZE, 1)
label, = struct.unpack('H', fin.read(2))
return {'image': image, 'label': label}
def unpack_examples(fin):
while True:
try:
yield unpack_example(fin)
except (EOFError, struct.error):
break
# https://docs.python.org/3/library/itertools.html#recipes
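# interleave several iterables, dropping each from the rotation once it is exhausted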
def roundrobin(iterables):
num_active = len(iterables)
nexts = itertools.cycle(iter(it).__next__ for it in iterables)
while num_active:
try:
for nxt in nexts:  # avoid shadowing the builtin next()
yield nxt()
except StopIteration:
# Remove the iterator we just exhausted from the cycle.
num_active -= 1
nexts = itertools.cycle(itertools.islice(nexts, num_active))
| 20.880711
| 95
| 0.519874
| 943
| 8,227
| 4.497349
| 0.852598
| 0.016977
| 0.012969
| 0.012733
| 0.011318
| 0
| 0
| 0
| 0
| 0
| 0
| 0.161207
| 0.295004
| 8,227
| 393
| 96
| 20.933842
| 0.57
| 0.019813
| 0
| 0.005291
| 0
| 0
| 0.300074
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.010582
| false
| 0.005291
| 0.010582
| 0
| 0.02381
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
08e511f1a5de576d29d0f24338c61be5e0fb82ee
| 2,250
|
py
|
Python
|
multiband_melgan/dataset.py
|
AppleHolic/multiband_melgan
|
e0864d0fc205c3bdf5e19c77753e105e29a2641b
|
[
"MIT"
] | 41
|
2020-06-24T08:07:23.000Z
|
2022-01-24T16:39:54.000Z
|
multiband_melgan/dataset.py
|
AppleHolic/multiband_melgan
|
e0864d0fc205c3bdf5e19c77753e105e29a2641b
|
[
"MIT"
] | 2
|
2020-06-24T08:02:15.000Z
|
2020-11-23T02:56:42.000Z
|
multiband_melgan/dataset.py
|
AppleHolic/multiband_melgan
|
e0864d0fc205c3bdf5e19c77753e105e29a2641b
|
[
"MIT"
] | 5
|
2020-07-03T04:00:50.000Z
|
2020-11-04T03:24:48.000Z
|
import numpy as np
import librosa
import os
from pytorch_sound.data.meta.ljspeech import LJSpeechMeta
from torch.utils.data import Dataset, DataLoader
from typing import Tuple
class AudioDataset(Dataset):
def __init__(self, meta_frame, crop_length: int, seed: int = 1234):
self.meta_frame = meta_frame
self.column_name = 'audio_filename'
self.crop_length = crop_length
self.seed = seed
np.random.seed(seed)
def __getitem__(self, idx):
# get selected file path
file_path = self.meta_frame.iloc[idx][self.column_name]
# load audio
wav, _ = librosa.load(file_path, sr=None)
# wav = librosa.effects.trim(wav)[0]
# random crop
if self.crop_length:
rand_start = np.random.randint(0, max(len(wav) - self.crop_length, 1))  # guard against clips shorter than crop_length
cropped_wav = wav[rand_start:rand_start + self.crop_length]
# crop on voiced part
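# re-crop while the segment is near-silent; np.random.randint(5) gives a 1-in-5 chance of stopping each try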
while np.abs(cropped_wav).max() < 0.05 and np.random.randint(5):
rand = np.random.randint(0, max(len(wav) - 1 - self.crop_length, 1))
cropped_wav = wav[rand:rand + self.crop_length]
wav = cropped_wav
# make mask
wav_mask = np.ones_like(wav)
return wav, wav_mask
def __len__(self):
return len(self.meta_frame)
def get_datasets(meta_dir: str, batch_size: int, num_workers: int, crop_length: int, random_seed: int
) -> Tuple[DataLoader, DataLoader]:
assert os.path.isdir(meta_dir), '{} is not a valid directory path!'.format(meta_dir)
train_file, valid_file = LJSpeechMeta.frame_file_names[1:]
# load meta file
train_meta = LJSpeechMeta(os.path.join(meta_dir, train_file))
valid_meta = LJSpeechMeta(os.path.join(meta_dir, valid_file))
# create dataset
train_dataset = AudioDataset(train_meta, crop_length=crop_length, seed=random_seed)
valid_dataset = AudioDataset(valid_meta, crop_length=crop_length, seed=random_seed)
# create data loader
train_loader = DataLoader(train_dataset, batch_size=batch_size, num_workers=num_workers, shuffle=True)
valid_loader = DataLoader(valid_dataset, batch_size=batch_size, num_workers=num_workers)
return train_loader, valid_loader
| 34.090909
| 106
| 0.679111
| 310
| 2,250
| 4.66129
| 0.290323
| 0.089965
| 0.058131
| 0.041522
| 0.160554
| 0.160554
| 0.160554
| 0.114879
| 0.062284
| 0
| 0
| 0.008046
| 0.226667
| 2,250
| 65
| 107
| 34.615385
| 0.822414
| 0.070667
| 0
| 0
| 0
| 0
| 0.021624
| 0
| 0
| 0
| 0
| 0
| 0.026316
| 1
| 0.105263
| false
| 0
| 0.157895
| 0.026316
| 0.368421
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
08e85c7f00798390cfd21fa3cd1b2758063f698c
| 3,830
|
py
|
Python
|
yasmss/sparkmapper/sparkmapper.py
|
AshirwadPradhan/yasmss
|
8b8b7108a3a437f0c757f19225a0c2082dbbd488
|
[
"MIT"
] | null | null | null |
yasmss/sparkmapper/sparkmapper.py
|
AshirwadPradhan/yasmss
|
8b8b7108a3a437f0c757f19225a0c2082dbbd488
|
[
"MIT"
] | 2
|
2019-09-22T03:27:20.000Z
|
2019-09-22T13:56:35.000Z
|
yasmss/sparkmapper/sparkmapper.py
|
AshirwadPradhan/yasmss
|
8b8b7108a3a437f0c757f19225a0c2082dbbd488
|
[
"MIT"
] | 2
|
2019-09-15T13:10:41.000Z
|
2019-10-29T11:20:10.000Z
|
"""Get the parsed query from the driver and apply transformation and action based on the
query template
"""
import time
import pyspark.sql.functions as f
from pyspark.sql import SparkSession
from pyspark.sql.types import IntegerType, StringType, StructField, StructType
import yaml
from schema import schema
with open("config.yaml", 'r') as file:
data = yaml.load(file, Loader=yaml.FullLoader)
baseURI = data['pathconfig']['host_ip_port'] + \
'/' + data['pathconfig']['input_dir']
table_format = '.csv'
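# input tables are CSV files read from <host_ip_port>/<input_dir>, as set in config.yaml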
class SparkJob:
"""Start a spark job based on the query template provided by the user
"""
def __init__(self):
self.queryresult = None
self.trans_actions = None
self.exectime = None
self.classType = None
def _prepareEnv(self):
spark = SparkSession.builder.master(
'local').appName('master_job').getOrCreate()
return spark
def _getKeyType(self, keyType):
if keyType == 'IntegerType':
return IntegerType()
elif keyType == 'StringType':
return StringType()
else:
raise TypeError(keyType+' is not supported')
def _getdata(self, table):
"""Read from csv using sprak.read.csv with schema
Make a YAML file to specify schema and get StructType
"""
spark = self._prepareEnv()
table_schema_dict = schema.Schema().getSchemaDict(table=table)
table_schema_structlist = []
for key, value in table_schema_dict.items():
table_schema_structlist.append(
StructField(key, self._getKeyType(value), True))
table_schema = StructType(table_schema_structlist)
table_data = spark.read.csv(
baseURI+table+table_format, schema=table_schema)
return table_data
def _computeaggr(self, df_fromtable, queryset):
df_tempg = df_fromtable.groupBy(queryset.groupcolumns)
df_tempga = df_tempg.agg({str(queryset.aggcol): str(queryset.aggfunc)})
return df_tempga
def startjob(self, queryset, classType):
self.classType = classType
if classType == 'QuerySetJoin':
start_time = time.time()
df_fromtabledata = self._getdata(queryset.fromtable)
df_jointabledata = self._getdata(queryset.jointable)
on_l = queryset.onlval.split('.')
on_r = queryset.onrval.split('.')
if on_l[1] != on_r[1]:
raise AttributeError(
'Lval and Rval of "On" condition do not match')
df_innerjoin = df_fromtabledata.join(
df_jointabledata, on=on_l[1], how='inner').orderBy(on_l[1], ascending=True)
wherecol = queryset.wherelval.split('.')[1]
try:
# int() raises ValueError when the rval is not numeric, in which case it is quoted below
int(queryset.whererval)
filter_cond = wherecol+queryset.whereop+queryset.whererval
except ValueError:
filter_cond = wherecol+queryset.whereop+'"'+queryset.whererval+'"'
df_finalres = df_innerjoin.where(filter_cond)
self.exectime = (time.time() - start_time)
self.queryresult = df_finalres
self.trans_actions = ['join', 'where']
return self
elif classType == 'QuerySetGroupBy':
start_time = time.time()
df_fromtable = self._getdata(queryset.fromtable)
df_agg_groupby = self._computeaggr(df_fromtable, queryset)
df_finalres = df_agg_groupby.where(queryset.havcond)
self.exectime = (time.time() - start_time)
self.queryresult = df_finalres
self.trans_actions = ['groupby', 'agg', 'where']
return self
else:
raise TypeError('Unidentified Class Type')
| 33.304348
| 91
| 0.621671
| 421
| 3,830
| 5.482185
| 0.35867
| 0.033362
| 0.020797
| 0.012998
| 0.166378
| 0.103986
| 0.103986
| 0.060659
| 0.060659
| 0.060659
| 0
| 0.001814
| 0.280157
| 3,830
| 114
| 92
| 33.596491
| 0.835328
| 0.07154
| 0
| 0.126582
| 0
| 0
| 0.068583
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.075949
| false
| 0
| 0.075949
| 0
| 0.265823
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
08ea5997bb021488c38f1e74924d799e82ac53bd
| 17,456
|
py
|
Python
|
src/rangeen/_emotes.py
|
khera-shanu/Rangeen
|
0a7f7699c0030d28fd42211c1fb33c89ced3e857
|
[
"MIT"
] | null | null | null |
src/rangeen/_emotes.py
|
khera-shanu/Rangeen
|
0a7f7699c0030d28fd42211c1fb33c89ced3e857
|
[
"MIT"
] | null | null | null |
src/rangeen/_emotes.py
|
khera-shanu/Rangeen
|
0a7f7699c0030d28fd42211c1fb33c89ced3e857
|
[
"MIT"
] | null | null | null |
class Emote:
smile = u"😄"
satisfied = u"😆"
blush = u"😊"
smiley = u"😃"
relaxed = u"☺️"
smirk = u"😏"
heart_eyes = u"😍"
kissing_heart = u"😘"
kissing_closed_eyes = u"😚"
flushed = u"😳"
relieved = u"😌"
grin = u"😁"
wink = u"😉"
stuck_out_tongue_winking_eye = u"😜"
stuck_out_tongue_closed_eyes = u"😝"
grinning = u"😀"
kissing = u"😗"
kissing_smiling_eyes = u"😙"
stuck_out_tongue = u"😛"
sleeping = u"😴"
worried = u"😟"
frowning = u"😦"
anguished = u"😧"
open_mouth = u"😮"
grimacing = u"😬"
confused = u"😕"
hushed = u"😯"
expressionless = u"😑"
unamused = u"😒"
sweat_smile = u"😅"
sweat = u"😓"
disappointed_relieved = u"😥"
weary = u"😩"
pensive = u"😔"
disappointed = u"😞"
confounded = u"😖"
fearful = u"😨"
cold_sweat = u"😰"
persevere = u"😣"
cry = u"😢"
sob = u"😭"
joy = u"😂"
astonished = u"😲"
scream = u"😱"
tired_face = u"😫"
angry = u"😠"
rage = u"😡"
triumph = u"😤"
sleepy = u"😪"
yum = u"😋"
mask = u"😷"
sunglasses = u"😎"
dizzy_face = u"😵"
imp = u"👿"
smiling_imp = u"😈"
neutral_face = u"😐"
no_mouth = u"😶"
innocent = u"😇"
alien = u"👽"
yellow_heart = u"💛"
blue_heart = u"💙"
purple_heart = u"💜"
heart = u"❤️"
green_heart = u"💚"
broken_heart = u"💔"
heartbeat = u"💓"
heartpulse = u"💗"
two_hearts = u"💕"
revolving_hearts = u"💞"
cupid = u"💘"
sparkling_heart = u"💖"
sparkles = u"✨"
star = u"⭐️"
star2 = u"🌟"
dizzy = u"💫"
collision = u"💥"
anger = u"💢"
heavy_exclamation_mark = u"❗️"
question = u"❓"
grey_exclamation = u"❕"
grey_question = u"❔"
zzz = u"💤"
dash = u"💨"
sweat_drops = u"💦"
notes = u"🎶"
musical_note = u"🎵"
fire = u"🔥"
shit = u"💩"
thumbsup = u"👍"
thumbsdown = u"👎"
ok_hand = u"👌"
facepunch = u"👊"
fist = u"✊"
v = u"✌️"
wave = u"👋"
raised_hand = u"✋"
open_hands = u"👐"
point_up = u"☝️"
point_down = u"👇"
point_left = u"👈"
point_right = u"👉"
raised_hands = u"🙌"
pray = u"🙏"
point_up_2 = u"👆"
clap = u"👏"
muscle = u"💪"
metal = u"🤘"
fu = u"🖕"
walking = u"🚶"
running = u"🏃"
couple = u"👫"
family = u"👪"
two_men_holding_hands = u"👬"
two_women_holding_hands = u"👭"
dancer = u"💃"
dancers = u"👯"
ok_woman = u"🙆"
no_good = u"🙅"
information_desk_person = u"💁"
raising_hand = u"🙋"
bride_with_veil = u"👰"
person_with_pouting_face = u"🙎"
person_frowning = u"🙍"
bow = u"🙇"
couple_with_heart = u"💑"
massage = u"💆"
haircut = u"💇"
nail_care = u"💅"
boy = u"👦"
girl = u"👧"
woman = u"👩"
man = u"👨"
baby = u"👶"
older_woman = u"👵"
older_man = u"👴"
person_with_blond_hair = u"👱"
man_with_gua_pi_mao = u"👲"
man_with_turban = u"👳"
construction_worker = u"👷"
cop = u"👮"
angel = u"👼"
princess = u"👸"
smiley_cat = u"😺"
smile_cat = u"😸"
heart_eyes_cat = u"😻"
kissing_cat = u"😽"
smirk_cat = u"😼"
scream_cat = u"🙀"
crying_cat_face = u"😿"
joy_cat = u"😹"
pouting_cat = u"😾"
japanese_ogre = u"👹"
japanese_goblin = u"👺"
see_no_evil = u"🙈"
hear_no_evil = u"🙉"
speak_no_evil = u"🙊"
guardsman = u"💂"
skull = u"💀"
paw_prints = u"🐾"
lips = u"👄"
kiss = u"💋"
droplet = u"💧"
ear = u"👂"
eyes = u"👀"
nose = u"👃"
tongue = u"👅"
love_letter = u"💌"
bust_in_silhouette = u"👤"
busts_in_silhouette = u"👥"
speech_balloon = u"💬"
thought_balloon = u"💭"
sunny = u"☀️"
umbrella = u"☔️"
cloud = u"☁️"
snowflake = u"❄️"
snowman = u"⛄️"
zap = u"⚡️"
cyclone = u"🌀"
foggy = u"🌁"
ocean = u"🌊"
cat = u"🐱"
dog = u"🐶"
mouse = u"🐭"
hamster = u"🐹"
rabbit = u"🐰"
wolf = u"🐺"
frog = u"🐸"
tiger = u"🐯"
koala = u"🐨"
bear = u"🐻"
pig = u"🐷"
pig_nose = u"🐽"
cow = u"🐮"
boar = u"🐗"
monkey_face = u"🐵"
monkey = u"🐒"
horse = u"🐴"
racehorse = u"🐎"
camel = u"🐫"
sheep = u"🐑"
elephant = u"🐘"
panda_face = u"🐼"
snake = u"🐍"
bird = u"🐦"
baby_chick = u"🐤"
hatched_chick = u"🐥"
hatching_chick = u"🐣"
chicken = u"🐔"
penguin = u"🐧"
turtle = u"🐢"
bug = u"🐛"
honeybee = u"🐝"
ant = u"🐜"
beetle = u"🐞"
snail = u"🐌"
octopus = u"🐙"
tropical_fish = u"🐠"
fish = u"🐟"
whale = u"🐳"
whale2 = u"🐋"
dolphin = u"🐬"
cow2 = u"🐄"
ram = u"🐏"
rat = u"🐀"
water_buffalo = u"🐃"
tiger2 = u"🐅"
rabbit2 = u"🐇"
dragon = u"🐉"
goat = u"🐐"
rooster = u"🐓"
dog2 = u"🐕"
pig2 = u"🐖"
mouse2 = u"🐁"
ox = u"🐂"
dragon_face = u"🐲"
blowfish = u"🐡"
crocodile = u"🐊"
dromedary_camel = u"🐪"
leopard = u"🐆"
cat2 = u"🐈"
poodle = u"🐩"
bouquet = u"💐"
cherry_blossom = u"🌸"
tulip = u"🌷"
four_leaf_clover = u"🍀"
rose = u"🌹"
sunflower = u"🌻"
hibiscus = u"🌺"
maple_leaf = u"🍁"
leaves = u"🍃"
fallen_leaf = u"🍂"
herb = u"🌿"
mushroom = u"🍄"
cactus = u"🌵"
palm_tree = u"🌴"
evergreen_tree = u"🌲"
deciduous_tree = u"🌳"
chestnut = u"🌰"
seedling = u"🌱"
blossom = u"🌼"
ear_of_rice = u"🌾"
shell = u"🐚"
globe_with_meridians = u"🌐"
sun_with_face = u"🌞"
full_moon_with_face = u"🌝"
new_moon_with_face = u"🌚"
new_moon = u"🌑"
waxing_crescent_moon = u"🌒"
first_quarter_moon = u"🌓"
moon = u"🌔"
full_moon = u"🌕"
waning_gibbous_moon = u"🌖"
last_quarter_moon = u"🌗"
waning_crescent_moon = u"🌘"
last_quarter_moon_with_face = u"🌜"
first_quarter_moon_with_face = u"🌛"
earth_africa = u"🌍"
earth_americas = u"🌎"
earth_asia = u"🌏"
volcano = u"🌋"
milky_way = u"🌌"
partly_sunny = u"⛅️"
bamboo = u"🎍"
gift_heart = u"💝"
dolls = u"🎎"
school_satchel = u"🎒"
mortar_board = u"🎓"
flags = u"🎏"
fireworks = u"🎆"
sparkler = u"🎇"
wind_chime = u"🎐"
rice_scene = u"🎑"
jack_o_lantern = u"🎃"
ghost = u"👻"
santa = u"🎅"
christmas_tree = u"🎄"
gift = u"🎁"
bell = u"🔔"
no_bell = u"🔕"
tanabata_tree = u"🎋"
tada = u"🎉"
confetti_ball = u"🎊"
balloon = u"🎈"
crystal_ball = u"🔮"
cd = u"💿"
dvd = u"📀"
floppy_disk = u"💾"
camera = u"📷"
video_camera = u"📹"
movie_camera = u"🎥"
computer = u"💻"
tv = u"📺"
iphone = u"📱"
telephone = u"☎️"
telephone_receiver = u"📞"
pager = u"📟"
fax = u"📠"
minidisc = u"💽"
vhs = u"📼"
sound = u"🔉"
speaker = u"🔈"
mute = u"🔇"
loudspeaker = u"📢"
mega = u"📣"
hourglass = u"⌛️"
hourglass_flowing_sand = u"⏳"
alarm_clock = u"⏰"
watch = u"⌚️"
radio = u"📻"
satellite = u"📡"
loop = u"➿"
mag = u"🔍"
mag_right = u"🔎"
unlock = u"🔓"
lock = u"🔒"
lock_with_ink_pen = u"🔏"
closed_lock_with_key = u"🔐"
key = u"🔑"
bulb = u"💡"
flashlight = u"🔦"
high_brightness = u"🔆"
low_brightness = u"🔅"
electric_plug = u"🔌"
battery = u"🔋"
calling = u"📲"
envelope = u"✉️"
mailbox = u"📫"
postbox = u"📮"
bath = u"🛀"
bathtub = u"🛁"
shower = u"🚿"
toilet = u"🚽"
wrench = u"🔧"
nut_and_bolt = u"🔩"
hammer = u"🔨"
seat = u"💺"
moneybag = u"💰"
yen = u"💴"
dollar = u"💵"
pound = u"💷"
euro = u"💶"
credit_card = u"💳"
money_with_wings = u"💸"
e_mail = u"📧"
inbox_tray = u"📥"
outbox_tray = u"📤"
incoming_envelope = u"📨"
postal_horn = u"📯"
mailbox_closed = u"📪"
mailbox_with_mail = u"📬"
mailbox_with_no_mail = u"📭"
door = u"🚪"
smoking = u"🚬"
bomb = u"💣"
gun = u"🔫"
hocho = u"🔪"
pill = u"💊"
syringe = u"💉"
page_facing_up = u"📄"
page_with_curl = u"📃"
bookmark_tabs = u"📑"
bar_chart = u"📊"
chart_with_upwards_trend = u"📈"
chart_with_downwards_trend = u"📉"
scroll = u"📜"
clipboard = u"📋"
calendar = u"📆"
date = u"📅"
card_index = u"📇"
file_folder = u"📁"
open_file_folder = u"📂"
scissors = u"✂️"
pushpin = u"📌"
paperclip = u"📎"
black_nib = u"✒️"
pencil2 = u"✏️"
straight_ruler = u"📏"
triangular_ruler = u"📐"
closed_book = u"📕"
green_book = u"📗"
blue_book = u"📘"
orange_book = u"📙"
notebook = u"📓"
notebook_with_decorative_cover = u"📔"
ledger = u"📒"
books = u"📚"
bookmark = u"🔖"
name_badge = u"📛"
microscope = u"🔬"
telescope = u"🔭"
newspaper = u"📰"
football = u"🏈"
basketball = u"🏀"
soccer = u"⚽️"
baseball = u"⚾️"
tennis = u"🎾"
_8ball = u"🎱"
rugby_football = u"🏉"
bowling = u"🎳"
golf = u"⛳️"
mountain_bicyclist = u"🚵"
bicyclist = u"🚴"
horse_racing = u"🏇"
snowboarder = u"🏂"
swimmer = u"🏊"
surfer = u"🏄"
ski = u"🎿"
spades = u"♠️"
hearts = u"♥️"
clubs = u"♣️"
diamonds = u"♦️"
gem = u"💎"
ring = u"💍"
trophy = u"🏆"
musical_score = u"🎼"
musical_keyboard = u"🎹"
violin = u"🎻"
space_invader = u"👾"
video_game = u"🎮"
black_joker = u"🃏"
flower_playing_cards = u"🎴"
game_die = u"🎲"
dart = u"🎯"
mahjong = u"🀄️"
clapper = u"🎬"
pencil = u"📝"
book = u"📖"
art = u"🎨"
microphone = u"🎤"
headphones = u"🎧"
trumpet = u"🎺"
saxophone = u"🎷"
guitar = u"🎸"
mans_shoe = u"👞"
sandal = u"👡"
high_heel = u"👠"
lipstick = u"💄"
boot = u"👢"
tshirt = u"👕"
necktie = u"👔"
womans_clothes = u"👚"
dress = u"👗"
running_shirt_with_sash = u"🎽"
jeans = u"👖"
kimono = u"👘"
bikini = u"👙"
ribbon = u"🎀"
tophat = u"🎩"
crown = u"👑"
womans_hat = u"👒"
closed_umbrella = u"🌂"
briefcase = u"💼"
handbag = u"👜"
pouch = u"👝"
purse = u"👛"
eyeglasses = u"👓"
fishing_pole_and_fish = u"🎣"
coffee = u"☕️"
tea = u"🍵"
sake = u"🍶"
baby_bottle = u"🍼"
beer = u"🍺"
beers = u"🍻"
cocktail = u"🍸"
tropical_drink = u"🍹"
wine_glass = u"🍷"
fork_and_knife = u"🍴"
pizza = u"🍕"
hamburger = u"🍔"
fries = u"🍟"
poultry_leg = u"🍗"
meat_on_bone = u"🍖"
spaghetti = u"🍝"
curry = u"🍛"
fried_shrimp = u"🍤"
bento = u"🍱"
sushi = u"🍣"
fish_cake = u"🍥"
rice_ball = u"🍙"
rice_cracker = u"🍘"
rice = u"🍚"
ramen = u"🍜"
stew = u"🍲"
oden = u"🍢"
dango = u"🍡"
egg = u"🥚"
bread = u"🍞"
doughnut = u"🍩"
custard = u"🍮"
icecream = u"🍦"
ice_cream = u"🍨"
shaved_ice = u"🍧"
birthday = u"🎂"
cake = u"🍰"
cookie = u"🍪"
chocolate_bar = u"🍫"
candy = u"🍬"
lollipop = u"🍭"
honey_pot = u"🍯"
apple = u"🍎"
green_apple = u"🍏"
tangerine = u"🍊"
lemon = u"🍋"
cherries = u"🍒"
grapes = u"🍇"
watermelon = u"🍉"
strawberry = u"🍓"
peach = u"🍑"
melon = u"🍈"
banana = u"🍌"
pear = u"🍐"
pineapple = u"🍍"
sweet_potato = u"🍠"
eggplant = u"🍆"
tomato = u"🍅"
corn = u"🌽"
house = u"🏠"
house_with_garden = u"🏡"
school = u"🏫"
office = u"🏢"
post_office = u"🏣"
hospital = u"🏥"
bank = u"🏦"
convenience_store = u"🏪"
love_hotel = u"🏩"
hotel = u"🏨"
wedding = u"💒"
church = u"⛪️"
department_store = u"🏬"
european_post_office = u"🏤"
city_sunrise = u"🌇"
city_sunset = u"🌆"
japanese_castle = u"🏯"
european_castle = u"🏰"
tent = u"⛺️"
factory = u"🏭"
tokyo_tower = u"🗼"
japan = u"🗾"
mount_fuji = u"🗻"
sunrise_over_mountains = u"🌄"
sunrise = u"🌅"
stars = u"🌠"
statue_of_liberty = u"🗽"
bridge_at_night = u"🌉"
carousel_horse = u"🎠"
rainbow = u"🌈"
ferris_wheel = u"🎡"
fountain = u"⛲️"
roller_coaster = u"🎢"
ship = u"🚢"
speedboat = u"🚤"
sailboat = u"⛵️"
rowboat = u"🚣"
anchor = u"⚓️"
rocket = u"🚀"
airplane = u"✈️"
helicopter = u"🚁"
steam_locomotive = u"🚂"
tram = u"🚊"
mountain_railway = u"🚞"
bike = u"🚲"
aerial_tramway = u"🚡"
suspension_railway = u"🚟"
mountain_cableway = u"🚠"
tractor = u"🚜"
blue_car = u"🚙"
oncoming_automobile = u"🚘"
red_car = u"🚗"
taxi = u"🚕"
oncoming_taxi = u"🚖"
articulated_lorry = u"🚛"
bus = u"🚌"
oncoming_bus = u"🚍"
rotating_light = u"🚨"
police_car = u"🚓"
oncoming_police_car = u"🚔"
fire_engine = u"🚒"
ambulance = u"🚑"
minibus = u"🚐"
truck = u"🚚"
train = u"🚋"
station = u"🚉"
train2 = u"🚆"
bullettrain_front = u"🚅"
bullettrain_side = u"🚄"
light_rail = u"🚈"
monorail = u"🚝"
railway_car = u"🚃"
trolleybus = u"🚎"
ticket = u"🎫"
fuelpump = u"⛽️"
vertical_traffic_light = u"🚦"
traffic_light = u"🚥"
warning = u"⚠️"
construction = u"🚧"
beginner = u"🔰"
atm = u"🏧"
slot_machine = u"🎰"
busstop = u"🚏"
barber = u"💈"
hotsprings = u"♨️"
checkered_flag = u"🏁"
crossed_flags = u"🎌"
izakaya_lantern = u"🏮"
moyai = u"🗿"
circus_tent = u"🎪"
performing_arts = u"🎭"
round_pushpin = u"📍"
triangular_flag_on_post = u"🚩"
one = u"1️⃣"
two = u"2️⃣"
three = u"3️⃣"
four = u"4️⃣"
five = u"5️⃣"
six = u"6️⃣"
seven = u"7️⃣"
eight = u"8️⃣"
nine = u"9️⃣"
keycap_ten = u"🔟"
_1234 = u"🔢"
zero = u"0️⃣"
hash = u"#️⃣"
symbols = u"🔣"
arrow_backward = u"◀️"
arrow_down = u"⬇️"
arrow_forward = u"▶️"
arrow_left = u"⬅️"
capital_abcd = u"🔠"
abcd = u"🔡"
abc = u"🔤"
arrow_lower_left = u"↙️"
arrow_lower_right = u"↘️"
arrow_right = u"➡️"
arrow_up = u"⬆️"
arrow_upper_left = u"↖️"
arrow_upper_right = u"↗️"
arrow_double_down = u"⏬"
arrow_double_up = u"⏫"
arrow_down_small = u"🔽"
arrow_heading_down = u"⤵️"
arrow_heading_up = u"⤴️"
leftwards_arrow_with_hook = u"↩️"
arrow_right_hook = u"↪️"
left_right_arrow = u"↔️"
arrow_up_down = u"↕️"
arrow_up_small = u"🔼"
arrows_clockwise = u"🔃"
arrows_counterclockwise = u"🔄"
rewind = u"⏪"
fast_forward = u"⏩"
information_source = u"ℹ️"
ok = u"🆗"
twisted_rightwards_arrows = u"🔀"
repeat = u"🔁"
repeat_one = u"🔂"
new = u"🆕"
top = u"🔝"
up = u"🆙"
cool = u"🆒"
free = u"🆓"
ng = u"🆖"
cinema = u"🎦"
koko = u"🈁"
signal_strength = u"📶"
u5272 = u"🈹"
u5408 = u"🈴"
u55b6 = u"🈺"
u6307 = u"🈯️"
u6708 = u"🈷️"
u6709 = u"🈶"
u6e80 = u"🈵"
u7121 = u"🈚️"
u7533 = u"🈸"
u7a7a = u"🈳"
u7981 = u"🈲"
sa = u"🈂️"
restroom = u"🚻"
mens = u"🚹"
womens = u"🚺"
baby_symbol = u"🚼"
no_smoking = u"🚭"
parking = u"🅿️"
wheelchair = u"♿️"
metro = u"🚇"
baggage_claim = u"🛄"
accept = u"🉑"
wc = u"🚾"
potable_water = u"🚰"
put_litter_in_its_place = u"🚮"
secret = u"㊙️"
congratulations = u"㊗️"
m = u"Ⓜ️"
passport_control = u"🛂"
left_luggage = u"🛅"
customs = u"🛃"
ideograph_advantage = u"🉐"
cl = u"🆑"
sos = u"🆘"
id = u"🆔"
no_entry_sign = u"🚫"
underage = u"🔞"
no_mobile_phones = u"📵"
do_not_litter = u"🚯"
non_potable_water = u"🚱"
no_bicycles = u"🚳"
no_pedestrians = u"🚷"
children_crossing = u"🚸"
no_entry = u"⛔️"
eight_spoked_asterisk = u"✳️"
eight_pointed_black_star = u"✴️"
heart_decoration = u"💟"
vs = u"🆚"
vibration_mode = u"📳"
mobile_phone_off = u"📴"
chart = u"💹"
currency_exchange = u"💱"
aries = u"♈️"
taurus = u"♉️"
gemini = u"♊️"
cancer = u"♋️"
leo = u"♌️"
virgo = u"♍️"
libra = u"♎️"
scorpius = u"♏️"
sagittarius = u"♐️"
capricorn = u"♑️"
aquarius = u"♒️"
pisces = u"♓️"
ophiuchus = u"⛎"
six_pointed_star = u"🔯"
negative_squared_cross_mark = u"❎"
a = u"🅰️"
b = u"🅱️"
ab = u"🆎"
o2 = u"🅾️"
diamond_shape_with_a_dot_inside = u"💠"
recycle = u"♻️"
end = u"🔚"
on = u"🔛"
soon = u"🔜"
clock1 = u"🕐"
clock130 = u"🕜"
clock10 = u"🕙"
clock1030 = u"🕥"
clock11 = u"🕚"
clock1130 = u"🕦"
clock12 = u"🕛"
clock1230 = u"🕧"
clock2 = u"🕑"
clock230 = u"🕝"
clock3 = u"🕒"
clock330 = u"🕞"
clock4 = u"🕓"
clock430 = u"🕟"
clock5 = u"🕔"
clock530 = u"🕠"
clock6 = u"🕕"
clock630 = u"🕡"
clock7 = u"🕖"
clock730 = u"🕢"
clock8 = u"🕗"
clock830 = u"🕣"
clock9 = u"🕘"
clock930 = u"🕤"
heavy_dollar_sign = u"💲"
copyright = u"©️"
registered = u"®️"
tm = u"™️"
x = u"❌"
bangbang = u"‼️"
interrobang = u"⁉️"
o = u"⭕️"
heavy_multiplication_x = u"✖️"
heavy_plus_sign = u"➕"
heavy_minus_sign = u"➖"
heavy_division_sign = u"➗"
white_flower = u"💮"
_100 = u"💯"
heavy_check_mark = u"✔️"
ballot_box_with_check = u"☑️"
radio_button = u"🔘"
link = u"🔗"
curly_loop = u"➰"
wavy_dash = u"〰️"
part_alternation_mark = u"〽️"
trident = u"🔱"
white_check_mark = u"✅"
black_square_button = u"🔲"
white_square_button = u"🔳"
black_circle = u"⚫️"
white_circle = u"⚪️"
red_circle = u"🔴"
large_blue_circle = u"🔵"
large_blue_diamond = u"🔷"
large_orange_diamond = u"🔶"
small_blue_diamond = u"🔹"
small_orange_diamond = u"🔸"
small_red_triangle = u"🔺"
small_red_triangle_down = u"🔻"
| 21.261876
| 42
| 0.477601
| 2,942
| 17,456
| 2.990823
| 0.56968
| 0.007387
| 0.005114
| 0.00591
| 0.004546
| 0
| 0
| 0
| 0
| 0
| 0
| 0.010665
| 0.32854
| 17,456
| 820
| 43
| 21.287805
| 0.66078
| 0
| 0
| 0
| 0
| 0
| 0.053678
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0.00122
| 0
| 0
| 1
| 0.00122
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
08f00026b0a8d4a6cccad1a88563ce7a5b83f749
| 1,522
|
py
|
Python
|
src/config.py
|
DQiaole/ZITS
|
5f7a060167790789d5e29a3d14d3c2ef8a34e765
|
[
"Apache-2.0"
] | 40
|
2022-03-02T06:12:43.000Z
|
2022-03-30T02:17:02.000Z
|
src/config.py
|
DQiaole/ZITS
|
5f7a060167790789d5e29a3d14d3c2ef8a34e765
|
[
"Apache-2.0"
] | 6
|
2022-03-06T03:53:14.000Z
|
2022-03-31T06:36:34.000Z
|
src/config.py
|
DQiaole/ZITS
|
5f7a060167790789d5e29a3d14d3c2ef8a34e765
|
[
"Apache-2.0"
] | 5
|
2022-03-04T06:39:44.000Z
|
2022-03-28T04:58:32.000Z
|
import os
import yaml
class Config(dict):
def __init__(self, config_path):
with open(config_path, 'r') as f:
self._yaml = f.read()
self._dict = yaml.load(self._yaml, Loader=yaml.FullLoader)
self._dict['PATH'] = os.path.dirname(config_path)
def __getattr__(self, name):
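# lookup order: value from the loaded YAML, then DEFAULT_CONFIG, then None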
if self._dict.get(name) is not None:
return self._dict[name]
if DEFAULT_CONFIG.get(name) is not None:
return DEFAULT_CONFIG[name]
return None
def print(self):
print('Model configurations:')
print('---------------------------------')
print(self._yaml)
print('')
print('---------------------------------')
print('')
DEFAULT_CONFIG = {
'SEED': 10, # random seed
'BATCH_SIZE': 8, # input batch size for training
'INPUT_SIZE': 256, # input image size for training 0 for original size
'MAX_ITERS': 1e6, # maximum number of iterations to train the model
'SAVE_INTERVAL': 1000, # how many iterations to wait before saving model (0: never)
'SAMPLE_INTERVAL': 1000, # how many iterations to wait before sampling (0: never)
'SAMPLE_SIZE': 12, # number of images to sample
'EVAL_INTERVAL': 0, # how many iterations to wait before model evaluation (0: never)
'LOG_INTERVAL': 10, # how many iterations to wait before logging training status (0: never)
}
| 35.395349
| 107
| 0.557819
| 180
| 1,522
| 4.555556
| 0.405556
| 0.073171
| 0.082927
| 0.092683
| 0.22439
| 0.22439
| 0.1
| 0.1
| 0
| 0
| 0
| 0.024598
| 0.305519
| 1,522
| 42
| 108
| 36.238095
| 0.751183
| 0.271353
| 0
| 0.125
| 0
| 0
| 0.171975
| 0.060055
| 0
| 0
| 0
| 0
| 0
| 1
| 0.09375
| false
| 0
| 0.0625
| 0
| 0.28125
| 0.21875
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
08f0432c93f8f390bd7d7a71479785cb462167ba
| 8,786
|
py
|
Python
|
examples/acados_python/test/generate_c_code.py
|
besticka/acados
|
32767a19aed01a15b5e7b83ebc6ddbd669a47954
|
[
"BSD-2-Clause"
] | null | null | null |
examples/acados_python/test/generate_c_code.py
|
besticka/acados
|
32767a19aed01a15b5e7b83ebc6ddbd669a47954
|
[
"BSD-2-Clause"
] | null | null | null |
examples/acados_python/test/generate_c_code.py
|
besticka/acados
|
32767a19aed01a15b5e7b83ebc6ddbd669a47954
|
[
"BSD-2-Clause"
] | null | null | null |
#
# Copyright 2019 Gianluca Frison, Dimitris Kouzoupis, Robin Verschueren,
# Andrea Zanelli, Niels van Duijkeren, Jonathan Frey, Tommaso Sartor,
# Branimir Novoselnik, Rien Quirynen, Rezart Qelibari, Dang Doan,
# Jonas Koenemann, Yutao Chen, Tobias Schöls, Jonas Schlagenhauf, Moritz Diehl
#
# This file is part of acados.
#
# The 2-Clause BSD License
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.;
#
from acados_template import *
import acados_template as at
from export_ode_model import *
import numpy as np
import scipy.linalg
from ctypes import *
import json
import argparse
import sys  # for sys.exit on solver failure below
# set to 'True' to generate test data
GENERATE_DATA = False
LOCAL_TEST = False
TEST_TOL = 1e-8
if LOCAL_TEST is True:
FORMULATION = 'LS'
SOLVER_TYPE = 'SQP_RTI'
QP_SOLVER = 'FULL_CONDENSING_QPOASES'
INTEGRATOR_TYPE = 'IRK'
else:
parser = argparse.ArgumentParser(description='test Python interface on pendulum example.')
parser.add_argument('--FORMULATION', dest='FORMULATION',
default='LS',
help='FORMULATION: linear least-squares (LS) or nonlinear \
least-squares (NLS) (default: LS)')
parser.add_argument('--QP_SOLVER', dest='QP_SOLVER',
default='PARTIAL_CONDENSING_HPIPM',
help='QP_SOLVER: PARTIAL_CONDENSING_HPIPM, FULL_CONDENSING_HPIPM, ' \
'FULL_CONDENSING_QPOASES (default: PARTIAL_CONDENSING_HPIPM)')
parser.add_argument('--INTEGRATOR_TYPE', dest='INTEGRATOR_TYPE',
default='ERK',
help='INTEGRATOR_TYPE: explicit (ERK) or implicit (IRK) ' \
' Runge-Kutta (default: ERK)')
parser.add_argument('--SOLVER_TYPE', dest='SOLVER_TYPE',
default='SQP_RTI',
help='SOLVER_TYPE: (full step) sequential quadratic programming (SQP) or ' \
' real-time iteration (SQP-RTI) (default: SQP-RTI)')
args = parser.parse_args()
FORMULATION = args.FORMULATION
FORMULATION_values = ['LS', 'NLS']
if FORMULATION not in FORMULATION_values:
raise Exception('Invalid unit test value {} for parameter FORMULATION. Possible values are' \
' {}. Exiting.'.format(FORMULATION, FORMULATION_values))
QP_SOLVER = args.QP_SOLVER
QP_SOLVER_values = ['PARTIAL_CONDENSING_HPIPM', 'FULL_CONDENSING_HPIPM', 'FULL_CONDENSING_QPOASES']
if QP_SOLVER not in QP_SOLVER_values:
raise Exception('Invalid unit test value {} for parameter QP_SOLVER. Possible values are' \
' {}. Exiting.'.format(QP_SOLVER, QP_SOLVER_values))
INTEGRATOR_TYPE = args.INTEGRATOR_TYPE
INTEGRATOR_TYPE_values = ['ERK', 'IRK']
if INTEGRATOR_TYPE not in INTEGRATOR_TYPE_values:
raise Exception('Invalid unit test value {} for parameter INTEGRATOR_TYPE. Possible values are' \
' {}. Exiting.'.format(INTEGRATOR_TYPE, INTEGRATOR_TYPE_values))
SOLVER_TYPE = args.SOLVER_TYPE
SOLVER_TYPE_values = ['SQP', 'SQP_RTI']
if SOLVER_TYPE not in SOLVER_TYPE_values:
raise Exception('Invalid unit test value {} for parameter SOLVER_TYPE. Possible values are' \
' {}. Exiting.'.format(SOLVER_TYPE, SOLVER_TYPE_values))
# print test setting
print("Running test with:\n\tformulation:", FORMULATION, "\n\tqp solver: ", QP_SOLVER,\
"\n\tintergrator: ", INTEGRATOR_TYPE, "\n\tsolver: ", SOLVER_TYPE)
# create render arguments
ocp = acados_ocp_nlp()
# export model
model = export_ode_model()
# set model_name
ocp.model = model
Tf = 2.0
nx = model.x.size()[0]
nu = model.u.size()[0]
ny = nx + nu
ny_e = nx
N = 50
# set ocp_nlp_dimensions
nlp_dims = ocp.dims
nlp_dims.nx = nx
nlp_dims.ny = ny
nlp_dims.ny_e = ny_e
nlp_dims.nbx = 0
nlp_dims.nbu = nu
nlp_dims.nu = model.u.size()[0]
nlp_dims.N = N
# set weighting matrices
nlp_cost = ocp.cost
if FORMULATION == 'LS':
nlp_cost.cost_type = 'LINEAR_LS'
nlp_cost.cost_type_e = 'LINEAR_LS'
elif FORMULATION == 'NLS':
nlp_cost.cost_type = 'NONLINEAR_LS'
nlp_cost.cost_type_e = 'NONLINEAR_LS'
else:
raise Exception('Unknown FORMULATION. Possible values are \'LS\' and \'NLS\'.')
Q = np.eye(4)
Q[0,0] = 1e0
Q[1,1] = 1e2
Q[2,2] = 1e-3
Q[3,3] = 1e-2
R = np.eye(1)
R[0,0] = 1e0
unscale = N/Tf
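# rescale the stage weights by N/Tf; the terminal weight W_e is scaled back by 1/unscale below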
Q = Q * unscale
R = R * unscale
if FORMULATION == 'NLS':
nlp_cost.W = scipy.linalg.block_diag(R, Q)
else:
nlp_cost.W = scipy.linalg.block_diag(Q, R)
nlp_cost.W_e = Q/unscale
Vx = np.zeros((ny, nx))
Vx[0,0] = 1.0
Vx[1,1] = 1.0
Vx[2,2] = 1.0
Vx[3,3] = 1.0
nlp_cost.Vx = Vx
Vu = np.zeros((ny, nu))
Vu[4,0] = 1.0
nlp_cost.Vu = Vu
Vx_e = np.zeros((ny_e, nx))
Vx_e[0,0] = 1.0
Vx_e[1,1] = 1.0
Vx_e[2,2] = 1.0
Vx_e[3,3] = 1.0
nlp_cost.Vx_e = Vx_e
if FORMULATION == 'NLS':
x = SX.sym('x', 4, 1)
u = SX.sym('u', 1, 1)
ocp.cost_r.expr = vertcat(u, x)
ocp.cost_r.x = x
ocp.cost_r.u = u
ocp.cost_r.name = 'lin_res'
ocp.cost_r.ny = nx + nu
ocp.cost_r_e.expr = x
ocp.cost_r_e.x = x
ocp.cost_r_e.name = 'lin_res'
ocp.cost_r_e.ny = nx
nlp_cost.yref = np.zeros((ny, ))
nlp_cost.yref_e = np.zeros((ny_e, ))
# setting bounds
Fmax = 2.0
nlp_con = ocp.constraints
nlp_con.lbu = np.array([-Fmax])
nlp_con.ubu = np.array([+Fmax])
nlp_con.x0 = np.array([0.0, 3.14, 0.0, 0.0])
nlp_con.idxbu = np.array([0])
# set QP solver
ocp.solver_options.qp_solver = QP_SOLVER
ocp.solver_options.hessian_approx = 'GAUSS_NEWTON'
ocp.solver_options.integrator_type = INTEGRATOR_TYPE
ocp.solver_options.sim_method_num_stages = 2
ocp.solver_options.sim_method_num_steps = 5
# set prediction horizon
ocp.solver_options.tf = Tf
ocp.solver_options.nlp_solver_type = SOLVER_TYPE
# set header path
ocp.acados_include_path = '../../../../include'
ocp.acados_lib_path = '../../../../lib'
acados_solver = generate_solver(ocp, json_file = 'acados_ocp.json')
Nsim = 100
simX = np.ndarray((Nsim, nx))
simU = np.ndarray((Nsim, nu))
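# closed-loop simulation: solve the OCP, log the first stage's state and input, then shift the initial condition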
for i in range(Nsim):
status = acados_solver.solve()
if status != 0:
print("acados failure! Exiting. \n")
sys.exit(status)
# get solution
x0 = acados_solver.get(0, "x")
u0 = acados_solver.get(0, "u")
for j in range(nx):
simX[i,j] = x0[j]
for j in range(nu):
simU[i,j] = u0[j]
# update initial condition
x0 = acados_solver.get(1, "x")
acados_solver.set(0, "lbx", x0)
acados_solver.set(0, "ubx", x0)
# update reference
for j in range(N):
acados_solver.set(j, "yref", np.array([0, 0, 0, 0, 0]))
acados_solver.set(N, "yref", np.array([0, 0, 0, 0]))
# dump result to JSON file for unit testing
test_file_name = 'test_data/generate_c_code_out_' + FORMULATION + '_' + QP_SOLVER + '_' + \
INTEGRATOR_TYPE + '_' + SOLVER_TYPE + '.json'
if GENERATE_DATA:
with open(test_file_name, 'w') as f:
json.dump({"simX": simX.tolist(), "simU": simU.tolist()}, f, indent=4, sort_keys=True)
else:
with open(test_file_name, 'r') as f:
test_data = json.load(f)
simX_error = np.linalg.norm(test_data['simX'] - simX)
simU_error = np.linalg.norm(test_data['simU'] - simU)
if simX_error > TEST_TOL or simU_error > TEST_TOL:
raise Exception("Python acados test failure with accuracies {:.2E} and {:.2E} ({:.2E} required) on pendulum example! Exiting.\n".format(simX_error, simU_error, TEST_TOL))
else:
print('Python test passed with accuracy {:.2E}'.format(max(simU_error, simX_error)))
| 31.604317
| 178
| 0.669019
| 1,312
| 8,786
| 4.307165
| 0.260671
| 0.024067
| 0.012741
| 0.020527
| 0.236949
| 0.142984
| 0.099451
| 0.079632
| 0.060166
| 0.024067
| 0
| 0.01787
| 0.210221
| 8,786
| 277
| 179
| 31.718412
| 0.796512
| 0.214546
| 0
| 0.039548
| 0
| 0.00565
| 0.213629
| 0.037794
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0.00565
| 0.045198
| 0
| 0.045198
| 0.016949
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
08f3eae9e91dde600e2781b52aa83909fff87587
| 1,560
|
py
|
Python
|
prob_h.py
|
ShinjiKatoA16/icpc2017ucsy
|
de1954620036e8025b7b4c1b469e6b8c57af212e
|
[
"MIT"
] | null | null | null |
prob_h.py
|
ShinjiKatoA16/icpc2017ucsy
|
de1954620036e8025b7b4c1b469e6b8c57af212e
|
[
"MIT"
] | null | null | null |
prob_h.py
|
ShinjiKatoA16/icpc2017ucsy
|
de1954620036e8025b7b4c1b469e6b8c57af212e
|
[
"MIT"
] | null | null | null |
#!/usr/bin/python3
# -*- coding: utf-8 -*-
'''
2017 ICPC at UCSY
Problem-H: Sum Square
'''
import sys
class TestCase():
pass
def parse_tc(tc):
'''
Input: Test Case
Update:
Return: None
'''
x = list(map(int,tc.infile.readline().split()))
tc.dataset = x[0]
tc.max_num = x[1]
tc.base = x[2]
tc.a0 = x[3]
return
def ssd(b, n):
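'''Sum of squared digits of n written in base b.'''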
val = 0
while n > 0:
val += (n % b) ** 2
n //= b
return val
def prt_list(_list):
while len(_list) >= 20:
s = ' '.join(map(str,_list[:20]))
print(s)
_list = _list[20:]
if len(_list):
s = ' '.join(map(str, _list))
print(s)
return
def solve(tc):
'''
Input: Test Case
Return: None
'''
parse_tc(tc)
ak = tc.a0
ssd_list = [ak]
for i in range(tc.max_num):
ssd_val = ssd(tc.base, ak)
if ssd_val in ssd_list:
index_k = ssd_list.index(ssd_val)
print(tc.dataset, len(ssd_list)+1, len(ssd_list)-index_k)
ssd_list.append(ssd_val)
prt_list(ssd_list[index_k:])
break
ssd_list.append(ssd_val)
ak = ssd_val
else:
print(tc.dataset, tc.max_num, 0)
print(ak)
return
##
## Main routine
##
if __name__ == '__main__':
tc = TestCase()
tc.infile = sys.stdin
tc.t = int(tc.infile.readline())
for i in range(tc.t):
solve(tc)
if tc.infile != sys.stdin:
tc.infile.close()
| 16.956522
| 69
| 0.501923
| 225
| 1,560
| 3.306667
| 0.328889
| 0.075269
| 0.064516
| 0.052419
| 0.219086
| 0.053763
| 0
| 0
| 0
| 0
| 0
| 0.02266
| 0.349359
| 1,560
| 91
| 70
| 17.142857
| 0.710345
| 0.114744
| 0
| 0.137255
| 0
| 0
| 0.007722
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.078431
| false
| 0.019608
| 0.019608
| 0
| 0.196078
| 0.098039
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
08f5c575bcbcd0ee74f875b6fd32a403f396576c
| 6,819
|
py
|
Python
|
neuralpredictors/layers/readouts/factorized.py
|
kellirestivo/neuralpredictors
|
57205a90d2e3daa5f8746c6ef6170be9e35cb5f5
|
[
"MIT"
] | 9
|
2020-11-26T18:22:32.000Z
|
2022-01-22T15:51:52.000Z
|
neuralpredictors/layers/readouts/factorized.py
|
kellirestivo/neuralpredictors
|
57205a90d2e3daa5f8746c6ef6170be9e35cb5f5
|
[
"MIT"
] | 60
|
2020-10-21T15:32:28.000Z
|
2022-02-25T10:38:16.000Z
|
neuralpredictors/layers/readouts/factorized.py
|
mohammadbashiri/neuralpredictors
|
8e60c9ce91f83e3dcaa1b3dbe4422e1509ccbd5f
|
[
"MIT"
] | 21
|
2020-10-21T09:29:17.000Z
|
2022-02-07T10:04:46.000Z
|
import torch
from torch import nn as nn
import numpy as np
from .base import Readout
class FullFactorized2d(Readout):
"""
Factorized fully connected layer. Weights are a sum of outer products between a spatial filter and a feature vector.
"""
def __init__(
self,
in_shape,
outdims,
bias,
normalize=True,
init_noise=1e-3,
constrain_pos=False,
positive_weights=False,
shared_features=None,
mean_activity=None,
spatial_and_feature_reg_weight=1.0,
gamma_readout=None,  # deprecated, use spatial_and_feature_reg_weight instead
**kwargs,
):
super().__init__()
c, w, h = in_shape
self.in_shape = in_shape
self.outdims = outdims
self.positive_weights = positive_weights
self.constrain_pos = constrain_pos
self.init_noise = init_noise
self.normalize = normalize
self.mean_activity = mean_activity
self.spatial_and_feature_reg_weight = self.resolve_deprecated_gamma_readout(
spatial_and_feature_reg_weight, gamma_readout
)
self._original_features = True
self.initialize_features(**(shared_features or {}))
self.spatial = nn.Parameter(torch.Tensor(self.outdims, w, h))
if bias:
bias = nn.Parameter(torch.Tensor(outdims))
self.register_parameter("bias", bias)
else:
self.register_parameter("bias", None)
self.initialize(mean_activity)
@property
def shared_features(self):
return self._features
@property
def features(self):
if self._shared_features:
return self.scales * self._features[self.feature_sharing_index, ...]
else:
return self._features
@property
def weight(self):
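# per-neuron rank-1 weight: normalized spatial mask (1, w, h) times feature vector (c, 1, 1)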
if self.positive_weights:
self.features.data.clamp_min_(0)
n = self.outdims
c, w, h = self.in_shape
return self.normalized_spatial.view(n, 1, w, h) * self.features.view(n, c, 1, 1)
@property
def normalized_spatial(self):
"""
Normalize the spatial mask
"""
if self.normalize:
norm = self.spatial.pow(2).sum(dim=1, keepdim=True)
norm = norm.sum(dim=2, keepdim=True).sqrt().expand_as(self.spatial) + 1e-6
weight = self.spatial / norm
else:
weight = self.spatial
if self.constrain_pos:
weight.data.clamp_min_(0)
return weight
def regularizer(self, reduction="sum", average=None):
return self.l1(reduction=reduction, average=average) * self.spatial_and_feature_reg_weight
def l1(self, reduction="sum", average=None):
reduction = self.resolve_reduction_method(reduction=reduction, average=average)
if reduction is None:
raise ValueError("Reduction of None is not supported in this regularizer")
n = self.outdims
c, w, h = self.in_shape
ret = (
self.normalized_spatial.view(self.outdims, -1).abs().sum(dim=1, keepdim=True)
* self.features.view(self.outdims, -1).abs().sum(dim=1)
).sum()
if reduction == "mean":
ret = ret / (n * c * w * h)
return ret
def initialize(self, mean_activity=None):
"""
Initializes the mean, and sigma of the Gaussian readout along with the features weights
"""
if mean_activity is None:
mean_activity = self.mean_activity
self.spatial.data.normal_(0, self.init_noise)
self._features.data.normal_(0, self.init_noise)
if self._shared_features:
self.scales.data.fill_(1.0)
if self.bias is not None:
self.initialize_bias(mean_activity=mean_activity)
def initialize_features(self, match_ids=None, shared_features=None):
"""
The internal attribute `_original_features` in this function denotes whether this instance of FullFactorized2d
learns the original features (True) or if it uses a copy of the features from another instance of FullFactorized2d
via the `shared_features` (False). If it uses a copy, the feature_l1 regularizer for this copy will return 0
"""
c, w, h = self.in_shape
if match_ids is not None:
assert self.outdims == len(match_ids)
n_match_ids = len(np.unique(match_ids))
if shared_features is not None:
assert shared_features.shape == (
n_match_ids,
c,
), f"shared features need to have shape ({n_match_ids}, {c})"
self._features = shared_features
self._original_features = False
else:
self._features = nn.Parameter(
torch.Tensor(n_match_ids, c)
) # feature weights for each channel of the core
self.scales = nn.Parameter(torch.Tensor(self.outdims, 1)) # feature weights for each channel of the core
_, sharing_idx = np.unique(match_ids, return_inverse=True)
self.register_buffer("feature_sharing_index", torch.from_numpy(sharing_idx))
self._shared_features = True
else:
self._features = nn.Parameter(torch.Tensor(self.outdims, c)) # feature weights for each channel of the core
self._shared_features = False
def forward(self, x, shift=None):
if shift is not None:
raise NotImplementedError("shift is not implemented for this readout")
if self.constrain_pos:
self.features.data.clamp_min_(0)
N, c, w, h = x.size()
c_in, w_in, h_in = self.in_shape
if (c_in, w_in, h_in) != (c, w, h):
raise ValueError("the specified feature map dimension is not the readout's expected input dimension")
y = torch.einsum("ncwh,owh->nco", x, self.normalized_spatial)
y = torch.einsum("nco,oc->no", y, self.features)
if self.bias is not None:
y = y + self.bias
return y
def __repr__(self):
c, w, h = self.in_shape
r = self.__class__.__name__ + " (" + "{} x {} x {}".format(c, w, h) + " -> " + str(self.outdims) + ")"
if self.bias is not None:
r += " with bias"
if self._shared_features:
r += ", with {} features".format("original" if self._original_features else "shared")
if self.normalize:
r += ", normalized"
else:
r += ", unnormalized"
for ch in self.children():
r += " -> " + ch.__repr__() + "\n"
return r
# Classes for backwards compatibility
class SpatialXFeatureLinear(FullFactorized2d):
pass
class FullSXF(FullFactorized2d):
pass
| 35.889474
| 120
| 0.60698
| 850
| 6,819
| 4.676471
| 0.208235
| 0.049308
| 0.006792
| 0.027673
| 0.223648
| 0.161006
| 0.088553
| 0.056604
| 0.034214
| 0.021132
| 0
| 0.006892
| 0.297844
| 6,819
| 189
| 121
| 36.079365
| 0.823308
| 0.114533
| 0
| 0.217687
| 0
| 0
| 0.065228
| 0.00354
| 0
| 0
| 0
| 0
| 0.013605
| 1
| 0.07483
| false
| 0.013605
| 0.027211
| 0.013605
| 0.183673
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
08f7015d2835dcc1e926fd4acbcfff51249816e9
| 1,186
|
py
|
Python
|
app/main/views.py
|
josphat-otieno/news-app
|
e6ff307230bd2cab787489fca4850004cd9bdbd0
|
[
"MIT"
] | null | null | null |
app/main/views.py
|
josphat-otieno/news-app
|
e6ff307230bd2cab787489fca4850004cd9bdbd0
|
[
"MIT"
] | null | null | null |
app/main/views.py
|
josphat-otieno/news-app
|
e6ff307230bd2cab787489fca4850004cd9bdbd0
|
[
"MIT"
] | 1
|
2022-02-28T22:33:33.000Z
|
2022-02-28T22:33:33.000Z
|
from flask import render_template,request, redirect, url_for
from . import main
from ..requests import get_articles, get_news_sources,get_top_headlines, get_news_category
@main.route('/')
def index():
'''
view root function that returns the index page and its data
'''
title="Welcome to your favorite news app"
message='Read your favorite news here'
news_sources=get_news_sources('sources')
top_headlines = get_top_headlines()
return render_template('index.html', title=title, message=message, sources=news_sources,top_headlines=top_headlines)
@main.route('/article/<id>')
def articles(id):
'''function to display the articles page and its data
'''
articles = get_articles(id)
title = 'trending articles'
return render_template('article.html' ,articles=articles, title = title)
@main.route('/categories/<category_name>')
def category(category_name):
'''
function to return the categories.html page and its content
'''
category = get_news_category(category_name)
title = f'{category_name}'
cat = category_name
return render_template('categories.html',title = title,category = category, category_name=cat)
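A minimal, hedged sketch of how the `main` blueprint above is typically wired up. `create_app` is a hypothetical entry point, not part of the original file; the `app.main` import path follows the file's location (`app/main/views.py`).

from flask import Flask
from app.main import main as main_blueprint  # blueprint defined in app/main/__init__.py (assumed)

def create_app():
    app = Flask(__name__)
    app.register_blueprint(main_blueprint)
    return app

if __name__ == '__main__':
    create_app().run(debug=True)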
| 34.882353
| 120
| 0.729342
| 156
| 1,186
| 5.352564
| 0.339744
| 0.086228
| 0.035928
| 0.033533
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.164418
| 1,186
| 34
| 121
| 34.882353
| 0.842583
| 0.142496
| 0
| 0
| 0
| 0
| 0.181263
| 0.027495
| 0
| 0
| 0
| 0
| 0
| 1
| 0.142857
| false
| 0
| 0.142857
| 0
| 0.428571
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
3e92c270410556137345bdc66663f957e85d9d78
| 937
|
py
|
Python
|
notebook/pypdf2_merge_page.py
|
vhn0912/python-snippets
|
80b2e1d6b2b8f12ae30d6dbe86d25bb2b3a02038
|
[
"MIT"
] | 174
|
2018-05-30T21:14:50.000Z
|
2022-03-25T07:59:37.000Z
|
notebook/pypdf2_merge_page.py
|
vhn0912/python-snippets
|
80b2e1d6b2b8f12ae30d6dbe86d25bb2b3a02038
|
[
"MIT"
] | 5
|
2019-08-10T03:22:02.000Z
|
2021-07-12T20:31:17.000Z
|
notebook/pypdf2_merge_page.py
|
vhn0912/python-snippets
|
80b2e1d6b2b8f12ae30d6dbe86d25bb2b3a02038
|
[
"MIT"
] | 53
|
2018-04-27T05:26:35.000Z
|
2022-03-25T07:59:37.000Z
|
import PyPDF2
# Merge selected pages, passing page ranges as (start, stop[, step]) tuples
merger = PyPDF2.PdfFileMerger()
merger.append('data/src/pdf/sample1.pdf', pages=(0, 1))
merger.append('data/src/pdf/sample2.pdf', pages=(2, 4))
merger.merge(2, 'data/src/pdf/sample3.pdf', pages=(0, 3, 2))
merger.write('data/temp/sample_merge_page.pdf')
merger.close()
# Merge selected pages, passing page ranges as PageRange slice strings
merger = PyPDF2.PdfFileMerger()
merger.append('data/src/pdf/sample1.pdf', pages=PyPDF2.pagerange.PageRange('-1'))
merger.append('data/src/pdf/sample2.pdf', pages=PyPDF2.pagerange.PageRange('2:'))
merger.merge(2, 'data/src/pdf/sample3.pdf', pages=PyPDF2.pagerange.PageRange('::-1'))
merger.write('data/temp/sample_merge_pagerange.pdf')
merger.close()
# Copy individual pages into a new file with PdfFileReader/PdfFileWriter
reader1 = PyPDF2.PdfFileReader('data/src/pdf/sample1.pdf')
reader2 = PyPDF2.PdfFileReader('data/src/pdf/sample2.pdf')
writer = PyPDF2.PdfFileWriter()
writer.addPage(reader1.getPage(0))
writer.addPage(reader2.getPage(2))
with open('data/temp/sample_merge_wr.pdf', 'wb') as f:
writer.write(f)
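The `PageRange` strings above follow Python slice syntax; a hedged preview of the same selections on a plain page-number list:

pages = list(range(6))        # stand-in for a 6-page document
print(pages[-1:])             # '-1'   -> [5], the last page
print(pages[2:])              # '2:'   -> [2, 3, 4, 5], page 2 to the end
print(pages[::-1])            # '::-1' -> all pages in reverse order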
| 30.225806
| 85
| 0.741729
| 140
| 937
| 4.921429
| 0.264286
| 0.081277
| 0.11611
| 0.110305
| 0.703919
| 0.555878
| 0.477504
| 0.397678
| 0.397678
| 0.179971
| 0
| 0.039954
| 0.065101
| 937
| 30
| 86
| 31.233333
| 0.746575
| 0
| 0
| 0.2
| 0
| 0
| 0.318036
| 0.307364
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.05
| 0
| 0.05
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
3e93e3456bdf96692c3deeb42d3cc140eb248959
| 1,983
|
py
|
Python
|
examples/nlp/bert_squad_pytorch/data.py
|
gh-determined-ai/determined
|
9a1ab33a3a356b69681b3351629fef4ab98ddb56
|
[
"Apache-2.0"
] | 1,729
|
2020-04-27T17:36:40.000Z
|
2022-03-31T05:48:39.000Z
|
examples/nlp/bert_squad_pytorch/data.py
|
ChrisW09/determined
|
5c37bfe9cfcc69174ba29a3f1a115c3e9e3632e0
|
[
"Apache-2.0"
] | 1,940
|
2020-04-27T17:34:14.000Z
|
2022-03-31T23:02:28.000Z
|
examples/nlp/bert_squad_pytorch/data.py
|
ChrisW09/determined
|
5c37bfe9cfcc69174ba29a3f1a115c3e9e3632e0
|
[
"Apache-2.0"
] | 214
|
2020-04-27T19:57:28.000Z
|
2022-03-29T08:17:16.000Z
|
from transformers.data.processors.squad import SquadV1Processor, SquadV2Processor
from transformers import squad_convert_examples_to_features
import urllib.request
import os
def load_and_cache_examples(data_dir: str, tokenizer, task, max_seq_length, doc_stride, max_query_length, evaluate=False):
if (task == "SQuAD1.1"):
train_url = "https://rajpurkar.github.io/SQuAD-explorer/dataset/train-v1.1.json"
validation_url = "https://rajpurkar.github.io/SQuAD-explorer/dataset/dev-v1.1.json"
train_file = "train-v1.1.json"
validation_file = "dev-v1.1.json"
processor = SquadV1Processor()
elif (task == "SQuAD2.0"):
train_url = "https://rajpurkar.github.io/SQuAD-explorer/dataset/train-v2.0.json"
validation_url = "https://rajpurkar.github.io/SQuAD-explorer/dataset/dev-v2.0.json"
train_file = "train-v2.0.json"
validation_file = "dev-v2.0.json"
processor = SquadV2Processor()
else:
raise NameError("Incompatible dataset detected")
if not os.path.exists(data_dir):
os.makedirs(data_dir)
if evaluate:
with urllib.request.urlopen(validation_url) as url:
with open(data_dir + "/" + validation_file, 'w') as f:
f.write(url.read().decode())
examples = processor.get_dev_examples(data_dir, filename=validation_file)
else:
with urllib.request.urlopen(train_url) as url:
with open(data_dir + "/" + train_file, 'w') as f:
f.write(url.read().decode())
examples = processor.get_train_examples(data_dir, filename=train_file)
features, dataset = squad_convert_examples_to_features(
examples=examples,
tokenizer=tokenizer,
max_seq_length=max_seq_length,
doc_stride=doc_stride,
max_query_length=max_query_length,
is_training=not evaluate,
return_dataset="pt",
)
return dataset, examples, features
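A hedged usage sketch; the tokenizer, model name, and hyperparameter values below are illustrative, not taken from the original file:

from transformers import BertTokenizer

tokenizer = BertTokenizer.from_pretrained("bert-base-uncased")
dataset, examples, features = load_and_cache_examples(
    data_dir="./squad_data",
    tokenizer=tokenizer,
    task="SQuAD1.1",
    max_seq_length=384,
    doc_stride=128,
    max_query_length=64,
    evaluate=False,
)
print(len(dataset))  # number of training features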
| 44.066667
| 122
| 0.670701
| 251
| 1,983
| 5.087649
| 0.302789
| 0.038371
| 0.05325
| 0.072044
| 0.440877
| 0.292874
| 0.292874
| 0.256852
| 0.256852
| 0.256852
| 0
| 0.015474
| 0.217852
| 1,983
| 44
| 123
| 45.068182
| 0.807866
| 0
| 0
| 0.097561
| 0
| 0.097561
| 0.185073
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.02439
| false
| 0
| 0.097561
| 0
| 0.146341
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
3e9b7d58a68d506fe7a43a816c3f565b193865ec
| 10,030
|
py
|
Python
|
crawler_shopee.py
|
HariWu1995/ecommerce_crawlers
|
578957dbbce2914f8af16c5f21c6529591a9f1d4
|
[
"CC0-1.0"
] | null | null | null |
crawler_shopee.py
|
HariWu1995/ecommerce_crawlers
|
578957dbbce2914f8af16c5f21c6529591a9f1d4
|
[
"CC0-1.0"
] | null | null | null |
crawler_shopee.py
|
HariWu1995/ecommerce_crawlers
|
578957dbbce2914f8af16c5f21c6529591a9f1d4
|
[
"CC0-1.0"
] | null | null | null |
import os
import sys
import time
from tqdm import tqdm as print_progress
import csv
import json
import logging
import numpy as np
import pandas as pd
import random
import cv2
from PIL import Image
from matplotlib import pyplot as plt
import re
import requests
from io import BytesIO
from bs4 import BeautifulSoup as BS
from urllib import request, response
from selenium import webdriver
from selenium.webdriver.support import expected_conditions
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.common.by import By
from selenium.webdriver.common.proxy import *
from selenium.common.exceptions import *
import sqlite3 as sqllib
from sql_commands import *
from driver_utils import *
from utils import *
working_dir = os.path.dirname(__file__)
# Define global variables
page_url = 'https://www.shopee.vn'
data_source = 'shopee'
def crawl_all_categories(driver, first_time: bool=False):
driver.get(page_url)
# Scroll down to load all page
simulate_scroll(driver, 5, 1)
# Crawl
categories = []
categories_groups = driver.find_elements_by_css_selector('[class="home-category-list__group"]')
for cat_group in categories_groups:
categories_raw = cat_group.find_elements_by_css_selector('[class="home-category-list__category-grid"]')
for cat_raw in categories_raw:
cat_title = cat_raw.find_element_by_css_selector('[class="vvKCN3"]')
category_info = [
cat_title.get_attribute("innerHTML").replace('&amp;', '&'), # undo HTML escaping
cat_raw.get_attribute('href'),
data_source
]
if first_time:
insert_new_category(category_info)
categories.append(category_info)
return categories
def crawl_single_category(driver, category_url: str, category_id: int):
print(f"\n\n\nLoading\n\t{category_url}")
driver.get(category_url)
# Scroll down to load all page
simulate_scroll(driver, 11, 0, 0.69, 1.96)
random_sleep()
category_url += '/?page={}'
all_products = []
page_id, max_pages = 1, 49
while page_id <= max_pages:
product_css = '[class="col-xs-2-4 shopee-search-item-result__item"]'
try:
print(f"\n\n\nCrawling page {page_id} ...")
# Get the review details
WebDriverWait(driver, timeout=random.randint(6,9)).until(
method=expected_conditions.visibility_of_all_elements_located(
locator=(By.CSS_SELECTOR, product_css)
)
)
except Exception:
print("Can't find any item!")
break
# Get product info
products_raw = driver.find_elements_by_css_selector(product_css)
for product_raw in products_raw:
try:
product_url = product_raw.find_element_by_css_selector('[data-sqe="link"]').get_attribute('href').split('?', 1)[0]
product_title = product_raw.find_element_by_css_selector('[data-sqe="name"]').find_element_by_tag_name('div').text
if not product_title.strip(): # skip empty or whitespace-only titles
continue
product_info = [product_title, product_url, category_id]
insert_new_product(product_info)
all_products.append(product_info)
except Exception:
print("Cannot crawl product")
continue
# open new tab
current_tab = driver.current_window_handle
driver.execute_script("window.open('');")
driver.switch_to.window(driver.window_handles[-1])
# crawl products' reviews per category
product_title = product_info[0].replace('"', "'") # crude quote escaping; a parameterized query would be safer
query = f'SELECT id FROM products WHERE title = "{product_title}" AND category_id = "{category_id}"'
execute_sql(query)
product_id = db_cursor.fetchone()[0]
try:
crawl_single_product(driver, product_info[1], product_id)
except Exception as e:
# print("Error while crawl\n\t", product_info[1])
# print(e)
pass
# close tab
driver.close()
driver.switch_to.window(current_tab)
# Go to next page
driver.get(category_url.format(page_id))
simulate_scroll(driver, 11, 0, 0.69, 1.96)
page_id += 1
def crawl_single_product(driver, product_url: str, product_id: int):
print(f"\n\n\nLoading\n\t{product_url}")
driver.get(product_url)
# Scroll down to load all page
simulate_scroll(driver)
page_id, max_pages = 1, 49
while page_id <= max_pages:
simulate_scroll(driver, 5, 1, 0.69, 0.96)
review_css = '[class="shopee-product-rating"]'
try:
print(f"\n\t\tCrawling page {page_id} ...")
# Get the review details
WebDriverWait(driver, timeout=random.randint(6,9)).until(
method=expected_conditions.visibility_of_all_elements_located(
locator=(By.CSS_SELECTOR, review_css)
)
)
except Exception:
print("Can't find any review!")
break
# Get product reviews
all_reviews = driver.find_elements_by_css_selector(review_css)
for raw_review in all_reviews:
try:
crawl_single_review(raw_review, product_id)
except Exception as e:
print("Error while crawling comment\n\t")
try:
page_buttons_css = '[class="shopee-button-no-outline"]'
page_buttons = driver.find_elements_by_css_selector(page_buttons_css)
if len(page_buttons) < 1:
print("\n\t\tOnly 1 page")
break
for page_button in page_buttons:
page_button_id = page_button.get_attribute("innerHTML")
if page_button_id == '':
continue
if int(page_button_id) > page_id:
page_button.click()
random_sleep()
page_id += 1
break
except Exception as e:
# print("\n\t\tOut-of-page Error: ", e)
break
def crawl_single_review(raw_review, product_id):
content = raw_review.find_element_by_css_selector("[class='shopee-product-rating__main']")
# Read review content
review = content.find_element_by_css_selector("[class='shopee-product-rating__content']").text
# Filter out empty or whitespace-only reviews
if not review.strip():
return 'Review is empty'
review = review.replace('\n', ' . ').replace('\t', ' . ')
# Read number of likes for this review
try:
n_likes = content.find_element_by_css_selector("[class='shopee-product-rating__like-count']")\
.get_attribute("innerHTML")
n_likes = re.sub('[^0-9]', '', n_likes)
if n_likes == '':
n_likes = 0
else:
n_likes = int(n_likes)
except Exception:
n_likes = -1
# Read rating
try:
rating = 5
stars = content.find_element_by_css_selector('div.shopee-product-rating__rating')\
.find_elements_by_tag_name("svg")
for star in stars:
star_color = star.find_element_by_tag_name('polygon')
try:
star_empty = star_color.get_attribute('fill')
if star_empty == 'none':
rating -= 1
except Exception:
pass
except Exception:
rating = -1
# Read verification
is_verified = 'đã xác thực' if n_likes > 0 else 'chưa xác thực' # Vietnamese: 'verified' / 'unverified'
insert_new_review([review, is_verified, n_likes, rating, product_id])
# print('\t\t\t', review, is_verified, n_likes, rating)
def main(driver, first_time: bool):
# Step 1: Get all categories in main page
all_categories = crawl_all_categories(driver, first_time)
db_cursor.execute("SELECT category_id FROM products;")
crawled_category_ids = list(set(
np.array(db_cursor.fetchall()).flatten().tolist()
))
print(f"Categories crawled: {crawled_category_ids}")
random_sleep()
# Step 2: Get products per categories page-by-page, then crawl their info & reviews
main_page = driver.current_window_handle
random.shuffle(all_categories)
for category_info in all_categories:
# open new tab
driver.execute_script("window.open('');")
driver.switch_to.window(driver.window_handles[-1])
random_sleep()
# crawl products' reviews per category
query = f'SELECT id FROM categories WHERE url = "{category_info[1]}" AND source = "{data_source}"'
execute_sql(query)
category_id = db_cursor.fetchone()[0]
if category_id not in crawled_category_ids:
crawl_single_category(driver, category_info[1], category_id)
random_sleep()
print(f'Finish crawling {category_info[1]} at {data_source}')
# close current tab
driver.close()
driver.switch_to.window(main_page)
if __name__ == "__main__":
initialize_db()
first_time = True
while True:
# Step 0: Initialize
browser = random.choice(['chrome', 'firefox', 'edge'])
driver = initialize_driver(browser)
try:
main(driver, first_time)
except Exception as e:
print("\n\n\nCrash ... Please wait a few seconds!!!")
for t in print_progress(range(69)):
time.sleep(1)
first_time = False
driver.quit()
db_connector.close()
| 34.826389
| 131
| 0.598006
| 1,205
| 10,030
| 4.720332
| 0.217427
| 0.012307
| 0.031997
| 0.019691
| 0.357771
| 0.29448
| 0.236111
| 0.213432
| 0.201477
| 0.132911
| 0
| 0.01064
| 0.30658
| 10,030
| 287
| 132
| 34.947735
| 0.807189
| 0.074576
| 0
| 0.263158
| 0
| 0
| 0.133326
| 0.045967
| 0
| 0
| 0
| 0
| 0
| 1
| 0.023923
| false
| 0.009569
| 0.133971
| 0
| 0.167464
| 0.066986
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
3e9de159f87129c1ecdbf87d9939fc63a59cd88b
| 1,360
|
py
|
Python
|
Chapter 08/8.11_chaos_game.py
|
ACsBlack/Tkinter-GUI-Application-Development-Blueprints-Second-Edition
|
c6a045fbf5ba3ece5e8a02bbe33ac13bb57b2b8e
|
[
"MIT"
] | 120
|
2018-03-04T07:17:00.000Z
|
2022-01-25T08:09:57.000Z
|
Chapter 08/8.11_chaos_game.py
|
ACsBlack/Tkinter-GUI-Application-Development-Blueprints-Second-Edition
|
c6a045fbf5ba3ece5e8a02bbe33ac13bb57b2b8e
|
[
"MIT"
] | 3
|
2019-03-24T09:32:43.000Z
|
2020-07-28T07:35:49.000Z
|
Chapter 08/8.11_chaos_game.py
|
ACsBlack/Tkinter-GUI-Application-Development-Blueprints-Second-Edition
|
c6a045fbf5ba3ece5e8a02bbe33ac13bb57b2b8e
|
[
"MIT"
] | 81
|
2018-04-18T06:51:46.000Z
|
2022-03-30T01:31:35.000Z
|
"""
Code illustration: 8.11
Chaos Game
Tkinter GUI Application Development Blueprints
"""
import random
from tkinter import Tk, Canvas
import math
WIDTH = 800
HEIGHT = 500
v1 = (float(WIDTH/2), 0.0)
v2 = (0.00, float(HEIGHT))
v3 = (float(WIDTH), float(HEIGHT))
last_point = None
root = Tk()
canvas = Canvas(root, background="#660099", width = WIDTH, height = HEIGHT)
canvas.pack()
def midway_point(p1, p2):
x = p1[0] + (p2[0] - p1[0]) // 2
y = p1[1] + (p2[1] - p1[1]) // 2
return (x, y)
def random_point_inside_triangle(v1, v2, v3):
a = random.random()
b = random.random()
if a + b > 1:
a = 1 - a
b = 1 - b
c = 1 - a - b
x = a * v1[0] + b * v2[0] + c * v3[0]
y = a * v1[1] + b * v2[1] + c * v3[1]
return (x, y)
last_point = random_point_inside_triangle(v1, v2, v3)
def get_next_point():
global last_point
roll = random.randint(1, 6) # fair six-sided die
mid_point = None
if roll == 1 or roll == 2:
mid_point = midway_point(last_point, v1)
elif roll == 3 or roll == 4:
mid_point = midway_point(last_point, v2)
elif roll == 5 or roll == 6:
mid_point = midway_point(last_point, v3)
last_point = mid_point
return mid_point
def update():
x,y = get_next_point()
canvas.create_rectangle(x, y, x, y, outline="#FFFF33")
root.after(1, update)
update()
root.mainloop()
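The loop above is the classic chaos game: repeatedly jumping halfway from the current point toward a randomly chosen triangle vertex converges to the Sierpinski triangle. A hedged headless sketch of the same iteration without tkinter:

import random

def chaos_points(vertices, start, n):
    # Collect n points of the chaos-game trajectory
    p = start
    pts = []
    for _ in range(n):
        vx, vy = random.choice(vertices)
        p = ((p[0] + vx) / 2, (p[1] + vy) / 2)
        pts.append(p)
    return pts

pts = chaos_points([(400, 0), (0, 500), (800, 500)], (300, 300), 10000)
print(len(pts), pts[-1])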
| 21.25
| 75
| 0.605147
| 225
| 1,360
| 3.533333
| 0.306667
| 0.079245
| 0.05283
| 0.071698
| 0.183648
| 0.183648
| 0.077987
| 0
| 0
| 0
| 0
| 0.072797
| 0.232353
| 1,360
| 63
| 76
| 21.587302
| 0.688697
| 0.0625
| 0
| 0.044444
| 0
| 0
| 0.01105
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.088889
| false
| 0
| 0.066667
| 0
| 0.222222
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
3e9e105d1fe756d9759cec38597f51f376c2fdd7
| 1,129
|
py
|
Python
|
home/migrations/0010_auto_20150916_1146.py
|
taedori81/gentlecoffee
|
62de8ff17c934afdfde188ecc6b9dbfb400d0682
|
[
"BSD-3-Clause"
] | null | null | null |
home/migrations/0010_auto_20150916_1146.py
|
taedori81/gentlecoffee
|
62de8ff17c934afdfde188ecc6b9dbfb400d0682
|
[
"BSD-3-Clause"
] | null | null | null |
home/migrations/0010_auto_20150916_1146.py
|
taedori81/gentlecoffee
|
62de8ff17c934afdfde188ecc6b9dbfb400d0682
|
[
"BSD-3-Clause"
] | null | null | null |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
import wagtail.wagtailcore.fields
import wagtail.wagtailcore.blocks
import datetime
import wagtail.wagtailimages.blocks
class Migration(migrations.Migration):
dependencies = [
('home', '0009_subscribepage'),
]
operations = [
migrations.AddField(
model_name='blogpage',
name='author',
field=models.CharField(max_length=255, default='Gentle Coffee'),
),
migrations.AddField(
model_name='blogpage',
name='body',
field=wagtail.wagtailcore.fields.StreamField((('heading', wagtail.wagtailcore.blocks.CharBlock(classname='full title')), ('paragraph', wagtail.wagtailcore.blocks.RichTextBlock()), ('image', wagtail.wagtailimages.blocks.ImageChooserBlock())), blank=True),
),
migrations.AddField(
model_name='blogpage',
name='date',
field=models.DateField(verbose_name='Post Date', default=datetime.datetime(2015, 9, 16, 11, 46, 26, 479699)),
),
]
| 33.205882
| 266
| 0.648361
| 109
| 1,129
| 6.614679
| 0.568807
| 0.124827
| 0.099861
| 0.112344
| 0.162275
| 0.162275
| 0
| 0
| 0
| 0
| 0
| 0.030752
| 0.222321
| 1,129
| 33
| 267
| 34.212121
| 0.790433
| 0.018601
| 0
| 0.333333
| 0
| 0
| 0.10217
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.222222
| 0
| 0.333333
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
3e9f73be09a575abe5d0c2f48b7beb177bd85283
| 793
|
py
|
Python
|
setup.py
|
yoarch/replace
|
5255810c019141f7de03b96c26a9b732d2218597
|
[
"MIT"
] | null | null | null |
setup.py
|
yoarch/replace
|
5255810c019141f7de03b96c26a9b732d2218597
|
[
"MIT"
] | null | null | null |
setup.py
|
yoarch/replace
|
5255810c019141f7de03b96c26a9b732d2218597
|
[
"MIT"
] | null | null | null |
import setuptools
with open("README.md", "r") as fh:
long_description = fh.read()
setuptools.setup(
name="replacefs",
version="1.2.0",
python_requires='>=3',
author="yoarch",
author_email="yo.managements@gmail.com",
description="Search and replace CLI tool for strings on the all system",
long_description=long_description,
long_description_content_type="text/markdown",
url="https://github.com/yoarch/replace",
packages=setuptools.find_packages(),
classifiers=[
"Programming Language :: Python :: 3",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
],
entry_points={
"console_scripts": [
"replacefs = replacefs.__main__:main",
"rp = replacefs.__main__:main"
]
})
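A hedged usage note: after `pip install .`, the `console_scripts` entry points above expose two equivalent commands. The argument names below are illustrative, not taken from the package:

#   replacefs <old_string> <new_string>
#   rp <old_string> <new_string>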
| 28.321429
| 76
| 0.659521
| 90
| 793
| 5.6
| 0.722222
| 0.119048
| 0.075397
| 0.119048
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.007886
| 0.200504
| 793
| 27
| 77
| 29.37037
| 0.787066
| 0
| 0
| 0
| 0
| 0
| 0.435057
| 0.088272
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.04
| 0
| 0.04
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
3ea0a1cb7ee908d792303d9bcee0623c9c0029ae
| 774
|
py
|
Python
|
ZD2.py
|
Novomlinov/Lab5
|
bd86f277be60173472202329a86790ca08549c26
|
[
"MIT"
] | null | null | null |
ZD2.py
|
Novomlinov/Lab5
|
bd86f277be60173472202329a86790ca08549c26
|
[
"MIT"
] | null | null | null |
ZD2.py
|
Novomlinov/Lab5
|
bd86f277be60173472202329a86790ca08549c26
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import sys
if __name__ == '__main__':
A = list(map(int, input().split()))
if len(A) != 10:
print("Неверный размер списка", file=sys.stderr)
exit(1)
s = 0
for item in A:
if item % 2 != 0: # sum the odd elements
s += item
x0 = 0 # Index of the first negative element
x1 = 0 # Index of the last negative element
for i, a in enumerate(A):
if a < 0:
x0 = i
break
for i, a in enumerate(A[::-1]):
if a < 0:
x1 = len(A) - 1 - i
break
print(s)
print(sum(A[x0 + 1:x1]))
snew = list(filter(lambda x: abs(x) > 1, A))
snew += [0] * (len(A) - len(snew)) # zero-pad back to the original length
print(snew)
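A hedged worked example for the program above (the input is illustrative):

# For stdin "1 -2 3 -4 5 6 7 8 9 10":
#   s  = 1 + 3 + 5 + 7 + 9 = 25                 (sum of the odd elements)
#   x0 = 1 (first negative, -2); x1 = 3 (last negative, -4)
#   sum(A[x0 + 1:x1]) = sum(A[2:3]) = 3         (elements strictly between them)
#   snew = [-2, 3, -4, 5, 6, 7, 8, 9, 10, 0]    (|x| > 1, zero-padded to length 10)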
| 25.8
| 57
| 0.5
| 113
| 774
| 3.345133
| 0.469027
| 0.031746
| 0.026455
| 0.037037
| 0.089947
| 0.089947
| 0
| 0
| 0
| 0
| 0
| 0.045726
| 0.350129
| 774
| 30
| 58
| 25.8
| 0.705765
| 0.162791
| 0
| 0.16
| 0
| 0
| 0.048701
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.04
| 0
| 0.04
| 0.16
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
3ea72258a32209376e761138207efecbaefac54c
| 6,916
|
py
|
Python
|
tf_agents/policies/categorical_q_policy.py
|
gregorgebhardt/agents
|
b6aeae5e0ed68dd4e4ec2ca73ef971254d3208f3
|
[
"Apache-2.0"
] | null | null | null |
tf_agents/policies/categorical_q_policy.py
|
gregorgebhardt/agents
|
b6aeae5e0ed68dd4e4ec2ca73ef971254d3208f3
|
[
"Apache-2.0"
] | null | null | null |
tf_agents/policies/categorical_q_policy.py
|
gregorgebhardt/agents
|
b6aeae5e0ed68dd4e4ec2ca73ef971254d3208f3
|
[
"Apache-2.0"
] | 3
|
2019-09-08T22:05:56.000Z
|
2020-05-27T08:27:15.000Z
|
# coding=utf-8
# Copyright 2018 The TF-Agents Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Simple Categorical Q-Policy for Q-Learning with Categorical DQN."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import gin
import tensorflow as tf
import tensorflow_probability as tfp
from tf_agents.policies import tf_policy
from tf_agents.specs import tensor_spec
from tf_agents.trajectories import policy_step
from tf_agents.trajectories import time_step as ts
from tf_agents.utils import common
from tf_agents.utils import nest_utils
@gin.configurable()
class CategoricalQPolicy(tf_policy.Base):
"""Class to build categorical Q-policies."""
def __init__(self,
min_q_value,
max_q_value,
q_network,
action_spec,
temperature=1.0):
"""Builds a categorical Q-policy given a categorical Q-network.
Args:
min_q_value: A float specifying the minimum Q-value, used for setting up
the support.
max_q_value: A float specifying the maximum Q-value, used for setting up
the support.
q_network: A network.Network to use for our policy.
action_spec: A `BoundedTensorSpec` representing the actions.
temperature: temperature for sampling, when close to 0.0 is arg_max.
Raises:
ValueError: if `q_network` does not have property `num_atoms`.
TypeError: if `action_spec` is not a `BoundedTensorSpec`.
"""
num_atoms = getattr(q_network, 'num_atoms', None)
if num_atoms is None:
raise ValueError('Expected q_network to have property `num_atoms`, but '
'it doesn\'t. Network is: %s' % q_network)
time_step_spec = ts.time_step_spec(q_network.input_tensor_spec)
super(CategoricalQPolicy, self).__init__(
time_step_spec, action_spec, q_network.state_spec)
if not isinstance(action_spec, tensor_spec.BoundedTensorSpec):
raise TypeError('action_spec must be a BoundedTensorSpec. Got: %s' % (
action_spec,))
self._temperature = tf.convert_to_tensor(temperature, dtype=tf.float32)
self._min_q_value = min_q_value
self._max_q_value = max_q_value
self._num_atoms = q_network.num_atoms
self._q_network = q_network
self._support = tf.linspace(min_q_value, max_q_value, self._num_atoms)
self._action_dtype = action_spec.dtype
def _variables(self):
return self._q_network.variables
@gin.configurable(module='CategoricalQPolicy')
def _action(self, time_step, policy_state, seed=None):
"""Generates next action given the time_step and optional policy_state.
Args:
time_step: A `TimeStep` tuple corresponding to `time_step_spec()`.
policy_state: A Tensor, or a nested dict, list or tuple of
Tensors representing the previous policy_state.
seed: Seed to use if action performs sampling (optional).
Returns:
An action Tensor, or a nested dict, list or tuple of Tensors,
matching the `action_spec()`.
A policy_state Tensor, or a nested dict, list or tuple of Tensors,
representing the new policy state.
"""
batched_time_step = nest_utils.batch_nested_tensors(time_step,
self.time_step_spec)
q_logits, policy_state = self._q_network(batched_time_step.observation,
batched_time_step.step_type,
policy_state)
q_logits.shape.assert_has_rank(3)
q_values = common.convert_q_logits_to_values(q_logits, self._support)
actions = tf.argmax(q_values, -1)
actions = tf.cast(actions, self._action_dtype, name='action')
actions = tf.nest.pack_sequence_as(self._action_spec, [actions])
return policy_step.PolicyStep(actions, policy_state)
@gin.configurable(module='CategoricalQPolicy')
def step(self, time_step, policy_state=(), num_samples=1):
"""Generates a random action given the time_step and policy_state.
Args:
time_step: A `TimeStep` tuple corresponding to `time_step_spec()`.
policy_state: A Tensor, or a nested dict, list or tuple of
Tensors representing the previous policy_state.
num_samples: Integer, number of samples per time_step.
Returns:
An action Tensor, or a nested dict, list or tuple of Tensors,
matching the `action_spec()`.
A policy_state Tensor, or a nested dict, list or tuple of Tensors,
representing the new policy state.
"""
batched_time_step = nest_utils.batch_nested_tensors(time_step,
self.time_step_spec)
q_logits, policy_state = self._q_network(batched_time_step.observation,
batched_time_step.step_type,
policy_state)
q_logits.shape.assert_has_rank(3)
q_values = common.convert_q_logits_to_values(q_logits, self._support)
logits = q_values / self._temperature
actions = tf.random.categorical(logits, num_samples)
if num_samples == 1:
actions = tf.squeeze(actions, [-1])
actions = tf.cast(actions, self._action_dtype, name='step')
actions = tf.nest.pack_sequence_as(self._action_spec, [actions])
return actions, policy_state
def _distribution(self, time_step, policy_state):
"""Generates the distribution over next actions given the time_step.
Args:
time_step: A `TimeStep` tuple corresponding to `time_step_spec()`.
policy_state: A Tensor, or a nested dict, list or tuple of
Tensors representing the previous policy_state.
Returns:
A tfp.distributions.Categorical capturing the distribution of next
actions.
A policy_state Tensor, or a nested dict, list or tuple of Tensors,
representing the new policy state.
"""
q_logits, policy_state = self._q_network(time_step.observation,
time_step.step_type,
policy_state)
q_logits.shape.assert_has_rank(3)
q_values = common.convert_q_logits_to_values(q_logits, self._support)
return policy_step.PolicyStep(
tfp.distributions.Categorical(logits=q_values,
dtype=self.action_spec.dtype),
policy_state)
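A hedged sketch of the value computation all three methods above share: a categorical (C51-style) head emits logits over a fixed support of atoms, and `common.convert_q_logits_to_values` reduces them to expected Q-values. The NumPy version below mirrors that reduction; shapes are illustrative.

import numpy as np

def q_from_logits(logits, support):
    probs = np.exp(logits - logits.max(-1, keepdims=True))
    probs /= probs.sum(-1, keepdims=True)          # softmax over atoms
    return (probs * support).sum(-1)               # expected Q per action

support = np.linspace(-10.0, 10.0, 51)             # min_q_value, max_q_value, num_atoms
logits = np.random.randn(2, 4, 51)                 # (batch, actions, atoms)
print(q_from_logits(logits, support).shape)        # (2, 4)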
| 41.915152
| 78
| 0.686669
| 933
| 6,916
| 4.831726
| 0.21865
| 0.051464
| 0.021295
| 0.026619
| 0.469831
| 0.412378
| 0.385537
| 0.378882
| 0.352706
| 0.33496
| 0
| 0.004192
| 0.24118
| 6,916
| 164
| 79
| 42.170732
| 0.854802
| 0.399942
| 0
| 0.282051
| 0
| 0
| 0.042028
| 0
| 0
| 0
| 0
| 0
| 0.038462
| 1
| 0.064103
| false
| 0
| 0.153846
| 0.012821
| 0.282051
| 0.012821
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
3ead938794e4c6cd2d0971d9e594b96ceaee54b7
| 6,852
|
py
|
Python
|
interfaces/withdrawal_gui.py
|
firminoneto11/terceiro-projeto-curso-python
|
685a0e6fafdc07a28a4e7589ac40db0de61737c0
|
[
"MIT"
] | 1
|
2021-04-07T00:28:41.000Z
|
2021-04-07T00:28:41.000Z
|
interfaces/withdrawal_gui.py
|
firminoneto11/terceiro-projeto-curso-python
|
685a0e6fafdc07a28a4e7589ac40db0de61737c0
|
[
"MIT"
] | null | null | null |
interfaces/withdrawal_gui.py
|
firminoneto11/terceiro-projeto-curso-python
|
685a0e6fafdc07a28a4e7589ac40db0de61737c0
|
[
"MIT"
] | null | null | null |
from tkinter import *
from interfaces.functions import centralize
from tkinter import messagebox
from interfaces.functions import update_session_data_csv, update_clients_csv, get_current_balance
class WithdrawalGUI:
def __init__(self, frame, label):
"""
This __init__ method initializes the sub window for the withdrawal area.
:param frame: This is the self.buttons_frame from GUISession class. It's going to be used to update the new
balance after a successfully withdrawal.
:param label: This is the self.buttons_frame from GUISession class. It's going to be used to update the new
balance after a successfully withdrawal.
"""
# Saving frame and label in order to update them after the withdrawal
self.frame = frame
self.label = label
# Creating another window for the 'withdrawal' section
self.withdrawal_gui = Toplevel()
self.withdrawal_gui.configure(background='#393e46')
self.withdrawal_gui.iconbitmap(r'.\valware.ico')
self.withdrawal_gui.resizable(False, False)
self.withdrawal_gui.title("Saque")
centralize(width=900, height=500, element=self.withdrawal_gui)
# State of the system
self.state_label = Label(self.withdrawal_gui, text='Sacar', bg='#393e46', fg='#eeeeee',
font=('Helvetica', 24))
# Main frame
self.main_frame = LabelFrame(self.withdrawal_gui, text='Dados do saque', fg='#00adb5', bg='#393e46',
font=('Helvetica', 14))
# Data
self.withdrawal_amount_label = Label(self.main_frame, text='Insira o valor do saque - ',
font=('Helvetica', 14), bg='#393e46', fg='#eeeeee')
self.withdrawal_amount = Entry(self.main_frame, font=('Helvetica', 14), borderwidth=3)
# Buttons
self.withdrawal_button = Button(self.main_frame, text="Sacar", width=20, font=('Helvetica', 14),
bg='#00adb5', fg='#eeeeee', borderwidth=3, command=self.__withdrawing)
self.cancel_button = Button(self.main_frame, text="Cancelar", width=20, font=('Helvetica', 14), bg='#222831',
fg='#eeeeee', borderwidth=3, command=self.withdrawal_gui.destroy)
# Inserting the elements onto the screen
self.state_label.pack(pady=50)
self.main_frame.pack()
self.withdrawal_amount_label.grid(row=0, column=0, pady=10, sticky=E)
self.withdrawal_amount.grid(row=0, column=1, pady=10)
self.withdrawal_button.grid(row=1, column=0, padx=10, pady=50)
self.cancel_button.grid(row=1, column=1, padx=10, pady=50)
def __withdrawing(self):
"""
This method performs the withdrawal logic for the current client's session.
:return: None
"""
# Storing the gathered data into a variable
withdrawal = self.withdrawal_amount.get()
# Collecting the current balance
current_balance = get_current_balance()
# Checking if the withdrawal amount is valid
if len(withdrawal) == 0:
self.withdrawal_amount.delete(0, END)
error = messagebox.showerror("Campo vazio", "O valor para o saque está vazio!")
if error == 'ok':
self.withdrawal_gui.destroy()
return None
elif ',' in withdrawal:
withdrawal = withdrawal.replace(',', '.')
# Checking inserted values
try:
withdrawal = float(withdrawal)
# Taking care of possible exceptions
except ValueError:
self.withdrawal_amount.delete(0, END)
error = messagebox.showerror("Valor inválido", "O valor informado para saque é inválido. Insira apenas "
"os dígitos/números para o saque.")
if error == 'ok':
self.withdrawal_gui.destroy()
return None
# Validating the withdrawal amount that is going to be subtracted from the current balance.
new_balance_amount = round((current_balance - withdrawal), 2)
if new_balance_amount < 0:
self.withdrawal_amount.delete(0, END)
error = messagebox.showerror("Saldo insuficiente", "O valor informado para o saque é insuficiente, pois o "
"seu saldo atual é menor do que a quantia solicitada.")
if error == 'ok':
self.withdrawal_gui.destroy()
return None
# This part will only be executed if it passes all the previous verifications
else:
# Cleaning the typed data from the input entry
self.withdrawal_amount.delete(0, END)
# Before updating anything, get a confirmation from the user
response = messagebox.askyesno("Confirmar saque", f"Deseja efetuar o saque no valor de R${withdrawal} "
f"da sua conta?")
# If response is 'Yes' or True
if response:
# Updating the current balance in the session_gui class
self.__update_balance_after_withdrawal(balance=new_balance_amount)
# Updating session_data.csv and clients.csv
updated_data = update_session_data_csv(new_balance=new_balance_amount)
update_clients_csv(updated_data=updated_data)
# Inform the user that the withdrawal was made successfully
success = messagebox.showinfo("Saque feito com sucesso", f"Parabéns! Seu saque foi efetuado com "
f"sucesso no valor de R${withdrawal}. "
f"Seu novo saldo é de R${new_balance_amount}.")
if success == 'ok':
self.withdrawal_gui.destroy()
return None
# If response is 'No' or False
else:
self.withdrawal_gui.destroy()
return None
def __update_balance_after_withdrawal(self, balance):
"""
This method updates the balance label from the GUISession class that was previously passed to the __init__
method. It is only called after a successful withdrawal.
:param balance: The user's new overall balance (previous balance minus the withdrawal).
:return: None
"""
self.label.destroy()
self.label = Label(self.frame, text=f"Saldo - R${balance}", font=('Helvetica', 14), bg='#393e46', fg='#eeeeee')
self.label.grid(row=1, column=0, pady=10)
| 48.253521
| 120
| 0.597052
| 796
| 6,852
| 5.017588
| 0.291457
| 0.087631
| 0.059589
| 0.036054
| 0.24662
| 0.223085
| 0.154482
| 0.145468
| 0.127942
| 0.082123
| 0
| 0.020766
| 0.318301
| 6,852
| 141
| 121
| 48.595745
| 0.834297
| 0.236427
| 0
| 0.2375
| 0
| 0
| 0.146711
| 0.004529
| 0
| 0
| 0
| 0
| 0
| 1
| 0.0375
| false
| 0
| 0.05
| 0
| 0.1625
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
3eafedfb7f7bd2c80bdda6855f91326da49ebb9e
| 3,068
|
py
|
Python
|
backend/test/notification_tests/notification_rest_tests.py
|
raphaelrpl/portal
|
9e84e52a73500390187d3fc7c4871cf8a3620231
|
[
"MIT"
] | null | null | null |
backend/test/notification_tests/notification_rest_tests.py
|
raphaelrpl/portal
|
9e84e52a73500390187d3fc7c4871cf8a3620231
|
[
"MIT"
] | null | null | null |
backend/test/notification_tests/notification_rest_tests.py
|
raphaelrpl/portal
|
9e84e52a73500390187d3fc7c4871cf8a3620231
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
from __future__ import absolute_import, unicode_literals
from datetime import datetime, date
from decimal import Decimal
from base import GAETestCase
from notification_app.notification_model import Notification
from routes.notifications import rest
from gaegraph.model import Node
from mock import Mock
from mommygae import mommy
class IndexTests(GAETestCase):
def test_success(self):
mommy.save_one(Notification)
mommy.save_one(Notification)
json_response = rest.index()
context = json_response.context
self.assertEqual(2, len(context))
notification_dct = context[0]
self.assertSetEqual(set(['id', 'creation', 'message']), set(notification_dct.iterkeys()))
self.assert_can_serialize_as_json(json_response)
class NewTests(GAETestCase):
def test_success(self):
self.assertIsNone(Notification.query().get())
json_response = rest.new(None, message='message_string')
db_notification = Notification.query().get()
self.assertIsNotNone(db_notification)
self.assertEqual('message_string', db_notification.message)
self.assert_can_serialize_as_json(json_response)
def test_error(self):
resp = Mock()
json_response = rest.new(resp)
errors = json_response.context
self.assertEqual(500, resp.status_code)
self.assertSetEqual(set(['message']), set(errors.keys()))
self.assert_can_serialize_as_json(json_response)
class EditTests(GAETestCase):
def test_success(self):
notification = mommy.save_one(Notification)
old_properties = notification.to_dict()
json_response = rest.edit(None, notification.key.id(), message='message_string')
db_notification = notification.key.get()
self.assertEqual('message_string', db_notification.message)
self.assertNotEqual(old_properties, db_notification.to_dict())
self.assert_can_serialize_as_json(json_response)
def test_error(self):
notification = mommy.save_one(Notification)
old_properties = notification.to_dict()
resp = Mock()
json_response = rest.edit(resp, notification.key.id())
errors = json_response.context
self.assertEqual(500, resp.status_code)
self.assertSetEqual(set(['message']), set(errors.keys()))
self.assertEqual(old_properties, notification.key.get().to_dict())
self.assert_can_serialize_as_json(json_response)
class DeleteTests(GAETestCase):
def test_success(self):
notification = mommy.save_one(Notification)
rest.delete(None, notification.key.id())
self.assertIsNone(notification.key.get())
def test_non_notification_deletion(self):
non_notification = mommy.save_one(Node)
response = Mock()
json_response = rest.delete(response, non_notification.key.id())
self.assertIsNotNone(non_notification.key.get())
self.assertEqual(500, response.status_code)
self.assert_can_serialize_as_json(json_response)
| 38.835443
| 97
| 0.712842
| 358
| 3,068
| 5.863128
| 0.226257
| 0.085755
| 0.034302
| 0.062887
| 0.523583
| 0.439733
| 0.395903
| 0.395903
| 0.327299
| 0.283945
| 0
| 0.004806
| 0.186115
| 3,068
| 78
| 98
| 39.333333
| 0.835803
| 0.006845
| 0
| 0.446154
| 0
| 0
| 0.028581
| 0
| 0
| 0
| 0
| 0
| 0.323077
| 1
| 0.107692
| false
| 0
| 0.138462
| 0
| 0.307692
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
3ebb613990f10c469e7937a90350ba7e43ef9d8e
| 10,709
|
py
|
Python
|
src/sparnn/layers/basic/conv_lstm_layer.py
|
JoeinChina/DeepWeather
|
2677edc16d9865ec98401aaf121aaabd24974aaf
|
[
"MIT"
] | 1
|
2020-07-23T04:13:02.000Z
|
2020-07-23T04:13:02.000Z
|
src/sparnn/layers/basic/conv_lstm_layer.py
|
JoeBuzh/DeepWeather
|
2677edc16d9865ec98401aaf121aaabd24974aaf
|
[
"MIT"
] | null | null | null |
src/sparnn/layers/basic/conv_lstm_layer.py
|
JoeBuzh/DeepWeather
|
2677edc16d9865ec98401aaf121aaabd24974aaf
|
[
"MIT"
] | null | null | null |
import numpy
import logging
import theano
import theano.tensor as TT
from theano.gradient import grad_clip
from sparnn.utils import *
from sparnn.layers import Layer
logger = logging.getLogger(__name__)
class ConvLSTMLayer(Layer):
def __init__(self, layer_param):
super(ConvLSTMLayer, self).__init__(layer_param)
if self.input is not None:
assert 5 == self.input.ndim
else:
assert ("init_hidden_state" in layer_param or "init_cell_state" in layer_param)
self.input_receptive_field = layer_param['input_receptive_field']
self.transition_receptive_field = layer_param['transition_receptive_field']
self.gate_activation = layer_param.get('gate_activation', 'sigmoid')
self.modular_activation = layer_param.get('modular_activation', 'tanh')
self.hidden_activation = layer_param.get('hidden_activation', 'tanh')
self.init_hidden_state = layer_param.get("init_hidden_state", quick_theano_zero((self.minibatch_size,) + self.dim_out))
self.init_cell_state = layer_param.get("init_cell_state", quick_theano_zero((self.minibatch_size,) + self.dim_out))
self.init_hidden_state = TT.unbroadcast(self.init_hidden_state, *range(self.init_hidden_state.ndim))
self.init_cell_state = TT.unbroadcast(self.init_cell_state, *range(self.init_cell_state.ndim))
self.learn_padding = layer_param.get('learn_padding', False)
self.input_padding = layer_param.get('input_padding', None)
if self.input is None:
assert 'n_steps' in layer_param
self.n_steps = layer_param['n_steps']
else:
self.n_steps = layer_param.get('n_steps', self.input.shape[0])
self.kernel_size = (self.feature_out, self.feature_in,
self.input_receptive_field[0], self.input_receptive_field[1])
self.transition_mat_size = (self.feature_out, self.feature_out,
self.transition_receptive_field[0], self.transition_receptive_field[1])
#print('ConvLSTMLayer', self.kernel_size, self.transition_mat_size)
self.W_hi = quick_init_xavier(self.rng, self.transition_mat_size, self._s("W_hi"))
self.W_hf = quick_init_xavier(self.rng, self.transition_mat_size, self._s("W_hf"))
self.W_ho = quick_init_xavier(self.rng, self.transition_mat_size, self._s("W_ho"))
self.W_hc = quick_init_xavier(self.rng, self.transition_mat_size, self._s("W_hc"))
if self.input is not None:
self.W_xi = quick_init_xavier(self.rng, self.kernel_size, self._s("W_xi"))
self.W_xf = quick_init_xavier(self.rng, self.kernel_size, self._s("W_xf"))
self.W_xo = quick_init_xavier(self.rng, self.kernel_size, self._s("W_xo"))
self.W_xc = quick_init_xavier(self.rng, self.kernel_size, self._s("W_xc"))
if self.learn_padding:
self.hidden_padding = quick_zero((self.feature_out, ), self._s("hidden_padding"))
else:
self.hidden_padding = None
self.b_i = quick_zero((self.feature_out, ), self._s("b_i"))
self.b_f = quick_zero((self.feature_out, ), self._s("b_f"))
self.b_o = quick_zero((self.feature_out, ), self._s("b_o"))
self.b_c = quick_zero((self.feature_out, ), self._s("b_c"))
self.W_ci = quick_zero((self.feature_out, ), self._s("W_ci"))
self.W_cf = quick_zero((self.feature_out, ), self._s("W_cf"))
self.W_co = quick_zero((self.feature_out, ), self._s("W_co"))
if self.input is not None:
self.param = [self.W_xi, self.W_hi, self.W_ci, self.b_i,
self.W_xf, self.W_hf, self.W_cf, self.b_f,
self.W_xo, self.W_ho, self.W_co, self.b_o,
self.W_xc, self.W_hc, self.b_c]
if self.learn_padding:
self.param.append(self.hidden_padding)
else:
self.param = [self.W_hi, self.W_ci, self.b_i,
self.W_hf, self.W_cf, self.b_f,
self.W_ho, self.W_co, self.b_o,
self.W_hc, self.b_c]
if self.learn_padding:
self.param.append(self.hidden_padding)
self.is_recurrent = True
self.fprop()
def set_name(self):
self.name = "ConvLSTMLayer-" + str(self.id)
def step_fprop(self, x_t, mask_t, h_tm1, c_tm1):
#print('step fprop in conv lstm layer:', self.dim_in, self.kernel_size)
if x_t is not None:
# input_gate = sigmoid(W_xi * x_t + W_hi * h_tm1 + W_ci ∘ c_tm1 + b_i)
input_gate = quick_activation(conv2d_same(x_t, self.W_xi, (None, ) + self.dim_in,
self.kernel_size, self.input_padding)
+ conv2d_same(h_tm1, self.W_hi, (None, ) + self.dim_out,
self.transition_mat_size, self.hidden_padding)
+ c_tm1 * self.W_ci.dimshuffle('x', 0, 'x', 'x')
+ self.b_i.dimshuffle('x', 0, 'x', 'x'), "sigmoid")
forget_gate = quick_activation(conv2d_same(x_t, self.W_xf, (None, ) + self.dim_in,
self.kernel_size, self.input_padding)
+ conv2d_same(h_tm1, self.W_hf, (None, ) + self.dim_out,
self.transition_mat_size, self.hidden_padding)
+ c_tm1 * self.W_cf.dimshuffle('x', 0, 'x', 'x')
+ self.b_f.dimshuffle('x', 0, 'x', 'x'), "sigmoid")
c_t = forget_gate * c_tm1 \
+ input_gate * quick_activation(conv2d_same(x_t, self.W_xc, (None, ) + self.dim_in,
self.kernel_size, self.input_padding)
+ conv2d_same(h_tm1, self.W_hc, (None, ) + self.dim_out,
self.transition_mat_size, self.hidden_padding)
+ self.b_c.dimshuffle('x', 0, 'x', 'x'), "tanh")
output_gate = quick_activation(conv2d_same(x_t, self.W_xo, (None, ) + self.dim_in,
self.kernel_size, self.input_padding)
+ conv2d_same(h_tm1, self.W_ho, (None, ) + self.dim_out,
self.transition_mat_size, self.hidden_padding)
+ c_t * self.W_co.dimshuffle('x', 0, 'x', 'x')
+ self.b_o.dimshuffle('x', 0, 'x', 'x'), "sigmoid")
h_t = output_gate * quick_activation(c_t, "tanh")
else:
# no input x_t: input_gate = sigmoid(W_hi * h_tm1 + W_ci ∘ c_tm1 + b_i)
input_gate = quick_activation(
conv2d_same(h_tm1, self.W_hi, (None, ) + self.dim_out, self.transition_mat_size, self.hidden_padding)
+ c_tm1 * self.W_ci.dimshuffle('x', 0, 'x', 'x')
+ self.b_i.dimshuffle('x', 0, 'x', 'x'), "sigmoid")
forget_gate = quick_activation(conv2d_same(h_tm1, self.W_hf, (None, ) + self.dim_out,
self.transition_mat_size, self.hidden_padding)
+ c_tm1 * self.W_cf.dimshuffle('x', 0, 'x', 'x')
+ self.b_f.dimshuffle('x', 0, 'x', 'x'), "sigmoid")
c_t = forget_gate * c_tm1 \
+ input_gate * quick_activation(conv2d_same(h_tm1, self.W_hc, (None, ) + self.dim_out,
self.transition_mat_size, self.hidden_padding)
+ self.b_c.dimshuffle('x', 0, 'x', 'x'), "tanh")
output_gate = quick_activation(conv2d_same(h_tm1, self.W_ho, (None, ) + self.dim_out,
self.transition_mat_size, self.hidden_padding)
+ c_t * self.W_co.dimshuffle('x', 0, 'x', 'x')
+ self.b_o.dimshuffle('x', 0, 'x', 'x'), "sigmoid")
h_t = output_gate * quick_activation(c_t, "tanh")
if mask_t is not None:
h_t = mask_t * h_t + (1 - mask_t) * h_tm1
c_t = mask_t * c_t + (1 - mask_t) * c_tm1
#print h_t.ndim, c_t.ndim
#h_t = quick_aggregate_pooling(h_t, "max", mask=None)
#c_t = quick_aggregate_pooling(c_t, "max", mask=None)
return h_t, c_t
def init_states(self):
return self.init_hidden_state, self.init_cell_state
def fprop(self):
# The dimension of self.mask is (Timestep, Minibatch).
# We need to pad it to (Timestep, Minibatch, FeatureDim, Row, Col)
# and keep the last three added dimensions broadcastable. TT.shape_padright
# function is thus a good choice
if self.mask is None:
if self.input is not None:
scan_input = [self.input]
scan_fn = lambda x_t, h_tm1, c_tm1: self.step_fprop(x_t, None, h_tm1, c_tm1)
else:
scan_input = None
scan_fn = lambda h_tm1, c_tm1: self.step_fprop(None, None, h_tm1, c_tm1)
else:
if self.input is not None:
scan_input = [self.input, TT.shape_padright(self.mask, 3)]
scan_fn = lambda x_t, mask_t, h_tm1, c_tm1: self.step_fprop(x_t, mask_t, h_tm1, c_tm1)
else:
scan_input = [TT.shape_padright(self.mask, 3)]
scan_fn = lambda mask_t, h_tm1, c_tm1: self.step_fprop(None, mask_t, h_tm1, c_tm1)
#print('conv lstm output:', scan_fn, self.init_cell_state, scan_input, self.n_steps)
[self.output, self.cell_output], self.output_update = quick_scan(fn=scan_fn,
outputs_info=[self.init_hidden_state,
self.init_cell_state],
sequences=scan_input,
name=self._s("lstm_output_func"),
n_steps=self.n_steps
)
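For reference, `step_fprop` above implements the convolutional LSTM formulation of Shi et al. (2015); in the usual notation, with `*` a convolution and `∘` the element-wise (Hadamard) product:

i_t = \sigma(W_{xi} * x_t + W_{hi} * h_{t-1} + W_{ci} \circ c_{t-1} + b_i)
f_t = \sigma(W_{xf} * x_t + W_{hf} * h_{t-1} + W_{cf} \circ c_{t-1} + b_f)
c_t = f_t \circ c_{t-1} + i_t \circ \tanh(W_{xc} * x_t + W_{hc} * h_{t-1} + b_c)
o_t = \sigma(W_{xo} * x_t + W_{ho} * h_{t-1} + W_{co} \circ c_t + b_o)
h_t = o_t \circ \tanh(c_t)

When `x_t` is None (the input-free branch), the W_x· terms simply drop out.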
| 59.494444
| 127
| 0.530488
| 1,381
| 10,709
| 3.781318
| 0.106445
| 0.045002
| 0.045576
| 0.0563
| 0.626388
| 0.579663
| 0.556492
| 0.53198
| 0.467254
| 0.458828
| 0
| 0.010209
| 0.359697
| 10,709
| 179
| 128
| 59.826816
| 0.751349
| 0.058362
| 0
| 0.321918
| 0
| 0
| 0.042002
| 0.004667
| 0
| 0
| 0
| 0
| 0.020548
| 1
| 0.034247
| false
| 0
| 0.047945
| 0.006849
| 0.10274
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
3ebbdc540dc8fa6d2e3f28778c968adced8e307f
| 779
|
py
|
Python
|
build_script.py
|
lammda/mercari-solution
|
e6e216d33d19b62fdd4fb2a906bd904ede9c5aaa
|
[
"MIT"
] | 249
|
2018-03-31T13:08:55.000Z
|
2022-02-23T16:13:16.000Z
|
build_script.py
|
arita37/mercari-solution
|
374301ad1c32cbc93dcc40313d5d7bb9c5503746
|
[
"MIT"
] | 1
|
2018-10-24T00:49:12.000Z
|
2019-08-28T17:37:00.000Z
|
build_script.py
|
arita37/mercari-solution
|
374301ad1c32cbc93dcc40313d5d7bb9c5503746
|
[
"MIT"
] | 84
|
2018-03-31T20:32:10.000Z
|
2022-03-06T10:56:58.000Z
|
import base64
import glob
import gzip
def build_script(submission_name):
script_template = open('script_template.tmpl')
script = open('script/script_{name}.py'.format(name=submission_name), 'wt')
file_data = {}
for fn in glob.glob('mercari/*.py') + glob.glob('mercari/*.pyx'):
content = open(fn).read()
compressed = gzip.compress(content.encode('utf-8'), compresslevel=9)
encoded = base64.b64encode(compressed).decode('utf-8')
name = fn.split('/')[1]
file_data[name] = encoded
script.write(script_template.read().replace('{file_data}', str(file_data)).replace('{name}', submission_name))
script.close()
if __name__ == '__main__':
for submission_name in ['tf', 'mx']:
build_script(submission_name)
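A hedged sketch of the decode side that the generated script would perform to restore each embedded source file. The template's exact contents are not shown above, so this is the inverse of the encoding, not a quote of the template:

import base64
import gzip

def restore(encoded: str) -> str:
    # Inverse of build_script's encoding: base64-decode, then gunzip
    return gzip.decompress(base64.b64decode(encoded)).decode('utf-8')

# Round-trip check
payload = base64.b64encode(gzip.compress(b'print(1)', compresslevel=9)).decode('utf-8')
assert restore(payload) == 'print(1)'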
| 31.16
| 114
| 0.661104
| 99
| 779
| 4.969697
| 0.444444
| 0.142276
| 0.085366
| 0.101626
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.015552
| 0.174583
| 779
| 24
| 115
| 32.458333
| 0.749611
| 0
| 0
| 0
| 0
| 0
| 0.141207
| 0.029525
| 0
| 0
| 0
| 0
| 0
| 1
| 0.055556
| false
| 0
| 0.166667
| 0
| 0.222222
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
3ebd20504757b6e90b9dac2cead03c7e3e9835fc
| 2,188
|
py
|
Python
|
graphjson.py
|
suprfanz/flask-fb-neo4j-alchemy
|
8ee5692bdbddc94342b38144d299e9d1a1b0b68d
|
[
"MIT"
] | 2
|
2018-03-09T03:10:49.000Z
|
2020-10-22T10:28:03.000Z
|
graphjson.py
|
suprfanz/flask-fb-neo4j-alchemy
|
8ee5692bdbddc94342b38144d299e9d1a1b0b68d
|
[
"MIT"
] | null | null | null |
graphjson.py
|
suprfanz/flask-fb-neo4j-alchemy
|
8ee5692bdbddc94342b38144d299e9d1a1b0b68d
|
[
"MIT"
] | null | null | null |
"""
graphjson module pulls an event from neo4j and creates a
graphjson-formatted file to be used with AlchemyJS
Written by Ray Bernard ray@suprfanz.com
"""
import json
from neo4j.v1 import GraphDatabase, basic_auth
from config import neo4j_dbip, neo4j_admin, neo4j_password
session = GraphDatabase.driver("bolt://{}:7687".format(neo4j_dbip),
auth=basic_auth("{}".format(neo4j_admin), "{}".format(neo4j_password))).session()
def create_guest_node():
# fetches the guest nodes from neo4j
insert_query_guest = '''
MATCH (a:fb_guest)
WITH collect({name: a.fb_guest_name, nodeType:'guest', id:a.fb_usr_id}) AS nodes RETURN nodes
'''
result = session.run(insert_query_guest)
for record in result:
guest_node = json.dumps(dict(record))
return guest_node
def create_guest_edge():
# fetches the guest-event edges from neo4j
insert_query_guest = '''
MATCH (a:fb_guest)-[r:RSVP]->(b:fb_event)
WITH collect({source: a.fb_usr_id,target: b.fb_event_id, rsvp:r.rsvp_status}) AS edges RETURN edges
'''
result = session.run(insert_query_guest)
for record in result:
return json.dumps(dict(record))
def create_event_node():
# fetches the event nodes from neo4j
insert_query_guest = '''
MATCH (b:fb_event)
WITH collect ({name: b.event_name, nodeType:'event', id:b.fb_event_id}) AS nodes RETURN nodes
'''
result = session.run(insert_query_guest)
for record in result:
return json.dumps(record['nodes'])
def main():
# puts the data together in graphjson format
comment = '{"comment":" This is a test",'
guest_nodes = str(create_guest_node())[1:][:-2]
guest_edges = str(create_guest_edge())[1:]
event_node = str((create_event_node())) + ']'
graphjson = str(comment) + str(guest_nodes) + ', ' + str(event_node) + ',' + str(guest_edges)
print(graphjson)
# put your file path to json data here
with open(
"C:\\Users\\yourname\\Documents\\path\\to\\alchemy\\app\\static\\data\\fb_events.json",
"w") as f:
f.write(graphjson)
return graphjson
if __name__ == '__main__':
main()
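Building JSON by string concatenation, as `main()` does above, is fragile: one stray bracket breaks the output file. A hedged alternative sketch that assembles the same structure as Python objects and serializes once; the node/edge shapes are assumed to match the queries above:

import json

graph = {
    "comment": "This is a test",
    "nodes": [],   # guest nodes + event nodes from the two node queries
    "edges": [],   # RSVP edges from create_guest_edge()
}
print(json.dumps(graph))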
| 29.173333
| 112
| 0.664534
| 306
| 2,188
| 4.539216
| 0.326797
| 0.047516
| 0.069114
| 0.043197
| 0.267099
| 0.239741
| 0.239741
| 0.210943
| 0.210943
| 0.156228
| 0
| 0.010957
| 0.207495
| 2,188
| 74
| 113
| 29.567568
| 0.790081
| 0.153108
| 0
| 0.272727
| 0
| 0.090909
| 0.302174
| 0.064674
| 0
| 0
| 0
| 0
| 0
| 1
| 0.090909
| false
| 0.045455
| 0.068182
| 0
| 0.25
| 0.022727
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
3ebd492c672da94f9dbeb381a25939527becda92
| 2,507
|
py
|
Python
|
dataset.py
|
kisom/aipnd-classifier
|
a361fc5f25402bbdfb23ddc08ad1b071fff50210
|
[
"MIT"
] | null | null | null |
dataset.py
|
kisom/aipnd-classifier
|
a361fc5f25402bbdfb23ddc08ad1b071fff50210
|
[
"MIT"
] | null | null | null |
dataset.py
|
kisom/aipnd-classifier
|
a361fc5f25402bbdfb23ddc08ad1b071fff50210
|
[
"MIT"
] | null | null | null |
"""
dataset.py defines a container for a training dataset.
"""
import os
import torch
from torchvision import datasets, transforms
class Dataset:
"""
Dataset encapsulates the training, validation, and testing datasets
from a single top-level directory.
"""
def __init__(self, data_dir, batchsize):
test_transforms = [
transforms.Resize(255),
transforms.CenterCrop(224),
transforms.ToTensor(),
transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
]
train_transforms = [
transforms.RandomRotation(45),
transforms.RandomResizedCrop(224),
transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
]
validate_transforms = [
transforms.Resize(255),
transforms.CenterCrop(224),
transforms.ToTensor(),
transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
]
self.batchsize = batchsize
self.datadir = data_dir
train_dir = os.path.join(data_dir, "train")
valid_dir = os.path.join(data_dir, "valid")
test_dir = os.path.join(data_dir, "test")
dataset_training = datasets.ImageFolder(
train_dir, transforms.Compose(train_transforms)
)
dataset_validation = datasets.ImageFolder(
valid_dir, transforms.Compose(validate_transforms)
)
dataset_testing = datasets.ImageFolder(
test_dir, transforms.Compose(test_transforms)
)
self.class_to_idx = dataset_training.class_to_idx
self.training = torch.utils.data.DataLoader(
dataset_training, batchsize * 2, shuffle=True
)
self.validation = torch.utils.data.DataLoader(
dataset_validation, batchsize, shuffle=True
)
self.testing = torch.utils.data.DataLoader(
dataset_testing, batchsize, shuffle=True
)
def __repr__(self):
return "dataset(data_dir={}, batchsize={})".format(self.datadir, self.batchsize)
def training_set(self):
"Returns the training dataset."
return self.training
def validation_set(self):
"Returns the validation dataset."
return self.validation
def testing_set(self):
"Returns the testing dataset."
return self.testing
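A hedged usage sketch: `data_dir` must contain `train/`, `valid/` and `test/` subfolders in torchvision's ImageFolder layout; the path below is illustrative.

ds = Dataset("flowers", batchsize=32)
print(ds)                                    # dataset(data_dir=flowers, batchsize=32)
images, labels = next(iter(ds.training_set()))
print(images.shape)                          # torch.Size([64, 3, 224, 224]); training uses batchsize * 2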
| 32.986842
| 88
| 0.618668
| 269
| 2,507
| 5.6171
| 0.256506
| 0.027796
| 0.055592
| 0.073461
| 0.305096
| 0.243547
| 0.203839
| 0.203839
| 0.203839
| 0.203839
| 0
| 0.049587
| 0.276027
| 2,507
| 75
| 89
| 33.426667
| 0.78292
| 0.098125
| 0
| 0.169492
| 0
| 0
| 0.058495
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.084746
| false
| 0
| 0.050847
| 0.016949
| 0.220339
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
3ebf6af93d750f46cb0ac8a35e125abd85daa7b2
| 710
|
py
|
Python
|
mlps/core/data/cnvrtr/functions/IPTransferDivide.py
|
seculayer/automl-mlps
|
80569909ec1c25db1ceafbb85b27d069d1a66aa3
|
[
"Apache-2.0"
] | null | null | null |
mlps/core/data/cnvrtr/functions/IPTransferDivide.py
|
seculayer/automl-mlps
|
80569909ec1c25db1ceafbb85b27d069d1a66aa3
|
[
"Apache-2.0"
] | 2
|
2022-03-31T07:39:59.000Z
|
2022-03-31T07:40:18.000Z
|
mlps/core/data/cnvrtr/functions/IPTransferDivide.py
|
seculayer/AutoAPE-mlps
|
80569909ec1c25db1ceafbb85b27d069d1a66aa3
|
[
"Apache-2.0"
] | 1
|
2021-11-03T09:09:07.000Z
|
2021-11-03T09:09:07.000Z
|
# -*- coding: utf-8 -*-
# Author : Manki Baek
# e-mail : bmg8551@seculayer.co.kr
# Powered by Seculayer © 2021 Service Model Team
from mlps.core.data.cnvrtr.ConvertAbstract import ConvertAbstract
class IPTransferDivide(ConvertAbstract):
def __init__(self, **kwargs):
super().__init__(**kwargs)
self.num_feat = 4
    # Tokenization happens here
def apply(self, data):
try:
row = data.split(".")
except Exception as e:
# self.LOGGER.error(e)
row = ["0", "0", "0", "0"]
return row
if __name__ == "__main__":
payload = "192.168.1.110"
tokenizer = IPTransferDivide(stat_dict=None, arg_list=None)
print(tokenizer.apply(payload))
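# Note that apply() can return fewer than num_feat tokens for malformed input
# (e.g. "localhost" yields ["localhost"]) even though num_feat is fixed at 4.
# A defensive variant (a sketch, not part of the original class) pads or
# truncates to exactly num_feat octets:
class IPTransferDivideSafe(IPTransferDivide):
    def apply(self, data):
        try:
            row = str(data).split(".")
        except Exception:
            row = []
        return (row + ["0"] * self.num_feat)[:self.num_feat]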
| 24.482759
| 65
| 0.612676
| 87
| 710
| 4.793103
| 0.735632
| 0.014388
| 0.014388
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.045198
| 0.252113
| 710
| 28
| 66
| 25.357143
| 0.73823
| 0.214085
| 0
| 0
| 0
| 0
| 0.047187
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.133333
| false
| 0
| 0.066667
| 0
| 0.333333
| 0.066667
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
3ec715aa850089a5f7b7b582922c73d2960606c8
| 778
|
py
|
Python
|
tests/test_kubernetes_master.py
|
damoxc/charm-kubernetes-master
|
624095b278e9f235a03d061132e9fdf029d45b71
|
[
"Apache-2.0"
] | null | null | null |
tests/test_kubernetes_master.py
|
damoxc/charm-kubernetes-master
|
624095b278e9f235a03d061132e9fdf029d45b71
|
[
"Apache-2.0"
] | null | null | null |
tests/test_kubernetes_master.py
|
damoxc/charm-kubernetes-master
|
624095b278e9f235a03d061132e9fdf029d45b71
|
[
"Apache-2.0"
] | null | null | null |
import pytest
from unittest import mock
from reactive import kubernetes_master
from charms.reactive import endpoint_from_flag, remove_state
from charmhelpers.core import hookenv
def patch_fixture(patch_target):
@pytest.fixture()
def _fixture():
with mock.patch(patch_target) as m:
yield m
return _fixture
def test_send_default_cni():
hookenv.config.return_value = 'test-default-cni'
kubernetes_master.send_default_cni()
kube_control = endpoint_from_flag('kube-control.connected')
kube_control.set_default_cni.assert_called_once_with('test-default-cni')
def test_default_cni_changed():
kubernetes_master.default_cni_changed()
remove_state.assert_called_once_with(
'kubernetes-master.components.started'
)
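# patch_fixture above is a fixture factory, but no fixtures are defined in this
# excerpt; the tests rely on the imported names being patched elsewhere (e.g. a
# conftest). A sketch of the intended usage (the target name is an assumption):
#
#     endpoint_mock = patch_fixture('reactive.kubernetes_master.endpoint_from_flag')
#
#     def test_with_fixture(endpoint_mock):
#         endpoint_mock.return_value = mock.Mock()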
| 27.785714
| 76
| 0.767352
| 102
| 778
| 5.509804
| 0.401961
| 0.124555
| 0.074733
| 0.071174
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.156812
| 778
| 27
| 77
| 28.814815
| 0.856707
| 0
| 0
| 0
| 0
| 0
| 0.115681
| 0.07455
| 0
| 0
| 0
| 0
| 0.095238
| 1
| 0.190476
| false
| 0
| 0.238095
| 0
| 0.47619
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
3ecbd115bcfdc5ce591f196d4fe1390310b89ddc
| 576
|
py
|
Python
|
example/runscripts/nhilton/run_batch_job.py
|
weegreenblobbie/pith-tool
|
25708bd2354cc5d97eb0c0a0046ca4704e4ced0a
|
[
"MIT"
] | 2
|
2016-03-04T19:25:29.000Z
|
2016-03-10T02:22:36.000Z
|
example/runscripts/nhilton/run_batch_job.py
|
weegreenblobbie/pith-tool
|
25708bd2354cc5d97eb0c0a0046ca4704e4ced0a
|
[
"MIT"
] | 10
|
2016-03-01T03:23:17.000Z
|
2017-04-27T00:37:09.000Z
|
example/runscripts/nhilton/run_batch_job.py
|
weegreenblobbie/pith-tool
|
25708bd2354cc5d97eb0c0a0046ca4704e4ced0a
|
[
"MIT"
] | null | null | null |
import argparse
from module_a.fun_1 import fun_1
from module_c.fun_4 import fun_4
from external_a.extra_fun import extra_fun
def main():
parser = argparse.ArgumentParser()
parser.add_argument(
'input_txt_file',
help = 'Some input file',
)
args = parser.parse_args()
print('Running batch job ...')
with open(args.input_txt_file, 'r') as fd:
text = fd.read()
print('Read "%s" from file' % repr(text))
fun_1()
fun_4()
extra_fun()
print('batch job complete!')
if __name__ == "__main__":
main()
| 16
| 46
| 0.626736
| 82
| 576
| 4.085366
| 0.487805
| 0.035821
| 0.071642
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.013921
| 0.251736
| 576
| 35
| 47
| 16.457143
| 0.763341
| 0
| 0
| 0
| 0
| 0
| 0.168403
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.047619
| false
| 0
| 0.190476
| 0
| 0.238095
| 0.142857
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
3eccd3d6d89a4b4cb5aaf3d2889bce0836f4e413
| 395
|
py
|
Python
|
DFS BFS/Leetcode 1436. Destination City.py
|
kaizhengny/LeetCode
|
67d64536ab80f4966699fe7460d165f2a98d6a82
|
[
"MIT"
] | 31
|
2020-06-23T00:40:04.000Z
|
2022-01-08T11:06:24.000Z
|
DFS BFS/Leetcode 1436. Destination City.py
|
kaizhengny/LeetCode
|
67d64536ab80f4966699fe7460d165f2a98d6a82
|
[
"MIT"
] | null | null | null |
DFS BFS/Leetcode 1436. Destination City.py
|
kaizhengny/LeetCode
|
67d64536ab80f4966699fe7460d165f2a98d6a82
|
[
"MIT"
] | 7
|
2020-04-30T08:46:03.000Z
|
2021-08-28T16:25:54.000Z
|
from typing import List
import collections
class Solution:
def destCity(self, paths: List[List[str]]) -> str:
dic = collections.defaultdict(list)
for [x,y] in paths:
dic[x].append(y)
stack = []
stack.append(paths[0][0])
while stack:
while dic[stack[-1]]:
stack.append(dic[stack[-1]].pop())
return stack[-1]
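# An equivalent, shorter formulation (a sketch, not the author's solution):
# the destination city is the only city that never appears as a start city.
class SolutionAlt:
    def destCity(self, paths: List[List[str]]) -> str:
        starts = {a for a, b in paths}
        return next(b for a, b in paths if b not in starts)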
| 30.384615
| 54
| 0.473418
| 46
| 395
| 4.065217
| 0.521739
| 0.096257
| 0.096257
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.020325
| 0.377215
| 395
| 13
| 55
| 30.384615
| 0.739837
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.083333
| false
| 0
| 0
| 0
| 0.25
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
3eccf323bfafeef7616f6d78bb34226073a6758e
| 3,039
|
py
|
Python
|
i18n/listeners/proxyContainer/ListShouldContainSubListProxy.py
|
Rexmen/i18n
|
b615f2d1e06b58f4647f1b269fc37d7921bc5c4b
|
[
"MIT"
] | null | null | null |
i18n/listeners/proxyContainer/ListShouldContainSubListProxy.py
|
Rexmen/i18n
|
b615f2d1e06b58f4647f1b269fc37d7921bc5c4b
|
[
"MIT"
] | null | null | null |
i18n/listeners/proxyContainer/ListShouldContainSubListProxy.py
|
Rexmen/i18n
|
b615f2d1e06b58f4647f1b269fc37d7921bc5c4b
|
[
"MIT"
] | null | null | null |
from .Proxy import Proxy
from robot.libraries.BuiltIn import BuiltIn
import sys
from robot.libraries.Screenshot import Screenshot
from robot.api import logger
import I18nListener as i18n
import ManyTranslations as ui
from robot.utils import unic
class ListShouldContainSubListProxy(Proxy):
def __init__(self, arg_format):
arg_format[repr(['list1', 'list2', 'msg=None', 'values=True'])] = self
def i18n_Proxy(self, func):
def proxy(self, list1, list2, msg=None, values=True):
full_args = [str(list1), str(list2)]
list1_trans = i18n.I18nListener.MAP.values(list1, full_args)
list2_trans = i18n.I18nListener.MAP.values(list2, full_args)
list1_have_multi_trans = False
for lt in list1_trans:
if len(lt) >1:
list1_have_multi_trans = True
break
list2_have_multi_trans = False
for lt in list2_trans:
if len(lt) >1:
list2_have_multi_trans = True
break
if list1_have_multi_trans or list2_have_multi_trans:
ListShouldContainSubListProxy.show_warning(self, list1, list2, full_args)
diffs = ', '.join(unic(item) for item in list2 if item not in list1)
if not diffs:
i18n.I18nListener.Is_Multi_Trans = True
for i, lt in enumerate(list1_trans):
if len(lt)>1 and str(full_args)+list1[i] not in ui.UI.unique_log:
multi_trans_word = [list1[i]]
ui.UI.origin_xpaths_or_arguments.append(full_args)
ui.UI.add_trans_info(self, multi_trans_word, lt, full_args, func.__name__)
for i, lt in enumerate(list2_trans):
if len(lt)>1 and str(full_args)+list2[i] not in ui.UI.unique_log:
multi_trans_word = [list2[i]]
ui.UI.origin_xpaths_or_arguments.append(full_args)
ui.UI.add_trans_info(self, multi_trans_word, lt, full_args, func.__name__)
return func(self, list1_trans, list2_trans, msg, values)
return proxy
def show_warning(self, list1, list2, full_args):
language = 'i18n in %s:\n ' %i18n.I18nListener.LOCALE
        test_name = ('Test Name: %s') % BuiltIn().get_variable_value("${TEST NAME}") + ' => multiple translations of the word exist' + '\n'
message_for_list1 = Proxy().deal_warning_message_for_list(list1, full_args, 'LIST1')
message_for_list2 = Proxy().deal_warning_message_for_list(list2, full_args, 'LIST2')
if message_for_list1 or message_for_list2:
message = language + test_name + message_for_list1 + '\n' + message_for_list2 + '\n'\
'You should verify translation is correct!'
logger.warn(message)
| 50.65
| 137
| 0.58901
| 374
| 3,039
| 4.513369
| 0.243316
| 0.061611
| 0.049763
| 0.028436
| 0.430095
| 0.347156
| 0.25237
| 0.182464
| 0.182464
| 0.150474
| 0
| 0.034163
| 0.325765
| 3,039
| 60
| 138
| 50.65
| 0.789653
| 0
| 0
| 0.153846
| 0
| 0
| 0.055592
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.076923
| false
| 0
| 0.153846
| 0
| 0.288462
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
3ecdb050826a3f9850819307adc5e13bc204f458
| 3,218
|
py
|
Python
|
dl_bounds/src/experiments/exp_bad_minima_branchout.py
|
google/dl_bounds
|
b38fbd73f30d2fd8d1b57ad8706c07a223689365
|
[
"Apache-2.0"
] | 12
|
2018-02-23T11:57:26.000Z
|
2021-04-20T20:38:16.000Z
|
dl_bounds/src/experiments/exp_bad_minima_branchout.py
|
google/dl_bounds
|
b38fbd73f30d2fd8d1b57ad8706c07a223689365
|
[
"Apache-2.0"
] | null | null | null |
dl_bounds/src/experiments/exp_bad_minima_branchout.py
|
google/dl_bounds
|
b38fbd73f30d2fd8d1b57ad8706c07a223689365
|
[
"Apache-2.0"
] | 7
|
2018-06-28T04:10:45.000Z
|
2021-10-14T01:18:59.000Z
|
# coding=utf-8
# Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Implements experimental logic."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from copy import copy
from dl_bounds.src.data import LocalDatasetProvider
from dl_bounds.src.exp_helpers import aggregate_dicts
from dl_bounds.src.experiments.exp_base import Experiment
import numpy as np
from scipy.stats import truncnorm
import tensorflow as tf
class BadMinimaBranchoutExperiment(Experiment):
"""Runs the branchout version of "bad minima" experiment."""
def __init__(self, conf, subexp_factory):
super(BadMinimaBranchoutExperiment, self).__init__(conf)
self.subexp_factory = subexp_factory
def run(self):
"""Runs experiment."""
tf.logging.info("Pre-training network with 50% labels flipped...")
conf = copy(self.conf)
conf.flip_labels = 0.5
conf.split_n = -1
conf.log2_snapshots = True
exp = Experiment(conf)
(x_train, y_train, _, _, _) = exp.get_data()
noisy_dataset = LocalDatasetProvider(
x_train, y_train, shuffle_seed=self.conf.data_shuffle_seed)
all_rs = []
bad_min_weight_snapshots = []
# Training model on the dataset with 50% labels randomly flipped, while
# keeping intermediate weights
for (p, model) in exp.train(noisy_dataset):
init_weights = model.weights.eval()
bad_min_weight_snapshots.append(init_weights)
# Training & evaluating models initialized from intermediate weights
for (p, init_weights) in enumerate(bad_min_weight_snapshots):
tf.logging.info(
"""Initializing weights and running actual experiment from
weights of noisy experiment at pass %d.""",
p)
exp = self.subexp_factory(self.conf)
exp.is_persistent_experiment = False
exp.init_weights = init_weights
rs = exp.run()
rs["bad_min_branchout_pass"] = p
all_rs.append(rs)
aggregated_rs = aggregate_dicts(all_rs)
self.save(aggregated_rs)
w_l2_norm_at_bad_min = np.linalg.norm(bad_min_weight_snapshots[-1])
dim = len(bad_min_weight_snapshots[-1])
new_init_w = truncnorm(
a=-2 / self.conf.init_stddev,
b=2 / self.conf.init_stddev,
scale=self.conf.init_stddev).rvs(size=dim).astype(np.float32)
new_init_w = (
new_init_w / np.linalg.norm(new_init_w)) * w_l2_norm_at_bad_min
conf = copy(self.conf)
exp = self.subexp_factory(conf)
exp.is_persistent_experiment = False
exp.init_weights = new_init_w
rs = exp.run()
rs["blown_up_stddev"] = True
self.conf.result_filename += "_blown_up_stddev"
self.save(rs)
return aggregated_rs
| 32.836735
| 75
| 0.724984
| 456
| 3,218
| 4.868421
| 0.390351
| 0.032432
| 0.027027
| 0.047297
| 0.093694
| 0.056757
| 0.043243
| 0.043243
| 0.043243
| 0
| 0
| 0.009549
| 0.186451
| 3,218
| 97
| 76
| 33.175258
| 0.838426
| 0.257924
| 0
| 0.103448
| 0
| 0
| 0.044346
| 0.009756
| 0
| 0
| 0
| 0
| 0
| 1
| 0.034483
| false
| 0.017241
| 0.172414
| 0
| 0.241379
| 0.017241
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
3ece59f5395837726a33f7c182a3a996e31afa97
| 699
|
py
|
Python
|
World 1/If...Else/ex029 - Eletronic Radar.py
|
MiguelChichorro/PythonExercises
|
3b2726e7d9ef92c1eb6b977088692c42a2a7b86e
|
[
"MIT"
] | 2
|
2021-04-23T19:18:06.000Z
|
2021-05-15T17:45:21.000Z
|
World 1/If...Else/ex029 - Eletronic Radar.py
|
MiguelChichorro/PythonExercises
|
3b2726e7d9ef92c1eb6b977088692c42a2a7b86e
|
[
"MIT"
] | 1
|
2021-05-14T00:29:23.000Z
|
2021-05-14T00:29:23.000Z
|
World 1/If...Else/ex029 - Eletronic Radar.py
|
MiguelChichorro/PythonExercises
|
3b2726e7d9ef92c1eb6b977088692c42a2a7b86e
|
[
"MIT"
] | 1
|
2021-05-14T00:19:33.000Z
|
2021-05-14T00:19:33.000Z
|
from time import sleep
colors = {"clean": "\033[m",
"red": "\033[31m",
"green": "\033[32m",
"yellow": "\033[33m",
"blue": "\033[34m",
"purple": "\033[35m",
"cian": "\033[36m"}
v = float(input("Enter the car speed: "))
tic = (v - 80) * 7  # fine: 7 $US per km/h over the 80 km/h limit
print("{}Loading...{}".format(colors["green"], colors["clean"]))
sleep(2)
if v > 80:
print("{}You were very fast, your speed was {} km{}".format(colors["red"], v, colors["clean"]))
print("{}Now you need to pay {} $US because of that{}".format(colors["red"], tic, colors["clean"]))
else:
print("{}You were in the right speed, you can move on{}".format(colors["green"], colors["clean"]))
| 38.833333
| 103
| 0.546495
| 98
| 699
| 3.897959
| 0.571429
| 0.143979
| 0.089005
| 0.120419
| 0.146597
| 0
| 0
| 0
| 0
| 0
| 0
| 0.070909
| 0.213162
| 699
| 17
| 104
| 41.117647
| 0.623636
| 0
| 0
| 0
| 0
| 0
| 0.429185
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.058824
| 0
| 0.058824
| 0.235294
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
3ece62c6129ee74730b7e33194559a50dbbdff89
| 1,913
|
py
|
Python
|
687.longest-univalue-path.py
|
Lonitch/hackerRank
|
84991b8340e725422bc47eec664532cc84a3447e
|
[
"MIT"
] | null | null | null |
687.longest-univalue-path.py
|
Lonitch/hackerRank
|
84991b8340e725422bc47eec664532cc84a3447e
|
[
"MIT"
] | null | null | null |
687.longest-univalue-path.py
|
Lonitch/hackerRank
|
84991b8340e725422bc47eec664532cc84a3447e
|
[
"MIT"
] | null | null | null |
#
# @lc app=leetcode id=687 lang=python3
#
# [687] Longest Univalue Path
#
# https://leetcode.com/problems/longest-univalue-path/description/
#
# algorithms
# Easy (34.69%)
# Likes: 1312
# Dislikes: 351
# Total Accepted: 76.5K
# Total Submissions: 218.3K
# Testcase Example: '[5,4,5,1,1,5]'
#
# Given a binary tree, find the length of the longest path where each node in
# the path has the same value. This path may or may not pass through the root.
#
# The length of path between two nodes is represented by the number of edges
# between them.
#
#
#
# Example 1:
#
# Input:
#
#
# 5
# / \
# 4 5
# / \ \
# 1 1 5
#
#
# Output: 2
#
#
#
# Example 2:
#
# Input:
#
#
# 1
# / \
# 4 5
# / \ \
# 4 4 5
#
#
# Output: 2
#
#
#
# Note: The given binary tree has not more than 10000 nodes. The height of the
# tree is not more than 1000.
#
#
# @lc code=start
# Definition for a binary tree node.
# class TreeNode:
# def __init__(self, x):
# self.val = x
# self.left = None
# self.right = None
class Solution:
def __init__(self):
self.ans = 0
def longestUnivaluePath(self, root: TreeNode) -> int:
        def util(node, cur, l):
            if node is None:
                return l
            else:
                if node.val == cur:
                    left = util(node.left, cur, l + 1)
                    right = util(node.right, cur, l + 1)
                    self.ans = max(self.ans, left + right - 2 * l - 2)
                    return max(left, right)
                else:
                    left = util(node.left, node.val, 0)
                    right = util(node.right, node.val, 0)
                    self.ans = max(self.ans, left + right)
                    return l
        util(root, 2**31, 0)
        return self.ans
# @lc code=end
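# A more conventional post-order formulation (an equivalent sketch, assuming
# LeetCode's TreeNode): each call returns the longest single-direction
# univalue path ("arrow") starting at a node, and the answer joins the two
# arrows that meet at each node.
class SolutionAlt:
    def longestUnivaluePath(self, root: TreeNode) -> int:
        self.best = 0
        def arrow(node):
            if node is None:
                return 0
            left, right = arrow(node.left), arrow(node.right)
            left = left + 1 if node.left and node.left.val == node.val else 0
            right = right + 1 if node.right and node.right.val == node.val else 0
            self.best = max(self.best, left + right)
            return max(left, right)
        arrow(root)
        return self.best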
| 21.021978
| 78
| 0.504966
| 260
| 1,913
| 3.723077
| 0.396154
| 0.043388
| 0.009298
| 0.008264
| 0.066116
| 0.066116
| 0.053719
| 0
| 0
| 0
| 0
| 0.055927
| 0.373758
| 1,913
| 90
| 79
| 21.255556
| 0.74374
| 0.549399
| 0
| 0.2
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.15
| false
| 0
| 0
| 0
| 0.4
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
3ecfb19dcb608f2b63fc8fd0aece69c83033985f
| 6,085
|
py
|
Python
|
scripts/wordnet.py
|
WladimirSidorenko/SentimentLexicon
|
0d7203b7b7e3ca5d11759fdad656f775fa5d6e95
|
[
"MIT"
] | 13
|
2016-08-03T18:46:02.000Z
|
2022-02-22T22:30:19.000Z
|
scripts/wordnet.py
|
WladimirSidorenko/SentimentLexicon
|
0d7203b7b7e3ca5d11759fdad656f775fa5d6e95
|
[
"MIT"
] | 2
|
2019-10-22T13:03:48.000Z
|
2019-12-05T21:41:36.000Z
|
scripts/wordnet.py
|
WladimirSidorenko/SentimentLexicon
|
0d7203b7b7e3ca5d11759fdad656f775fa5d6e95
|
[
"MIT"
] | 5
|
2019-12-25T13:53:18.000Z
|
2020-06-05T20:47:31.000Z
|
#!/usr/bin/env python2.7
# -*- coding: utf-8; mode: python; -*-
"""
Module for reading and processing GermaNet files.
Constants:
POS - list of parts-of-speech present in GermaNet
RELSYM2NAME - mapping from relation pointer symbols to relation names
Classes:
Wordnet - main class for processing GermaNet files
"""
##################################################################
# Imports
from __future__ import unicode_literals, print_function
from itertools import chain
from collections import defaultdict
import argparse
import codecs
import glob
import os
import re
import sys
import xml.etree.ElementTree as ET
##################################################################
# Variables and Constants
SKIP_RE = re.compile(r"\s+[1-9]")
ENCODING = "utf-8"
POS = [".adj", ".adv", ".noun", ".verb"]
RELSYM2NAME = {
"~": "Hyponym",
"~i": "Instance Hyponym",
"!": "Antonym",
"#m": "Member holonym",
"#p": "Part holonym",
"#s": "Substance holonym",
"$": "Verb Group",
"%m": "Member meronym",
"%p": "Part meronym",
"%s": "Substance meronym",
"&": "Similar to",
"*": "Entailment",
"+": "Derivationally related form",
"-c": "Member of this domain - TOPIC",
"-r": "Member of this domain - REGION",
"-u": "Member of this domain - USAGE",
";c": "Domain of synset - TOPIC",
";r": "Domain of synset - REGION",
";u": "Domain of synset - USAGE",
"<": "Participle of verb",
"=": "Attribute",
">": "Cause",
"@": "Hypernym",
"@i": "Instance Hypernym",
"\\": "Derived from adjective",
"^": "Also see"
}
##################################################################
# Class
class Wordnet(object):
"""
    Class for reading and processing GermaNet files
    Instance variables:
    synid2defexmp - mapping from synset IDs to synset definitions and examples
    synid2pos - mapping from synset IDs to part-of-speech categories
    synid2lexemes - mapping from synset IDs to lexemes
    lexeme2synids - mapping from lexemes to synset IDs
    relations - adjacency lists of relations between synsets
    """
def __init__(self, a_dir=os.getcwd()):
"""Class constructor.
@param a_dir - directory containing GermaNet files
"""
if not os.path.isdir(a_dir) or not os.access(a_dir, os.R_OK):
raise RuntimeError("Can't read from directory: {:s}".format(a_dir))
## mapping from synset IDs to synset definitions and examples
self.synid2defexmp = dict()
## mapping from synset IDs to part-of-speech categories
self.synid2pos = dict()
## mapping from synset IDs to lexemes
self.synid2lexemes = defaultdict(set)
## mapping from lexeme IDs to lexemes
self.lexeme2synids = defaultdict(set)
## adjacency lists of relations between synsets
self.relations = defaultdict(set)
# parse synsets
for ifile in chain.from_iterable(
glob.iglob(os.path.join(a_dir, "data" + ipos))
for ipos in POS):
self._parse_synsets(ifile)
assert self.lexeme2synids, \
"No synset files found in directory {:s}".format(a_dir)
def _parse_synsets(self, a_fname):
"""Parse GemaNet XML file
@param a_fname - name of input file
@return \c void
"""
ptr_sym = ""
i = w_cnt = rel_cnt = 0
ilex = toks = syn_id = pos = trg_id = trg_synid = trg_pos = None
with codecs.open(a_fname, 'r', ENCODING) as ifile:
for iline in ifile:
iline = iline.rstrip()
if SKIP_RE.match(iline):
continue
# print("iline = ", repr(iline), file=sys.stderr)
toks = iline.split()
syn_id, pos = toks[0], toks[2]
syn_id = (syn_id, pos)
self.synid2pos[syn_id] = pos
# print("syn_id =", repr(syn_id), file=sys.stderr)
# print("pos =", repr(pos), file=sys.stderr)
w_cnt = int(toks[3], 16)
# print("w_cnt =", repr(w_cnt), file=sys.stderr)
# read lexemes
for j in xrange(4, 4 + w_cnt * 2, 2):
ilex = toks[j]
self.synid2lexemes[syn_id].add(ilex)
self.lexeme2synids[ilex].add(syn_id)
# print("self.synid2lexemes[syn_id] =",
# repr(self.synid2lexemes[syn_id]), file=sys.stderr)
# print("self.lexeme2synids[ilex] =",
# repr(self.lexeme2synids[ilex]), file=sys.stderr)
# read relations
i = 4 + w_cnt * 2
rel_cnt = int(toks[i])
i += 1
# print("rel_cnt =",
# repr(rel_cnt), file=sys.stderr)
# print("i =", repr(i), file=sys.stderr)
for j in xrange(i, i + rel_cnt * 4, 4):
ptr_sym, trg_synid, trg_pos, _ = toks[j:j+4]
# print("ptr_sym =",
# repr(ptr_sym), file=sys.stderr)
# print("trg_synid =",
# repr(trg_synid), file=sys.stderr)
# print("trg_pos =",
# repr(trg_pos), file=sys.stderr)
trg_id = (trg_synid, trg_pos)
self.relations[syn_id].add((trg_id, RELSYM2NAME[ptr_sym]))
i += rel_cnt * 4
# print("i =", repr(i), file=sys.stderr)
if pos == 'v':
f_cnt = int(toks[i])
i += f_cnt * 3 + 1
assert toks[i] == '|', \
"Invalid line format '{:s}' token {:d} expected" \
" to be '|', but it is '{:s}' ".format(repr(iline), i,
repr(toks[i]))
self.synid2defexmp[syn_id] = ' '.join(toks[i + 1:])
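# A minimal usage sketch (assumes the directory passed on the command line
# holds the data.adj / data.adv / data.noun / data.verb files):
if __name__ == "__main__":
    wn = Wordnet(sys.argv[1])
    for syn_id in wn.lexeme2synids.get("dog", ()):
        print(syn_id, wn.synid2defexmp[syn_id])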
| 35.794118
| 79
| 0.517173
| 694
| 6,085
| 4.425072
| 0.299712
| 0.021166
| 0.050798
| 0.032563
| 0.206447
| 0.164767
| 0.048193
| 0.032563
| 0.032563
| 0
| 0
| 0.011241
| 0.327527
| 6,085
| 169
| 80
| 36.005917
| 0.739247
| 0.307642
| 0
| 0
| 0
| 0
| 0.166968
| 0
| 0
| 0
| 0
| 0
| 0.022222
| 1
| 0.022222
| false
| 0
| 0.111111
| 0
| 0.144444
| 0.011111
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
3ed921fc020d2c520c2bb21c3fba179cbc45d373
| 2,836
|
py
|
Python
|
ducky/asm/lexer.py
|
happz/ducky
|
1c6a875ca5a7a9cc71836bad5b7e45cc398d42ad
|
[
"MIT"
] | 3
|
2015-04-25T18:25:37.000Z
|
2017-08-31T20:52:29.000Z
|
ducky/asm/lexer.py
|
happz/ducky-legacy
|
1c6a875ca5a7a9cc71836bad5b7e45cc398d42ad
|
[
"MIT"
] | 27
|
2015-01-06T21:59:22.000Z
|
2016-11-12T07:31:39.000Z
|
ducky/asm/lexer.py
|
happz/ducky-legacy
|
1c6a875ca5a7a9cc71836bad5b7e45cc398d42ad
|
[
"MIT"
] | 1
|
2017-05-14T18:52:34.000Z
|
2017-05-14T18:52:34.000Z
|
import ply.lex
#
# Lexer setup
#
instructions = (
'NOP', 'INT', 'IPI', 'RETINT', 'CALL', 'RET', 'CLI', 'STI', 'HLT', 'RST', 'IDLE',
'PUSH', 'POP', 'INC', 'DEC', 'ADD', 'SUB', 'CMP', 'J', 'AND', 'OR', 'XOR', 'NOT',
'SHL', 'SHR', 'SHRS', 'LW', 'LS', 'LB', 'LI', 'LIU', 'LA', 'STW', 'STS', 'STB',
'MOV', 'SWP', 'MUL', 'UDIV', 'MOD', 'CMPU', 'CAS', 'SIS', 'DIV',
'BE', 'BNE', 'BS', 'BNS', 'BZ', 'BNZ', 'BO', 'BNO', "BL", "BLE", "BGE", "BG",
'SETE', 'SETNE', 'SETZ', 'SETNZ', 'SETO', 'SETNO', 'SETS', 'SETNS', "SETL", "SETLE", "SETGE", "SETG",
'SELE', 'SELNE', 'SELZ', 'SELNZ', 'SELS', 'SELNS', 'SELO', 'SELNO', "SELL", "SELLE", "SELGE", "SELG",
'LPM', 'CTR', 'CTW', 'FPTC'
)
math_instructions = (
'PUSHW', 'SAVEW', 'POPW', 'LOADW', 'POPUW', 'LOADUW', 'SAVE', 'LOAD',
'INCL', 'DECL', 'ADDL', 'MULL', 'DIVL', 'MODL', 'UDIVL', 'UMODL',
'DUP', 'DUP2', 'SWPL', 'DROP', 'SYMDIVL', 'SYMMODL',
'PUSHL', 'POPL'
)
directives = (
'data', 'text',
'type', 'global',
'ascii', 'byte', 'short', 'space', 'string', 'word',
'section',
'align', 'file',
'set'
)
# Construct list of tokens, and map of reserved words
tokens = instructions + math_instructions + (
'COMMA', 'COLON', 'HASH', 'LBRAC', 'RBRAC', 'DOT', 'PLUS',
'SCONST', 'ICONST',
'ID', 'REGISTER'
)
reserved_map = {
# Special registers
'sp': 'REGISTER',
'fp': 'REGISTER',
# Special instructions
'shiftl': 'SHL',
'shiftr': 'SHR',
'shiftrs': 'SHRS'
}
reserved_map.update({i.lower(): i for i in instructions})
reserved_map.update({i.lower(): i for i in math_instructions})
tokens = tokens + tuple([i.upper() for i in directives])
reserved_map.update({'.' + i: i.upper() for i in directives})
reserved_map.update({i: i.upper() for i in directives})
reserved_map.update({'r%d' % i: 'REGISTER' for i in range(0, 32)})
# Newlines
def t_NEWLINE(t):
r'\n+'
t.lexer.lineno += t.value.count('\n')
# Tokens
t_COMMA = r','
t_COLON = r':'
t_HASH = r'\#'
t_LBRAC = r'\['
t_RBRAC = r'\]'
t_DOT = r'\.'
t_PLUS = r'\+'
t_SCONST = r'\"([^\\\n]|(\\.))*?\"'
t_ICONST = r'-?(?:(?:0x[0-9a-fA-F][0-9a-fA-F]*)|(?:[0-9][0-9]*))'
def t_ID(t):
r'[a-zA-Z_\.][a-zA-Z0-9_\.]*'
t.type = reserved_map.get(t.value, 'ID')
return t
t_ignore = " \t"
def t_error(t):
from ..errors import AssemblyIllegalCharError
loc = t.lexer.location.copy()
loc.lineno = t.lineno - loc.lineno
loc.column = t.lexer.parser.lexpos_to_lineno(t.lexpos)
raise AssemblyIllegalCharError(c = t.value[0], location = loc, line = t.lexer.parser.lineno_to_line(t.lineno))
class AssemblyLexer(object):
def __init__(self):
self._lexer = ply.lex.lex()
def token(self, *args, **kwargs):
return self._lexer.token(*args, **kwargs)
def input(self, *args, **kwargs):
return self._lexer.input(*args, **kwargs)
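# A minimal tokenizing sketch (valid input only; t_error above expects a
# parser back-reference that a full assembler attaches to the lexer):
if __name__ == '__main__':
    lexer = AssemblyLexer()
    lexer.input("add r0, r1, #2\n")
    tok = lexer.token()
    while tok is not None:
        print(tok.type, tok.value)  # ADD 'add', REGISTER 'r0', COMMA ',', ...
        tok = lexer.token()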
| 27.269231
| 112
| 0.563822
| 394
| 2,836
| 3.967005
| 0.532995
| 0.049264
| 0.023033
| 0.046065
| 0.159949
| 0.151631
| 0.114523
| 0.114523
| 0.114523
| 0.076136
| 0
| 0.006838
| 0.174894
| 2,836
| 103
| 113
| 27.533981
| 0.661111
| 0.052891
| 0
| 0
| 0
| 0.013699
| 0.256183
| 0.036176
| 0
| 0
| 0
| 0
| 0
| 1
| 0.082192
| false
| 0
| 0.027397
| 0.027397
| 0.164384
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
3ed9a5ed8b96c7fface62084d850daafe13c098c
| 1,478
|
py
|
Python
|
flashcards/commands/sets.py
|
zergov/flashcards
|
4d1b1c277585b95517ed6c00ceff7555c8c131eb
|
[
"MIT"
] | 21
|
2016-06-13T00:51:49.000Z
|
2021-03-20T05:04:23.000Z
|
flashcards/commands/sets.py
|
zergov/flashcards
|
4d1b1c277585b95517ed6c00ceff7555c8c131eb
|
[
"MIT"
] | 11
|
2016-06-10T10:17:57.000Z
|
2020-01-30T15:14:35.000Z
|
flashcards/commands/sets.py
|
zergov/flashcards
|
4d1b1c277585b95517ed6c00ceff7555c8c131eb
|
[
"MIT"
] | 4
|
2017-01-02T13:26:21.000Z
|
2021-07-07T04:20:00.000Z
|
"""
flashcards.commands.sets
~~~~~~~~~~~~~~~~~~~~~~~~
Contains the commands and subcommands related to the sets resource.
"""
import os
import click
from flashcards import sets
from flashcards import storage
@click.group('sets')
def sets_group():
"""Command related to the StudySet object """
pass
@click.command('new')
@click.option('--title', prompt='Title of the study set')
@click.option('--desc', prompt='Description for the study set (optional)')
def new(title, desc):
"""
Create a new study set.
User supplies a title and a description.
If this study set does not exist, it is created.
"""
study_set = sets.StudySet(title, desc)
filepath = storage.create_studyset_file(study_set)
# automatically select this studyset
storage.link_selected_studyset(filepath)
click.echo('Study set created !')
@click.command('select')
@click.argument('studyset')
def select(studyset):
"""
Select a studyset.
    Focus on a studyset: every newly added card will be put directly into
    this studyset.
"""
studyset_path = os.path.join(storage.studyset_storage_path(), studyset)
storage.link_selected_studyset(studyset_path)
studyset_obj = storage.load_studyset(studyset_path).load()
click.echo('Selected studyset: %s' % studyset_obj.title)
click.echo('Next created cards will be automatically added '
'to this studyset.')
sets_group.add_command(new)
sets_group.add_command(select)
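# A sketch of exercising the group in-process with click's test runner (the
# storage side effects are real here, so a test would normally stub them out):
#
#     from click.testing import CliRunner
#     result = CliRunner().invoke(sets_group, ['new'],
#                                 input='My set\nA description\n')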
| 25.482759
| 78
| 0.703654
| 196
| 1,478
| 5.19898
| 0.362245
| 0.054956
| 0.058881
| 0.052993
| 0.068695
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.17862
| 1,478
| 57
| 79
| 25.929825
| 0.839374
| 0.280108
| 0
| 0
| 0
| 0
| 0.199005
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.115385
| false
| 0.038462
| 0.153846
| 0
| 0.269231
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
3eda7f92aad073987eebca83a079837bb3553721
| 5,908
|
py
|
Python
|
sdf/step.py
|
pschou/py-sdf
|
0a269ed155d026e29429d76666fb63c95d2b4b2c
|
[
"MIT"
] | null | null | null |
sdf/step.py
|
pschou/py-sdf
|
0a269ed155d026e29429d76666fb63c95d2b4b2c
|
[
"MIT"
] | null | null | null |
sdf/step.py
|
pschou/py-sdf
|
0a269ed155d026e29429d76666fb63c95d2b4b2c
|
[
"MIT"
] | null | null | null |
import numpy as np
import struct
import getpass
import struct
from datetime import datetime
edge_curve = {}
def _make_edge_curve(i,a,b,fp,v0,v1,s01):
a_str = struct.pack('<fff',a[0],a[1],a[2])
b_str = struct.pack('<fff',b[0],b[1],b[2])
f_val = a_str+b_str
r_val = b_str+a_str
if f_val in edge_curve:
n = edge_curve[f_val]
ec_dir = ".T."
elif r_val in edge_curve:
n = edge_curve[r_val]
ec_dir = ".F."
else:
fp.write("#{} = EDGE_CURVE('', #{}, #{}, #{},.T.);\n".format(i,v0,v1,s01)); n=i; i+=1
edge_curve[f_val] = n
ec_dir = ".T."
return i, n, ec_dir
def write_step(path, points, tol=0):
n = len(points) // 3
points = np.array(points, dtype='float32').reshape((-1, 3, 3))
normals = np.cross(points[:,1] - points[:,0], points[:,2] - points[:,0])
normals_len = np.linalg.norm(normals, axis=1).reshape((-1, 1))
normals /= normals_len
vec01 = points[:,1] - points[:,0]
vec01_len = np.linalg.norm(vec01, axis=1).reshape((-1, 1))
vec01 /= vec01_len
vec12 = points[:,2] - points[:,1]
vec12_len = np.linalg.norm(vec12, axis=1).reshape((-1, 1))
vec12 /= vec12_len
vec20 = points[:,0] - points[:,2]
vec20_len= np.linalg.norm(vec20, axis=1).reshape((-1, 1))
vec20 /= vec20_len
OPEN_SHELL = []
with open(path, 'w') as fp:
fp.write("ISO-10303-21;\n")
fp.write("HEADER;\n")
fp.write("FILE_DESCRIPTION(('STP203'),'2;1');\n")
fp.write("FILE_NAME('{}','{}',('{}'),('PythonSDF'),' ','pschou/py-sdf',' ');\n".format(path,datetime.now().strftime('%Y-%m-%dT%H:%M:%S'),getpass.getuser()))
fp.write("FILE_SCHEMA(('CONFIG_CONTROL_DESIGN'));\n")
fp.write("ENDSEC;\n")
fp.write("DATA;\n")
fp.write("#1 = CARTESIAN_POINT('', (0,0,0));\n")
fp.write("#2 = DIRECTION('', (0, 0, 1));\n")
fp.write("#3 = DIRECTION('', (1, 0, 0));\n")
fp.write("#4 = AXIS2_PLACEMENT_3D('',#1,#2,#3);\n")
i = 5
for j in range(n):
if any([vec01_len[j] < tol, vec12_len[j] < tol, vec20_len[j] < tol, normals_len[j] < tol]):
continue
#fp.write("#{} ".format(i))
fp.write("#{} = CARTESIAN_POINT('', ({},{},{}));\n".format(i,points[j,0,0],points[j,0,1],points[j,0,2])); p0=i;i+=1
fp.write("#{} = VERTEX_POINT('', #{});\n".format(i,p0)); v0=i;i+=1
fp.write("#{} = CARTESIAN_POINT('', ({},{},{}));\n".format(i,points[j,1,0],points[j,1,1],points[j,1,2])); p1=i;i+=1
fp.write("#{} = VERTEX_POINT('', #{});\n".format(i,p1)); v1=i;i+=1
fp.write("#{} = CARTESIAN_POINT('', ({},{},{}));\n".format(i,points[j,2,0],points[j,2,1],points[j,2,2])); p2=i;i+=1
fp.write("#{} = VERTEX_POINT('', #{});\n".format(i,p2)); v2=i;i+=1
#fp.write("#{} = CARTESIAN_POINT('', ({},{},{}));\n".format(i,points[j,0,0],points[j,0,1],points[j,0,2])); i+=1
#fp.write("#{} = DIRECTION('', ({}, {}, {}));\n".format(i, normals[j,0],normals[j,1],normals[j,2]); i+=1
fp.write("#{} = DIRECTION('', ({}, {}, {}));\n".format(i, vec01[j,0],vec01[j,1],vec01[j,2])); d01=i; i+=1
fp.write("#{} = VECTOR('',#{},1);\n".format(i,d01)); v01=i; i+=1
fp.write("#{} = LINE('',#{}, #{});\n".format(i,p0,v01)); L01=i; i+=1
fp.write("#{} = SURFACE_CURVE('', #{});\n".format(i,L01)); s01=i; i+=1
i, ec01, ec_dir01 = _make_edge_curve(i,points[j,0,:],points[j,1,:],fp,v0,v1,s01)
fp.write("#{} = DIRECTION('', ({}, {}, {}));\n".format(i, vec12[j,0],vec12[j,1],vec12[j,2])); d12=i; i+=1
fp.write("#{} = VECTOR('',#{},1);\n".format(i,d12)); v12=i; i+=1
fp.write("#{} = LINE('',#{}, #{});\n".format(i,p1,v12)); L12=i; i+=1
fp.write("#{} = SURFACE_CURVE('', #{});\n".format(i,L12)); s12=i; i+=1
#fp.write("#{} = EDGE_CURVE('', #{}, #{}, #{},.T.);\n".format(i,v1,v2,s12)); ec12=i; i+=1
i, ec12, ec_dir12 = _make_edge_curve(i,points[j,1,:],points[j,2,:],fp,v1,v2,s12)
fp.write("#{} = DIRECTION('', ({}, {}, {}));\n".format(i, vec20[j,0],vec20[j,1],vec20[j,2])); d20=i; i+=1
fp.write("#{} = VECTOR('',#{},1);\n".format(i,d20)); v20=i; i+=1
fp.write("#{} = LINE('',#{}, #{});\n".format(i,p2,v20)); L20=i; i+=1
fp.write("#{} = SURFACE_CURVE('', #{});\n".format(i,L20)); s20=i; i+=1
#fp.write("#{} = EDGE_CURVE('', #{}, #{}, #{},.T.);\n".format(i,v2,v0,s20)); ec20=i; i+=1
i, ec20, ec_dir20 = _make_edge_curve(i,points[j,2,:],points[j,0,:],fp,v2,v0,s20)
fp.write("#{} = ORIENTED_EDGE('',*,*,#{},{});\n".format(i,ec01,ec_dir01)); oe01=i; i+=1
fp.write("#{} = ORIENTED_EDGE('',*,*,#{},{});\n".format(i,ec12,ec_dir12)); oe12=i; i+=1
fp.write("#{} = ORIENTED_EDGE('',*,*,#{},{});\n".format(i,ec20,ec_dir20)); oe20=i; i+=1
fp.write("#{} = DIRECTION('', ({}, {}, {}));\n".format(i, normals[j,0],normals[j,1],normals[j,2])); n=i; i+=1
fp.write("#{} = AXIS2_PLACEMENT_3D('',#{},#{},#{});\n".format(i,p0,n,d01)); ap=i; i+=1
fp.write("#{} = PLANE('',#{});\n".format(i,ap)); plane=i; i+=1
fp.write("#{} = EDGE_LOOP('', (#{},#{},#{}));\n".format(i,oe01,oe12,oe20)); eL=i; i+=1
fp.write("#{} = FACE_BOUND('', #{},.T.);\n".format(i,eL)); fb=i; i+=1
fp.write("#{} = ADVANCED_FACE('', (#{}),#{},.T.);\n".format(i,fb,plane)); OPEN_SHELL.append(i); i+=1
fp.write("#{} = OPEN_SHELL('',(#{}));\n".format(i,",#".join([str(i) for i in OPEN_SHELL]))); osh=i; i+=1
fp.write("#{} = SHELL_BASED_SURFACE_MODEL('', (#{}));\n".format(i,osh)); sm=i; i+=1
fp.write("#{} = MANIFOLD_SURFACE_SHAPE_REPRESENTATION('', (#4, #{}));\n".format(i,sm)); i+=1
fp.write("ENDSEC;\n")
fp.write("END-ISO-10303-21;\n")
| 52.283186
| 164
| 0.488659
| 929
| 5,908
| 3.007535
| 0.158235
| 0.122763
| 0.100215
| 0.099857
| 0.419828
| 0.367215
| 0.307445
| 0.280601
| 0.271654
| 0.238726
| 0
| 0.071595
| 0.200914
| 5,908
| 112
| 165
| 52.75
| 0.520229
| 0.068382
| 0
| 0.067416
| 0
| 0
| 0.266035
| 0.068695
| 0
| 0
| 0
| 0
| 0
| 1
| 0.022472
| false
| 0.022472
| 0.05618
| 0
| 0.089888
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
3ee1169f26e39df8113aa1b6b00e7646bd86f543
| 6,100
|
py
|
Python
|
ros/src/tl_detector/light_classification/tl_classifier.py
|
jkoloda/CarND-Capstone
|
79ccd31930f5aab307a16db7b6c799a2ea54dc41
|
[
"MIT"
] | null | null | null |
ros/src/tl_detector/light_classification/tl_classifier.py
|
jkoloda/CarND-Capstone
|
79ccd31930f5aab307a16db7b6c799a2ea54dc41
|
[
"MIT"
] | null | null | null |
ros/src/tl_detector/light_classification/tl_classifier.py
|
jkoloda/CarND-Capstone
|
79ccd31930f5aab307a16db7b6c799a2ea54dc41
|
[
"MIT"
] | null | null | null |
from styx_msgs.msg import TrafficLight
import tensorflow as tf
import numpy as np
import rospy
import cv2
import os
MAX_IMAGE_WIDTH = 300
MAX_IMAGE_HEIGHT = 300
class TLClassifier(object):
"""Traffic light classifier based on a tensorflow model."""
def __init__(self, is_site=True):
"""Build, load and prepare traffic light classifier object.
Loads classifier trained on simulator or real data, depending on the
is_site flag coming from the configuration file.
"""
self.session = None
self.detection_graph = None
self.classes = {1: TrafficLight.RED,
2: TrafficLight.YELLOW,
3: TrafficLight.GREEN,
4: TrafficLight.UNKNOWN}
self.light_labels = ['RED', 'YELLOW', 'GREEN', 'UNKNOWN']
temp = os.path.dirname(os.path.realpath(__file__))
temp = temp.replace(
'ros/src/tl_detector/light_classification',
'models',
)
if is_site is False:
self.model_path = os.path.join(temp,
'frozen_inference_graph_sim.pb')
else:
self.model_path = os.path.join(temp,
'frozen_inference_graph_real.pb')
self.load_model(model_path=self.model_path)
def get_classification(self, image):
"""Determine the color of the traffic light in the image.
Args
----
image (cv::Mat): image containing the traffic light
Returns
-------
int: ID of traffic light color
(specified in styx_msgs/TrafficLight)
"""
class_idx, confidence = self.predict(image)
return class_idx
def load_model(self, model_path):
"""Load classifier (graph and session)."""
self.detection_graph = tf.Graph()
with tf.Session(graph=self.detection_graph) as sess:
self.session = sess
od_graph_def = tf.GraphDef()
with tf.gfile.GFile(model_path, 'rb') as fid:
serialized_graph = fid.read()
od_graph_def.ParseFromString(serialized_graph)
tf.import_graph_def(od_graph_def, name='')
def predict(self, image_np, min_score_thresh=0.5):
"""Predict traffic light state from image.
Parameters
----------
image_np : ndarray
Input image.
min_score_threshold : float
Confidence threshold for traffic light classification.
Returns
-------
light : TrafficLight
Light color of traffic light detected on input image.
score : float
Classification confidence score.
"""
image_tensor = self.detection_graph.\
get_tensor_by_name('image_tensor:0')
detection_boxes = self.detection_graph.\
get_tensor_by_name('detection_boxes:0')
detection_scores = self.detection_graph.\
get_tensor_by_name('detection_scores:0')
detection_classes = self.detection_graph.\
get_tensor_by_name('detection_classes:0')
num_detections = self.detection_graph.\
get_tensor_by_name('num_detections:0')
image_np = self.process_image(image_np)
input = [detection_boxes, detection_scores, detection_classes]
(boxes, scores, classes) = self.session.run(
input,
feed_dict={image_tensor: np.expand_dims(image_np, axis=0)})
scores = np.squeeze(scores)
classes = np.squeeze(classes)
boxes = np.squeeze(boxes)
# Traffic light state decision
        # In case multiple traffic lights are detected (as is, e.g., the case
        # in the simulator), we select the light with the highest accumulated score
accumulated_scores = np.zeros(len(self.classes))
accumulated_classes = np.zeros(len(self.classes))
for ii, score in enumerate(scores):
if score > min_score_thresh:
# light_class = self.classes[classes[ii]]
# return light_class, score
rospy.loginfo(self.light_labels[int(classes[ii] - 1)])
accumulated_scores[classes[ii] - 1] += score
accumulated_classes[classes[ii] - 1] += 1
if np.sum(accumulated_scores) > 0:
light_class_idx = np.argmax(accumulated_scores) + 1
confidence = accumulated_scores[light_class_idx - 1] / \
float(accumulated_classes[light_class_idx - 1])
return self.classes[light_class_idx], confidence
else:
return None, None
def process_image(self, img):
"""Pre-process imae so it can be passed directly to classifier.
Pre-processing consists of shrinkng the image to default maximum size
and converting in to RGB format (assuming that input is BGR).
Parameters
----------
img : ndarray
Input image to be processed.
Returns
-------
img : ndarray
Processed image.
"""
img = cv2.resize(img, (MAX_IMAGE_WIDTH, MAX_IMAGE_HEIGHT))
img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
return img
def shrink_image(self, img):
"""Shrink image if bigger than default maximum dimensions.
Aspect ratio is kept. If the image is smaller it is return as it is.
Parameters
----------
img : ndarray
Input image to be shrinked if necessary.
Returns
-------
img : ndarray
Shrinked image.
"""
height, width = img.shape[:2]
if MAX_IMAGE_HEIGHT < height or MAX_IMAGE_WIDTH < width:
scaling_factor = np.min(MAX_IMAGE_HEIGHT / float(height),
MAX_IMAGE_WIDTH / float(width))
img = cv2.resize(img, None, fx=scaling_factor,
fy=scaling_factor, interpolation=cv2.INTER_AREA)
return img
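# A shape check for the pre-processing step alone (a sketch; bypassing
# __init__ avoids loading the frozen inference graphs):
#
#     dummy = np.zeros((600, 800, 3), dtype=np.uint8)
#     clf = TLClassifier.__new__(TLClassifier)
#     assert clf.process_image(dummy).shape == (MAX_IMAGE_HEIGHT, MAX_IMAGE_WIDTH, 3)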
| 32.972973
| 79
| 0.587869
| 697
| 6,100
| 4.949785
| 0.288379
| 0.031304
| 0.041739
| 0.030435
| 0.114783
| 0.102609
| 0.102609
| 0.063768
| 0.027246
| 0.027246
| 0
| 0.008264
| 0.325574
| 6,100
| 184
| 80
| 33.152174
| 0.830335
| 0.267541
| 0
| 0.069767
| 0
| 0
| 0.052242
| 0.024396
| 0
| 0
| 0
| 0
| 0
| 1
| 0.069767
| false
| 0
| 0.081395
| 0
| 0.22093
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
3ee194cecfeab3512df97384ab2ebd0feb3a1a32
| 3,554
|
py
|
Python
|
esp.py
|
dries007/MicroPythonUtils
|
fba10989713f85ce4afa598c550737720df24648
|
[
"MIT"
] | null | null | null |
esp.py
|
dries007/MicroPythonUtils
|
fba10989713f85ce4afa598c550737720df24648
|
[
"MIT"
] | null | null | null |
esp.py
|
dries007/MicroPythonUtils
|
fba10989713f85ce4afa598c550737720df24648
|
[
"MIT"
] | null | null | null |
import os
import serial
import time
import binascii
import textwrap
import re
from wifi import WIFI_SSID, WIFI_PASS
def ctrl(key):
# Thank you https://github.com/zeevro/esp_file_sender/
return chr(ord(key.upper()) - ord('A') + 1)
class Esp:
def __init__(self, port, baudrate):
super().__init__()
# self.raw = serial.Serial(port, baudrate)
# if not self.raw.is_open:
# raise RuntimeError("Port {} won't open.".format(port))
def __del__(self):
self.reset()
def kill(self):
self.send(ctrl('C'), 2)
def reset(self):
# self.send(ctrl('D'), 5)
pass
def send(self, data, wait=0.100):
print(data.replace('\r\n', ''))
# self.raw.write(data.encode('ascii'))
# time.sleep(wait)
# out = self.raw.read_all()
# print(out.decode('ascii'), end="")
# return out
def settings(self, data=None, app=None):
with open(os.path.join(os.path.dirname(os.path.realpath(__file__)), 'boot.py'), 'rb') as f:
text = f.read().decode('ascii')
if data is None:
data = {}
data.setdefault('WIFI_SSID', WIFI_SSID)
data.setdefault('WIFI_PASS', WIFI_PASS)
for k, v in data.items():
text += '{} = {!r}\r\n'.format(k, v)
self.save_file('boot.py', text.encode('ascii'))
if app is not None:
app = app.replace('.py', '')
text = 'from boot import *\r\n'
text += "if machine.reset_cause() == SLEEP_RESET or not wait_for(timeout=5, message='To abort booting \"{}\", press GPIO0'):\r\n".format(app)
text += '\timport {}\r\n'.format(app)
text += '\t{}.main()\r\n'.format(app)
self.save_file('main.py', text.encode('ascii'))
def save_file(self, filename, text):
# self.send(ctrl('E'))
self.send('import os\r\n')
# self.send('import ubinascii\r\n')
self.send('import gc\r\n')
self.send('gc.collect()\r\n')
# self.send('os.remove("{}")\r\n'.format(filename))
self.send('f = open("{}", "wb")\r\n'.format(filename))
# for part in re.findall('.{1,100}', text.decode('ascii'), re.DOTALL):
# self.send('f.write(ubinascii.a2b_base64("{}"))\r\n'.format(binascii.b2a_base64(part.encode('ascii')).decode('ascii')[:-1]))
for part in re.findall('.{1,1000}', text.decode('ascii'), re.DOTALL):
self.send('f.write({!r})\r\n'.format(part))
# self.send('f.write({!r})\r\n'.format(text))
self.send('f.close()\r\n')
self.send('del f\r\n')
self.send('gc.collect()\r\n')
# self.send(ctrl('D'))
def delete(self, *params):
self.send('import os\r\n')
for param in params:
self.send('os.remove({!r})\r\n'.format(param))
def main():
import argparse
parser = argparse.ArgumentParser()
parser.add_argument('port', help='Serial port')
parser.add_argument('-b', '--baudrate', help='Serial baudrate', type=int, default=115200)
parser.add_argument('app', help='Input file')
parser.add_argument('drivers', help='Extra driver files', nargs='*')
args = parser.parse_args()
esp = Esp(args.port, args.baudrate)
esp.settings(app=args.app)
with open('apps/' + args.app, 'rb') as in_f:
esp.save_file(args.app, in_f.read())
for file in args.drivers:
with open('drivers/' + file, 'rb') as in_f:
esp.save_file(file, in_f.read())
if __name__ == '__main__':
main()
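# Example invocation (a sketch; the port and file names are assumptions):
#     python esp.py /dev/ttyUSB0 blink.py ssd1306.py
# This writes boot.py/main.py, then uploads apps/blink.py and drivers/ssd1306.py.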
| 32.018018
| 153
| 0.565841
| 504
| 3,554
| 3.884921
| 0.281746
| 0.020429
| 0.040858
| 0.035751
| 0.183861
| 0.139939
| 0.102145
| 0.083759
| 0.067416
| 0.029622
| 0
| 0.011499
| 0.241418
| 3,554
| 110
| 154
| 32.309091
| 0.714763
| 0.195273
| 0
| 0.058824
| 0
| 0.014706
| 0.181914
| 0.007389
| 0
| 0
| 0
| 0
| 0
| 1
| 0.147059
| false
| 0.044118
| 0.191176
| 0.014706
| 0.367647
| 0.014706
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
3ee1be975102fb088be8688d19317a0aa2d3e773
| 3,909
|
py
|
Python
|
dev/tools/leveleditor/pandac/libpandaodeModules.py
|
CrankySupertoon01/Toontown-2
|
60893d104528a8e7eb4aced5d0015f22e203466d
|
[
"MIT"
] | 1
|
2021-02-13T22:40:50.000Z
|
2021-02-13T22:40:50.000Z
|
dev/tools/leveleditor/pandac/libpandaodeModules.py
|
CrankySupertoonArchive/Toontown-2
|
60893d104528a8e7eb4aced5d0015f22e203466d
|
[
"MIT"
] | 1
|
2018-07-28T20:07:04.000Z
|
2018-07-30T18:28:34.000Z
|
dev/tools/leveleditor/pandac/libpandaodeModules.py
|
CrankySupertoonArchive/Toontown-2
|
60893d104528a8e7eb4aced5d0015f22e203466d
|
[
"MIT"
] | 2
|
2019-12-02T01:39:10.000Z
|
2021-02-13T22:41:00.000Z
|
from extension_native_helpers import *
Dtool_PreloadDLL('libpandaode')
from libpandaode import *
from extension_native_helpers import *
Dtool_PreloadDLL('libpanda')
from libpanda import *
def convert(self):
if self.getClass() == OdeGeom.GCSphere:
return self.convertToSphere()
elif self.getClass() == OdeGeom.GCBox:
return self.convertToBox()
elif self.getClass() == OdeGeom.GCCappedCylinder:
return self.convertToCappedCylinder()
elif self.getClass() == OdeGeom.GCPlane:
return self.convertToPlane()
elif self.getClass() == OdeGeom.GCRay:
return self.convertToRay()
elif self.getClass() == OdeGeom.GCTriMesh:
return self.convertToTriMesh()
elif self.getClass() == OdeGeom.GCSimpleSpace:
return self.convertToSimpleSpace()
elif self.getClass() == OdeGeom.GCHashSpace:
return self.convertToHashSpace()
elif self.getClass() == OdeGeom.GCQuadTreeSpace:
return self.convertToQuadTreeSpace()
Dtool_funcToMethod(convert, OdeGeom)
del convert
def getConvertedSpace(self):
return self.getSpace().convert()
Dtool_funcToMethod(getConvertedSpace, OdeGeom)
del getConvertedSpace
def getAABounds(self):
min = Point3()
max = Point3()
self.getAABB(min, max)
return (min, max)
Dtool_funcToMethod(getAABounds, OdeGeom)
del getAABounds
from extension_native_helpers import *
Dtool_PreloadDLL('libpanda')
from libpanda import *
def convert(self):
if self.getClass() == OdeGeom.GCSimpleSpace:
return self.convertToSimpleSpace()
elif self.getClass() == OdeGeom.GCHashSpace:
return self.convertToHashSpace()
elif self.getClass() == OdeGeom.GCQuadTreeSpace:
return self.convertToQuadTreeSpace()
Dtool_funcToMethod(convert, OdeSpace)
del convert
def getConvertedGeom(self, index):
return self.getGeom(index).convert()
Dtool_funcToMethod(getConvertedGeom, OdeSpace)
del getConvertedGeom
def getConvertedSpace(self):
return self.getSpace().convert()
Dtool_funcToMethod(getConvertedSpace, OdeSpace)
del getConvertedSpace
def getAABounds(self):
min = Point3()
max = Point3()
self.getAABB(min, max)
return (min, max)
Dtool_funcToMethod(getAABounds, OdeSpace)
del getAABounds
from extension_native_helpers import *
Dtool_PreloadDLL('libpanda')
from libpanda import *
def attach(self, body1, body2):
if body1 and body2:
self.attachBodies(body1, body2)
elif body1 and not body2:
self.attachBody(body1, 0)
elif not body1 and body2:
self.attachBody(body2, 1)
Dtool_funcToMethod(attach, OdeJoint)
del attach
def convert(self):
if self.getJointType() == OdeJoint.JTBall:
return self.convertToBall()
elif self.getJointType() == OdeJoint.JTHinge:
return self.convertToHinge()
elif self.getJointType() == OdeJoint.JTSlider:
return self.convertToSlider()
elif self.getJointType() == OdeJoint.JTContact:
return self.convertToContact()
elif self.getJointType() == OdeJoint.JTUniversal:
return self.convertToUniversal()
elif self.getJointType() == OdeJoint.JTHinge2:
return self.convertToHinge2()
elif self.getJointType() == OdeJoint.JTFixed:
return self.convertToFixed()
elif self.getJointType() == OdeJoint.JTNull:
return self.convertToNull()
elif self.getJointType() == OdeJoint.JTAMotor:
return self.convertToAMotor()
elif self.getJointType() == OdeJoint.JTLMotor:
return self.convertToLMotor()
elif self.getJointType() == OdeJoint.JTPlane2d:
return self.convertToPlane2d()
Dtool_funcToMethod(convert, OdeJoint)
del convert
from extension_native_helpers import *
Dtool_PreloadDLL('libpanda')
from libpanda import *
def getConvertedJoint(self, index):
return self.getJoint(index).convert()
Dtool_funcToMethod(getConvertedJoint, OdeBody)
del getConvertedJoint
| 27.921429
| 53
| 0.724738
| 399
| 3,909
| 7.037594
| 0.210526
| 0.096154
| 0.081197
| 0.081909
| 0.452991
| 0.445869
| 0.445869
| 0.429131
| 0.429131
| 0.429131
| 0
| 0.006828
| 0.175748
| 3,909
| 139
| 54
| 28.122302
| 0.86468
| 0
| 0
| 0.431193
| 0
| 0
| 0.011
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.091743
| false
| 0
| 0.091743
| 0.036697
| 0.449541
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
3ee350f95efe4a8c2344a53f97be58f2e3f0dcc2
| 1,087
|
py
|
Python
|
view_note.py
|
pushkar-anand/make-a-note
|
ca129dd1df1b62faad0c451e0818742bb1b1bc08
|
[
"Apache-2.0"
] | 1
|
2018-10-02T07:09:29.000Z
|
2018-10-02T07:09:29.000Z
|
view_note.py
|
pushkar-anand/make-a-note
|
ca129dd1df1b62faad0c451e0818742bb1b1bc08
|
[
"Apache-2.0"
] | 3
|
2018-10-01T13:40:13.000Z
|
2019-05-02T23:17:52.000Z
|
view_note.py
|
pushkar-anand/make-a-note
|
ca129dd1df1b62faad0c451e0818742bb1b1bc08
|
[
"Apache-2.0"
] | 6
|
2018-10-02T07:09:30.000Z
|
2019-06-09T17:09:49.000Z
|
import gi
import json
gi.require_version('Gtk', '3.0')
from gi.repository import Gtk
class NewNoteWindow(Gtk.Window):
def __init__(self, nid):
Gtk.Window.__init__(self, title="Note")
with open('notes.json') as data_file:
data = json.load(data_file)
notes = data["notes"]
#Looping through all the notes to check which note is being viewed
for note in notes:
print(note)
if note["note-id"] == nid:
self.title = note["note-title"]
self.note_text = note["note-text"]
self.cat = note["note-category"]
break
box = Gtk.Box(orientation=Gtk.Orientation.VERTICAL, spacing=3)
self.add(box)
self.label = Gtk.Label()
self.label.set_markup("<big><b>"+self.title+"</b></big>")
box.pack_start(self.label, True, True, 0)
self.label = Gtk.Label(self.note_text)
self.label.set_line_wrap(True)
self.label.set_justify(Gtk.Justification.FILL)
box.pack_start(self.label, True, True, 0)
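# The window expects a notes.json in the working directory shaped like this
# (a sketch of the schema implied by the keys read above):
#
#     {"notes": [{"note-id": 1,
#                 "note-title": "Groceries",
#                 "note-text": "Milk, eggs, bread",
#                 "note-category": "home"}]}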
| 30.194444
| 74
| 0.589696
| 146
| 1,087
| 4.260274
| 0.431507
| 0.101286
| 0.057878
| 0.054662
| 0.163987
| 0.096463
| 0.096463
| 0.096463
| 0
| 0
| 0
| 0.006386
| 0.279669
| 1,087
| 35
| 75
| 31.057143
| 0.787995
| 0.059798
| 0
| 0.076923
| 0
| 0
| 0.080313
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.038462
| false
| 0
| 0.115385
| 0
| 0.192308
| 0.038462
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
3ee37804938d5d76c8a9bfe4608d76c629788f81
| 4,588
|
py
|
Python
|
homeserver/voice_control/voice_controller.py
|
miikama/home-server
|
07a9dbb9438e3c316c37cb52ca3c709d0b059af1
|
[
"MIT"
] | null | null | null |
homeserver/voice_control/voice_controller.py
|
miikama/home-server
|
07a9dbb9438e3c316c37cb52ca3c709d0b059af1
|
[
"MIT"
] | 1
|
2019-11-30T10:59:28.000Z
|
2019-11-30T10:59:28.000Z
|
homeserver/voice_control/voice_controller.py
|
miikama/home-server
|
07a9dbb9438e3c316c37cb52ca3c709d0b059af1
|
[
"MIT"
] | null | null | null |
from homeserver.voice_control.google_speech import GoogleVoiceRecognition
from homeserver.voice_control.snowboy.snowboydecoder import HotwordDetector, play_audio_file
# make the voice control follow the device interface structure for control
from homeserver.interface import DeviceInterface, DeviceTarget
# import the DeviceCommand
from homeserver.command_handler import DeviceCommand
from homeserver import app, logger, device_handler
import datetime
import threading
class VoiceThread(threading.Thread):
def __init__(self, parent=None, **kvargs):
self.parent = parent
super(VoiceThread, self).__init__(**kvargs)
class VoiceController(DeviceInterface):
def __init__(self, start=True):
#### variables for the DeviceInterface ###
self.name="Voice Control"
self.connected = False
self.is_on = False
self.running = False
self._devices = []
        self.targets = {'voice_control'}  # a one-element set; set('voice_control') would split into characters
self.commands = [DeviceTarget(self.targets, "toggle", self.toggle_detection)]
self.dev_id = 200000 #TODO: read this from some config or smth
### ############# ###
self.google_recognizer = GoogleVoiceRecognition(app.config['GOOGLE_CREDENTIALS'])
        # a list of strings to help Google speech-to-text
self.google_keyphrases = device_handler.get_voice_keys()
self.interrupted = False
        # some parameters; these seem okay for a two-word command
self.silent_count_threshold = 2
self.recording_timeout = 10
# param to the snowboy detector
self.sensitivity = 0.5
self.model = app.config['SNOWBOY_MODEL']
self.recording_path = app.config['AUDIO_PATH_AFTER_DETECTION']
# the keyword detector is initialized in the start detector
self.detector = None
self.vthread = None
self.voice_callbacks = {}
if start:
self.start_detector()
def initialize_detector(self):
logger.info("model path: {}".format(self.model))
self.detector = HotwordDetector(self.model, sensitivity=self.sensitivity)
#set the path of the audio file saved
self.detector.set_recording_filepath(self.recording_path)
#the voicethread
self.vthread = VoiceThread(target=self._start_detection, parent=self)
def start_detector(self):
"""
Method to be called outside the VoiceController class to start
the detection.
"""
self.initialize_detector()
self.vthread.start()
self.is_on = True
self.connected = True
self.running = True
logger.info('Keyword detector started')
def _start_detection(self):
# main loop
self.detector.start(detected_callback=self.detection_callback,
interrupt_check=self.interrupt_callback,
sleep_time=0.03,
audio_recorder_callback=self.audio_recorded_callback,
silent_count_threshold=self.silent_count_threshold,
recording_timeout=self.recording_timeout)
def detection_callback(self):
"""This is called when the hot word is detected, this just logs the time
keyword is detected. The actual handling is done after audio is recorder
in audio detection callback
"""
logger.debug("Keyword detected at {}".format(datetime.datetime.now().isoformat() ) )
def audio_recorded_callback(self, fname):
"""
        Called after detecting the keyword, once an audio clip has been
        recorded and saved; recognizes what was said and then acts on the
        interpreted audio
"""
command_string = self.google_recognizer.interpret_command(fname,
keyphrases=self.google_keyphrases)
logger.debug("command_string: {}".format(command_string))
if command_string:
command = DeviceCommand.command_from_string(command_string)
logger.debug("sending command to device_handler: {}".format(command))
device_handler.handle_voice_command(command)
def toggle_detection(self):
if self.running:
self.stop_detection()
else:
self.start_detector()
def stop_detection(self):
logger.info("Stopping voice detection")
self.interrupted = True
self.vthread.join()
self.running = False
self.is_on = False
logger.info("Voice detection halted")
def interrupt_callback(self):
return self.interrupted
def command_subjects(self,command, *args):
"""Base methods, common error checking for all base classes implemented here"""
super().command_subjects(command)
#parse command
if len(command.arguments) < 1:
return
action = command.arguments[0]
func = None
# match the action in the command to the commands of this class
for command in self.commands:
if action == command.action:
func = command.action_func
break
if func is not None:
func()
| 24.275132
| 92
| 0.735833
| 577
| 4,588
| 5.691508
| 0.317158
| 0.023752
| 0.007308
| 0.015834
| 0.010962
| 0
| 0
| 0
| 0
| 0
| 0
| 0.004231
| 0.175676
| 4,588
| 188
| 93
| 24.404255
| 0.864093
| 0.209024
| 0
| 0.067416
| 0
| 0
| 0.070641
| 0.007347
| 0
| 0
| 0
| 0.005319
| 0
| 1
| 0.123596
| false
| 0
| 0.078652
| 0.011236
| 0.247191
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
3ee8fa63da0e0bfe5eb55277fd9f507afe7bfefe
| 1,528
|
py
|
Python
|
CondTools/SiPixel/test/SiPixelCPEGenericErrorParmReader_cfg.py
|
ckamtsikis/cmssw
|
ea19fe642bb7537cbf58451dcf73aa5fd1b66250
|
[
"Apache-2.0"
] | 13
|
2015-11-30T15:49:45.000Z
|
2022-02-08T16:11:30.000Z
|
CondTools/SiPixel/test/SiPixelCPEGenericErrorParmReader_cfg.py
|
ckamtsikis/cmssw
|
ea19fe642bb7537cbf58451dcf73aa5fd1b66250
|
[
"Apache-2.0"
] | 640
|
2015-02-11T18:55:47.000Z
|
2022-03-31T14:12:23.000Z
|
CondTools/SiPixel/test/SiPixelCPEGenericErrorParmReader_cfg.py
|
ckamtsikis/cmssw
|
ea19fe642bb7537cbf58451dcf73aa5fd1b66250
|
[
"Apache-2.0"
] | 51
|
2015-08-11T21:01:40.000Z
|
2022-03-30T07:31:34.000Z
|
import FWCore.ParameterSet.Config as cms
process = cms.Process("SiPixelCPEGenericErrorParmReaderTest")
process.load("CondCore.DBCommon.CondDBSetup_cfi")
process.load("FWCore.MessageService.MessageLogger_cfi")
process.source = cms.Source("EmptySource")
process.maxEvents = cms.untracked.PSet(
input = cms.untracked.int32(1)
)
#Uncomment these two lines to get from the global tag
#process.load('Configuration/StandardSequences/FrontierConditions_GlobalTag_cff')
#process.GlobalTag.globaltag = 'IDEAL_30X::All'
process.PoolDBESSource = cms.ESSource("PoolDBESSource",
process.CondDBSetup,
loadAll = cms.bool(True),
toGet = cms.VPSet(cms.PSet(
record = cms.string('SiPixelCPEGenericErrorParmRcd'),
tag = cms.string('SiPixelCPEGenericErrorParm')
)),
DBParameters = cms.PSet(
messageLevel = cms.untracked.int32(0),
authenticationPath = cms.untracked.string('.')
),
catalog = cms.untracked.string('file:PoolFileCatalog.xml'),
timetype = cms.string('runnumber'),
connect = cms.string('sqlite_file:siPixelCPEGenericErrorParm.db')
)
process.reader = cms.EDAnalyzer("SiPixelCPEGenericErrorParmReader")
process.myprint = cms.OutputModule("AsciiOutputModule")
process.p = cms.Path(process.reader)
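
# Usage sketch, not part of the original file (assumes a configured CMSSW
# environment): the reader is executed with cmsRun, e.g.
#   cmsRun SiPixelCPEGenericErrorParmReader_cfg.py
# Note that process.myprint is defined above but never scheduled; a line such
# as the following (an assumption, not in the original) would schedule it:
#   process.ep = cms.EndPath(process.myprint)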
| 40.210526
| 103
| 0.620419
| 130
| 1,528
| 7.246154
| 0.561538
| 0.063694
| 0.036093
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.00724
| 0.276832
| 1,528
| 37
| 104
| 41.297297
| 0.845249
| 0.116492
| 0
| 0
| 0
| 0
| 0.231626
| 0.193022
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.038462
| 0
| 0.038462
| 0.038462
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
3eec09187c14d47ed9948ca3461f050626849937
| 31,103
|
py
|
Python
|
carcassonne.py
|
pierre-dejoue/carcassonne
|
046c39fd61f17072e6d75a48ef65afa7be82a973
|
[
"MIT"
] | null | null | null |
carcassonne.py
|
pierre-dejoue/carcassonne
|
046c39fd61f17072e6d75a48ef65afa7be82a973
|
[
"MIT"
] | null | null | null |
carcassonne.py
|
pierre-dejoue/carcassonne
|
046c39fd61f17072e6d75a48ef65afa7be82a973
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
import argparse
import boundary
import functools
import graphics
import itertools
import json
import operator
import os.path
import random
import re
import secrets
import sys
import traceback
from boundary import Boundary
from boundary import Domain
from boundary import Orientation
from boundary import Vect
from collections import deque
from enum import Enum, auto
DEBUG_PRINTOUT = False
DEFAULT_TILE_SIZE = 100
SCREENSHOT_PATH = './screenshot.jpg'
DUMP_PATH = './dump.bmp'
class RiverPlacement(Enum):
USE_T = auto()
EXCLUDE_T = auto()
SHORT_RIVER = auto()
LONG_RIVER = auto()
RIVER_PLACEMENT_DEFAULT_T_POLICY = RiverPlacement.USE_T
RIVER_PLACEMENT_DEFAULT_LENGTH_POLICY = RiverPlacement.SHORT_RIVER
def warn(msg):
print('Warning: ' + msg)
def error(msg):
print('Error: ' + msg, file = sys.stderr)
exit(-1)
def override(f):
# Eye-candy decorator
return f
def handle_assertion_error():
_, _, tb = sys.exc_info()
tb_info = traceback.extract_tb(tb)
filename, line, func, text = tb_info[-1]
warn('An error occurred in file {} line {} in statement "{}"'.format(filename, line, text))
class Tile:
"""A tile (usually a game tile) defined by the description of its four sides (desc), its cardinality (max_nb) and optionally a graphical representation (img)"""
def __init__(self, desc = [None, None, None, None], max_nb = 1, img_path = '', tags = []):
self.desc = desc
self.max_nb = max_nb
self.img_path = img_path
self.img = None
self.tags = tags
def __repr__(self):
return 'Tile({})'.format(self.desc)
@classmethod
def from_json_description(cls, json_obj, basedir):
assert 'description' in json_obj.keys()
desc = json_obj['description']
max_nb = json_obj['cardinality'] if 'cardinality' in json_obj.keys() else 1
img_path = os.path.join(basedir, json_obj['img']) if 'img' in json_obj.keys() and json_obj['img'] else ''
tags = []
for id in range(10):
key = 'tag' + str(id)
if key in json_obj.keys():
tags.append(json_obj[key])
return cls(desc, max_nb, img_path, tags)
@classmethod
def from_uniform_color(cls, color, size, tag = ''):
tile = cls()
tile.img = graphics.draw_uniform_tile(color, size)
tile.tags.append(tag)
assert tile.get_size() == size
return tile
def load_image(self):
try:
self.img = graphics.load_image(self.img_path)
except Exception as e:
warn('Could not load image: {} (message: {})'.format(self.img_path, e))
self.img = None
def draw_image(self, size):
assert self.img is None
self.img = graphics.draw_game_tile(self.desc, size)
assert self.get_size() == size
def get_size(self):
if self.img is not None:
assert self.img.height() == self.img.width()
return self.img.width()
else:
return 0
def parse_tileset_description_file(json_file):
fp = None
cumul = 0
try:
fp = open(json_file, 'r')
tileset_json = json.load(fp)
assert 'tiles' in tileset_json.keys()
for tile_json in tileset_json['tiles']:
tile = Tile.from_json_description(tile_json, os.path.dirname(json_file))
assert tile.max_nb >= 0
if tile.max_nb > 0:
if 'start' in tile.tags:
assert tile.max_nb == 1
cumul += tile.max_nb
yield tile
except FileNotFoundError:
warn('Could not load file {}'.format(json_file))
except AssertionError:
handle_assertion_error()
except Exception:
warn('Error parsing file {}'.format(json_file))
raise
finally:
if fp is not None:
fp.close()
if cumul > 0:
print('Loaded {} tiles from file {}'.format(cumul, json_file))
def load_or_draw_tile_images(tileset, draw_all = False):
assert graphics.is_init()
tile_size = 0
if not draw_all:
for tile in tileset:
tile.load_image()
if tile.get_size() != 0:
if tile_size == 0:
tile_size = tile.get_size()
elif tile.get_size() != tile_size:
error('Image size of file {} ({}) does not match the previous size ({})'.format(tile.img_path, tile.get_size(), tile_size))
if tile_size == 0:
tile_size = DEFAULT_TILE_SIZE
for tile in tileset:
if tile.img is None:
tile.draw_image(tile_size)
assert tile.img is not None
return tile_size
class PositionedTile:
"""Declare a position on the grid where a tile could be placed"""
def __init__(self, pos, segments = []):
assert isinstance(pos, Vect)
self.pos = pos
if len(segments) == 1:
self.segment = segments[0] # Common segment between the current map boundary and this tile
else:
self.segment = None # Use None if unknown, or to indicate a forbidden position
@classmethod
def from_boundary_edge(cls, border, point, edge, domain = Domain.EXTERIOR):
assert isinstance(border, Boundary)
assert isinstance(point, Vect)
assert isinstance(edge, Vect)
tile_border = boundary.from_edge(point, edge, Orientation.COUNTERCLOCKWISE, domain)
pos = tile_border.bottom_left()
tile_border.rotate_to_start_with(pos)
return cls(pos, border.common_segments(tile_border))
def __repr__(self):
return 'PositionedTile(pos = {}, segment = {})'.format(self.pos, self.segment)
def get_l1_distance(self):
return self.pos.l1_distance()
def get_segment(self):
return self.segment if self.segment is not None else (0, 0, 0)
def get_segment_length(self):
(_, _, L) = self.get_segment()
return L
def iter_segment(self):
(_, j, L) = self.get_segment()
return self.get_boundary().iter_slice(j, j + L)
def iter_complement_segment(self):
(_, j, L) = self.get_segment()
tile_border = self.get_boundary()
if L == 0:
return tile_border.iter_all(j)
else:
return tile_border.iter_slice(j + L, j)
def get_boundary(self, desc = [None, None, None, None]):
return boundary.get_tile(self.pos, desc)
class PlacedTile(PositionedTile):
"""Declares a Tile placed on the grid, with its position and orientation (r)"""
def __init__(self, tile, pos, r, segment = None):
assert isinstance(tile, Tile)
PositionedTile.__init__(self, pos, [] if segment is None else [segment])
self.tile = tile
self.r = r
@override
def __repr__(self):
return 'PlacedTile(pos = {}, r = {}, segment = {}, tile = {})'.format(self.pos, self.r, self.segment, self.tile)
@classmethod
def from_positioned_tile(cls, pos_tile, tile, r):
assert isinstance(pos_tile, PositionedTile)
assert isinstance(tile, Tile)
return cls(tile, pos_tile.pos, r, pos_tile.segment)
def draw(self, display):
assert isinstance(display, graphics.GridDisplay)
assert self.tile.img is not None
display.set_tile(self.tile.img, self.pos.x, self.pos.y, self.r)
@override
def get_boundary(self):
desc = deque(self.tile.desc)
desc.rotate(self.r)
return PositionedTile.get_boundary(self, desc)
class CompositeTile:
"""A super-tile made of several unit tiles (e.g. the city of Carcasonne)"""
class Elt:
def __init__(self, tile, offset):
assert isinstance(tile, Tile)
assert isinstance(offset, Vect)
self.tile = tile
self.offset = offset
vect_re = re.compile(r'[Vv]ect_(\d+)_(\d+)')
def __init__(self):
self.elts = []
def append(self, tile):
offset = None
for tag in tile.tags:
result = self.vect_re.match(tag)
if result:
offset = Vect(int(result.group(1)), int(result.group(2)))
if offset:
self.elts.append(CompositeTile.Elt(tile, offset))
else:
warn('Could not find the offset pattern in the tags for tile {}. Tags = {}.'.format(tile, tile.tags))
def __reduce(self, fun, initializer = None):
self.elts.sort(key=operator.attrgetter('offset'))
return functools.reduce(fun, self.elts, initializer)
def draw(self, display, pos, r = 0):
assert isinstance(pos, Vect)
assert isinstance(display, graphics.GridDisplay)
def draw_elt(_, elt):
PlacedTile(elt.tile, pos + elt.offset.rotate(r), r).draw(display)
return None
self.__reduce(draw_elt)
def get_boundary(self, pos, r = 0):
assert isinstance(pos, Vect)
def merge_boundary(border, elt):
border.merge(PlacedTile(elt.tile, pos + elt.offset.rotate(r), r).get_boundary())
return border
return self.__reduce(merge_boundary, Boundary())
class TileSubset:
def __init__(self, predicate, shuffle = True, output_n = -1):
self.predicate = predicate
self.shuffle = shuffle # Shuffle result
self.output_n = output_n # If < 0, output all
def partition_iter(self, tileset_iter):
it0, it1 = itertools.tee(tileset_iter)
selection = list(filter(self.predicate, it0))
if self.shuffle:
selection = random.sample(selection, len(selection))
if self.output_n >= 0:
selection = selection[:self.output_n]
return selection, itertools.filterfalse(self.predicate, it1)
def partition(self, tileset_iter):
part1, part2_iter = self.partition_iter(tileset_iter)
return part1, list(part2_iter)
@staticmethod
def regular_start():
def pred_regular_start(tile):
return 'start' in tile.tags and 'river' not in tile.tags
return TileSubset(pred_regular_start, output_n = 1)
@staticmethod
def carcassonne_city():
def pred_city(tile):
return 'carcassonne_city' in tile.tags
return TileSubset(pred_city, shuffle = False)
@staticmethod
def river():
def pred_river(tile):
return 'river' in tile.tags
return TileSubset(pred_river, shuffle = False)
@staticmethod
def river_source(n = -1):
def pred_river_source(tile):
return 'river' in tile.tags and 'source' in tile.tags
return TileSubset(pred_river_source, output_n = n)
@staticmethod
def river_exclude_t_shaped():
def pred_river_t_shaped(tile):
return 'river' in tile.tags and list(tile.desc).count('R') == 3
return TileSubset(pred_river_t_shaped, output_n = 0)
@staticmethod
def river_not_source_nor_sink():
def pred_river_others(tile):
return 'river' in tile.tags and 'source' not in tile.tags and 'lake' not in tile.tags
return TileSubset(pred_river_others)
@staticmethod
def river_sink(n = -1):
def pred_river_sink(tile):
return 'river' in tile.tags and 'lake' in tile.tags
return TileSubset(pred_river_sink, output_n = n)
@staticmethod
def shuffle_remaining():
return TileSubset(lambda _: True)
@staticmethod
def exclude_remaining(warn_on_excluded = True):
def pred_exclude_remaining(tile):
if warn_on_excluded:
warn('Excluded tile: {}'.format(tile))
return True
return TileSubset(pred_exclude_remaining, output_n = 0)
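
def _tile_subset_sketch():
    # Illustrative sketch, not part of the original module: TileSubset.partition()
    # splits an iterable of tiles on a predicate. With a minimal hypothetical
    # tile type it behaves as follows (river tiles first, the rest second).
    class FakeTile:
        def __init__(self, tags):
            self.tags = tags
    tiles = [FakeTile(['river']), FakeTile(['city']), FakeTile(['river', 'lake'])]
    rivers, others = TileSubset.river().partition(tiles)
    assert len(rivers) == 2 and len(others) == 1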
def iterate_tile_predicates(tile_predicates, tileset_iter):
remaining = tileset_iter
for predicate in tile_predicates:
tile_subset, remaining = predicate.partition_iter(remaining)
yield tile_subset
TileSubset.exclude_remaining().partition_iter(remaining)
def iterate_tilesets(river_tileset, regular_tileset, river_tileset_period = 0, infinite = False):
river_flag = len(river_tileset) > 0
first = True
while True:
if river_flag:
if river_tileset_period == 0:
# Single use of the river tileset
if first:
yield river_tileset
else:
# Reuse the river tileset periodically
yield river_tileset
for _ in range(max(1, river_tileset_period)):
yield regular_tileset
else:
yield regular_tileset
if not infinite:
break
first = False
def shuffle_tileset(tileset, first_tileset_flag, river_placement_policies = []):
river_flag = any('river' in tile.tags for tile in tileset)
all_tiles = itertools.chain.from_iterable(itertools.repeat(tile, tile.max_nb) for tile in tileset)
if river_flag:
river_long = RiverPlacement.LONG_RIVER in river_placement_policies
river_exclude_t_shaped = RiverPlacement.EXCLUDE_T in river_placement_policies
# River sources
if river_long and not first_tileset_flag:
nb_of_sources = 0
else:
nb_of_sources = 1
# River sinks
if river_exclude_t_shaped:
nb_of_sinks = 1
else:
nb_of_sinks = 2
if river_long:
nb_of_sinks = nb_of_sinks - 1
# Predicates
tile_predicates = [
TileSubset.river_source(nb_of_sources)
]
if river_exclude_t_shaped:
tile_predicates += [
TileSubset.river_exclude_t_shaped()
]
tile_predicates += [
TileSubset.river_not_source_nor_sink(),
TileSubset.river_sink(nb_of_sinks),
]
elif first_tileset_flag:
tile_predicates = [
TileSubset.regular_start(),
TileSubset.shuffle_remaining()
]
else:
tile_predicates = [
TileSubset.shuffle_remaining()
]
return iterate_tile_predicates(tile_predicates, all_tiles)
class CandidateTiles:
def __init__(self, on_update = None, on_delete = None):
assert not on_update or callable(on_update)
assert not on_delete or callable(on_delete)
self.sorted_positions = [] # List of positions
self.tiles = dict() # Dict of position -> PositionedTile
self.nb_to_be_deleted = 0
self.on_update = on_update
self.on_delete = on_delete
def __len__(self):
return len(self.tiles)
def allocated(self):
return len(self.sorted_positions)
@staticmethod
def to_be_deleted(pos_tile):
# Ad hoc criteria to identify a tile to be deleted
return pos_tile.get_segment_length() == 0
def iterate(self):
for pos in self.sorted_positions:
if pos in self.tiles:
yield self.tiles[pos]
def update(self, pos_tile):
assert isinstance(pos_tile, PositionedTile)
if self.on_update:
self.on_update(pos_tile)
if self.to_be_deleted(pos_tile):
self.delete(pos_tile.pos)
else:
if pos_tile.pos not in self.tiles:
if pos_tile.pos not in self.sorted_positions:
self.sorted_positions.append(pos_tile.pos)
else:
# We are restoring a deleted entry
assert self.nb_to_be_deleted > 0
self.nb_to_be_deleted -= 1
self.tiles[pos_tile.pos] = pos_tile
def delete(self, pos):
assert isinstance(pos, Vect)
if self.on_delete:
self.on_delete(pos)
if pos in self.tiles:
self.nb_to_be_deleted += 1
del self.tiles[pos]
def __resize(self):
assert self.allocated() == len(self) + self.nb_to_be_deleted
assert all(self.sorted_positions[idx] not in self.tiles for idx in range(len(self), self.allocated()))
del self.sorted_positions[len(self):]
self.nb_to_be_deleted = 0
assert self.allocated() == len(self) + self.nb_to_be_deleted
def force_resize(self):
self.sorted_positions.sort(key = lambda pos: 0 if pos in self.tiles else 1)
self.__resize()
def __sort_key(self, key_on_positioned_tile, reverse, pos):
if pos not in self.tiles:
return -sys.maxsize if reverse else sys.maxsize
else:
return key_on_positioned_tile(self.tiles[pos])
def __sort(self, key_on_positioned_tile, reverse):
self.sorted_positions.sort(key = lambda pos: self.__sort_key(key_on_positioned_tile, reverse, pos), reverse = reverse)
def sort(self, key, reverse = False):
self.__sort(key, reverse)
# Resize if the number of tiles marked for deletion exceeds a certain threshold
if len(self) > 0 and (self.allocated() / len(self)) > 1.333:
self.__resize()
def debug_printout(self):
print('Candidates: (used/total: {}/{})'.format(len(self.tiles), len(self.sorted_positions)))
for pos in self.sorted_positions:
if pos in self.tiles:
print('nb_contact_sides={}, pos={}'.format(self.tiles[pos].get_segment_length(), pos))
else:
print('to_be_deleted, pos={}'.format(pos))
def validate_tile_placement(placed_tile, border):
# Trivial except for river tiles
if 'R' in Boundary.label_getter(placed_tile.iter_segment()):
test_border = border.copy()
test_border.merge(placed_tile.get_boundary())
for (point, edge, label) in placed_tile.iter_complement_segment():
if label == 'R':
test_tile_border = boundary.from_edge(point, edge, Orientation.COUNTERCLOCKWISE, Domain.EXTERIOR)
common_segments = test_border.common_segments(test_tile_border)
if len(common_segments) != 1:
return False
(_, _, L) = common_segments[0]
if L != 1:
return False
return True
def update_border_and_candidate_tiles(placed_tile, border, candidate_tiles):
"""
This function updates the map boundary and the candidate tile placements
Arguments:
placed_tile The tile being added to the map boundary
border The current map boundary
candidate_tiles The list of candidate tiles along the map boundary
Notes:
A candidate tile placement is an unoccupied tile adjacent to the map boundary.
In order to prioritize a tile placement among other candidates, the following parameters are used:
- The length of the segment in contact with the map boundary
- The L1 distance of the tile to the center of the map
"""
assert isinstance(placed_tile, PlacedTile)
assert isinstance(border, Boundary)
assert isinstance(candidate_tiles, CandidateTiles)
# Merge the newly placed tile to the map boundary
border.merge(placed_tile.get_boundary())
# Account for the change in the map boundary in candidate_tiles
candidate_tiles.delete(placed_tile.pos)
neighbor_edges = [(point, edge) for (point, edge, _) in placed_tile.iter_complement_segment()]
neighbor_edges.extend([(point + edge, edge) for (point, edge) in neighbor_edges[:-1]])
tiles_to_update = [PositionedTile.from_boundary_edge(border, point, edge) for (point, edge) in neighbor_edges]
for pos_tile in tiles_to_update:
candidate_tiles.update(pos_tile)
# Sort the updated list of candidates
candidate_tiles.sort(key=PlacedTile.get_l1_distance)
candidate_tiles.sort(key=PlacedTile.get_segment_length, reverse=True)
if DEBUG_PRINTOUT:
candidate_tiles.debug_printout()
return placed_tile
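
def _priority_sort_sketch():
    # Illustrative sketch, not part of the original module: the two stable
    # sorts above amount to a lexicographic order, longest contact segment
    # first and, among equal lengths, smallest L1 distance to the center.
    candidates = [(1, 5), (3, 2), (3, 7), (2, 1)]  # (segment_length, l1_distance)
    candidates.sort(key=lambda c: (-c[0], c[1]))
    assert candidates == [(3, 2), (3, 7), (2, 1), (1, 5)]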
def select_tile_placement(candidate_placements):
assert isinstance(candidate_placements, list) # NB: A list of PlacedTile
assert len(candidate_placements) > 0
# Nothing fancy
return candidate_placements[0]
def find_candidate_placements(tile, border, candidate_tiles, max_candidates = -1, force_edge_label = None):
assert isinstance(tile, Tile)
assert isinstance(border, Boundary)
assert len(border) > 0
assert isinstance(candidate_tiles, CandidateTiles)
assert len(candidate_tiles) > 0
candidate_placements = []
for pos_tile in candidate_tiles.iterate():
(i0, j0, L0) = pos_tile.get_segment()
assert L0 > 0
tile_border = pos_tile.get_boundary(list(tile.desc))
# Recompute PositionedTile because the common segment's 'i' index will not match
pos_tile = PositionedTile(pos_tile.pos, border.common_segments(tile_border))
(i1, j1, L1) = pos_tile.get_segment()
if (j0, L0) != (j1, L1):
warn('Incoherent common segments for tile at {} in candidate_tiles: {} and computed against the current border: {}'.format(pos_tile.pos, (i0, j0, L0), (i1, j1, L1)))
continue
if force_edge_label is not None and force_edge_label not in Boundary.label_getter(border.iter_slice(i1, i1 + L1)):
continue
for r in border.find_matching_rotations(tile_border, pos_tile.get_segment()):
placed_tile = PlacedTile.from_positioned_tile(pos_tile, tile, r)
if validate_tile_placement(placed_tile, border):
candidate_placements.append(placed_tile)
if max_candidates > 0 and len(candidate_placements) >= max_candidates:
break
return candidate_placements
def place_carcassonne_city(tileset, candidate_tiles, display, z, pos, r = 0):
assert len(tileset) > 0
assert isinstance(pos, Vect)
if len(tileset) != 12:
warn('Expected 12 tiles for the city of Carcassonne')
composite_tile = CompositeTile()
for tile in tileset:
assert 'carcassonne_city' in tile.tags
composite_tile.append(tile)
composite_tile.draw(display, pos, r)
display.update(z)
border = composite_tile.get_boundary(pos, r)
neighbor_tiles = [PositionedTile.from_boundary_edge(border, point, edge) for (point, edge, _) in border.iter_all()]
for pos_tile in neighbor_tiles:
candidate_tiles.update(pos_tile)
return border
def parse_river_placement_policies(policies):
result = []
# Policy: T-shaped tile
if RiverPlacement.USE_T in policies:
result.append(RiverPlacement.USE_T)
elif RiverPlacement.EXCLUDE_T in policies:
result.append(RiverPlacement.EXCLUDE_T)
else:
result.append(RIVER_PLACEMENT_DEFAULT_T_POLICY)
# Policy: River length
if RiverPlacement.SHORT_RIVER in policies:
result.append(RiverPlacement.SHORT_RIVER)
elif RiverPlacement.LONG_RIVER in policies:
result.append(RiverPlacement.LONG_RIVER)
else:
result.append(RIVER_PLACEMENT_DEFAULT_LENGTH_POLICY)
assert len(result) == 2
return result
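
def _river_policy_sketch():
    # Illustrative sketch, not part of the original module: the parser always
    # returns exactly one T-tile policy and one length policy, falling back to
    # the defaults when a category is not specified.
    assert parse_river_placement_policies([]) == \
        [RiverPlacement.USE_T, RiverPlacement.SHORT_RIVER]
    assert parse_river_placement_policies([RiverPlacement.LONG_RIVER]) == \
        [RiverPlacement.USE_T, RiverPlacement.LONG_RIVER]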
def main():
parser = argparse.ArgumentParser(description='Display a randomized Carcassonne map')
parser.add_argument('files', metavar='FILE', nargs='*', help='Tile description file (JSON format)')
parser.add_argument('-d', '--debug', dest='debug_mode', action='store_true', help='Display non-game tiles, etc.')
parser.add_argument('-n', metavar='N', type=int, dest='max_tiles', default = 0, help='Number of tiles to display (Default: The whole tileset)')
parser.add_argument('-z', '--zoom-factor', metavar='Z', type=float, dest='zoom_factor', default = 1.0, help='Initial zoom factor (Default: 1.0)')
parser.add_argument('--draw-all', dest='draw_all', action='store_true', help='Draw all tiles')
parser.add_argument('-f', '--full-screen', dest='full_screen', action='store_true', help='Full screen')
parser.add_argument('-s', '--screenshot', dest='take_screenshot', action='store_true', help='Take a screenshot of the final display')
parser.add_argument('--dump', dest='dump_to_img', action='store_true', help='Dump the final grid to an image')
parser.add_argument('--river-policy', type=str, dest='river_policy', choices=[policy.name for policy in RiverPlacement], action='append', default=[], help='Placement policies for the river tileset. Can be used multiple times')
parser.add_argument('--river-period', metavar='P', type=int, dest='river_period', default=1, help='Period of repetition of the river tileset. Set to zero for a single use of the river tileset')
parser.add_argument('--seed', metavar='INT', type=int, dest='seed', default = 0, help='A seed for the random generator (Default: Use a system generated seed)')
args = parser.parse_args()
# Set random seed
rng_seed = args.seed
if rng_seed == 0:
rng_seed = secrets.randbits(64)
print('Random seed: {}'.format(rng_seed))
random.seed(rng_seed)
# Load tileset (JSON files)
tileset = list(itertools.chain.from_iterable(parse_tileset_description_file(json_file) for json_file in args.files))
if len(tileset) == 0:
error('No tiles loaded')
# River tiles placement policy and period
river_placement_policies = parse_river_placement_policies([RiverPlacement[policy] for policy in args.river_policy])
river_tileset_period = args.river_period if args.river_period >= 0 else 0
if args.debug_mode and any('river' in tile.tags for tile in tileset):
print('river_placement_policies: {}'.format([policy.name for policy in river_placement_policies]))
print('river_tileset_period: {}'.format(river_tileset_period))
try:
# Load tile images, and draw missing ones
graphics.init()
tile_size = load_or_draw_tile_images(tileset, args.draw_all)
carcassonne_city_tileset, tileset = TileSubset.carcassonne_city().partition_iter(tileset)
city_start_flag = len(carcassonne_city_tileset) > 0
river_tileset, regular_tileset = TileSubset.river().partition(tileset)
del tileset
# Non-game tiles
riverside_tile = Tile.from_uniform_color((217, 236, 255), tile_size, 'riverside')
forbidden_tile = Tile.from_uniform_color((100, 20, 20), tile_size, 'forbidden')
segment_length_tiles = {
0: forbidden_tile,
1: Tile.from_uniform_color((10, 60, 10), tile_size, 'one_side'),
2: Tile.from_uniform_color((40, 120, 40), tile_size, 'two_sides'),
3: Tile.from_uniform_color((70, 180, 70), tile_size, 'three_sides')
}
# Open display
(w, h) = (0, 0) if args.full_screen else (1280, 720)
display = graphics.GridDisplay(w, h, tile_size)
print('Press ESCAPE in the graphics window to quit', flush = True)
# Place random tiles. The map must grow!
candidate_tiles = CandidateTiles(
on_update = lambda pos_tile: display.set_tile(segment_length_tiles[pos_tile.get_segment_length()].img, pos_tile.pos.x, pos_tile.pos.y) if args.debug_mode else None,
on_delete = None)
z = args.zoom_factor
border = place_carcassonne_city(carcassonne_city_tileset, candidate_tiles, display, z, Vect(-2, -1)) if city_start_flag else Boundary()
total_nb_tiles_placed = 0
total_nb_tiles_not_placed = 0
first_tileset_flag = not city_start_flag
all_done_flag = False
for tileset in iterate_tilesets(river_tileset, regular_tileset, river_tileset_period, infinite = (args.max_tiles > 0)):
for tiles_to_place in shuffle_tileset(tileset, first_tileset_flag, river_placement_policies):
local_nb_tiles_placed = 0
while len(tiles_to_place) > 0:
tiles_not_placed = []
for tile in tiles_to_place:
if args.max_tiles > 0 and total_nb_tiles_placed >= args.max_tiles:
all_done_flag = True
break
if len(border) == 0:
# The first tile of the map is placed at the center
placed_tile = PlacedTile(tile, Vect(0, 0), r = 0)
else:
forced_segment = 'R' if 'river' in tile.tags and 'source' not in tile.tags else None
max_candidates = 1
candidate_placements = find_candidate_placements(tile, border, candidate_tiles, max_candidates, forced_segment)
placed_tile = select_tile_placement(candidate_placements) if len(candidate_placements) > 0 else None
if placed_tile:
update_border_and_candidate_tiles(placed_tile, border, candidate_tiles)
placed_tile.draw(display)
total_nb_tiles_placed += 1
local_nb_tiles_placed += 1
# z = 0.995 * z
# display.update(z, 100)
else:
tiles_not_placed.append(tile)
if all_done_flag:
break
if len(tiles_not_placed) == len(tiles_to_place):
# no progress was made; stop here
total_nb_tiles_not_placed += len(tiles_not_placed)
for tile in tiles_not_placed:
warn('Could not place tile: {}'.format(tile))
break
assert len(tiles_not_placed) < len(tiles_to_place)
tiles_to_place = tiles_not_placed
# Done with the current tiles subset
if DEBUG_PRINTOUT or args.debug_mode:
print('total_nb_tiles_placed: {} (+{})'.format(total_nb_tiles_placed, local_nb_tiles_placed))
if all_done_flag:
break
# Done with the current tileset
if all_done_flag:
break
first_tileset_flag = False
display.update(z)
# Completely done!
display.update(z)
print('Done!')
print('total_nb_tiles_not_placed: {}'.format(total_nb_tiles_not_placed))
print('total_nb_tiles_placed: {}'.format(total_nb_tiles_placed))
sys.stdout.flush()
# Wait until the user quits
while True:
display.check_event_queue(200)
except graphics.MustQuit:
pass
finally:
if args.debug_mode and 'display' in locals():
print(display.get_debug_info())
if (args.take_screenshot or args.debug_mode) and 'display' in locals():
display.take_screenshot(SCREENSHOT_PATH)
print('Screenshot saved in {}'.format(SCREENSHOT_PATH))
if args.dump_to_img and 'display' in locals():
display.dump_to_img(DUMP_PATH, args.zoom_factor)
print('Dump grid to {}'.format(DUMP_PATH))
graphics.quit()
return 0
if __name__ == "__main__":
main()
| 36.764775
| 230
| 0.635051
| 3,985
| 31,103
| 4.727227
| 0.118444
| 0.012634
| 0.010086
| 0.00637
| 0.266907
| 0.179106
| 0.115777
| 0.074796
| 0.064232
| 0.037796
| 0
| 0.008826
| 0.271453
| 31,103
| 845
| 231
| 36.808284
| 0.822507
| 0.070347
| 0
| 0.197115
| 0
| 0
| 0.070754
| 0.004027
| 0
| 0
| 0
| 0
| 0.081731
| 1
| 0.125
| false
| 0.001603
| 0.030449
| 0.028846
| 0.267628
| 0.030449
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
3eecad1535b44d09acd50ef4de76145c633066a1
| 2,890
|
py
|
Python
|
project/classification/posture_classification/ops/data_processor.py
|
jh-lau/solid_ai_waddle
|
b966f2c6e8b6b48c62064d58461692231aa2116b
|
[
"MIT"
] | null | null | null |
project/classification/posture_classification/ops/data_processor.py
|
jh-lau/solid_ai_waddle
|
b966f2c6e8b6b48c62064d58461692231aa2116b
|
[
"MIT"
] | null | null | null |
project/classification/posture_classification/ops/data_processor.py
|
jh-lau/solid_ai_waddle
|
b966f2c6e8b6b48c62064d58461692231aa2116b
|
[
"MIT"
] | null | null | null |
"""
@Author : liujianhan
@Date : 2018/6/2 11:59 AM
@Project : posture_classification
@FileName : data_processor.py
@Description : Placeholder
"""
import os
from typing import Tuple
import pandas as pd
from keras.applications.resnet50 import preprocess_input
from keras.preprocessing.image import ImageDataGenerator
from sklearn.utils import shuffle
import numpy as np
def data_generator_flow(_train_dir: str,
_valid_dir: str,
_test_dir: str,
batch_size: int = 32,
target_size: Tuple = (256, 256),
multi_output_mode: bool = False) -> Tuple:
"""
Data generator function.
@param _train_dir: training data directory
@param _valid_dir: validation data directory
@param _test_dir: test data directory
@param batch_size: batch size
@param target_size: target image size
@param multi_output_mode: multi-output mode
@return: tuple of generators
"""
train_df = pd.read_csv(os.path.join(_train_dir, 'train.csv'))
valid_df = pd.read_csv(os.path.join(_valid_dir, 'valid.csv'))
test_df = pd.read_csv(os.path.join(_test_dir, 'test.csv'))
if not multi_output_mode:
train_df.label = train_df.label.astype('str')
valid_df.label = valid_df.label.astype('str')
test_df.label = test_df.label.astype('str')
train_data_gen = ImageDataGenerator(
preprocessing_function=preprocess_input,
width_shift_range=.2,
height_shift_range=.2,
shear_range=.2,
zoom_range=.2,
channel_shift_range=np.random.choice(100),
horizontal_flip=True,
)
train_data_flow = train_data_gen.flow_from_dataframe(
dataframe=train_df,
target_size=target_size,
directory=_train_dir,
batch_size=batch_size,
class_mode='multi_output' if multi_output_mode else 'binary',
x_col='filename',
y_col=['label', 'score'] if multi_output_mode else 'label',
)
# Do not apply data augmentation to the validation set
valid_data_gen = ImageDataGenerator(preprocessing_function=preprocess_input)
valid_data_flow = valid_data_gen.flow_from_dataframe(
dataframe=valid_df,
target_size=target_size,
directory=_valid_dir,
batch_size=batch_size,
class_mode='multi_output' if multi_output_mode else 'binary',
x_col='filename',
y_col=['label', 'score'] if multi_output_mode else 'label',
)
test_data_gen = ImageDataGenerator(preprocessing_function=preprocess_input)
test_data_flow = test_data_gen.flow_from_dataframe(
dataframe=test_df,
target_size=target_size,
directory=_test_dir,
batch_size=batch_size,
class_mode='multi_output' if multi_output_mode else 'binary',
x_col='filename',
y_col=['label', 'score'] if multi_output_mode else 'label',
)
return train_data_flow, valid_data_flow, test_data_flow
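
def _usage_sketch():
    # Usage sketch, not part of the original module; the directory names are
    # hypothetical. Each directory is expected to contain the matching CSV
    # ('train.csv', 'valid.csv', 'test.csv') plus the image files referenced
    # by its 'filename' column.
    return data_generator_flow(
        'data/train', 'data/valid', 'data/test',
        batch_size=16, target_size=(224, 224), multi_output_mode=False)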
| 34.404762
| 80
| 0.664014
| 367
| 2,890
| 4.861035
| 0.275204
| 0.073991
| 0.075673
| 0.057175
| 0.447309
| 0.447309
| 0.339686
| 0.201794
| 0.201794
| 0.201794
| 0
| 0.012374
| 0.244983
| 2,890
| 83
| 81
| 34.819277
| 0.805225
| 0.124913
| 0
| 0.254237
| 0
| 0
| 0.064019
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.016949
| false
| 0
| 0.118644
| 0
| 0.152542
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
3eedf65adf2ccf1a08d61e0dec0f3caf4fa9559f
| 985
|
py
|
Python
|
twitch_the_universim_chat/views.py
|
gaelfargeas/twitch_universim_streamer_chat
|
4773bf30e6aab3d9f950ba027e7aa3e51278428c
|
[
"BSD-3-Clause"
] | null | null | null |
twitch_the_universim_chat/views.py
|
gaelfargeas/twitch_universim_streamer_chat
|
4773bf30e6aab3d9f950ba027e7aa3e51278428c
|
[
"BSD-3-Clause"
] | null | null | null |
twitch_the_universim_chat/views.py
|
gaelfargeas/twitch_universim_streamer_chat
|
4773bf30e6aab3d9f950ba027e7aa3e51278428c
|
[
"BSD-3-Clause"
] | null | null | null |
from django.shortcuts import render, redirect
from django.utils.datastructures import MultiValueDictKeyError
def index(request):
return render(request, "chat.html", {})
def logged(request):
try:
bot_name = request.POST["bot_name"]
streamer_name = request.POST["streamer_name"]
stream_token = request.POST["stream_token"]
return render(
request,
"chat_logged.html",
{
"streamer_name": streamer_name,
"stream_token": stream_token,
"bot_name": bot_name,
},
)
except MultiValueDictKeyError:
return redirect("/")
def redirect_style_css(request):
response = redirect("/static/css/styles.css")
print("redirect to /static/css/styles.css")
return response
def redirect_favicon(request):
response = redirect("/static/images/favicon.ico")
print("redirect to /static/images/favicon.ico")
return response
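
# Illustrative sketch, not part of the original app: a minimal URL
# configuration for these views, assuming Django's standard path() routing.
# In a real project this would live in urls.py; the route strings below are
# assumptions.
from django.urls import path

sketch_urlpatterns = [
    path('', index),
    path('logged', logged),
    path('styles.css', redirect_style_css),
    path('favicon.ico', redirect_favicon),
]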
| 27.361111
| 62
| 0.631472
| 103
| 985
| 5.883495
| 0.330097
| 0.046205
| 0.062706
| 0.075908
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.260914
| 985
| 36
| 63
| 27.361111
| 0.832418
| 0
| 0
| 0.071429
| 0
| 0
| 0.21501
| 0.097363
| 0
| 0
| 0
| 0
| 0
| 1
| 0.142857
| false
| 0
| 0.071429
| 0.035714
| 0.392857
| 0.071429
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
3eef9508412716c264b3b444bb752f75ff044dba
| 1,480
|
py
|
Python
|
numba/exttypes/tests/test_extension_attributes.py
|
liuzhenhai/numba
|
855a2b262ae3d82bd6ac1c3e1c0acb36ee2e2acf
|
[
"BSD-2-Clause"
] | 1
|
2015-01-29T06:52:36.000Z
|
2015-01-29T06:52:36.000Z
|
numba/exttypes/tests/test_extension_attributes.py
|
shiquanwang/numba
|
a41c85fdd7d6abf8ea1ebe9116939ddc2217193b
|
[
"BSD-2-Clause"
] | null | null | null |
numba/exttypes/tests/test_extension_attributes.py
|
shiquanwang/numba
|
a41c85fdd7d6abf8ea1ebe9116939ddc2217193b
|
[
"BSD-2-Clause"
] | null | null | null |
"""
Test class attributes.
"""
import numba
from numba import *
from numba.testing.test_support import parametrize, main
def make_base(compiler):
@compiler
class Base(object):
value1 = double
value2 = int_
@void(int_, double)
def __init__(self, value1, value2):
self.value1 = value1
self.value2 = value2
@void(int_)
def setvalue(self, value):
self.value1 = value
@double()
def getvalue1(self):
return self.value1
return Base
def make_derived(compiler):
Base = make_base(compiler)
@compiler
class Derived(Base):
value3 = float_
@void(int_)
def setvalue(self, value):
self.value3 = value
return Base, Derived
#------------------------------------------------------------------------
# Tests
#------------------------------------------------------------------------
@parametrize(jit, autojit)
def test_baseclass_attrs(compiler):
Base = make_base(compiler)
assert Base(10, 11.0).value1 == 10.0
assert Base(10, 11.0).value2 == 11
obj = Base(10, 11.0)
obj.setvalue(12)
assert obj.getvalue1() == 12.0
@parametrize(jit) #, autojit)
def test_derivedclass_attrs(compiler):
Base, Derived = make_derived(compiler)
obj = Derived(10, 11.0)
obj.setvalue(9)
assert obj.value3 == 9.0
if __name__ == '__main__':
# test_derivedclass_attrs(autojit)
main()
| 20.555556
| 73
| 0.551351
| 161
| 1,480
| 4.888199
| 0.273292
| 0.050826
| 0.025413
| 0.034308
| 0.35324
| 0.07878
| 0.07878
| 0
| 0
| 0
| 0
| 0.045167
| 0.252027
| 1,480
| 71
| 74
| 20.84507
| 0.665763
| 0.145946
| 0
| 0.181818
| 0
| 0
| 0.00639
| 0
| 0
| 0
| 0
| 0
| 0.090909
| 1
| 0.181818
| false
| 0
| 0.068182
| 0.022727
| 0.431818
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
3ef2102b67623964df7f8e5b43fb31855632c83c
| 1,386
|
py
|
Python
|
tests/plugins/pull/test_poll.py
|
HazardDede/pnp
|
469ca17254dcca1a4eefe0dc5ac574692a9ab38e
|
[
"MIT"
] | 4
|
2018-10-07T11:32:00.000Z
|
2019-04-23T09:34:23.000Z
|
tests/plugins/pull/test_poll.py
|
HazardDede/pnp
|
469ca17254dcca1a4eefe0dc5ac574692a9ab38e
|
[
"MIT"
] | null | null | null |
tests/plugins/pull/test_poll.py
|
HazardDede/pnp
|
469ca17254dcca1a4eefe0dc5ac574692a9ab38e
|
[
"MIT"
] | 1
|
2019-08-12T19:56:10.000Z
|
2019-08-12T19:56:10.000Z
|
import time
from datetime import datetime
import pytest
from pnp.plugins.pull import StopPollingError
from pnp.plugins.pull.simple import CustomPolling
from . import make_runner, start_runner
@pytest.mark.asyncio
async def test_poll():
events = []
def callback(plugin, payload):
events.append(payload)
def poll():
return datetime.now()
dut = CustomPolling(name='pytest', interval="1s", scheduled_callable=poll)
assert not dut.is_cron
assert dut._poll_interval == 1
runner = await make_runner(dut, callback)
async with start_runner(runner):
time.sleep(3)
assert len(events) >= 2
@pytest.mark.asyncio
async def test_poll_for_aborting():
events = []
def callback(plugin, payload):
events.append(payload)
def poll():
raise StopPollingError()
runner = await make_runner(CustomPolling(name='pytest', interval="1s", scheduled_callable=poll), callback)
async with start_runner(runner):
time.sleep(1)
assert len(events) == 0
def test_poll_with_cron_expression():
from cronex import CronExpression
def poll():
pass
dut = CustomPolling(name='pytest', interval="*/1 * * * *", scheduled_callable=poll)
assert dut.is_cron
assert isinstance(dut._cron_interval, CronExpression)
assert dut._cron_interval.string_tab == ['*/1', '*', '*', '*', '*']
| 24.75
| 110
| 0.683983
| 169
| 1,386
| 5.455621
| 0.325444
| 0.032538
| 0.035792
| 0.100868
| 0.443601
| 0.403471
| 0.403471
| 0.331887
| 0.121475
| 0.121475
| 0
| 0.008123
| 0.200577
| 1,386
| 55
| 111
| 25.2
| 0.824007
| 0
| 0
| 0.333333
| 0
| 0
| 0.02886
| 0
| 0
| 0
| 0
| 0
| 0.179487
| 1
| 0.153846
| false
| 0.025641
| 0.179487
| 0.025641
| 0.358974
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
3ef60e720154164bc72b950006e65765140586cd
| 860
|
py
|
Python
|
mlib/web/shadow_lib.py
|
mgroth0/mlib
|
0442ed51eab417b6972f885605afd351892a3a9a
|
[
"MIT"
] | 1
|
2020-06-16T17:26:45.000Z
|
2020-06-16T17:26:45.000Z
|
mlib/web/shadow_lib.py
|
mgroth0/mlib
|
0442ed51eab417b6972f885605afd351892a3a9a
|
[
"MIT"
] | null | null | null |
mlib/web/shadow_lib.py
|
mgroth0/mlib
|
0442ed51eab417b6972f885605afd351892a3a9a
|
[
"MIT"
] | null | null | null |
from mlib.proj.struct import Project
from mlib.web.html import HTMLPage, Hyperlink, HTMLImage
SKIPPED_SOURCE = [
'@log_invokation',
'global DOC',
'@staticmethod'
]
def scipy_doc_url(funname): return f'https://docs.scipy.org/doc/scipy/reference/generated/{funname}.html'
FUN_LINKS = {
'bilinear': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.signal.bilinear.html'
}
FUN_LINKS.update(
{fun.split('.')[-1]: scipy_doc_url(fun) for fun in [
'scipy.signal.filtfilt',
'scipy.signal.lfilter',
'scipy.signal.butter'
]}
)
def ShadowIndex(*pages):
return HTMLPage(
'index',
*[Hyperlink(page.rootpath, f"{page.rootpath}/{page.name}.html") for page in pages],
HTMLImage(Project.PYCALL_FILE, fix_abs_path=True),
HTMLImage(Project.PYDEPS_OUTPUT, fix_abs_path=True)
)
| 27.741935
| 105
| 0.676744
| 110
| 860
| 5.163636
| 0.5
| 0.077465
| 0.038732
| 0.059859
| 0.151408
| 0.151408
| 0.151408
| 0.151408
| 0
| 0
| 0
| 0.001406
| 0.173256
| 860
| 30
| 106
| 28.666667
| 0.797468
| 0
| 0
| 0
| 0
| 0.04
| 0.337209
| 0.061628
| 0
| 0
| 0
| 0
| 0
| 1
| 0.08
| false
| 0
| 0.08
| 0.08
| 0.2
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
3efa82e7a5854f87ab9bf7282fade9ac7afa8bff
| 3,607
|
py
|
Python
|
markdown-journal.py
|
fire-wally/markdown-notebook
|
8fe22f645d6aca65f5f02cf4a67993e809795396
|
[
"Apache-2.0"
] | null | null | null |
markdown-journal.py
|
fire-wally/markdown-notebook
|
8fe22f645d6aca65f5f02cf4a67993e809795396
|
[
"Apache-2.0"
] | null | null | null |
markdown-journal.py
|
fire-wally/markdown-notebook
|
8fe22f645d6aca65f5f02cf4a67993e809795396
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/local/bin/python3
import sys
import os
import shutil
import markdown
class Page(object):
def __init__(self, filename, mtime):
self.file_name = filename
self.modified_at = mtime
def main(argv):
if len(argv) != 3:
print("USAGE: markdown-journal.py source-dir output-dir")
return
source = argv[1]
dest = argv[2]
if not (os.path.isdir(source)):
print(source+ " is not a directory!")
return
if not (os.path.isdir(dest)):
print(dest + " is not a directory!")
return
print("Source directory is " + argv[1])
print("Output directory is " + argv[2])
clean_output(dest)
generate_output(source, dest)
def clean_output(dest):
print("Cleaning Output Directory")
for root, dirs, files in os.walk(dest, topdown=False):
for name in files:
os.remove(os.path.join(root, name))
for name in dirs:
os.rmdir(os.path.join(root, name))
def generate_output(source, dest):
files_written=[]
print("Creating .html Files in Output Directory")
for root, dirs, files in os.walk(source):
for name in files:
print(os.path.join(dest,name))
# Transform Markdown files to HTML; copy all other files as-is
if (name.endswith(".md")):
file_path = os.path.join(root, name)
new_file_name = generate_markdown(file_path, dest)
new_file = Page(new_file_name, os.path.getmtime(file_path))
files_written.append(new_file)
else:
shutil.copy(os.path.join(root, name), dest)
for name in dirs:
os.mkdir(os.path.join(dest, name))
# Now generate the index file
generate_index(files_written, dest)
def generate_index(files, dest_dir):
html = generate_index_html(files)
index_path = os.path.join(dest_dir, "index.html")
with open(index_path, "w+") as opened_file:
opened_file.write(html)
def generate_markdown(source_file, dest_dir):
'''generates a new html file in the dest directory, returns the name of the
newly-created file'''
md = ""
with open(source_file, 'r') as opened_file:
md = opened_file.read()
html = content_to_html(md)
new_name = os.path.split(source_file)[1].replace("md", "html")
new_path = os.path.join(dest_dir, new_name)
with open(new_path, "w+") as opened_file:
opened_file.write(html)
return new_name
def generate_index_html(pages):
with open("index-template.html") as template_file:
html_template = template_file.read()
alpha_page_list = "<ul>"
for page in pages:
alpha_page_list += "\n<li><a href='http://localhost/notes/{0}'>{0}</a></li>".format(page.file_name)
alpha_page_list += '\n</ul>'
recent_page_list = "<ul>"
for page in sorted(pages, key=lambda p: p.modified_at, reverse=True):
recent_page_list += "\n<li><a href='http://localhost/notes/{0}'>{0}</a></li>".format(page.file_name)
recent_page_list += "</ul>"
html_page = html_template.replace("{{PAGE_LIST_RECENT}}", recent_page_list) \
.replace("{{PAGE_LIST_ALPHA}}", alpha_page_list)
return html_page
def content_to_html(source_string):
with open("page-template.html") as template_file: #Assume in same directory as code
html_template = template_file.read()
page_fragment = markdown.markdown(source_string)
html_page = html_template.replace("{{PAGE_GOES_HERE}}", page_fragment)
return html_page
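
def _usage_sketch():
    # Usage sketch, not part of the original script; the paths are
    # hypothetical. Equivalent to running:
    #   python3 markdown-journal.py /home/user/notes /var/www/notes
    main(['markdown-journal.py', '/home/user/notes', '/var/www/notes'])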
if __name__ == "__main__":
main(sys.argv)
| 33.71028
| 108
| 0.640144
| 512
| 3,607
| 4.308594
| 0.230469
| 0.032638
| 0.036265
| 0.025385
| 0.329102
| 0.184044
| 0.119674
| 0.119674
| 0.119674
| 0.051677
| 0
| 0.003975
| 0.232881
| 3,607
| 107
| 109
| 33.71028
| 0.793278
| 0.065151
| 0
| 0.154762
| 0
| 0.02381
| 0.133671
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.095238
| false
| 0
| 0.047619
| 0
| 0.22619
| 0.095238
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
3efdff5f0d2a7d90af9b5f718370bdc45e63e120
| 5,829
|
py
|
Python
|
py/src/api_custo.py
|
Ennoriel/veille-pedagogique
|
63f368ad1faee2f6fca86fff68ccccc7ac89f81b
|
[
"FSFAP"
] | null | null | null |
py/src/api_custo.py
|
Ennoriel/veille-pedagogique
|
63f368ad1faee2f6fca86fff68ccccc7ac89f81b
|
[
"FSFAP"
] | null | null | null |
py/src/api_custo.py
|
Ennoriel/veille-pedagogique
|
63f368ad1faee2f6fca86fff68ccccc7ac89f81b
|
[
"FSFAP"
] | null | null | null |
from itertools import chain, combinations
from re import search
from urllib.parse import urlparse
from pymongo.errors import BulkWriteError
from tweepy import OAuthHandler, API
from yaml import load as yaml_load, BaseLoader
from objects.article import Article
from objects.hashtag import Hashtag
from objects.tweet import Tweet
from mongo.article_mongo import ArticleMongo
from mongo.hashtag_mongo import HashtagMongo
from mongo.theme_mongo import ThemeMongo
from mongo.tweet_mongo import TweetMongo
from utils.log_utils import dir_log
from utils.url_utils import unshorten
class ApiCusto:
def __init__(self):
conf = yaml_load(open("./../resources/credentials.yaml"), Loader=BaseLoader)["twitter_api"]
consumer_key = conf["consumer_key"]
consumer_secret = conf["consumer_secret"]
access_token = conf["access_token"]
access_token_secret = conf["access_token_secret"]
auth = OAuthHandler(consumer_key, consumer_secret)
auth.set_access_token(access_token, access_token_secret)
self.api = API(auth)
self.article_mongo = ArticleMongo()
self.tweet_mongo = TweetMongo()
self.hashtag_mongo = HashtagMongo()
self.theme_mongo = ThemeMongo()
self.articles = []
def fetch(self, fetch_local=True):
"""
Fetch tweets either locally or remotely
:param fetch_local: if true, retrieve tweet ids from a local file, otherwise search a hashtag on Twitter
:return: tweets
"""
return self.fetch_local() if fetch_local else self.fetch_remote()
def fetch_remote(self):
"""
Fetch tweets by hashtag from the Twitter API
:return:
"""
return self.api.search(q='#pedagogie', result_type='recent', tweet_mode='extended', lang='fr', count=20)
def fetch_local(self):
"""
Fetch locally saved tweet ids from the Twitter API
:return:
"""
tweet_ids_to_fetch = Tweet.get_saved_tweet_ids()
if tweet_ids_to_fetch:
return self.api.statuses_lookup(tweet_ids_to_fetch, tweet_mode='extended')
else:
return []
def parse(self, fetch_local):
"""
Gets statuses from Twitter, makes an article out of each link and downloads the article content
:param fetch_local: if true, retrieve tweet ids from a local file, otherwise search a hashtag on Twitter
"""
statuses = self.fetch(fetch_local)
for index_status, status in enumerate(statuses):
dir_log(0, index_status + 1, len(statuses))
# Drop tweets that are not in French
if status.__getattribute__("lang") != 'fr':
continue
# Drop tweets that have already been saved
if self.tweet_mongo.exists(status.__getattribute__("_json")["id"]):
continue
article_courants = []
_json = status.__getattribute__("_json")
urls = status.entities["urls"]
# Counts the URLs already indexed as articles, so that the tweet is still saved even though all of its
# URLs are already referenced. If the tweet has at least one URL not yet indexed as an article, it will be saved later
# TODO check whether this is really needed: since the tweet is attached to an article, it should already get saved
count_url_already_indexed = 0
for index_article, url in enumerate(urls):
dir_log(1, index_article + 1, len(urls))
print(' ' + str(_json["id"]))
unshorten_url = unshorten(url["expanded_url"])
# Drop duplicate URLs within a tweet
if unshorten_url in [a.url for a in article_courants]:
continue
# Drop URLs that link to other Twitter statuses
if search("^https://twitter.com/\\w+/status/\\d{19}", unshorten_url):
continue
# Drop URLs that point to site roots rather than to articles
url_path = urlparse(unshorten_url).path
if url_path == '' or url_path == '/':
continue
# If the URL points to an already referenced article, update it and move on to the next URL
if Article.update_article_if_exists(self.article_mongo, unshorten_url, _json["id"]):
count_url_already_indexed += 1
continue
# If the article is already referenced locally, update it in place
if unshorten_url in [article.url for article in self.articles]:
for article in self.articles:
if article.url == unshorten_url:
article.add_tweet(status)
break
continue
article_courant = Article.get_article_content(unshorten_url, status)
if not article_courant:
continue
article_courants.append(article_courant)
if count_url_already_indexed == len(urls):
self.tweet_mongo.saves_one(Tweet(status).get())
self.articles.extend(article_courants)
def save(self):
"""
Save articles, tweets and hashtags, and update themes
"""
if self.articles:
for article in self.articles:
print(str(article.get()))
# save articles
try:
self.article_mongo.saves_many([article.get() for article in self.articles])
except BulkWriteError as e:
print(e.details)
raise
# save tweets
tweets = list(chain.from_iterable([article.tweets for article in self.articles]))
self.tweet_mongo.saves_many([tweet.get() for tweet in tweets])
# save hashtags
hashtags = []
for article in self.articles:
hashtags.extend([theme for theme in article.not_indexed_theme_entries])
# clean duplicates
hashtags = list(dict.fromkeys(hashtags))
hashtags = [Hashtag(hashtag).get() for hashtag in hashtags]
if len(hashtags):
self.hashtag_mongo.saves_many(hashtags)
# update themes
for article in self.articles:
for [themeA, themeB] in combinations(article.indexed_theme_entries, 2):
self.theme_mongo.update_weight(themeA, themeB)
self.theme_mongo.update_weight(themeB, themeA)
def fetch_and_parse(self, fetch_local):
"""
Fetch statuses, parse them into articles and save the articles
:param fetch_local: if true, retrieve tweet ids from a local file, otherwise search a hashtag on Twitter
"""
self.parse(fetch_local)
self.save()
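
def _usage_sketch():
    # Usage sketch, not part of the original module: fetch recent '#pedagogie'
    # statuses from the Twitter API, build articles from their links and
    # persist everything to MongoDB. Assumes the credentials file exists and
    # MongoDB is reachable.
    api = ApiCusto()
    api.fetch_and_parse(fetch_local=False)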
| 31.33871
| 111
| 0.731515
| 828
| 5,829
| 4.977053
| 0.262077
| 0.026693
| 0.020383
| 0.027178
| 0.174472
| 0.099248
| 0.089784
| 0.055326
| 0.055326
| 0.055326
| 0
| 0.002299
| 0.179276
| 5,829
| 186
| 112
| 31.33871
| 0.859114
| 0.252016
| 0
| 0.115385
| 0
| 0
| 0.053187
| 0.007263
| 0
| 0
| 0
| 0.005376
| 0
| 1
| 0.067308
| false
| 0
| 0.144231
| 0
| 0.259615
| 0.028846
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
4108814557aaf45fa9bd2469a548f631f9648812
| 29,383
|
py
|
Python
|
pauxy/walkers/thermal.py
|
pauxy-qmc/pauxy
|
1da80284284769b59361c73cfa3c2d914c74a73f
|
[
"Apache-2.0"
] | 16
|
2020-08-05T17:17:17.000Z
|
2022-03-18T04:06:18.000Z
|
pauxy/walkers/thermal.py
|
pauxy-qmc/pauxy
|
1da80284284769b59361c73cfa3c2d914c74a73f
|
[
"Apache-2.0"
] | 4
|
2020-05-17T21:28:20.000Z
|
2021-04-22T18:05:50.000Z
|
pauxy/walkers/thermal.py
|
pauxy-qmc/pauxy
|
1da80284284769b59361c73cfa3c2d914c74a73f
|
[
"Apache-2.0"
] | 5
|
2020-05-18T01:03:18.000Z
|
2021-04-13T15:36:29.000Z
|
import copy
import cmath
import numpy
import scipy.linalg
from pauxy.estimators.thermal import greens_function, one_rdm_from_G, particle_number
from pauxy.estimators.mixed import local_energy
from pauxy.walkers.stack import PropagatorStack
from pauxy.walkers.walker import Walker
from pauxy.utils.linalg import regularise_matrix_inverse
from pauxy.utils.misc import update_stack, get_numeric_names
class ThermalWalker(Walker):
def __init__(self, system, trial, walker_opts={}, verbose=False):
Walker.__init__(self, system, trial, walker_opts=walker_opts)
self.num_slices = trial.num_slices
dtype = numpy.complex128
self.G = numpy.zeros(trial.dmat.shape, dtype=dtype)
self.nbasis = trial.dmat[0].shape[0]
self.stack_size = walker_opts.get('stack_size', None)
max_diff_diag = numpy.linalg.norm((numpy.diag(trial.dmat[0].diagonal())-trial.dmat[0]))
if max_diff_diag < 1e-10:
self.diagonal_trial = True
if verbose:
print("# Trial density matrix is diagonal.")
else:
self.diagonal_trial = False
if verbose:
print("# Trial density matrix is not diagonal.")
if self.stack_size == None:
self.stack_size = trial.stack_size
if (self.num_slices//self.stack_size)*self.stack_size != self.num_slices:
if verbose:
print("# Input stack size does not divide number of slices.")
self.stack_size = update_stack(self.stack_size, self.num_slices, verbose)
if self.stack_size > trial.stack_size:
if verbose:
print("# Walker stack size differs from that estimated from "
"trial density matrix.")
print("# Be careful. cond(BT)**stack_size: %10.3e."
%(trial.cond**self.stack_size))
self.stack_length = self.num_slices // self.stack_size
if verbose:
print("# Walker stack size: {}".format(self.stack_size))
self.lowrank = walker_opts.get('low_rank', False)
self.lowrank_thresh = walker_opts.get('low_rank_thresh', 1e-6)
if verbose:
print("# Using low rank trick: {}".format(self.lowrank))
self.stack = PropagatorStack(self.stack_size, trial.num_slices,
trial.dmat.shape[-1], dtype,
trial.dmat, trial.dmat_inv,
diagonal=self.diagonal_trial,
lowrank=self.lowrank,
thresh=self.lowrank_thresh)
# Initialise all propagators to the trial density matrix.
self.stack.set_all(trial.dmat)
self.greens_function_qr_strat(trial)
self.stack.G = self.G
self.M0 = numpy.array([scipy.linalg.det(self.G[0], check_finite=False),
scipy.linalg.det(self.G[1], check_finite=False)])
self.stack.ovlp = numpy.array([1.0/self.M0[0], 1.0/self.M0[1]])
# temporary storage for stacks...
I = numpy.identity(system.nbasis, dtype=dtype)
One = numpy.ones(system.nbasis, dtype=dtype)
self.Tl = numpy.array([I, I])
self.Ql = numpy.array([I, I])
self.Dl = numpy.array([One, One])
self.Tr = numpy.array([I, I])
self.Qr = numpy.array([I, I])
self.Dr = numpy.array([One, One])
self.hybrid_energy = 0.0
if verbose:
eloc = self.local_energy(system)
P = one_rdm_from_G(self.G)
nav = particle_number(P)
print("# Initial walker energy: {} {} {}".format(*eloc))
print("# Initial walker electron number: {}".format(nav))
# self.buff_names = ['weight', 'G', 'unscaled_weight', 'phase', 'Tl',
# 'Ql', 'Dl', 'Tr', 'Qr', 'Dr', 'M0']
self.buff_names, self.buff_size = get_numeric_names(self.__dict__)
# self.buff_size = (self.G.size+3+self.Tl.size+2+
# self.Ql.size+self.Dl.size+self.Tr.size+self.Qr.size
# +self.Dr.size)
def greens_function(self, trial, slice_ix=None, inplace=True):
if self.lowrank:
return self.stack.G
else:
return self.greens_function_qr_strat(trial, slice_ix=slice_ix,
inplace=inplace)
def greens_function_svd(self, trial, slice_ix=None, inplace=True):
if slice_ix == None:
slice_ix = self.stack.time_slice
bin_ix = slice_ix // self.stack.stack_size
# For final time slice want first block to be the rightmost (for energy
# evaluation).
if bin_ix == self.stack.nbins:
bin_ix = -1
if inplace:
G = None
else:
G = numpy.zeros(self.G.shape, self.G.dtype)
for spin in [0, 1]:
# Need to construct the product A(l) = B_l B_{l-1}..B_L...B_{l+1}
# in stable way. Iteratively construct SVD decompositions starting
# from the rightmost (product of) propagator(s).
B = self.stack.get((bin_ix+1)%self.stack.nbins)
(U1, S1, V1) = scipy.linalg.svd(B[spin])
for i in range(2, self.stack.nbins+1):
ix = (bin_ix + i) % self.stack.nbins
B = self.stack.get(ix)
T1 = numpy.dot(B[spin], U1)
# todo optimise
T2 = numpy.dot(T1, numpy.diag(S1))
(U1, S1, V) = scipy.linalg.svd(T2)
V1 = numpy.dot(V, V1)
A = numpy.dot(U1.dot(numpy.diag(S1)), V1)
# Final SVD decomposition to construct G(l) = [I + A(l)]^{-1}.
# Care needs to be taken when adding the identity matrix.
T3 = numpy.dot(U1.conj().T, V1.conj().T) + numpy.diag(S1)
(U2, S2, V2) = scipy.linalg.svd(T3)
U3 = numpy.dot(U1, U2)
D3 = numpy.diag(1.0/S2)
V3 = numpy.dot(V2, V1)
# G(l) = (U3 S2 V3)^{-1}
# = V3^{\dagger} D3 U3^{\dagger}
if inplace:
# self.G[spin] = (V3inv).dot(U3.conj().T)
self.G[spin] = (V3.conj().T).dot(D3).dot(U3.conj().T)
else:
# G[spin] = (V3inv).dot(U3.conj().T)
G[spin] = (V3.conj().T).dot(D3).dot(U3.conj().T)
return G
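
def _svd_product_sketch():
    # Illustrative sketch, not part of the original module: the loop above
    # stabilises the product B_L ... B_1 by re-factorising with an SVD after
    # every multiplication instead of forming the raw product. Minimal version:
    import numpy
    import scipy.linalg
    mats = [numpy.random.rand(4, 4) for _ in range(8)]
    (U, S, V) = scipy.linalg.svd(mats[0])
    for B in mats[1:]:
        T = numpy.dot(numpy.dot(B, U), numpy.diag(S))
        (U, S, Vnew) = scipy.linalg.svd(T)
        V = numpy.dot(Vnew, V)
    # U.dot(numpy.diag(S)).dot(V) now reproduces mats[-1] ... mats[0] stably.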
def greens_function_qr(self, trial, slice_ix=None, inplace=True):
if (slice_ix == None):
slice_ix = self.stack.time_slice
bin_ix = slice_ix // self.stack.stack_size
# For final time slice want first block to be the rightmost (for energy
# evaluation).
if bin_ix == self.stack.nbins:
bin_ix = -1
if not inplace:
G = numpy.zeros(self.G.shape, self.G.dtype)
else:
G = None
for spin in [0, 1]:
# Need to construct the product A(l) = B_l B_{l-1}..B_L...B_{l+1}
# in stable way. Iteratively construct SVD decompositions starting
# from the rightmost (product of) propagator(s).
B = self.stack.get((bin_ix+1)%self.stack.nbins)
(U1, V1) = scipy.linalg.qr(B[spin], pivoting=False, check_finite=False)
for i in range(2, self.stack.nbins+1):
ix = (bin_ix + i) % self.stack.nbins
B = self.stack.get(ix)
T1 = numpy.dot(B[spin], U1)
(U1, V) = scipy.linalg.qr(T1, pivoting=False, check_finite=False)
V1 = numpy.dot(V, V1)
# Final QR decomposition to construct G(l) = [I + A(l)]^{-1}.
# Care needs to be taken when adding the identity matrix.
V1inv = scipy.linalg.solve_triangular(V1, numpy.identity(V1.shape[0]))
T3 = numpy.dot(U1.conj().T, V1inv) + numpy.identity(V1.shape[0])
(U2, V2) = scipy.linalg.qr(T3, pivoting=False, check_finite=False)
U3 = numpy.dot(U1, U2)
V3 = numpy.dot(V2, V1)
V3inv = scipy.linalg.solve_triangular(V3, numpy.identity(V3.shape[0]))
# G(l) = (U3 V3)^{-1}
#      = V3^{-1} U3^{\dagger}
if inplace:
self.G[spin] = (V3inv).dot(U3.conj().T)
else:
G[spin] = (V3inv).dot(U3.conj().T)
return G
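# Sketch of the triangular-solve trick used above (standalone, sizes are
# arbitrary): inverting an upper-triangular factor with solve_triangular is
# cheaper and better conditioned than a general-purpose inverse.
def _triangular_inverse_demo(n=4):
    import numpy
    import scipy.linalg
    R = numpy.triu(numpy.random.rand(n, n)) + n * numpy.eye(n)  # well conditioned
    Rinv = scipy.linalg.solve_triangular(R, numpy.identity(n))
    assert numpy.allclose(R.dot(Rinv), numpy.eye(n))
    return Rinv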
def compute_left_right(self, center_ix):
# Use Stratification method (DOI 10.1109/IPDPS.2012.37)
# B(L) .... B(1)
for spin in [0, 1]:
# right bit
# B(right) ... B(1)
if (center_ix > 0):
# print ("center_ix > 0")
B = self.stack.get(0)
(self.Qr[spin], R1, P1) = scipy.linalg.qr(B[spin], pivoting=True, check_finite=False)
# Form D matrices
self.Dr[spin] = (R1.diagonal())
D1inv = (1.0/R1.diagonal())
self.Tr[spin] = numpy.einsum('i,ij->ij',D1inv, R1)
# now permute them
self.Tr[spin][:,P1] = self.Tr[spin][:,range(self.nbasis)]
for ix in range(1, center_ix):
B = self.stack.get(ix)
C2 = numpy.einsum('ij,j->ij',
numpy.dot(B[spin], self.Qr[spin]),
self.Dr[spin])
(self.Qr[spin], R1, P1) = scipy.linalg.qr(C2, pivoting=True, check_finite=False)
# Compute D matrices
D1inv = (1.0/R1.diagonal())
self.Dr[spin] = (R1.diagonal())
# smarter permutation
# D^{-1} * R
tmp = numpy.einsum('i,ij->ij',D1inv, R1)
# D^{-1} * R * P^T
tmp[:,P1] = tmp[:,range(self.nbasis)]
# D^{-1} * R * P^T * T
self.Tr[spin] = numpy.dot(tmp, self.Tr[spin])
# left bit
# B(l) ... B(left)
if (center_ix < self.stack.nbins-1):
# print("center_ix < self.stack.nbins-1 first")
# We will assume that B matrices are all diagonal for left....
B = self.stack.get(center_ix+1)
self.Dl[spin] = (B[spin].diagonal())
D1inv = (1.0/B[spin].diagonal())
self.Ql[spin] = numpy.identity(B[spin].shape[0])
self.Tl[spin] = numpy.identity(B[spin].shape[0])
for ix in range(center_ix+2, self.stack.nbins):
# print("center_ix < self.stack.nbins-1 first inner loop")
B = self.stack.get(ix)
C2 = (numpy.einsum('ii,i->i',B[spin],self.Dl[spin]))
self.Dl[spin] = C2
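# The QDT factorisation driving compute_left_right, as a standalone sketch
# (random square matrix, made-up size): column-pivoted QR gives A P = Q R;
# pulling the diagonal out of R and undoing the pivot yields A = Q D T,
# exactly the identity the unit test at the bottom of this file verifies.
def _pivoted_qr_qdt_demo(n=4):
    import numpy
    import scipy.linalg
    A = numpy.random.rand(n, n)
    Q, R, P = scipy.linalg.qr(A, pivoting=True, check_finite=False)
    D = R.diagonal()
    T = numpy.einsum('i,ij->ij', 1.0 / D, R)  # D^{-1} R
    T[:, P] = T[:, range(n)]                  # apply P^T on the right
    assert numpy.allclose(A, (Q * D).dot(T))  # A = Q diag(D) T
    return Q, D, T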
def compute_right(self, center_ix):
# Use Stratification method (DOI 10.1109/IPDPS.2012.37)
# B(L) .... B(1)
for spin in [0, 1]:
# right bit
# B(right) ... B(1)
if (center_ix > 0):
# print ("center_ix > 0")
B = self.stack.get(0)
(self.Qr[spin], R1, P1) = scipy.linalg.qr(B[spin], pivoting=True, check_finite=False)
# Form D matrices
self.Dr[spin] = (R1.diagonal())
D1inv = (1.0/R1.diagonal())
self.Tr[spin] = numpy.einsum('i,ij->ij',D1inv, R1)
# now permute them
self.Tr[spin][:,P1] = self.Tr[spin][:,range(self.nbasis)]
for ix in range(1, center_ix):
B = self.stack.get(ix)
C2 = numpy.einsum('ij,j->ij',
numpy.dot(B[spin], self.Qr[spin]),
self.Dr[spin])
(self.Qr[spin], R1, P1) = scipy.linalg.qr(C2, pivoting=True, check_finite=False)
# Compute D matrices
D1inv = (1.0/R1.diagonal())
self.Dr[spin] = (R1.diagonal())
# smarter permutation
# D^{-1} * R
tmp = numpy.einsum('i,ij->ij',D1inv, R1)
# D^{-1} * R * P^T
tmp[:,P1] = tmp[:,range(self.nbasis)]
# D^{-1} * R * P^T * T
self.Tr[spin] = numpy.dot(tmp, self.Tr[spin])
def compute_left(self, center_ix):
# Use Stratification method (DOI 10.1109/IPDPS.2012.37)
# B(L) .... B(1)
for spin in [0, 1]:
# left bit
# B(l) ... B(left)
if (center_ix < self.stack.nbins-1):
# print("center_ix < self.stack.nbins-1 first")
# We will assume that B matrices are all diagonal for left....
B = self.stack.get(center_ix+1)
self.Dl[spin] = (B[spin].diagonal())
self.Ql[spin] = numpy.identity(B[spin].shape[0])
self.Tl[spin] = numpy.identity(B[spin].shape[0])
for ix in range(center_ix+2, self.stack.nbins):
# print("center_ix < self.stack.nbins-1 first inner loop")
B = self.stack.get(ix)
C2 = (numpy.einsum('ii,i->i',B[spin],self.Dl[spin]))
self.Dl[spin] = C2  # C2 is already the 1-D diagonal; calling .diagonal() on it would raise
def greens_function_left_right(self, center_ix, inplace=False, thresh = 1e-6):
assert(self.diagonal_trial)
if not inplace:
G = numpy.zeros(self.G.shape, self.G.dtype)
else:
G = None
mL = self.G.shape[1]
mR = self.G.shape[1]
mT = self.G.shape[1]
Bc = self.stack.get(center_ix)
nbsf = Bc.shape[1]
# It goes from right to left and we sample (I + L*B*R) in the end
for spin in [0,1]:
if (center_ix > 0): # there exists right bit
mR = len(self.Dr[spin][numpy.abs(self.Dr[spin])>thresh])
Ccr = numpy.einsum('ij,j->ij',
numpy.dot(Bc[spin],self.Qr[spin][:,:mR]),
self.Dr[spin][:mR]) # N x mR
(Qlcr, Rlcr, Plcr) = scipy.linalg.qr(Ccr, pivoting=True, check_finite=False)
Dlcr = Rlcr[:mR,:mR].diagonal() # mR
Dinv = 1.0/Dlcr # mR
tmp = numpy.einsum('i,ij->ij',Dinv[:mR], Rlcr[:mR,:mR]) # mR, mR x mR -> mR x mR
tmp[:,Plcr] = tmp[:,range(mR)]
Tlcr = numpy.dot(tmp, self.Tr[spin][:mR,:]) # mR x N
else:
(Qlcr, Rlcr, Plcr) = scipy.linalg.qr(Bc[spin], pivoting=True, check_finite=False)
# Form D matrices
Dlcr = Rlcr.diagonal()
mR = len(Dlcr[numpy.abs(Dlcr) > thresh])
Dinv = 1.0/Rlcr.diagonal()
Tlcr = numpy.einsum('i,ij->ij',Dinv[:mR], Rlcr[:mR,:]) # mR x N
Tlcr[:,Plcr] = Tlcr[:,range(self.nbasis)] # mR x N
if (center_ix < self.stack.nbins-1): # there exists left bit
# assume left stack is all diagonal (i.e., QDT = diagonal -> Q and T are identity)
Clcr = numpy.einsum('i,ij->ij',
self.Dl[spin],
numpy.einsum('ij,j->ij',Qlcr[:,:mR], Dlcr[:mR])) # N x mR
(Qlcr, Rlcr, Plcr) = scipy.linalg.qr(Clcr, pivoting=True, check_finite=False) # N x N, mR x mR
Dlcr = Rlcr.diagonal()
Dinv = 1.0/Dlcr
mT = len(Dlcr[numpy.abs(Dlcr) > thresh])
tmp = numpy.einsum('i,ij->ij',Dinv[:mT], Rlcr[:mT,:])
tmp[:,Plcr] = tmp[:,range(mR)] # mT x mR
Tlcr = numpy.dot(tmp, Tlcr) # mT x N
else:
mT = mR
# D = Ds Db^{-1}
Db = numpy.zeros(mT, Bc[spin].dtype)
Ds = numpy.zeros(mT, Bc[spin].dtype)
for i in range(mT):
absDlcr = abs(Dlcr[i])
if absDlcr > 1.0:
Db[i] = 1.0 / absDlcr
Ds[i] = numpy.sign(Dlcr[i])
else:
Db[i] = 1.0
Ds[i] = Dlcr[i]
if (mT == nbsf): # No need for Woodbury
T1inv = scipy.linalg.inv(Tlcr, check_finite=False)
# C = (Db Q^{-1}T^{-1}+Ds)
C = numpy.dot(
numpy.einsum('i,ij->ij',Db, Qlcr.conj().T),
T1inv) + numpy.diag(Ds)
Cinv = scipy.linalg.inv(C, check_finite = False)
# Then G = T^{-1} C^{-1} Db Q^{-1}
# Q is unitary.
if inplace:
self.G[spin] = numpy.dot(numpy.dot(T1inv, Cinv),
numpy.einsum('i,ij->ij',Db, Qlcr.conj().T))
# return # This seems to change the answer WHY??
else:
G[spin] = numpy.dot(numpy.dot(T1inv, Cinv),
numpy.einsum('i,ij->ij',Db, Qlcr.conj().T))
else: # Use Woodbury
TQ = Tlcr.dot(Qlcr[:,:mT])
TQinv = scipy.linalg.inv(TQ, check_finite=False)
tmp = scipy.linalg.inv(numpy.einsum('ij,j->ij',TQinv, Db) + numpy.diag(Ds), check_finite=False)
A = numpy.einsum("i,ij->ij", Db, tmp.dot(TQinv))
if inplace:
self.G[spin] = numpy.eye(nbsf, dtype=Bc[spin].dtype) - Qlcr[:,:mT].dot(numpy.diag(Dlcr[:mT])).dot(A).dot(Tlcr)
else:
G[spin] = numpy.eye(nbsf, dtype=Bc[spin].dtype) - Qlcr[:,:mT].dot(numpy.diag(Dlcr[:mT])).dot(A).dot(Tlcr)
# print(mR,mT,nbsf)
# print("ref: mL, mR, mT = {}, {}, {}".format(mL, mR, mT))
return G
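# The low-rank branch above leans on the Woodbury identity; here is a minimal
# sanity check of the identity itself, with arbitrary made-up dimensions:
# (I + U V)^{-1} = I - U (I + V U)^{-1} V.
def _woodbury_identity_demo(n=6, m=2):
    import numpy
    import scipy.linalg
    U = numpy.random.rand(n, m)
    V = numpy.random.rand(m, n)
    lhs = scipy.linalg.inv(numpy.eye(n) + U.dot(V))
    rhs = numpy.eye(n) - U.dot(scipy.linalg.inv(numpy.eye(m) + V.dot(U))).dot(V)
    assert numpy.allclose(lhs, rhs)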
def greens_function_left_right_no_truncation(self, center_ix, inplace=False):
if not inplace:
G = numpy.zeros(self.G.shape, self.G.dtype)
else:
G = None
Bc = self.stack.get(center_ix)
for spin in [0,1]:
if (center_ix > 0): # there exists right bit
# print("center_ix > 0 second")
Ccr = numpy.einsum('ij,j->ij',
numpy.dot(Bc[spin],self.Qr[spin]),
self.Dr[spin])
(Qlcr, Rlcr, Plcr) = scipy.linalg.qr(Ccr, pivoting=True, check_finite=False)
Dlcr = Rlcr.diagonal()
Dinv = 1.0/Rlcr.diagonal()
tmp = numpy.einsum('i,ij->ij',Dinv, Rlcr)
tmp[:,Plcr] = tmp[:,range(self.nbasis)]
Tlcr = numpy.dot(tmp, self.Tr[spin])
else:
# print("center_ix > 0 else second")
(Qlcr, Rlcr, Plcr) = scipy.linalg.qr(Bc[spin], pivoting=True, check_finite=False)
# Form D matrices
Dlcr = Rlcr.diagonal()
Dinv = 1.0/Rlcr.diagonal()
Tlcr = numpy.einsum('i,ij->ij',Dinv, Rlcr)
Tlcr[:,Plcr] = Tlcr[:,range(self.nbasis)]
if (center_ix < self.stack.nbins-1): # there exists left bit
# print("center_ix < self.stack.nbins-1 second")
# assume left stack is all diagonal
Clcr = numpy.einsum('i,ij->ij',
self.Dl[spin],
numpy.einsum('ij,j->ij',Qlcr, Dlcr))
(Qlcr, Rlcr, Plcr) = scipy.linalg.qr(Clcr, pivoting=True, check_finite=False)
Dlcr = Rlcr.diagonal()
Dinv = 1.0/Rlcr.diagonal()
tmp = numpy.einsum('i,ij->ij',Dinv, Rlcr)
tmp[:,Plcr] = tmp[:,range(self.nbasis)]
Tlcr = numpy.dot(tmp, Tlcr)
# print("Dlcr = {}".format(Dlcr))
# G^{-1} = 1+A = 1+QDT = Q (Q^{-1}T^{-1}+D) T
# Write D = Db^{-1} Ds
# Then G^{-1} = Q Db^{-1}(Db Q^{-1}T^{-1}+Ds) T
Db = numpy.zeros(Bc[spin].shape[-1], Bc[spin].dtype)
Ds = numpy.zeros(Bc[spin].shape[-1], Bc[spin].dtype)
for i in range(Db.shape[0]):
absDlcr = abs(Dlcr[i])
if (absDlcr > 1.0):
Db[i] = 1.0 / absDlcr
Ds[i] = numpy.sign(Dlcr[i])
else:
Db[i] = 1.0
Ds[i] = Dlcr[i]
T1inv = scipy.linalg.inv(Tlcr, check_finite=False)
# C = (Db Q^{-1}T^{-1}+Ds)
C = numpy.dot(
numpy.einsum('i,ij->ij',Db, Qlcr.conj().T),
T1inv) + numpy.diag(Ds)
Cinv = scipy.linalg.inv(C, check_finite = False)
# Then G = T^{-1} C^{-1} Db Q^{-1}
# Q is unitary.
if inplace:
self.G[spin] = numpy.dot(numpy.dot(T1inv, Cinv),
numpy.einsum('i,ij->ij',Db, Qlcr.conj().T))
else:
G[spin] = numpy.dot(numpy.dot(T1inv, Cinv),
numpy.einsum('i,ij->ij',Db, Qlcr.conj().T))
return G
def greens_function_qr_strat(self, trial, slice_ix=None, inplace=True):
# Use Stratification method (DOI 10.1109/IPDPS.2012.37)
if slice_ix is None:
slice_ix = self.stack.time_slice
bin_ix = slice_ix // self.stack.stack_size
# For final time slice want first block to be the rightmost (for energy
# evaluation).
if bin_ix == self.stack.nbins:
bin_ix = -1
if not inplace:
G = numpy.zeros(self.G.shape, self.G.dtype)
else:
G = None
for spin in [0, 1]:
# Need to construct the product A(l) = B_l B_{l-1}..B_L...B_{l+1} in
# stable way. Iteratively construct column pivoted QR decompositions
# (A = QDT) starting from the rightmost (product of) propagator(s).
B = self.stack.get((bin_ix+1)%self.stack.nbins)
(Q1, R1, P1) = scipy.linalg.qr(B[spin], pivoting=True,
check_finite=False)
# Form D matrices
D1 = numpy.diag(R1.diagonal())
D1inv = numpy.diag(1.0/R1.diagonal())
T1 = numpy.einsum('ii,ij->ij', D1inv, R1)
# permute them
T1[:,P1] = T1[:,range(self.nbasis)]
for i in range(2, self.stack.nbins+1):
ix = (bin_ix + i) % self.stack.nbins
B = self.stack.get(ix)
C2 = numpy.dot(numpy.dot(B[spin], Q1), D1)
(Q1, R1, P1) = scipy.linalg.qr(C2, pivoting=True,
check_finite=False)
# Compute D matrices
D1inv = numpy.diag(1.0/R1.diagonal())
D1 = numpy.diag(R1.diagonal())
tmp = numpy.einsum('ii,ij->ij',D1inv, R1)
tmp[:,P1] = tmp[:,range(self.nbasis)]
T1 = numpy.dot(tmp, T1)
# G^{-1} = 1+A = 1+QDT = Q (Q^{-1}T^{-1}+D) T
# Write D = Db^{-1} Ds
# Then G^{-1} = Q Db^{-1}(Db Q^{-1}T^{-1}+Ds) T
Db = numpy.zeros(B[spin].shape, B[spin].dtype)
Ds = numpy.zeros(B[spin].shape, B[spin].dtype)
for i in range(Db.shape[0]):
absDlcr = abs(D1[i,i])  # test the magnitude of D1; Db is still all zeros here
if absDlcr > 1.0:
Db[i,i] = 1.0 / absDlcr
Ds[i,i] = numpy.sign(D1[i,i])
else:
Db[i,i] = 1.0
Ds[i,i] = D1[i,i]
T1inv = scipy.linalg.inv(T1, check_finite = False)
# C = (Db Q^{-1}T^{-1}+Ds)
C = numpy.dot(numpy.einsum('ii,ij->ij',Db, Q1.conj().T), T1inv) + Ds
Cinv = scipy.linalg.inv(C, check_finite=False)
# Then G = T^{-1} C^{-1} Db Q^{-1}
# Q is unitary.
if inplace:
self.G[spin] = numpy.dot(numpy.dot(T1inv, Cinv),
numpy.einsum('ii,ij->ij', Db, Q1.conj().T))
else:
G[spin] = numpy.dot(numpy.dot(T1inv, Cinv),
numpy.einsum('ii,ij->ij', Db, Q1.conj().T))
return G
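# The Db/Ds loop above splits the diagonal so that neither factor overflows
# when the identity is added: d = Ds / Db with |Ds| <= 1 and Db <= 1.
# Vectorised sketch of the same split on a 1-D array of diagonal entries:
def _scale_split_demo(d):
    import numpy
    big = numpy.abs(d) > 1.0
    Db = numpy.where(big, 1.0 / numpy.abs(d), 1.0)
    Ds = numpy.where(big, numpy.sign(d), d)
    assert numpy.allclose(Ds / Db, d)
    return Db, Ds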
def local_energy(self, system, two_rdm=None):
rdm = one_rdm_from_G(self.G)
return local_energy(system, rdm, two_rdm=two_rdm)
def unit_test():
from pauxy.systems.ueg import UEG
from pauxy.trial_density_matrices.onebody import OneBody
from pauxy.thermal_propagation.planewave import PlaneWave
from pauxy.qmc.options import QMCOpts
inputs = {'nup':1,
'ndown':1,
'rs':1.0,
'ecut':0.5,
"name": "one_body",
"mu":1.94046021,
"beta":0.5,
"dt": 0.05,
"optimised": True
}
beta = inputs['beta']
dt = inputs['dt']
system = UEG(inputs, verbose = False)
qmc = QMCOpts(inputs, system, True)
trial = OneBody(inputs, system, beta, dt, system.H1, verbose=False)
walker = ThermalWalker(inputs, system, trial, True)
# walker.greens_function(trial)
E, T, V = walker.local_energy(system)
numpy.random.seed(0)
inputs['optimised'] = False
propagator = PlaneWave(inputs, qmc, system, trial, verbose=False)
propagator.propagate_walker_free(system, walker, trial, False)
Gold = walker.G[0].copy()
system = UEG(inputs, verbose=False)
qmc = QMCOpts(inputs, system, verbose=False)
trial = OneBody(inputs, system, beta, dt, system.H1, verbose=False)
propagator = PlaneWave(inputs, qmc, system, trial, True)
walker = ThermalWalker(inputs, system, trial, verbose=False)
# walker.greens_function(trial)
E, T, V = walker.local_energy(system)
numpy.random.seed(0)
inputs['optimised'] = True
propagator = PlaneWave(inputs, qmc, system, trial, verbose=False)
propagator.propagate_walker_free(system, walker, trial, False)
Gnew = walker.G[0].copy()
assert(scipy.linalg.norm(Gold[:,0] - Gnew[:,0]) < 1e-10)
inputs['stack_size'] = 1
walker = ThermalWalker(inputs, system, trial, verbose=False)
numpy.random.seed(0)
propagator = PlaneWave(inputs, qmc, system, trial, verbose=False)
for i in range(0,5):
propagator.propagate_walker(system, walker, trial)
Gs1 = walker.G[0].copy()
for ts in range(walker.stack_length):
walker.greens_function(trial, slice_ix=ts*walker.stack_size)
E, T, V = walker.local_energy(system)
# print(E)
inputs['stack_size'] = 5
walker = ThermalWalker(inputs, system, trial, verbose=False)
numpy.random.seed(0)
propagator = PlaneWave(inputs, qmc, system, trial, verbose=False)
for i in range(0,5):
propagator.propagate_walker(system, walker, trial)
Gs5 = walker.G[0].copy()
for ts in range(walker.stack_length):
walker.greens_function(trial, slice_ix=ts*walker.stack_size)
E, T, V = walker.local_energy(system)
# print(E)
assert(numpy.linalg.norm(Gs1-Gs5) < 1e-10)
N = 5
A = numpy.random.rand(N,N)
Q, R, P = scipy.linalg.qr(A, pivoting=True)
#### test permutation start
# Pmat = numpy.zeros((N,N))
# for i in range (N):
# Pmat[P[i],i] = 1
# print(P)
# tmp = Q.dot(R)#.dot(Pmat.T)
# print(tmp)
# print("==================")
# tmp2 = tmp.dot(Pmat.T)
# print(tmp2)
# print("==================")
# tmp[:,P] = tmp [:,range(N)]
# print(tmp)
#### test permutation end
B = numpy.random.rand(N,N)
(Q1, R1, P1) = scipy.linalg.qr(B, pivoting=True, check_finite = False)
# Form permutation matrix
P1mat = numpy.zeros(B.shape, B.dtype)
P1mat[P1,range(len(P1))] = 1.0
# Form D matrices
D1 = numpy.diag(R1.diagonal())
D1inv = numpy.diag(1.0/R1.diagonal())
T1 = numpy.dot(numpy.dot(D1inv, R1), P1mat.T)
assert(numpy.linalg.norm(B - numpy.einsum('ij,jj->ij',Q1,D1).dot(T1)) < 1e-10)
# tmp[:,:] = tmp[:,P]
# print(A - tmp)
# print(Q * Q.T)
# print(R)
# Test walker green's function.
from pauxy.systems.hubbard import Hubbard
from pauxy.estimators.thermal import greens_function, one_rdm_from_G
from pauxy.estimators.hubbard import local_energy_hubbard
sys_dict = {'name': 'Hubbard', 'nx': 4, 'ny': 4,
'nup': 7, 'ndown': 7, 'U': 4, 't': 1}
system = Hubbard(sys_dict)
beta = 4
mu = 1
trial = OneBody({"mu": mu}, system, beta, dt, verbose=True)
dt = 0.05
num_slices = int(beta/dt)
eref = 0
for ek in system.eks:
eref += 2 * ek * 1.0 / (numpy.exp(beta*(ek-mu))+1)
walker = ThermalWalker({"stack_size": 1}, system, trial)
Gs1 = walker.G[0].copy()
rdm = one_rdm_from_G(walker.G)
ekin = local_energy_hubbard(system, rdm)[1]
try:
assert(abs(eref-ekin) < 1e-8)
except AssertionError:
print("Error in kinetic energy check. Ref: %13.8e Calc:%13.8e"%(eref, ekin))
walker = ThermalWalker({"stack_size": 10}, system, trial)
rdm = one_rdm_from_G(walker.G)
ekin = local_energy_hubbard(system, rdm)[1]
try:
assert(abs(eref-ekin) < 1e-8)
except AssertionError:
print("Error in kinetic energy check. Ref: %13.10e Calc: %13.10e"
" Error: %13.8e"%(eref.real, ekin.real, abs(eref-ekin)))
for ts in range(walker.stack_length):
walker.greens_function(trial, slice_ix=ts*walker.stack_size)
assert(numpy.linalg.norm(Gs1-walker.G[0]) < 1e-10)
if __name__=="__main__":
unit_test()
| 41.678014
| 130
| 0.510499
| 3,919
| 29,383
| 3.755295
| 0.089053
| 0.038527
| 0.028267
| 0.018074
| 0.712713
| 0.660121
| 0.612965
| 0.580281
| 0.566963
| 0.551063
| 0
| 0.028552
| 0.342035
| 29,383
| 704
| 131
| 41.737216
| 0.732685
| 0.143961
| 0
| 0.563265
| 0
| 0
| 0.03748
| 0.00084
| 0
| 0
| 0
| 0.00142
| 0.018367
| 1
| 0.02449
| false
| 0
| 0.034694
| 0
| 0.077551
| 0.022449
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
4108de14460ec5d4d27312f76596c4d68c7d284a
| 1,607
|
py
|
Python
|
schedules_tools/batches/utils.py
|
RedHat-Eng-PGM/python-schedules-tools
|
6166cdd0e5f7c08fba1c50f113ae6a6103460f9b
|
[
"MIT"
] | 1
|
2019-05-06T21:10:35.000Z
|
2019-05-06T21:10:35.000Z
|
schedules_tools/batches/utils.py
|
RedHat-Eng-PGM/schedules-tools
|
fd96a9e1df4e53b8da3048c013af0cd2c290f9b5
|
[
"MIT"
] | 5
|
2019-05-06T21:25:38.000Z
|
2021-02-05T20:54:30.000Z
|
schedules_tools/batches/utils.py
|
RedHat-Eng-PGM/schedules-tools
|
fd96a9e1df4e53b8da3048c013af0cd2c290f9b5
|
[
"MIT"
] | 1
|
2019-10-31T01:51:41.000Z
|
2019-10-31T01:51:41.000Z
|
from collections import OrderedDict
import os
import re
from schedules_tools.schedule_handlers.smart_sheet import ScheduleHandler_smartsheet
import yaml
DEFAULT_TEMPLATE_DIR = os.path.join(os.path.dirname(__file__), 'templates')
DEPENDENCY_REGEX = re.compile(r'^{(?P<to>predecessor|\d+)}(?P<type>[F|S]+)?'
r'( ?(?P<lag_sign>[+|-])?(?P<lag_amount>\d+)'
r'(?P<lag_type>[d|w]))?$')
def load_template(template_name):
template_dir = os.getenv('BATCHES_TEMPLATE_DIR', DEFAULT_TEMPLATE_DIR)
template_path = os.path.join(template_dir, '%s.yml' % template_name)
if not os.path.exists(template_path):
raise ValueError('Template "%s" not found.' % template_name)
with open(template_path, 'r') as f:
template = yaml.safe_load(f)
tasks = OrderedDict()
for task in template['tasks']:
task_id = task.pop('id')
if 'dependency' in task:
dependency_match = DEPENDENCY_REGEX.match(task['dependency'])
if not dependency_match:
raise ValueError('Incorrect dependency format: %s' % task['dependency'])
else:
task['dependency'] = dependency_match.groupdict()
tasks[task_id] = task
template['tasks'] = tasks
return template
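# Quick illustration of what DEPENDENCY_REGEX accepts; the dependency string
# below is made up for the example and not taken from any real template.
def _dependency_regex_demo():
    m = DEPENDENCY_REGEX.match('{3}FS +2d')
    assert m is not None
    assert m.groupdict() == {'to': '3', 'type': 'FS', 'lag_sign': '+',
                             'lag_amount': '2', 'lag_type': 'd'}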
def initialize_ss_handler(handle):
api_token = os.getenv('SMARTSHEET_TOKEN')
if not api_token:
raise ValueError('SMARTSHEET_TOKEN required')
handler = ScheduleHandler_smartsheet(
handle=handle,
options={'smartsheet_token': api_token}
)
return handler
| 31.509804
| 88
| 0.645924
| 193
| 1,607
| 5.15544
| 0.388601
| 0.055276
| 0.036181
| 0.030151
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.22962
| 1,607
| 50
| 89
| 32.14
| 0.803716
| 0
| 0
| 0
| 0
| 0
| 0.191039
| 0.065339
| 0
| 0
| 0
| 0
| 0
| 1
| 0.054054
| false
| 0
| 0.135135
| 0
| 0.243243
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
41122da858230ceb4c96eb4a8c7375d59b77bc28
| 8,148
|
py
|
Python
|
kotti/testing.py
|
mete0r/Kotti
|
e89103cc57d5d2af8d60eb8208ae9d04c068f6e7
|
[
"Naumen",
"Condor-1.1",
"MS-PL"
] | null | null | null |
kotti/testing.py
|
mete0r/Kotti
|
e89103cc57d5d2af8d60eb8208ae9d04c068f6e7
|
[
"Naumen",
"Condor-1.1",
"MS-PL"
] | null | null | null |
kotti/testing.py
|
mete0r/Kotti
|
e89103cc57d5d2af8d60eb8208ae9d04c068f6e7
|
[
"Naumen",
"Condor-1.1",
"MS-PL"
] | null | null | null |
# -*- coding: utf-8 -*-
"""
Inheritance Diagram
-------------------
.. inheritance-diagram:: kotti.testing
"""
import os
from os.path import join, dirname
from unittest import TestCase
from pytest import mark
from pyramid import testing
from pyramid.events import NewResponse
from pyramid.security import ALL_PERMISSIONS
from zope.deprecation.deprecation import deprecate
import transaction
# re-enable deprecation warnings during test runs
# however, let the `ImportWarning` produced by Babel's
# `localedata.py` vs `localedata/` show up once...
from warnings import catch_warnings
with catch_warnings():
from babel import localedata
import compiler
localedata, compiler # make pyflakes happy... :p
# py.test markers (see http://pytest.org/latest/example/markers.html)
user = mark.user
BASE_URL = 'http://localhost:6543'
class Dummy(dict):
def __init__(self, **kwargs):
self.__dict__.update(kwargs)
class DummyRequest(testing.DummyRequest):
is_xhr = False
POST = dict()
user = None
referrer = None
def is_response(self, ob):
return (hasattr(ob, 'app_iter') and hasattr(ob, 'headerlist') and
hasattr(ob, 'status'))
def asset(name):
import kotti
return open(join(dirname(kotti.__file__), 'tests', name), 'rb')
def includeme_login(config):
config.add_view(
login_view,
name='login',
renderer='kotti:templates/login.pt')
def includeme_layout(config):
# override edit master layout with view master layout
config.override_asset(
to_override='kotti:templates/edit/master.pt',
override_with='kotti:templates/view/master.pt')
def login_view(request):
return {}
def dummy_search(search_term, request):
return u"Not found. Sorry!"
def testing_db_url():
return os.environ.get('KOTTI_TEST_DB_STRING', 'sqlite://')
def _initTestingDB():
from sqlalchemy import create_engine
from kotti import get_settings
from kotti.resources import initialize_sql
database_url = testing_db_url()
get_settings()['sqlalchemy.url'] = database_url
session = initialize_sql(create_engine(database_url), drop_all=True)
return session
def _populator():
from kotti import DBSession
from kotti.resources import Document
from kotti.populate import populate
populate()
for doc in DBSession.query(Document)[1:]:
DBSession.delete(doc)
transaction.commit()
def _turn_warnings_into_errors(): # pragma: no cover
# turn all warnings into errors, but let the `ImportWarning`
# produced by Babel's `localedata.py` vs `localedata/` show up once...
from babel import localedata
localedata # make pyflakes happy... :p
from warnings import filterwarnings
filterwarnings("error")
def setUp(init_db=True, **kwargs):
# _turn_warnings_into_errors()
from kotti import _resolve_dotted
from kotti import conf_defaults
tearDown()
settings = conf_defaults.copy()
settings['kotti.secret'] = 'secret'
settings['kotti.secret2'] = 'secret2'
settings['kotti.populators'] = 'kotti.testing._populator'
settings.update(kwargs.get('settings', {}))
settings = _resolve_dotted(settings)
kwargs['settings'] = settings
config = testing.setUp(**kwargs)
config.add_default_renderers()
if init_db:
_initTestingDB()
transaction.begin()
return config
def tearDown():
from kotti import events
from kotti import security
from kotti.message import _inject_mailer
# These should arguable use the configurator, so they don't need
# to be torn down separately:
events.clear()
security.reset()
_inject_mailer[:] = []
transaction.abort()
testing.tearDown()
class UnitTestBase(TestCase):
def setUp(self, **kwargs):
self.config = setUp(**kwargs)
def tearDown(self):
tearDown()
class EventTestBase(TestCase):
def setUp(self, **kwargs):
super(EventTestBase, self).setUp(**kwargs)
self.config.include('kotti.events')
# Functional ----
def _functional_includeme(config):
from kotti import DBSession
def expire(event):
DBSession.flush()
DBSession.expire_all()
config.add_subscriber(expire, NewResponse)
def _zope_testbrowser_pyquery(self):
from pyquery import PyQuery
return PyQuery(
self.contents.replace('xmlns="http://www.w3.org/1999/xhtml', ''))
def setUpFunctional(global_config=None, **settings):
from kotti import main
import wsgi_intercept.zope_testbrowser
from webtest import TestApp
tearDown()
_settings = {
'sqlalchemy.url': testing_db_url(),
'kotti.secret': 'secret',
'kotti.site_title': 'Website des Kottbusser Tors', # for mailing
'kotti.populators': 'kotti.testing._populator',
'mail.default_sender': 'kotti@localhost',
'pyramid.includes': 'kotti.testing._functional_includeme',
}
_settings.update(settings)
host, port = BASE_URL.split(':')[-2:]
app = main({}, **_settings)
wsgi_intercept.add_wsgi_intercept(host[2:], int(port), lambda: app)
Browser = wsgi_intercept.zope_testbrowser.WSGI_Browser
Browser.pyquery = property(_zope_testbrowser_pyquery)
return dict(
Browser=Browser,
browser=Browser(),
test_app=TestApp(app),
)
class FunctionalTestBase(TestCase):
BASE_URL = BASE_URL
def setUp(self, **kwargs):
self.__dict__.update(setUpFunctional(**kwargs))
def tearDown(self):
tearDown()
def login(self, login=u'admin', password=u'secret'):
return self.test_app.post(
'/@@login',
{'login': login, 'password': password, 'submit': 'submit'},
status=302,
)
@deprecate('login_testbrowser is deprecated as of Kotti 0.7. Please use '
'the `browser` funcarg in conjunction with the `@user` '
'decorator.')
def login_testbrowser(self, login=u'admin', password=u'secret'):
browser = self.Browser()
browser.open(BASE_URL + '/edit')
browser.getControl("Username or email").value = login
browser.getControl("Password").value = password
browser.getControl(name="submit").click()
return browser
class TestingRootFactory(dict):
__name__ = '' # root is required to have an empty name!
__parent__ = None
__acl__ = [('Allow', 'role:admin', ALL_PERMISSIONS)]
def __init__(self, request):
super(TestingRootFactory, self).__init__()
def dummy_view(context, request):
return {}
def include_testing_view(config):
config.add_view(
dummy_view,
context=TestingRootFactory,
renderer='kotti:tests/testing_view.pt',
)
config.add_view(
dummy_view,
name='secured',
permission='view',
context=TestingRootFactory,
renderer='kotti:tests/testing_view.pt',
)
def setUpFunctionalStrippedDownApp(global_config=None, **settings):
# An app that doesn't use Nodes at all
_settings = {
'kotti.base_includes': (
'kotti kotti.views kotti.views.login kotti.views.users'),
'kotti.use_tables': 'principals',
'kotti.populators': 'kotti.populate.populate_users',
'pyramid.includes': 'kotti.testing.include_testing_view',
'kotti.root_factory': 'kotti.testing.TestingRootFactory',
'kotti.site_title': 'My Stripped Down Kotti',
}
_settings.update(settings)
return setUpFunctional(global_config, **_settings)
def registerDummyMailer():
from pyramid_mailer.mailer import DummyMailer
from kotti.message import _inject_mailer
mailer = DummyMailer()
_inject_mailer.append(mailer)
return mailer
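# Usage sketch (illustrative only): pyramid_mailer's DummyMailer collects
# outgoing messages in its `outbox` list instead of sending them, so a test
# can register it and then assert on what was "sent".
def _dummy_mailer_usage_sketch():
    mailer = registerDummyMailer()
    # ... exercise code under test that sends mail ...
    return mailer.outbox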
# set up deprecation warnings
from zope.deprecation.deprecation import deprecated
for item in UnitTestBase, EventTestBase, FunctionalTestBase, _initTestingDB:
name = getattr(item, '__name__', item)
deprecated(name, 'Unittest-style tests are deprecated as of Kotti 0.7. '
'Please use pytest function arguments instead.')
| 26.627451
| 78
| 0.676976
| 934
| 8,148
| 5.729122
| 0.30621
| 0.021865
| 0.022426
| 0.010092
| 0.151
| 0.084844
| 0.072136
| 0.060923
| 0.04971
| 0.027285
| 0
| 0.003426
| 0.211831
| 8,148
| 305
| 79
| 26.714754
| 0.829804
| 0.100884
| 0
| 0.149254
| 0
| 0
| 0.16863
| 0.043288
| 0
| 0
| 0
| 0
| 0
| 1
| 0.144279
| false
| 0.019901
| 0.169154
| 0.029851
| 0.452736
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
4113d55c3875b03d32cbc830fabbfbb2cdd11046
| 694
|
py
|
Python
|
leetcode/trees/level-order.py
|
vtemian/interviews-prep
|
ddef96b5ecc699a590376a892a804c143fe18034
|
[
"Apache-2.0"
] | 8
|
2019-05-14T12:50:29.000Z
|
2022-03-01T09:08:27.000Z
|
leetcode/trees/level-order.py
|
vtemian/interviews-prep
|
ddef96b5ecc699a590376a892a804c143fe18034
|
[
"Apache-2.0"
] | 46
|
2019-03-24T20:59:29.000Z
|
2019-04-09T16:28:43.000Z
|
leetcode/trees/level-order.py
|
vtemian/interviews-prep
|
ddef96b5ecc699a590376a892a804c143fe18034
|
[
"Apache-2.0"
] | 1
|
2022-01-28T12:46:29.000Z
|
2022-01-28T12:46:29.000Z
|
# Definition for a binary tree node.
# class TreeNode:
# def __init__(self, x):
# self.val = x
# self.left = None
# self.right = None
class Solution:
def levelOrder(self, root: TreeNode) -> List[List[int]]:
if not root:
return []
result = []
queue = [(root, 0)]
while queue:
node, level = queue.pop(0)
if len(result) <= level:
result.append([])
result[level].append(node.val)
if node.left:
queue.append((node.left, level + 1))
if node.right:
queue.append((node.right, level + 1))
return result
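# Alternative sketch: list.pop(0) above costs O(n) per dequeue, while
# collections.deque makes it O(1). A minimal stand-in node class is defined
# here purely for illustration, since TreeNode/List come from the LeetCode harness.
from collections import deque

class _Node:
    def __init__(self, val, left=None, right=None):
        self.val, self.left, self.right = val, left, right

def level_order_deque(root):
    if not root:
        return []
    result, queue = [], deque([(root, 0)])
    while queue:
        node, level = queue.popleft()
        if len(result) <= level:
            result.append([])
        result[level].append(node.val)
        if node.left:
            queue.append((node.left, level + 1))
        if node.right:
            queue.append((node.right, level + 1))
    return result

# e.g. level_order_deque(_Node(1, _Node(2), _Node(3))) == [[1], [2, 3]]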
| 22.387097
| 60
| 0.487032
| 77
| 694
| 4.337662
| 0.428571
| 0.08982
| 0.08982
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.009479
| 0.391931
| 694
| 30
| 61
| 23.133333
| 0.781991
| 0.214697
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.0625
| false
| 0
| 0
| 0
| 0.25
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
411e7327dfc8f59a57e3065bd00dbadcb1b1f18c
| 302
|
py
|
Python
|
mkdir.py
|
FunsomMars/Timg
|
216c994fd0b100996e72f4cda4eace369c8452ef
|
[
"MIT"
] | null | null | null |
mkdir.py
|
FunsomMars/Timg
|
216c994fd0b100996e72f4cda4eace369c8452ef
|
[
"MIT"
] | null | null | null |
mkdir.py
|
FunsomMars/Timg
|
216c994fd0b100996e72f4cda4eace369c8452ef
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# @Time : 2019-07-23 22:47
# @Author : Simon Meng
# @Site :
# @File : mkdir.py
# @Software: PyCharm
import os
# Make a folder under the current path
def mkdir(path):
folder = os.path.exists(path)
if not folder:
os.makedirs(path)
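# Equivalent one-liner on Python >= 3.2 (sketch): the exist_ok flag makes the
# manual existence check above unnecessary and avoids the race between the
# check and the create.
# os.makedirs(path, exist_ok=True)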
| 17.764706
| 38
| 0.609272
| 45
| 302
| 4.088889
| 0.8
| 0.086957
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.06087
| 0.238411
| 302
| 16
| 39
| 18.875
| 0.73913
| 0.586093
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.2
| false
| 0
| 0.2
| 0
| 0.4
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
411fa137c5df36c387a70295ace27f0afc3352fe
| 2,183
|
py
|
Python
|
scripts/create-opencl-headers.py
|
molkoback/icemet-server
|
9d7a29b38c711534923952d598fc37efff5db154
|
[
"MIT"
] | null | null | null |
scripts/create-opencl-headers.py
|
molkoback/icemet-server
|
9d7a29b38c711534923952d598fc37efff5db154
|
[
"MIT"
] | null | null | null |
scripts/create-opencl-headers.py
|
molkoback/icemet-server
|
9d7a29b38c711534923952d598fc37efff5db154
|
[
"MIT"
] | 1
|
2020-09-16T15:33:23.000Z
|
2020-09-16T15:33:23.000Z
|
import os
import sys
header_file_fmt = "{name}_ocl.hpp"
header_string = (
"#ifndef {definition}_OCL_HPP\n"
"#define {definition}_OCL_HPP\n"
"#include <opencv2/core/ocl.hpp>\n"
"const cv::ocl::ProgramSource& {module}_{name}_ocl() {{\n"
"static cv::ocl::ProgramSource source(\"{module}\", \"{name}\", \"{kernel}\", \"\");\n"
"return source;\n"
"}}\n"
"#endif\n"
)
def clear_between(string, del1, del2):
pos1 = string.find(del1)
if pos1 < 0:
return string
# find() here is relative to pos1, so check for a miss before offsetting;
# adding pos1 first would turn a -1 miss into a bogus non-negative position.
rel = string[pos1:].find(del2)
if rel < 0:
    return string
pos2 = rel + pos1
return string.replace(string[pos1:pos2+len(del2)], "")
def clear_all(string, del1, del2):
while True:
cleared = clear_between(string, del1, del2)
if string == cleared:
return string
string = cleared
def clear_repeating(string, tok):
while True:
cleared = string.replace(tok+tok, tok)
if string == cleared:
return string
string = cleared
def compress(code):
code = clear_all(code, "/*", "*/")
code = clear_all(code, "//", "\n")
code = code.replace("\n", "\\n")
code = code.replace("\t", "")
code = code.replace("\"", "\\\"")
code = clear_repeating(code, " ")
code = clear_repeating(code, "\\n")
return code
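# Tiny sanity check of compress() on a made-up kernel snippet: the block
# comment disappears, the double space collapses, and real newlines become
# the literal two-character sequence "\n" suitable for a C string.
def _compress_demo():
    src = "/* header */\nkernel  void k() {}\n"
    assert compress(src) == "\\nkernel void k() {}\\n"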
def create_header_file(kernel_path, header_path):
with open(kernel_path) as fp:
kernel = compress(fp.read())
base = os.path.splitext(os.path.basename(kernel_path))[0]
module, name = base.split("_")
data = header_string.format(
definition=base.upper(),
module=module,
name=name,
kernel=kernel
)
with open(header_path, "w") as fp:
fp.write(data)
def create_headers(kernel_dir, header_dir):
for kernel_file in os.listdir(kernel_dir):
kernel_path = os.path.join(kernel_dir, kernel_file)
if os.path.isfile(kernel_path) and kernel_file.endswith(".cl"):
header_file = header_file_fmt.format(name=os.path.splitext(kernel_file)[0])
header_path = os.path.join(header_dir, header_file)
create_header_file(kernel_path, header_path)
print("-- Created {}".format(header_file))
if __name__ == "__main__":
if len(sys.argv) != 3:
print("Usage: {} <kernel_dir> <header_dir>".format(sys.argv[0]))
sys.exit(1)
os.makedirs(sys.argv[2], exist_ok=True)
create_headers(sys.argv[1], sys.argv[2])
| 27.987179
| 87
| 0.685295
| 322
| 2,183
| 4.456522
| 0.263975
| 0.04878
| 0.014634
| 0.023693
| 0.171429
| 0.110105
| 0.110105
| 0.05993
| 0
| 0
| 0
| 0.014815
| 0.134219
| 2,183
| 77
| 88
| 28.350649
| 0.744444
| 0
| 0
| 0.144928
| 0
| 0
| 0.148878
| 0.061383
| 0
| 0
| 0
| 0
| 0
| 1
| 0.086957
| false
| 0
| 0.028986
| 0
| 0.202899
| 0.028986
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
412189bdca83add7a6eee8aca45c35007f4cbdb4
| 256
|
py
|
Python
|
models/mail_message.py
|
billhepeng/wx_tools
|
64369531bd76a935eff547c50ff68150a240849d
|
[
"Apache-2.0"
] | 1
|
2021-01-19T02:49:14.000Z
|
2021-01-19T02:49:14.000Z
|
models/mail_message.py
|
billhepeng/wx_tools
|
64369531bd76a935eff547c50ff68150a240849d
|
[
"Apache-2.0"
] | null | null | null |
models/mail_message.py
|
billhepeng/wx_tools
|
64369531bd76a935eff547c50ff68150a240849d
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
from odoo import api, fields, models
class MailMessage(models.Model):
_inherit = 'mail.message'
weixin_id = fields.Char('微信ID', required=False)
| 21.333333
| 74
| 0.703125
| 35
| 256
| 5.085714
| 0.914286
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.004762
| 0.179688
| 256
| 11
| 75
| 23.272727
| 0.842857
| 0.367188
| 0
| 0
| 0
| 0
| 0.101266
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.25
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
4122f41a65d52a80ce0e4e61b3b52bf36d00d875
| 3,143
|
py
|
Python
|
concerned-coyotes/earlyinternet/news/tests.py
|
Vthechamp22/summer-code-jam-2021
|
0a8bf1f22f6c73300891fd779da36efd8e1304c1
|
[
"MIT"
] | 40
|
2020-08-02T07:38:22.000Z
|
2021-07-26T01:46:50.000Z
|
concerned-coyotes/earlyinternet/news/tests.py
|
Vthechamp22/summer-code-jam-2021
|
0a8bf1f22f6c73300891fd779da36efd8e1304c1
|
[
"MIT"
] | 134
|
2020-07-31T12:15:45.000Z
|
2020-12-13T04:42:19.000Z
|
concerned-coyotes/earlyinternet/news/tests.py
|
Vthechamp22/summer-code-jam-2021
|
0a8bf1f22f6c73300891fd779da36efd8e1304c1
|
[
"MIT"
] | 101
|
2020-07-31T12:00:47.000Z
|
2021-11-01T09:06:58.000Z
|
import datetime
import random
from django.test import TestCase
from django.utils.dateparse import parse_datetime
from .models import Article
class ArticleTestCase(TestCase):
def setUp(self) -> None:
self.article = Article.objects.create(
source="HackerNews",
author="Guido van Rossum",
title="Why Python is such a nice language",
description="...",
content="...",
url="http://python.org/",
published_at=datetime.datetime(2020, 1, 1, 12, 0)
)
def test_representation(self):
""" Test if Article.__str__ works correctly """
self.assertEqual(
str(self.article),
"Why Python is such a nice language 2020-01-01T12:00:00"
)
def test_article_manager_create_article(self):
"""
Test if Article.objects.create_article works correctly
:return:
"""
article = {
'source': {'id': 'news-com-au', 'name': 'News.com.au'},
'author': 'unknown',
'title': 'F1 British Grand Prix live: updates, results, starting grid, Vettel reacts to Ferrari sabotage '
'questions',
'description': 'The British Grand Prix has ended in incredible drama as the last lap went down to the '
'wire with Lewis Hamilton winning after his tyre blew on the last lap.',
'url': 'https://www.news.com.au/sport/motorsport/formula-one/live-updates-from-the-2020-british-grand'
'-prix/live-coverage/ba297f46d4e91321c092db9d3d5d2e1f',
'urlToImage': 'https://content.api.news/v3/images/bin/2554ff2213b5c8a54e9809d310e697db',
'publishedAt': '2020-08-02T22:04:07Z',
'content': '...'
}
created = Article.objects.create_article(article)
self.assertEqual(article['source']['name'], created.source)
self.assertEqual('unknown', created.author)
self.assertEqual(article['title'], created.title)
self.assertEqual(article['description'], created.description)
self.assertEqual(article['url'], created.url)
self.assertEqual(parse_datetime(article['publishedAt']), created.published_at)
self.assertEqual('...', created.content)
def test_article_manager_get_latest(self):
""" Test Article.objects.get_latest """
# create 10 articles
articles = [self.article]
for i in range(9):
year = random.randrange(1900, 2020)
month = random.randrange(1, 12)
day = random.randrange(1, 28)
hour = random.randrange(1, 24)
article = Article.objects.create(
source="", author="", title=str(i), description="", content="", url="http://example.org/",
published_at=datetime.datetime(year, month, day, hour)
)
articles.append(article)
# sort articles
articles.sort(key=lambda x: x.published_at, reverse=True)
self.assertEqual(
articles[:4],
list(Article.objects.get_latest(4))
)
| 39.2875
| 118
| 0.598473
| 340
| 3,143
| 5.464706
| 0.423529
| 0.072659
| 0.043057
| 0.029064
| 0.097955
| 0.03014
| 0.03014
| 0
| 0
| 0
| 0
| 0.046717
| 0.278078
| 3,143
| 79
| 119
| 39.78481
| 0.772146
| 0.054407
| 0
| 0.033333
| 0
| 0.016667
| 0.272976
| 0.017766
| 0
| 0
| 0
| 0
| 0.15
| 1
| 0.066667
| false
| 0
| 0.083333
| 0
| 0.166667
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
412b47d093592288c113a1eac3194f68134c0446
| 11,406
|
py
|
Python
|
data/transforms.py
|
raja21068/Federated-Learning-For-Medical-Images
|
aa30ce9d8106fd4039188fc56fa99bdc9f46f0e0
|
[
"MIT"
] | 27
|
2021-03-05T05:56:35.000Z
|
2022-03-30T03:15:43.000Z
|
data/transforms.py
|
DiahannWu/FL-MRCM
|
946c981a044452333791b7da26609c0874da292c
|
[
"MIT"
] | 8
|
2021-03-08T10:41:19.000Z
|
2021-12-30T04:53:21.000Z
|
data/transforms.py
|
DiahannWu/FL-MRCM
|
946c981a044452333791b7da26609c0874da292c
|
[
"MIT"
] | 5
|
2021-03-28T14:02:30.000Z
|
2022-01-11T08:31:42.000Z
|
"""
Copyright (c) Facebook, Inc. and its affiliates.
This source code is licensed under the MIT license found in the
LICENSE file in the root directory of this source tree.
"""
import numpy as np
import torch
def to_tensor(data):
"""
Convert numpy array to PyTorch tensor. For complex arrays, the real and imaginary parts
are stacked along the last dimension.
Args:
data (np.array): Input numpy array
Returns:
torch.Tensor: PyTorch version of data
"""
if np.iscomplexobj(data):
data = np.stack((data.real, data.imag), axis=-1)
return torch.from_numpy(data)
def to_numpy(data):
"""
Convert PyTorch tensor to numpy array. For complex tensor with two channels, the complex numpy arrays are used.
Args:
data (torch.Tensor): Input torch tensor
Returns:
np.array numpy arrays
"""
if data.shape[-1] == 2:
out = np.zeros(data.shape[:-1], dtype=np.complex64)
real = data[..., 0].numpy()
imag = data[..., 1].numpy()
out.real = real
out.imag = imag
else:
out = data.numpy()
return out
def apply_mask(data, mask_func, seed=None):
"""
Subsample given k-space by multiplying with a mask.
Args:
data (torch.Tensor): The input k-space data. This should have at least 3 dimensions, where
dimensions -3 and -2 are the spatial dimensions, and the final dimension has size
2 (for complex values).
mask_func (callable): A function that takes a shape (tuple of ints) and a random
number seed and returns a mask.
seed (int or 1-d array_like, optional): Seed for the random number generator.
Returns:
(tuple): tuple containing:
masked data (torch.Tensor): Subsampled k-space data
mask (torch.Tensor): The generated mask
"""
shape = np.array(data.shape)
shape[:-3] = 1
mask = mask_func(shape, seed)
return data * mask, mask
def fft2(data, normalized=True):
"""
Apply centered 2 dimensional Fast Fourier Transform.
Args:
data (torch.Tensor): Complex valued input data containing at least 3 dimensions: dimensions
-3 & -2 are spatial dimensions and dimension -1 has size 2. All other dimensions are
assumed to be batch dimensions.
Returns:
torch.Tensor: The FFT of the input.
"""
assert data.size(-1) == 2
data = ifftshift(data, dim=(-3, -2))
data = torch.fft(data, 2, normalized=normalized)
data = fftshift(data, dim=(-3, -2))
return data
def rfft2(data):
"""
Apply centered 2 dimensional Fast Fourier Transform.
Args:
data (torch.Tensor): Complex valued input data containing at least 3 dimensions: dimensions
-3 & -2 are spatial dimensions and dimension -1 has size 2. All other dimensions are
assumed to be batch dimensions.
Returns:
torch.Tensor: The FFT of the input.
"""
data = ifftshift(data, dim=(-2, -1))
data = torch.rfft(data, 2, normalized=True, onesided=False)
data = fftshift(data, dim=(-3, -2))
return data
def ifft2(data, normalized=True):
"""
Apply centered 2-dimensional Inverse Fast Fourier Transform.
Args:
data (torch.Tensor): Complex valued input data containing at least 3 dimensions: dimensions
-3 & -2 are spatial dimensions and dimension -1 has size 2. All other dimensions are
assumed to be batch dimensions.
Returns:
torch.Tensor: The IFFT of the input.
"""
assert data.size(-1) == 2
data = ifftshift(data, dim=(-3, -2))
data = torch.ifft(data, 2, normalized=normalized)
data = fftshift(data, dim=(-3, -2))
return data
def irfft2(data):
"""
Apply centered 2-dimensional Inverse Fast Fourier Transform.
Args:
data (torch.Tensor): Complex valued input data containing at least 3 dimensions: dimensions
-3 & -2 are spatial dimensions and dimension -1 has size 2. All other dimensions are
assumed to be batch dimensions.
Returns:
torch.Tensor: The IFFT of the input.
"""
data = ifftshift(data, dim=(-3, -2))
data = torch.irfft(data, 2, normalized=True, onesided=False)
data = fftshift(data, dim=(-2, -1))
return data
def complex_to_mag_phase(data):
"""
:param data (torch.Tensor): A complex valued tensor, where the size of the third last dimension should be 2
:return: Mag and Phase (torch.Tensor): tensor of same size as input
"""
assert data.size(-3) == 2
mag = (data ** 2).sum(dim=-3).sqrt()
phase = torch.atan2(data[:, 1, :, :], data[:, 0, :, :])
return torch.stack((mag, phase), dim=-3)
def mag_phase_to_complex(data):
"""
:param data (torch.Tensor): Mag and Phase (torch.Tensor):
:return: A complex valued tensor, where the size of the third last dimension is 2
"""
assert data.size(-3) == 2
real = data[:, 0, :, :] * torch.cos(data[:, 1, :, :])
imag = data[:, 0, :, :] * torch.sin(data[:, 1, :, :])
return torch.stack((real, imag), dim=-3)
def partial_fourier(data):
"""
:param data:
:return:
"""
def complex_abs(data):
"""
Compute the absolute value of a complex valued input tensor.
Args:
data (torch.Tensor): A complex valued tensor, where the size of the final dimension
should be 2.
Returns:
torch.Tensor: Absolute value of data
"""
assert data.size(-1) == 2 or data.size(-3) == 2
return (data ** 2).sum(dim=-1).sqrt() if data.size(-1) == 2 else (data ** 2).sum(dim=-3).sqrt()
def root_sum_of_squares(data, dim=0):
"""
Compute the Root Sum of Squares (RSS) transform along a given dimension of a tensor.
Args:
data (torch.Tensor): The input tensor
dim (int): The dimensions along which to apply the RSS transform
Returns:
torch.Tensor: The RSS value
"""
return torch.sqrt((data ** 2).sum(dim))
def center_crop(data, shape):
"""
Apply a center crop to the input real image or batch of real images.
Args:
data (torch.Tensor): The input tensor to be center cropped. It should have at
least 2 dimensions and the cropping is applied along the last two dimensions.
shape (int, int): The output shape. The shape should be smaller than the
corresponding dimensions of data.
Returns:
torch.Tensor: The center cropped image
"""
assert 0 < shape[0] <= data.shape[-2]
assert 0 < shape[1] <= data.shape[-1]
w_from = (data.shape[-2] - shape[0]) // 2
h_from = (data.shape[-1] - shape[1]) // 2
w_to = w_from + shape[0]
h_to = h_from + shape[1]
return data[..., w_from:w_to, h_from:h_to]
def complex_center_crop(data, shape):
"""
Apply a center crop to the input image or batch of complex images.
Args:
data (torch.Tensor): The complex input tensor to be center cropped. It should
have at least 3 dimensions and the cropping is applied along dimensions
-3 and -2 and the last dimensions should have a size of 2.
shape (int, int): The output shape. The shape should be smaller than the
corresponding dimensions of data.
Returns:
torch.Tensor: The center cropped image
"""
assert 0 < shape[0] <= data.shape[-3]
assert 0 < shape[1] <= data.shape[-2]
w_from = (data.shape[-3] - shape[0]) // 2
h_from = (data.shape[-2] - shape[1]) // 2
w_to = w_from + shape[0]
h_to = h_from + shape[1]
return data[..., w_from:w_to, h_from:h_to, :]
def normalize(data, mean, stddev, eps=0.):
"""
Normalize the given tensor using:
(data - mean) / (stddev + eps)
Args:
data (torch.Tensor): Input data to be normalized
mean (float): Mean value
stddev (float): Standard deviation
eps (float): Added to stddev to prevent dividing by zero
Returns:
torch.Tensor: Normalized tensor
"""
return (data - mean) / (stddev + eps)
def normalize_instance(data, eps=0.):
"""
Normalize the given tensor using:
(data - mean) / (stddev + eps)
where mean and stddev are computed from the data itself.
Args:
data (torch.Tensor): Input data to be normalized
eps (float): Added to stddev to prevent dividing by zero
Returns:
torch.Tensor: Normalized tensor
"""
mean = data.mean()
std = data.std()
return normalize(data, mean, std, eps), mean, std
def normalize_volume(data, mean, std, eps=0.):
"""
Normalize the given tensor using:
(data - mean) / (stddev + eps)
where mean and stddev are provided and computed from volume.
Args:
data (torch.Tensor): Input data to be normalized
mean: mean of whole volume
std: std of whole volume
eps (float): Added to stddev to prevent dividing by zero
Returns:
torch.Tensor: Normalized tensor
"""
return normalize(data, mean, std, eps), mean, std
def normalize_complex(data, eps=0.):
"""
Normalize the given complex tensor using:
(data - mean) / (stddev + eps)
where mean and stddev are computed from magnitude of data.
Note that data is centered by complex mean so that the result centered data have average zero magnitude.
Args:
data (torch.Tensor): Input data to be normalized (*, 2)
mean: mean of image magnitude
std: std of image magnitude
eps (float): Added to stddev to prevent dividing by zero
Returns:
torch.Tensor: Normalized complex tensor with 2 channels (*, 2)
"""
mag = complex_abs(data)
mag_mean = mag.mean()
mag_std = mag.std()
temp = mag_mean/mag
mean_real = data[..., 0] * temp
mean_imag = data[..., 1] * temp
mean_complex = torch.stack((mean_real, mean_imag), dim=-1)
stddev = mag_std
return (data - mean_complex) / (stddev + eps), mag_mean, stddev
# Helper functions
def roll(x, shift, dim):
"""
Similar to np.roll but applies to PyTorch Tensors
"""
if isinstance(shift, (tuple, list)):
assert len(shift) == len(dim)
for s, d in zip(shift, dim):
x = roll(x, s, d)
return x
shift = shift % x.size(dim)
if shift == 0:
return x
left = x.narrow(dim, 0, x.size(dim) - shift)
right = x.narrow(dim, x.size(dim) - shift, shift)
return torch.cat((right, left), dim=dim)
def fftshift(x, dim=None):
"""
Similar to np.fft.fftshift but applies to PyTorch Tensors
"""
if dim is None:
dim = tuple(range(x.dim()))
shift = [dim // 2 for dim in x.shape]
elif isinstance(dim, int):
shift = x.shape[dim] // 2
else:
shift = [x.shape[i] // 2 for i in dim]
return roll(x, shift, dim)
def ifftshift(x, dim=None):
"""
Similar to np.fft.ifftshift but applies to PyTorch Tensors
"""
if dim is None:
dim = tuple(range(x.dim()))
shift = [(dim + 1) // 2 for dim in x.shape]
elif isinstance(dim, int):
shift = (x.shape[dim] + 1) // 2
else:
shift = [(x.shape[i] + 1) // 2 for i in dim]
return roll(x, shift, dim)
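# Cross-check of the shift helpers above against numpy's reference
# implementations on a small arbitrary tensor (sketch; assumes an eager,
# CPU-resident torch tensor):
def _fftshift_check():
    import numpy as np
    x = torch.arange(6.0).reshape(2, 3)
    assert np.allclose(fftshift(x).numpy(), np.fft.fftshift(x.numpy()))
    assert np.allclose(ifftshift(x).numpy(), np.fft.ifftshift(x.numpy()))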
| 29.703125
| 115
| 0.608276
| 1,605
| 11,406
| 4.287227
| 0.135826
| 0.054353
| 0.037059
| 0.038657
| 0.580003
| 0.552681
| 0.515332
| 0.475658
| 0.471152
| 0.456765
| 0
| 0.018403
| 0.280642
| 11,406
| 383
| 116
| 29.780679
| 0.820232
| 0.521831
| 0
| 0.268293
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.081301
| 1
| 0.170732
| false
| 0
| 0.01626
| 0
| 0.365854
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
f5aba0aa3a1bda30d3d5e14338fb55d72ab3b386
| 1,883
|
py
|
Python
|
b5/lib/state.py
|
team23/b5
|
90f45e86966eeb7a259667bbe06a5555648d012d
|
[
"BSD-3-Clause"
] | 14
|
2018-11-24T23:33:35.000Z
|
2022-02-04T23:46:49.000Z
|
b5/lib/state.py
|
team23/b5
|
90f45e86966eeb7a259667bbe06a5555648d012d
|
[
"BSD-3-Clause"
] | 3
|
2020-02-10T11:05:11.000Z
|
2020-03-04T08:42:11.000Z
|
b5/lib/state.py
|
team23/b5
|
90f45e86966eeb7a259667bbe06a5555648d012d
|
[
"BSD-3-Clause"
] | 1
|
2020-02-11T19:45:13.000Z
|
2020-02-11T19:45:13.000Z
|
import os
import tempfile
from types import TracebackType
from typing import Any, BinaryIO, Optional, TextIO, Type, Union
import yaml
class StoredState:
def __init__(self, state: "State") -> None:
self.state = state
if self.state.stored_name is not None:
raise RuntimeError('You may only store the state once')
self.file_handle = tempfile.NamedTemporaryFile(suffix='b5-state', mode='w', encoding='utf-8', delete=False)
self.state.stored_name = self.name
yaml.dump({
key: getattr(self.state, key)
for key in state.KEYS
}, self.file_handle, default_flow_style=False)
self.file_handle.close()
def close(self) -> None:
os.unlink(self.file_handle.name)
self.state.stored_name = None
def __enter__(self) -> "StoredState":
return self
def __exit__(
self,
exc_type: Optional[Type[BaseException]],
exc: Optional[BaseException],
traceback: Optional[TracebackType],
) -> None:
self.close()
@property
def name(self) -> str:
return self.file_handle.name
class State:
KEYS = ('project_path', 'run_path', 'taskfiles', 'configfiles', 'config', 'args', 'stored_name')
taskfiles = []
configfiles = []
args = {}
def __init__(self, **kwargs: Any) -> None:
for key in self.KEYS:
if not hasattr(self, key):
setattr(self, key, None)
for key in kwargs:
if key not in self.KEYS:
raise RuntimeError('Key %s is not a valid state attribute' % key)
setattr(self, key, kwargs[key])
def stored(self) -> StoredState:
return StoredState(self)
@classmethod
def load(cls, file_handle: Union[BinaryIO, TextIO]) -> "State":
return cls(**yaml.safe_load(file_handle))
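# Round-trip sketch (the project_path value is hypothetical): store a State
# to its temporary YAML file, then load it back with State.load.
def _state_roundtrip_sketch():
    with State(project_path='/tmp/proj').stored() as stored:
        with open(stored.name) as fh:
            restored = State.load(fh)
    return restored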
| 28.969231
| 115
| 0.60701
| 225
| 1,883
| 4.933333
| 0.368889
| 0.063063
| 0.063063
| 0.051351
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.001476
| 0.280404
| 1,883
| 64
| 116
| 29.421875
| 0.817712
| 0
| 0
| 0
| 0
| 0
| 0.088157
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.16
| false
| 0
| 0.1
| 0.08
| 0.46
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
f5ac35c88920717e7f434d347b3a61d75f1b9fd5
| 2,711
|
py
|
Python
|
lines_ext.py
|
subhrajit02/handwritten-digit-recognision
|
239a4bd1283393865d2655b91ad4674ce8450882
|
[
"MIT"
] | null | null | null |
lines_ext.py
|
subhrajit02/handwritten-digit-recognision
|
239a4bd1283393865d2655b91ad4674ce8450882
|
[
"MIT"
] | null | null | null |
lines_ext.py
|
subhrajit02/handwritten-digit-recognision
|
239a4bd1283393865d2655b91ad4674ce8450882
|
[
"MIT"
] | null | null | null |
import numpy as np
import cv2
def rem_multi_lines(lines, thresh):
"""
to remove the multiple lines with close proximity
:param lines: initial list with all the lines(multiple in place of singular)
:param thresh: dist between two lines for them to be considered as same
:return: final list with singular lines in place of multiple
"""
a = []
i = 0
lines.append([800, 0])  # sentinel far from any real line so the final group is flushed
out = []
# this loop collects lines with close proximity in a list (a) and then appends that
# complete list in a common list called out.
while i < len(lines) - 1:
if lines[i] not in a:
a.append(lines[i])
if abs(lines[i + 1][0] - lines[i][0]) < thresh:
a.append(lines[i + 1])
else:
out.append(a)
a = []
i += 1
# print(out)
final = []
for i in out:
a = np.array(i)
final.append(np.average(a, axis=0))
# print(final)
for i in final.copy():
if i[0] < 0:
final.remove(i)
return final
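# Worked example with made-up numbers: two vertical lines 2 px apart collapse
# into their average, while the distant line survives untouched.
def _rem_multi_lines_demo():
    merged = rem_multi_lines([[100, 0], [102, 0], [300, 1.57]], thresh=5)
    assert np.allclose(merged[0], [101.0, 0.0])
    assert np.allclose(merged[1], [300.0, 1.57])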
def draw_r_theta_lines(img, lines, color):
"""
draw lines on image which are of (r, theta) form
:param img: image to draw the lines on
:param lines: list of lines on the form (r, theta)
:param color: color of lines
:return:
"""
for rho, theta in lines:
a = np.cos(theta)
b = np.sin(theta)
x0 = a * rho
y0 = b * rho
x1 = int(x0 + 1000 * (-b))
y1 = int(y0 + 1000 * a)
x2 = int(x0 - 1000 * (-b))
y2 = int(y0 - 1000 * a)
cv2.line(img, (x1, y1), (x2, y2), color, 2)
def lines_ext(img, hough_thresh, multilines_thresh):
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
edges = cv2.Canny(gray, 45, 10)
line_image = img.copy()
lines = cv2.HoughLines(edges, 1, np.pi / 180, hough_thresh)
lines = lines.reshape(lines.shape[0], 2)
draw_r_theta_lines(line_image, lines, (0, 0, 255))
lines = sorted(lines, key=lambda x: x[0])
cv2.imshow("lines", line_image)
cv2.waitKey(0)
l1 = list(lines)
l2 = []
for i in l1:
l2.append(list(i))
v_lines = []
h_lines = []
for i in l2:
if round(i[1]) == 0:
v_lines.append(i)
elif round(i[1]) > 0.5:
h_lines.append(i)
# print('v:', v_lines)
# print('h:', h_lines)
v_lines = rem_multi_lines(v_lines, multilines_thresh)
h_lines = rem_multi_lines(h_lines, multilines_thresh)
final = v_lines + h_lines
draw_r_theta_lines(line_image, final, (0, 255, 0))
cv2.imshow("lines1", line_image)
cv2.waitKey(0)
return v_lines, h_lines
| 25.101852
| 88
| 0.560679
| 412
| 2,711
| 3.589806
| 0.291262
| 0.028398
| 0.016227
| 0.030426
| 0.0595
| 0.032454
| 0
| 0
| 0
| 0
| 0
| 0.048518
| 0.315751
| 2,711
| 107
| 89
| 25.336449
| 0.748787
| 0.239395
| 0
| 0.063492
| 0
| 0
| 0.005508
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.047619
| false
| 0
| 0.031746
| 0
| 0.111111
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
f5acb14365decf5cb2d85dfdb8cc3ac0e9ffe41f
| 1,553
|
py
|
Python
|
examples/wmt/tools/scorer/nlm.py
|
godweiyang/ParaGen
|
9665d1244ea38a41fc06b4e0a7f6411985e2221f
|
[
"Apache-2.0"
] | 50
|
2022-01-18T07:25:46.000Z
|
2022-03-14T13:06:18.000Z
|
examples/wmt/tools/scorer/nlm.py
|
JiangtaoFeng/ParaGen
|
509334bf16e3674e009bb9dc37ecc38ae3b5c977
|
[
"Apache-2.0"
] | 2
|
2022-01-19T09:36:42.000Z
|
2022-02-23T07:16:02.000Z
|
examples/wmt/tools/scorer/nlm.py
|
JiangtaoFeng/ParaGen
|
509334bf16e3674e009bb9dc37ecc38ae3b5c977
|
[
"Apache-2.0"
] | 6
|
2022-01-19T09:28:53.000Z
|
2022-03-10T10:20:08.000Z
|
# Before running this command, you should firstly run:
# pip install fairseq
# pip install fastBPE
# wget https://dl.fbaipublicfiles.com/fairseq/models/lm/wmt19.en.tar.gz
# tar zxvf wmt19.en.tar.gz
import argparse
from itertools import islice
import numpy as np
from fairseq.models.transformer_lm import TransformerLanguageModel
parser = argparse.ArgumentParser()
parser.add_argument('--hypo_filename', metavar='N', type=str, help='hypo_filename')
parser.add_argument('--out_filename', metavar='N', type=str, help='out_filename')
# parser.add_argument('--num_candidates', type=int, help="num_candidates")
args, unknown = parser.parse_known_args()
en_lm = TransformerLanguageModel.from_pretrained('wmt19.en', 'model.pt', tokenizer='moses', bpe='fastbpe')
en_lm.cuda()
num_processed = 0
ppl = []
batch_num = 1000
with open(args.hypo_filename, 'r') as f, open(args.out_filename, 'w') as out:
while True:
n_lines = list(map(lambda x: x.strip(), islice(f, batch_num)))
if len(n_lines) == 0:
break
for ele in en_lm.score(n_lines, beam=1):
ppl.append(float(ele['positional_scores'].mean().neg().exp().item()))
num_processed += batch_num
print(f"Processed {num_processed}")
ppl = np.array(ppl)
ppl = np.nan_to_num(ppl, nan=np.nanmax(ppl))
# scores = 1 - ppl/ppl.max()
# for ele in zip(ppl.tolist(), scores.tolist()):
# out.write(f"{np.log(ele[0])}, {ele[0]}, {ele[1]}\n")
ppl = np.array(ppl)
for ele in ppl.tolist():
out.write(f"{np.log(ele)}\n")
| 36.116279
| 106
| 0.676755
| 234
| 1,553
| 4.363248
| 0.448718
| 0.020568
| 0.049951
| 0.023506
| 0.097943
| 0.097943
| 0.045054
| 0
| 0
| 0
| 0
| 0.013067
| 0.162267
| 1,553
| 42
| 107
| 36.97619
| 0.771714
| 0.251771
| 0
| 0.074074
| 0
| 0
| 0.124132
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.148148
| 0
| 0.148148
| 0.037037
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
f5accc4b43ec1556256e37986ed9a579a786c19a
| 2,742
|
py
|
Python
|
aioli_openapi/service.py
|
jimorie/aioli-openapi
|
5a5ea6471d332adc8361ad39af7421e4686811fd
|
[
"MIT"
] | null | null | null |
aioli_openapi/service.py
|
jimorie/aioli-openapi
|
5a5ea6471d332adc8361ad39af7421e4686811fd
|
[
"MIT"
] | null | null | null |
aioli_openapi/service.py
|
jimorie/aioli-openapi
|
5a5ea6471d332adc8361ad39af7421e4686811fd
|
[
"MIT"
] | null | null | null |
import warnings
from apispec import APISpec
from apispec.ext.marshmallow import MarshmallowPlugin
from aioli.service import BaseService
from aioli.controller import BaseHttpController
from aioli.exceptions import NoMatchFound
class OpenApiService(BaseService):
_specs = {}
def oas_schema(self, pkg):
spec = APISpec(
title=pkg.meta["name"].capitalize(),
version=pkg.meta["version"],
openapi_version=self.config["oas_version"],
plugins=[MarshmallowPlugin()],
)
for ctrl in pkg.controllers:
if not isinstance(ctrl, BaseHttpController):
continue
routes = {}
for func, handler in ctrl.handlers:
if not handler.status:
warnings.warn(f"No @returns for {func}, cannot generate OAS3 schema for this handler")
break
abspath = handler.path_full
method = handler.method.lower()
if abspath not in routes:
routes[abspath] = {}
if method not in routes[abspath]:
routes[abspath][method] = dict(
responses={},
parameters=[]
)
route = routes[abspath][method]
responses = route["responses"]
parameters = route["parameters"]
for location, schema_cls in handler.schemas:
if location == "response":
if not schema_cls:
content = {}
else:
content = {"application/json": {"schema": schema_cls}}
responses[handler.status] = dict(
description=None,
content=content
)
elif location in ["path", "query", "header"]:
if not schema_cls:
continue
parameters.append({
"in": location,
"schema": schema_cls
})
spec.path(handler.path_full, operations=routes[abspath])
return spec.to_dict()
async def on_startup(self):
for pkg in self.app.registry.imported:
if not pkg.config["path"]:
continue
self._specs[pkg.meta["name"]] = self.oas_schema(pkg)
async def get_schemas(self, **query):
return self._specs
async def get_schema(self, name):
if name not in self._specs:
raise NoMatchFound
return self._specs[name]
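For orientation, the routes mapping assembled in oas_schema has roughly this shape by the time it reaches spec.path; the path and schema names below are illustrative only:
routes = {
    "/pets/{id}": {
        "get": {
            "responses": {
                200: {"description": None,
                      "content": {"application/json": {"schema": "PetSchema"}}},
            },
            "parameters": [{"in": "path", "schema": "PetIdSchema"}],
        },
    },
}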
| 31.159091
| 106
| 0.496718
| 243
| 2,742
| 5.522634
| 0.358025
| 0.018629
| 0.016393
| 0.020864
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.00063
| 0.420861
| 2,742
| 87
| 107
| 31.517241
| 0.844458
| 0
| 0
| 0.076923
| 0
| 0
| 0.061999
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.015385
| false
| 0
| 0.107692
| 0
| 0.2
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
f5afafed15f47453d454c043799fdd7a4422ab1b
| 1,863
|
py
|
Python
|
src_old/tests/delete_migrations.py
|
rishikesh67/django-tenant-oracle-schemas
|
918a64e842b678fc506eadbb4d7e51b0b38ab0a2
|
[
"MIT"
] | null | null | null |
src_old/tests/delete_migrations.py
|
rishikesh67/django-tenant-oracle-schemas
|
918a64e842b678fc506eadbb4d7e51b0b38ab0a2
|
[
"MIT"
] | 8
|
2019-12-04T23:26:11.000Z
|
2022-02-10T09:42:18.000Z
|
src/tests/delete_migrations.py
|
rishikesh67/django-tenant-oracle-schemas
|
918a64e842b678fc506eadbb4d7e51b0b38ab0a2
|
[
"MIT"
] | 2
|
2019-06-26T05:31:16.000Z
|
2019-07-01T12:22:50.000Z
|
import os
import shutil
import logging
# logging.basicConfig(level=logging.DEBUG)
# DEBUG:root:Skipping file /Users/hygull/Projects/Python3/DjangoTenantOracleSchemas/django-tenant-oracle-schemas/src/tenants/models.py
# logging.basicConfig(format='%(asctime)s %(message)s', level=logging.DEBUG)
# 2019-06-24 16:19:29,898 Skipping file /Users/hygull/Projects/Python3/DjangoTenantOracleSchemas/django-tenant-oracle-schemas/src/manage.py
# logging.basicConfig(format='%(asctime)s %(message)s', level=logging.DEBUG, datefmt='%d/%m/%Y %H:%M:%S %p')
# 24/06/2019 04:23:31 PM Skipping file /Users/hygull/Projects/Python3/DjangoTenantOracleSchemas/django-tenant-oracle-schemas/src/manage.py
logging.basicConfig(format='%(asctime)s %(message)s', level=logging.DEBUG, datefmt='[%d/%m/%Y %H:%M:%S %p] =>')
# 24/06/2019 16:24:02 PM Skipping file /Users/hygull/Projects/Python3/DjangoTenantOracleSchemas/django-tenant-oracle-schemas/src/manage.py
def delete_migrations(
dir_path='/Users/hygull/Projects/Python3/DjangoTenantOracleSchemas/django-tenant-oracle-schemas/',
migrations=True,
pycaches=False,
**kwargs
):
dir_path = os.path.join(os.path.abspath(dir_path))
logging.info(dir_path)
if os.path.isdir(dir_path):
files = os.listdir(dir_path)
for file in files:
abspath = os.path.join(dir_path, file)
if os.path.isdir(abspath):
logging.debug('file ---> {0} {1}'.format(file, pycaches))
if (migrations and file == 'migrations') or (pycaches and file == "__pycache__"):
logging.debug('Found migration as ' + abspath)
shutil.rmtree(abspath)
logging.debug(abspath + ' is removed')
else:
logging.debug('Iteration over -> ' + abspath)
                    delete_migrations(abspath, migrations, pycaches, **kwargs)  # recurse, keeping the signature's migrations/pycaches argument order
else:
logging.debug('Skipping file ' + abspath)
else:
logging.debug('Path is not a directory')
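A hedged usage sketch: the function's default dir_path is the author's local checkout, so pass your own project root (the path below is a placeholder):
# Recursively remove every `migrations` directory (and, optionally,
# every `__pycache__` directory) under a Django project tree.
delete_migrations('/path/to/your/django/project', migrations=True, pycaches=True)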
| 38.8125
| 139
| 0.7343
| 257
| 1,863
| 5.272374
| 0.319066
| 0.088561
| 0.070111
| 0.095941
| 0.505535
| 0.505535
| 0.505535
| 0.505535
| 0.505535
| 0.449446
| 0
| 0.031458
| 0.112721
| 1,863
| 47
| 140
| 39.638298
| 0.788264
| 0.411702
| 0
| 0.1
| 0
| 0
| 0.236431
| 0.079117
| 0
| 0
| 0
| 0
| 0
| 1
| 0.033333
| false
| 0
| 0.133333
| 0
| 0.166667
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
f5b0b5d5e4ce7c8e9669a43f27a5226a60590d4f
| 6,075
|
py
|
Python
|
qa2nli/converters/processors.py
|
nli-for-qa/conversion
|
588de7fbbcdeb9698fe888b6e3ece7dfadf25238
|
[
"MIT"
] | null | null | null |
qa2nli/converters/processors.py
|
nli-for-qa/conversion
|
588de7fbbcdeb9698fe888b6e3ece7dfadf25238
|
[
"MIT"
] | null | null | null |
qa2nli/converters/processors.py
|
nli-for-qa/conversion
|
588de7fbbcdeb9698fe888b6e3ece7dfadf25238
|
[
"MIT"
] | 1
|
2021-07-04T01:59:56.000Z
|
2021-07-04T01:59:56.000Z
|
from typing import Callable, List, Union, Optional, Dict, Tuple
import re
import spacy
import logging
import math
from enum import Enum
logger = logging.getLogger(__name__)
def remove_excess_space(inp: str) -> str:
return ' '.join(inp.split()).strip()
def get_spacy_model(model: str) -> spacy.language.Language:
try:
spacy_model = spacy.load(model)
except OSError:
logger.warning(
f"Spacy models '{model}' not found. Downloading and installing.")
spacy.cli.download(model)
# Import the downloaded model module directly and load from there
spacy_model_module = __import__(model)
spacy_model = spacy_model_module.load()
return spacy_model
class PreprocessorBase:
"""Override the __call__ method in inherited class to change functionallity"""
def __call__(self, q: str, o: str) -> Tuple[str, Dict]:
""" Very basic preprocessor which concats question and option.
Handles fill in the black type questions.
"""
if '_' in q: # FITB
h = q.replace('_', o)
else:
h = q + ' ' + o
h = remove_excess_space(h)
meta = {'question': q, 'option': o}
return h, meta
Preprocessor = PreprocessorBase
dots = re.compile(r"[\.\'\"\?, ]{2,}[\w ]*")
def remove_dots(inp: str) -> str:
return dots.sub('.', inp)
class ConversionIssue(Enum):
NONE = 'none'
TOO_SHORT = 'too_short'
TOO_LONG = 'too_long'
COULD_NOT_FIX = 'could_not_fix'
INVALID_QUESTION = 'invalid_question'
INVALID_OPTION = 'invalid_option'
MISSING_INFORMATION = 'missing_info'
    UNGRAMMATICAL_RESULT = 'ungrammatical_result'
UNKNOWN = 'unknown'
def __str__(self) -> str:
return self.value
class PostprocessorBase:
def __init__(self,
lower_length_ratio: Optional[float] = None,
upper_length_ratio: float = 1.3) -> None:
self.lower_length_ratio = lower_length_ratio
self.upper_length_ratio = upper_length_ratio
def __call__(self, inp: str, meta: Dict) -> Tuple[str, Dict]:
        # if the list does not exist, add an empty one
meta['conversion_issues'] = meta.get('conversion_issues', [])
return inp, meta
def _length_check(self, output: str, question: str,
option: str) -> ConversionIssue:
total_ratio = (len(output) / (len(question) + len(option)))
if total_ratio > self.upper_length_ratio:
# too long. Cut the output
return ConversionIssue.TOO_LONG
elif self.lower_length_ratio is None and len(output) < len(option):
return ConversionIssue.TOO_SHORT
elif self.lower_length_ratio is not None:
if total_ratio < self.lower_length_ratio:
return ConversionIssue.TOO_SHORT
return ConversionIssue.NONE
class Postprocessor(PostprocessorBase):
def __init__(self,
sentence_splitter: str = 'period',
cleaner: str = None,
lower_length_ratio: float = None,
upper_length_ratio: float = 1.3) -> None:
self.sentence_splitter = sentence_splitter
if cleaner == 'remove_dots':
self.cleaner: Callable[[str], str] = remove_dots
else:
self.cleaner = lambda x: x
if sentence_splitter == 'spacy':
self.spacy_nlp = get_spacy_model('en_core_web_sm')
else:
self.spacy_nlp = None
super().__init__(
lower_length_ratio=lower_length_ratio,
upper_length_ratio=upper_length_ratio)
def _fix_too_short(self, all_sentences: List[str],
meta: Dict) -> Tuple[str, bool]:
next_ = 1
could_not_fix = False
current_output = all_sentences[0]
        # add sentences until the length is no longer too short
max_tries = min(5, len(all_sentences))
length_issue = ConversionIssue.TOO_SHORT
if max_tries == 1:
could_not_fix = True
while length_issue == ConversionIssue.TOO_SHORT and (
not could_not_fix):
current_output = current_output + f" {all_sentences[next_]}"
length_issue = self._length_check(current_output, meta['question'],
meta['option'])
next_ += 1
if next_ >= max_tries:
could_not_fix = True
break
return current_output, could_not_fix
def __call__(self, inp: str, meta: Dict) -> Tuple[str, Dict]:
cleaned = self.cleaner(inp)
if self.sentence_splitter == 'spacy':
sentences = [
s.text.strip() for s in list(self.spacy_nlp(cleaned).sents)
]
first_sent = (sentences[0]).strip()
elif self.sentence_splitter == 'period':
sentences = cleaned.split('.')
first_sent = sentences[0]
meta['all_sentences'] = sentences
output = first_sent
issues_encountered = []
length_issue = self._length_check(output, meta['question'],
meta['option'])
if length_issue == ConversionIssue.TOO_SHORT:
issues_encountered.append(length_issue)
output, could_not_fix = self._fix_too_short(sentences, meta)
if could_not_fix:
issues_encountered.append(ConversionIssue.COULD_NOT_FIX)
# check again
length_issue = self._length_check(output, meta['question'],
meta['option'])
if length_issue == ConversionIssue.TOO_LONG:
issues_encountered.append(length_issue)
output = output[:int(
math.ceil(self.upper_length_ratio *
(len(meta['question']) + len(meta['option']))))]
meta['conversion_issues'] = [
str(issue) for issue in issues_encountered
]
output = remove_excess_space(output)
return output, meta
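A minimal round trip through the two processors above, using a made-up question/option pair and the dependency-free 'period' splitter:
pre = Preprocessor()
post = Postprocessor(sentence_splitter='period', cleaner='remove_dots')
hypothesis, meta = pre("The capital of France is _.", "Paris")
output, meta = post(hypothesis, meta)
print(output)                      # "The capital of France is Paris"
print(meta['conversion_issues'])   # [] when no length issue is detected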
| 31.806283
| 82
| 0.597366
| 687
| 6,075
| 4.989811
| 0.243086
| 0.054551
| 0.032089
| 0.029172
| 0.222287
| 0.171237
| 0.114061
| 0.094516
| 0.094516
| 0.094516
| 0
| 0.002852
| 0.30749
| 6,075
| 190
| 83
| 31.973684
| 0.81198
| 0.059918
| 0
| 0.150376
| 0
| 0
| 0.066984
| 0.003878
| 0
| 0
| 0
| 0
| 0
| 1
| 0.082707
| false
| 0
| 0.052632
| 0.022556
| 0.323308
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
f5b0c54a48711381cd579c3094b7c9b18f185760
| 2,106
|
py
|
Python
|
trphysx/data_utils/dataset_cylinder.py
|
zabaras/transformer-physx
|
eb28d09957641cc594b3e5acf4ace2e4dc193584
|
[
"MIT"
] | 33
|
2020-10-15T06:43:36.000Z
|
2022-03-24T10:46:12.000Z
|
trphysx/data_utils/dataset_cylinder.py
|
zabaras/transformer-physx
|
eb28d09957641cc594b3e5acf4ace2e4dc193584
|
[
"MIT"
] | 2
|
2021-05-18T14:31:38.000Z
|
2021-07-30T18:18:50.000Z
|
trphysx/data_utils/dataset_cylinder.py
|
zabaras/transformer-physx
|
eb28d09957641cc594b3e5acf4ace2e4dc193584
|
[
"MIT"
] | 6
|
2020-12-01T05:54:01.000Z
|
2022-03-25T21:22:09.000Z
|
"""
=====
Distributed by: Notre Dame SCAI Lab (MIT License)
- Associated publication:
url: https://arxiv.org/abs/2010.03957
doi:
github: https://github.com/zabaras/transformer-physx
=====
"""
import logging
import h5py
import torch
from .dataset_phys import PhysicalDataset
from ..embedding.embedding_model import EmbeddingModel
logger = logging.getLogger(__name__)
class CylinderDataset(PhysicalDataset):
"""Dataset for 2D flow around a cylinder numerical example
"""
def embed_data(self, h5_file: h5py.File, embedder: EmbeddingModel) -> None:
"""Embeds cylinder flow data into a 1D vector representation for the transformer.
Args:
h5_file (h5py.File): HDF5 file object of raw data
embedder (EmbeddingModel): Embedding neural network
"""
# Iterate through stored time-series
samples = 0
embedder.eval()
for key in h5_file.keys():
ux = torch.Tensor(h5_file[key + '/ux'])
uy = torch.Tensor(h5_file[key + '/uy'])
p = torch.Tensor(h5_file[key + '/p'])
data_series = torch.stack([ux, uy, p], dim=1).to(embedder.devices[0])
visc = (2.0 / float(key))*torch.ones(ux.size(0), 1).to(embedder.devices[0])
with torch.no_grad():
embedded_series = embedder.embed(data_series, visc).cpu()
# Stride over time-series
for i in range(0, data_series.size(0) - self.block_size + 1, self.stride): # Truncate in block of block_size
data_series0 = embedded_series[i: i + self.block_size] # .repeat(1, 4)
self.examples.append(data_series0)
if self.eval:
self.states.append(data_series[i: i + self.block_size].cpu())
samples = samples + 1
if (self.ndata > 0 and samples >= self.ndata): # If we have enough time-series samples break loop
break
logger.info(
'Collected {:d} time-series from hdf5 file. Total of {:d} time-series.'.format(samples, len(self.examples))
)
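The stride loop above cuts each embedded series into overlapping windows of block_size; the same pattern in isolation, with toy sizes chosen for illustration:
import torch

series = torch.arange(10).unsqueeze(-1)   # a toy time-series of length 10
block_size, stride = 4, 2
blocks = [series[i:i + block_size]
          for i in range(0, series.size(0) - block_size + 1, stride)]
print(len(blocks))                        # 4 windows: [0:4], [2:6], [4:8], [6:10]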
| 37.607143
| 121
| 0.61396
| 269
| 2,106
| 4.710037
| 0.460967
| 0.028414
| 0.030781
| 0.040253
| 0.110497
| 0.033149
| 0
| 0
| 0
| 0
| 0
| 0.025325
| 0.268756
| 2,106
| 55
| 122
| 38.290909
| 0.797403
| 0.283476
| 0
| 0
| 0
| 0
| 0.052957
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.034483
| false
| 0
| 0.172414
| 0
| 0.241379
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
f5b575448dfd3070de7e8cc30de61a51b143522f
| 927
|
py
|
Python
|
strategies/forest.py
|
aladics/DeepBugHunter
|
564f2417eafc50e99de60d5d6c0a1b4193d1bf8b
|
[
"Apache-2.0"
] | 6
|
2019-03-01T13:17:09.000Z
|
2022-03-07T04:07:04.000Z
|
strategies/forest.py
|
aladics/DeepBugHunter
|
564f2417eafc50e99de60d5d6c0a1b4193d1bf8b
|
[
"Apache-2.0"
] | null | null | null |
strategies/forest.py
|
aladics/DeepBugHunter
|
564f2417eafc50e99de60d5d6c0a1b4193d1bf8b
|
[
"Apache-2.0"
] | 2
|
2020-08-02T07:36:00.000Z
|
2021-01-13T15:04:00.000Z
|
import os
import math
import argparse
import dbh_util as util
from sklearn.ensemble import RandomForestClassifier
parser = argparse.ArgumentParser()
parser.add_argument('--n-estimators', type=int, default=10, help='The number of trees in the forest')
parser.add_argument('--max-depth', type=int, default=5, help='Max decision tree leaf node depth')
parser.add_argument('--criterion', default='gini', help='Split quality criterion, "gini" or "entropy"')
#
# Random Forest approach
#
def predict(classifier, test, args, sargs_str, threshold=None):
sargs = util.parse(parser, sargs_str.split())
preds = classifier.predict(test[0])
if threshold is not None:
preds = [1 if x >= threshold else 0 for x in preds]
return preds
def learn(train, dev, test, args, sargs_str):
sargs = util.parse(parser, sargs_str.split())
return util.sklearn_wrapper(train, dev, test, RandomForestClassifier(**sargs))
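A hedged sketch of the strategy-argument format consumed above, reusing this file's module-level parser (dbh_util.parse is assumed to behave like plain argparse here):
sargs_str = '--n-estimators 100 --max-depth 8 --criterion entropy'
sargs = vars(parser.parse_args(sargs_str.split()))
print(sargs)  # {'n_estimators': 100, 'max_depth': 8, 'criterion': 'entropy'}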
| 34.333333
| 103
| 0.73247
| 131
| 927
| 5.114504
| 0.51145
| 0.047761
| 0.076119
| 0.047761
| 0.098507
| 0.098507
| 0.098507
| 0
| 0
| 0
| 0
| 0.007576
| 0.145631
| 927
| 26
| 104
| 35.653846
| 0.838384
| 0.023732
| 0
| 0.111111
| 0
| 0
| 0.166482
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.111111
| false
| 0
| 0.277778
| 0
| 0.5
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
f5b7476abd3046a860b7d297b7e32e4ae0dcc3db
| 9,476
|
py
|
Python
|
vitrage_tempest_plugin/tests/e2e/test_overlapping_actions.py
|
openstack/vitrage-tempest-plugin
|
69acc7f3ea26f8c3a652cdf9d1fd842dbf9af58f
|
[
"Apache-2.0"
] | 6
|
2018-08-02T12:11:09.000Z
|
2019-03-05T11:45:09.000Z
|
vitrage_tempest_plugin/tests/e2e/test_overlapping_actions.py
|
openstack/vitrage-tempest-plugin
|
69acc7f3ea26f8c3a652cdf9d1fd842dbf9af58f
|
[
"Apache-2.0"
] | null | null | null |
vitrage_tempest_plugin/tests/e2e/test_overlapping_actions.py
|
openstack/vitrage-tempest-plugin
|
69acc7f3ea26f8c3a652cdf9d1fd842dbf9af58f
|
[
"Apache-2.0"
] | 1
|
2018-08-22T12:29:54.000Z
|
2018-08-22T12:29:54.000Z
|
# Copyright 2017 - Nokia
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import time
from oslo_log import log as logging
from vitrage_tempest_plugin.tests.base import IsEmpty
from vitrage_tempest_plugin.tests.common.constants import DOCTOR_DATASOURCE
from vitrage_tempest_plugin.tests.common.constants import EntityCategory
from vitrage_tempest_plugin.tests.common.constants import VertexProperties \
as VProps
from vitrage_tempest_plugin.tests.common.constants import VITRAGE_DATASOURCE
from vitrage_tempest_plugin.tests.common import general_utils as g_utils
from vitrage_tempest_plugin.tests.common.tempest_clients import TempestClients
from vitrage_tempest_plugin.tests.common import vitrage_utils as v_utils
from vitrage_tempest_plugin.tests.e2e.test_actions_base import TestActionsBase
from vitrage_tempest_plugin.tests import utils
LOG = logging.getLogger(__name__)
TRIGGER_ALARM_1 = 'e2e.test_overlapping_actions.trigger.alarm1'
TRIGGER_ALARM_2 = 'e2e.test_overlapping_actions.trigger.alarm2'
TRIGGER_ALARM_3 = 'e2e.test_overlapping_actions.trigger.alarm3'
TRIGGER_ALARM_4 = 'e2e.test_overlapping_actions.trigger.alarm4'
DEDUCED = 'e2e.test_overlapping_actions.deduced.alarm'
TRIGGER_ALARM_1_PROPS = {
VProps.NAME: TRIGGER_ALARM_1,
VProps.VITRAGE_CATEGORY: EntityCategory.ALARM,
VProps.VITRAGE_TYPE: DOCTOR_DATASOURCE,
}
TRIGGER_ALARM_2_PROPS = {
VProps.NAME: TRIGGER_ALARM_2,
VProps.VITRAGE_CATEGORY: EntityCategory.ALARM,
VProps.VITRAGE_TYPE: DOCTOR_DATASOURCE,
}
DEDUCED_PROPS = {
VProps.NAME: DEDUCED,
VProps.VITRAGE_CATEGORY: EntityCategory.ALARM,
VProps.VITRAGE_TYPE: VITRAGE_DATASOURCE,
}
class TestOverlappingActions(TestActionsBase):
@classmethod
def setUpClass(cls):
super(TestOverlappingActions, cls).setUpClass()
cls._template = v_utils.add_template(
'e2e_test_overlapping_actions.yaml')
@classmethod
def tearDownClass(cls):
if cls._template is not None:
v_utils.delete_template(cls._template['uuid'])
@utils.tempest_logger
def test_overlapping_action_set_state(self):
try:
# Do - first
self._trigger_do_action(TRIGGER_ALARM_1)
curr_host = v_utils.get_first_host()
self.assertEqual(
'ERROR',
curr_host.get(VProps.VITRAGE_AGGREGATED_STATE),
'state should change after set_state action')
# Do - second
self._trigger_do_action(TRIGGER_ALARM_2)
curr_host = v_utils.get_first_host()
self.assertEqual(
'ERROR',
curr_host.get(VProps.VITRAGE_AGGREGATED_STATE),
'state should remain unchanged')
# Undo - first
self._trigger_undo_action(TRIGGER_ALARM_1)
curr_host = v_utils.get_first_host()
self.assertEqual(
'ERROR',
curr_host.get(VProps.VITRAGE_AGGREGATED_STATE),
'state should remain unchanged')
# Undo - second
self._trigger_undo_action(TRIGGER_ALARM_2)
curr_host = v_utils.get_first_host()
self.assertEqual(
self.orig_host.get(VProps.VITRAGE_AGGREGATED_STATE),
curr_host.get(VProps.VITRAGE_AGGREGATED_STATE),
'state should change after undo set_state action')
finally:
self._trigger_undo_action(TRIGGER_ALARM_1)
self._trigger_undo_action(TRIGGER_ALARM_2)
@utils.tempest_logger
def test_overlapping_action_mark_down(self):
try:
host_name = self.orig_host.get(VProps.NAME)
# Do - first
self._trigger_do_action(TRIGGER_ALARM_3)
nova_service = TempestClients.nova().services.list(
host=host_name, binary='nova-compute')[0]
self.assertEqual("down", nova_service.state)
# Do - second
self._trigger_do_action(TRIGGER_ALARM_4)
nova_service = TempestClients.nova().services.list(
host=host_name, binary='nova-compute')[0]
self.assertEqual("down", nova_service.state)
# Undo - first
self._trigger_undo_action(TRIGGER_ALARM_3)
nova_service = TempestClients.nova().services.list(
host=host_name, binary='nova-compute')[0]
self.assertEqual("down", nova_service.state)
# Undo - second
self._trigger_undo_action(TRIGGER_ALARM_4)
nova_service = TempestClients.nova().services.list(
host=host_name, binary='nova-compute')[0]
self.assertEqual("up", nova_service.state)
finally:
self._trigger_undo_action(TRIGGER_ALARM_3)
self._trigger_undo_action(TRIGGER_ALARM_4)
# nova.host datasource may take up to snapshot_interval to update
time.sleep(130)
@utils.tempest_logger
def test_overlapping_action_deduce_alarm(self):
try:
host_id = self.orig_host.get(VProps.VITRAGE_ID)
# Do - first
self._trigger_do_action(TRIGGER_ALARM_1)
self._check_deduced(1, DEDUCED_PROPS, host_id)
# Do - second
self._trigger_do_action(TRIGGER_ALARM_2)
self._check_deduced(1, DEDUCED_PROPS, host_id)
# Undo - first
self._trigger_undo_action(TRIGGER_ALARM_1)
self._check_deduced(1, DEDUCED_PROPS, host_id)
# Undo - second
self._trigger_undo_action(TRIGGER_ALARM_2)
self._check_deduced(0, DEDUCED_PROPS, host_id)
finally:
self._trigger_undo_action(TRIGGER_ALARM_1)
self._trigger_undo_action(TRIGGER_ALARM_2)
@utils.tempest_logger
def test_overlapping_action_add_causal_relationship(self):
try:
# ---- Do first & second ----
self._trigger_do_action(TRIGGER_ALARM_1)
self._trigger_do_action(TRIGGER_ALARM_2)
alarms = self.vitrage_client.alarm.list(
vitrage_id=self.orig_host.get(VProps.VITRAGE_ID),
all_tenants=True)
deduced = g_utils.first_match(alarms, **DEDUCED_PROPS)
trigger1 = g_utils.first_match(alarms, **TRIGGER_ALARM_1_PROPS)
trigger2 = g_utils.first_match(alarms, **TRIGGER_ALARM_2_PROPS)
# Get Rca for the deduced
rca = self.vitrage_client.rca.get(deduced[VProps.VITRAGE_ID],
all_tenants=True)
self._check_rca(rca, [deduced, trigger1, trigger2], DEDUCED_PROPS)
# Get Rca for trigger 1
rca = self.vitrage_client.rca.get(trigger1[VProps.VITRAGE_ID],
all_tenants=True)
self._check_rca(rca, [deduced, trigger1], TRIGGER_ALARM_1_PROPS)
# Get Rca for trigger 2
rca = self.vitrage_client.rca.get(trigger2[VProps.VITRAGE_ID],
all_tenants=True)
self._check_rca(rca, [deduced, trigger2], TRIGGER_ALARM_2_PROPS)
# ---- Undo - first ----
self._trigger_undo_action(TRIGGER_ALARM_1)
alarms = self.vitrage_client.alarm.list(
vitrage_id=self.orig_host.get(VProps.VITRAGE_ID),
all_tenants=True)
deduced = g_utils.first_match(alarms, **DEDUCED_PROPS)
trigger2 = g_utils.first_match(alarms, **TRIGGER_ALARM_2_PROPS)
# Get Rca for the deduced
rca = self.vitrage_client.rca.get(deduced[VProps.VITRAGE_ID],
all_tenants=True)
self._check_rca(rca, [deduced, trigger2], DEDUCED_PROPS)
# Get Rca for trigger 2
rca = self.vitrage_client.rca.get(trigger2[VProps.VITRAGE_ID],
all_tenants=True)
self._check_rca(rca, [deduced, trigger2], TRIGGER_ALARM_2_PROPS)
# ---- Undo - second ----
self._trigger_undo_action(TRIGGER_ALARM_2)
alarms = self.vitrage_client.alarm.list(
vitrage_id=self.orig_host.get(VProps.VITRAGE_ID),
all_tenants=True)
self.assertThat(
g_utils.all_matches(alarms, **TRIGGER_ALARM_1_PROPS),
IsEmpty(),
'trigger alarm 1 should have been removed')
self.assertThat(
g_utils.all_matches(alarms, **TRIGGER_ALARM_2_PROPS),
IsEmpty(),
'trigger alarm 2 should have been removed')
self.assertThat(
g_utils.all_matches(alarms, **DEDUCED_PROPS),
IsEmpty(),
'deduced alarm should have been removed')
finally:
self._trigger_undo_action(TRIGGER_ALARM_1)
self._trigger_undo_action(TRIGGER_ALARM_2)
| 40.495726
| 78
| 0.646264
| 1,113
| 9,476
| 5.165319
| 0.160827
| 0.087667
| 0.075144
| 0.058445
| 0.74326
| 0.695599
| 0.670725
| 0.612454
| 0.53505
| 0.47156
| 0
| 0.012658
| 0.274694
| 9,476
| 233
| 79
| 40.669528
| 0.823803
| 0.100042
| 0
| 0.621951
| 0
| 0
| 0.069822
| 0.029083
| 0
| 0
| 0
| 0
| 0.067073
| 1
| 0.036585
| false
| 0
| 0.073171
| 0
| 0.115854
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
f5b80f86d6e5672de1791e2d08c1fbaf96195a02
| 4,137
|
py
|
Python
|
clone_tests/clone_compilation_errors.py
|
dcz-purism/glib
|
eccd097166cdf7dfea9be17869868d45f8ef4ef6
|
[
"MIT-0",
"MIT"
] | null | null | null |
clone_tests/clone_compilation_errors.py
|
dcz-purism/glib
|
eccd097166cdf7dfea9be17869868d45f8ef4ef6
|
[
"MIT-0",
"MIT"
] | null | null | null |
clone_tests/clone_compilation_errors.py
|
dcz-purism/glib
|
eccd097166cdf7dfea9be17869868d45f8ef4ef6
|
[
"MIT-0",
"MIT"
] | null | null | null |
import json
import os
import subprocess
import sys
TEST_FILENAME = "tmp_py_file"
TEST_FOLDER = "clone_tests"
TESTS = [
("clone!( => move || {})",
"If you have nothing to clone, no need to use this macro!"),
("clone!(|| {})",
"If you have nothing to clone, no need to use this macro!"),
("clone!(|a, b| {})",
"If you have nothing to clone, no need to use this macro!"),
("clone!(@strong self => move |x| {})",
"Can't use `self` as variable name. Try storing it in a temporary variable or rename it using `as`."),
("clone!(@strong self.v => move |x| {})",
"Field accesses are not allowed as is, you must rename it!"),
("clone!(@weak v => @default-return false, || {})",
"Closure needs to be \"moved\" so please add `move` before closure"),
("clone!(@weak v => @default-return false, |bla| {})",
"Closure needs to be \"moved\" so please add `move` before closure"),
("clone!(@weak v => default-return false, move || {})",
"Missing `@` before `default-return`"),
("clone!(@weak v => @default-return false move || {})",
"Missing comma after `@default-return`'s value"),
("clone!(@yolo v => move || {})",
"Unknown keyword, only `weak` and `strong` are allowed"),
("clone!(v => move || {})",
"You need to specify if this is a weak or a strong clone."),
]
def convert_to_string(s):
if s.__class__.__name__ == 'bytes':
return s.decode('utf-8')
return s
def exec_command(command):
child = subprocess.Popen(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
stdout, stderr = child.communicate()
return (child.returncode == 0, convert_to_string(stdout), convert_to_string(stderr))
def run_test(code, expected_str):
with open("{}/{}.rs".format(TEST_FOLDER, TEST_FILENAME), 'w') as f:
f.write('extern crate glib;use glib::clone;use std::rc::Rc;fn main(){{let v = Rc::new(1);{};}}'.format(code))
code, stdout, stderr = exec_command([
"bash",
"-c",
"cd {} && cargo build --message-format json".format(TEST_FOLDER),
])
os.remove("{}/{}.rs".format(TEST_FOLDER, TEST_FILENAME))
if code is True:
return "This isn't supposed to compile!"
parts = stdout.split('}\n{')
compiler_message = None
for (pos, part) in enumerate(parts):
try:
if pos > 0:
part = "{" + part
if pos + 1 < len(parts):
part += "}"
x = json.loads(part)
if (x["reason"] != "compiler-message"
or x["message"]["message"] == "aborting due to previous error"):
continue
compiler_message = x["message"]["message"]
break
except Exception:
continue
if compiler_message is None:
return "Weird issue: no compiler-message found..."
if expected_str not in compiler_message:
return "`{}` not found in `{}`".format(expected_str, compiler_message)
return None
def run_tests():
print("About to start the tests on the clone! macro.")
print("It might be slow to run the first one since cargo has to build dependencies...")
print("")
errors = 0
with open('{}/Cargo.toml'.format(TEST_FOLDER), 'w') as f:
f.write("""[package]
name = "test"
version = "0.0.1"
authors = ["gtk-rs developers"]
[dependencies]
glib = {{ path = ".." }}
[[bin]]
name = "{0}"
path = "{0}.rs"
""".format(TEST_FILENAME))
for (code, expected_str) in TESTS:
sys.stdout.write('Running `{}`...'.format(code))
sys.stdout.flush()
err = run_test(code, expected_str)
if err is not None:
print(" FAILED\n{}".format(err))
errors += 1
else:
print(" OK")
print("Ran {} tests, got {} failure{}".format(len(TESTS), errors, "s" if errors > 1 else ""))
os.remove("{}/Cargo.toml".format(TEST_FOLDER))
os.remove("{}/Cargo.lock".format(TEST_FOLDER))
exec_command(['bash', '-c', 'rm -r {}/target'.format(TEST_FOLDER)])
return errors
if __name__ == "__main__":
sys.exit(run_tests())
| 35.358974
| 117
| 0.578922
| 542
| 4,137
| 4.319188
| 0.335793
| 0.034173
| 0.047843
| 0.029047
| 0.248612
| 0.186245
| 0.148654
| 0.148654
| 0.127296
| 0.127296
| 0
| 0.004187
| 0.249456
| 4,137
| 116
| 118
| 35.663793
| 0.749758
| 0
| 0
| 0.068627
| 0
| 0.019608
| 0.427363
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.039216
| false
| 0
| 0.039216
| 0
| 0.156863
| 0.058824
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
f5b9371efb3fb18aace487077f47abfd7957e4b2
| 2,437
|
py
|
Python
|
tests/test_tags.py
|
wbcsmarteezgithub/django-snakeoil
|
ae1a8dab9e14194e48963101ff3349f45aee0ccf
|
[
"BSD-2-Clause"
] | 1
|
2020-07-03T15:52:25.000Z
|
2020-07-03T15:52:25.000Z
|
tests/test_tags.py
|
wbcsmarteezgithub/django-snakeoil
|
ae1a8dab9e14194e48963101ff3349f45aee0ccf
|
[
"BSD-2-Clause"
] | null | null | null |
tests/test_tags.py
|
wbcsmarteezgithub/django-snakeoil
|
ae1a8dab9e14194e48963101ff3349f45aee0ccf
|
[
"BSD-2-Clause"
] | null | null | null |
from __future__ import unicode_literals
from django.http import HttpRequest
from django.template import Context, Template, TemplateSyntaxError
from django.test import TestCase
from snakeoil.models import SeoUrl
from .models import TestModel
class GetSeoDataTagTests(TestCase):
def test_invalid_syntax(self):
request = HttpRequest()
request.path = '/'
with self.assertRaises(TemplateSyntaxError):
Template(
'{% load snakeoil %}'
'{% get_seo_data spam %}'
'{{ seo.head_title }}'
'{{ seo.meta_description }}'
).render(Context({'request': request}))
def test_no_data(self):
request = HttpRequest()
request.path = '/'
out = Template(
'{% load snakeoil %}'
'{% get_seo_data %}'
'{{ seo.head_title }}'
'{{ seo.meta_description }}'
).render(Context({'request': request}))
self.assertEqual(out, '')
def test_data_from_url(self):
SeoUrl.objects.create(url='/', head_title='spam',
meta_description='eggs')
request = HttpRequest()
request.path = '/'
out = Template(
'{% load snakeoil %}'
'{% get_seo_data %}'
'{{ seo.head_title }}'
'{{ seo.meta_description }}'
).render(Context({'request': request}))
self.assertEqual(out, 'spameggs')
def test_as_parameter(self):
SeoUrl.objects.create(url='/', head_title='spam',
meta_description='eggs')
request = HttpRequest()
request.path = '/'
out = Template(
'{% load snakeoil %}'
'{% get_seo_data as spam %}'
'{{ spam.head_title }}'
'{{ spam.meta_description }}'
).render(Context({'request': request}))
self.assertEqual(out, 'spameggs')
def test_data_from_model(self):
obj = TestModel.objects.create(head_title='spam',
meta_description='eggs')
request = HttpRequest()
request.path = '/'
out = Template(
'{% load snakeoil %}'
'{% get_seo_data %}'
'{{ seo.head_title }}'
'{{ seo.meta_description }}'
).render(Context({'request': request, 'obj': obj}))
self.assertEqual(out, 'spameggs')
| 29.719512
| 66
| 0.531801
| 219
| 2,437
| 5.721461
| 0.223744
| 0.057462
| 0.099761
| 0.115722
| 0.66241
| 0.622506
| 0.598563
| 0.598563
| 0.598563
| 0.598563
| 0
| 0
| 0.330735
| 2,437
| 81
| 67
| 30.08642
| 0.76824
| 0
| 0
| 0.666667
| 0
| 0
| 0.214608
| 0.008617
| 0
| 0
| 0
| 0
| 0.079365
| 1
| 0.079365
| false
| 0
| 0.095238
| 0
| 0.190476
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
f5ba98b5a8a467c1237f20ea32bee34cf54cde58
| 420
|
py
|
Python
|
test/nn/conv/test_gravnet_conv.py
|
shrey-bansal/pytorch_geometric
|
17108a08066b0a73530544d01719b186f2625ef2
|
[
"MIT"
] | 2
|
2020-09-08T15:22:08.000Z
|
2020-09-08T15:22:09.000Z
|
test/nn/conv/test_gravnet_conv.py
|
shrey-bansal/pytorch_geometric
|
17108a08066b0a73530544d01719b186f2625ef2
|
[
"MIT"
] | null | null | null |
test/nn/conv/test_gravnet_conv.py
|
shrey-bansal/pytorch_geometric
|
17108a08066b0a73530544d01719b186f2625ef2
|
[
"MIT"
] | 1
|
2021-07-06T06:50:21.000Z
|
2021-07-06T06:50:21.000Z
|
import torch
from torch_geometric.nn import GravNetConv
def test_gravnet_conv():
num_nodes, in_channels, out_channels = 20, 16, 32
x = torch.randn((num_nodes, in_channels))
conv = GravNetConv(in_channels, out_channels, space_dimensions=4,
propagate_dimensions=8, k=12)
assert conv.__repr__() == 'GravNetConv(16, 32, k=12)'
assert conv(x).size() == (num_nodes, out_channels)
| 32.307692
| 69
| 0.688095
| 58
| 420
| 4.672414
| 0.517241
| 0.088561
| 0.073801
| 0.132841
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.047478
| 0.197619
| 420
| 12
| 70
| 35
| 0.756677
| 0
| 0
| 0
| 0
| 0
| 0.059524
| 0
| 0
| 0
| 0
| 0
| 0.222222
| 1
| 0.111111
| false
| 0
| 0.222222
| 0
| 0.333333
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
f5baf25c3fc1ee4bca1c0e0df333ed41bd65f476
| 2,216
|
py
|
Python
|
base/CrossPlotter.py
|
pulsatrixwx/PulsatrixWx
|
aae6ac36e2460dcf7f4a592d709139cd0d6a2e91
|
[
"MIT"
] | 3
|
2016-03-27T00:21:46.000Z
|
2018-06-01T09:20:57.000Z
|
base/CrossPlotter.py
|
pulsatrixwx/PulsatrixWx
|
aae6ac36e2460dcf7f4a592d709139cd0d6a2e91
|
[
"MIT"
] | null | null | null |
base/CrossPlotter.py
|
pulsatrixwx/PulsatrixWx
|
aae6ac36e2460dcf7f4a592d709139cd0d6a2e91
|
[
"MIT"
] | null | null | null |
from datetime import datetime
from hootpy import HootPy
class CrossPlotter(HootPy):
"""
CrossPlotter
Purpose: Handles the plotting of cross section products.
Started: 14 June 2010 by Tim Supinie (tsupinie@ou.edu)
Completed: [not yet]
Modified: [not yet]
"""
def __init__(self, config):
"""
__init__()
Purpose: Constructor for the CrossPlotter class.
Parameters: config [type=dictionary]
Dictionary containing configuration parameters for the run.
"""
super(CrossPlotter, self).__init__(config)
return
def loadData(self):
"""
loadData() [public]
Purpose: Handles the loading in of data.
Parameters: [none]
Returns: [nothing]
"""
return
def plot(self):
"""
plot() [public]
Purpose: Plot cross section products. For model products, plots products for all forecast hours.
Parameters: [none]
Returns: [nothing]
"""
if self._forecast_hours is None:
# Plot cross section here ...
self._finalizeCrossSection(None)
else:
for fh in self._forecast_hours:
# Plot the cross section here ...
self._finalizeCrossSection(fh)
return
def _finalizeCrossSection(self, forecast_hour):
"""
_finalizeCrossSection() [protected]
Purpose: Add final things to the profile, such as the background,
title, valid time, and image border, and then save the image.
Parameters: forecast_hour [type=int]
Forecast hour for model products (pass in None for an observed product).
Returns: [nothing]
"""
# Finish creating the product. Should be last.
self._finalizeProduct(forecast_hour)
return
if __name__ == "__main__":
cfg = {
'forecast_hours':[0, 3, 6, 9, 12],
'product_title':"NAM Forecast Cross Section KDRT-KGRB",
'image_file_name':"nam_fcross_KDRT-KGRB_f%02d.png"
}
hpc = CrossPlotter(cfg)
hpc.loadData()
hpc.plot()
| 28.410256
| 108
| 0.581679
| 229
| 2,216
| 5.458515
| 0.471616
| 0.048
| 0.0272
| 0.0448
| 0.064
| 0
| 0
| 0
| 0
| 0
| 0
| 0.009472
| 0.333032
| 2,216
| 77
| 109
| 28.779221
| 0.836265
| 0.474729
| 0
| 0.148148
| 0
| 0
| 0.12527
| 0.032397
| 0
| 0
| 0
| 0
| 0
| 1
| 0.148148
| false
| 0
| 0.074074
| 0
| 0.407407
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
f5bb1ebe52102d71c8810bac844699880019ddf3
| 3,072
|
py
|
Python
|
management/commands/syncldap.py
|
LUH-CHI/chiffee
|
78ec85d36a6c757e5f56113089f1b56fdb0ed494
|
[
"MIT"
] | 1
|
2018-03-22T09:53:06.000Z
|
2018-03-22T09:53:06.000Z
|
management/commands/syncldap.py
|
LUH-CHI/chiffee
|
78ec85d36a6c757e5f56113089f1b56fdb0ed494
|
[
"MIT"
] | 4
|
2019-04-01T08:44:40.000Z
|
2020-02-07T17:44:16.000Z
|
management/commands/syncldap.py
|
LUH-CHI/chiffee
|
78ec85d36a6c757e5f56113089f1b56fdb0ed494
|
[
"MIT"
] | 4
|
2018-05-04T12:01:50.000Z
|
2019-10-11T09:47:33.000Z
|
import logging
import ldap
from django.conf import settings
from django.contrib.auth.models import Group
from django.core.management.base import BaseCommand
from django_auth_ldap.backend import LDAPBackend
from chiffee.models import User
logger = logging.getLogger('syncldap')
# This command synchronizes local database with the LDAP server.
# New LDAP user -> new user in the local database.
# Deleted LDAP user -> local user is set to inactive.
class Command(BaseCommand):
help = 'Syncing local users with LDAP... '
def handle(self, *args, **options):
self.populate_db()
self.find_inactive_user()
# Find all users in LDAP and add them to the database if needed.
def populate_db(self):
connection = ldap.initialize(settings.AUTH_LDAP_SERVER_URI)
connection.simple_bind_s(settings.AUTH_LDAP_BIND_DN,
settings.AUTH_LDAP_BIND_PASSWORD)
filter_ = '(&(uid=*))' # Customize this if necessary.
ldap_users = connection.search_s(settings.BASE_DN,
ldap.SCOPE_SUBTREE,
filter_)
connection.unbind()
for ldap_user in ldap_users:
username = ldap_user[1]['uid'][0].decode('UTF-8')
if not User.objects.filter(username=username).exists():
logger.info('Adding new user %s...' % username)
user = LDAPBackend().populate_user(
ldap_user[1]['uid'][0].decode('UTF-8'))
user.is_active = True
# Add a single group to the user.
# When group information is not stored as part of the user info,
# code needs to be modified.
try:
groups = ldap_user[1]['group']
except KeyError:
logger.info(
'User could not be added to a group and won\'t be able to '
'purchase anything.')
continue
groups = [g.decode('UTF-8') for g in groups]
self.add_user_to_group(user, groups)
user.save()
# A user should belong to only one group.
# Group priority: professors > employees > students
def add_user_to_group(self, user, groups):
if 'professors' in groups:
group_name = 'professors'
elif 'employees' in groups:
group_name = 'employees'
else:
group_name = 'students'
group = Group.objects.get(name=group_name)
if len(user.groups.all()) == 0:
group.user_set.add(user)
else:
user.groups.clear()
group.user_set.add(user)
# Mark all users with no LDAP entry inactive.
def find_inactive_user(self):
for user in User.objects.filter(is_active=True):
ldap_user = LDAPBackend().populate_user(user.username)
if ldap_user is None and not user.is_superuser:
logger.info('User %s set to inactive.' % user)
user.is_active = False
user.save()
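The group-priority rule in add_user_to_group reduces to a first-match lookup; the same logic as a standalone snippet:
def pick_group(groups):
    # professors > employees > students, matching the priority above
    for name in ('professors', 'employees'):
        if name in groups:
            return name
    return 'students'

assert pick_group(['students', 'professors']) == 'professors'
assert pick_group([]) == 'students'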
| 36.571429
| 79
| 0.595378
| 380
| 3,072
| 4.681579
| 0.344737
| 0.035975
| 0.026981
| 0.022485
| 0.047218
| 0.025857
| 0.025857
| 0.025857
| 0
| 0
| 0
| 0.004282
| 0.315755
| 3,072
| 83
| 80
| 37.012048
| 0.842055
| 0.166341
| 0
| 0.101695
| 0
| 0
| 0.089839
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.067797
| false
| 0.016949
| 0.118644
| 0
| 0.220339
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
f5beb267f6635aef6117ff273b49cdca310125ca
| 367
|
py
|
Python
|
jp.atcoder/abc045/abc045_b/8983851.py
|
kagemeka/atcoder-submissions
|
91d8ad37411ea2ec582b10ba41b1e3cae01d4d6e
|
[
"MIT"
] | 1
|
2022-02-09T03:06:25.000Z
|
2022-02-09T03:06:25.000Z
|
jp.atcoder/abc045/abc045_b/8983851.py
|
kagemeka/atcoder-submissions
|
91d8ad37411ea2ec582b10ba41b1e3cae01d4d6e
|
[
"MIT"
] | 1
|
2022-02-05T22:53:18.000Z
|
2022-02-09T01:29:30.000Z
|
jp.atcoder/abc045/abc045_b/8983851.py
|
kagemeka/atcoder-submissions
|
91d8ad37411ea2ec582b10ba41b1e3cae01d4d6e
|
[
"MIT"
] | null | null | null |
import sys
from collections import deque
a, b, c = sys.stdin.read().split()
def main():
deck = dict([("a", deque(a)), ("b", deque(b)), ("c", deque(c))])
p = "a"
while True:
if deck[p]:
p = deck[p].popleft()
else:
return p.upper()
if __name__ == "__main__":
ans = main()
print(ans)
| 17.47619
| 69
| 0.46594
| 48
| 367
| 3.395833
| 0.541667
| 0.07362
| 0.08589
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.348774
| 367
| 20
| 70
| 18.35
| 0.682008
| 0
| 0
| 0
| 0
| 0
| 0.034582
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.071429
| false
| 0
| 0.142857
| 0
| 0.285714
| 0.071429
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
f5bed273a043f28510a7c31520baff8cb6ddab43
| 16,504
|
py
|
Python
|
src/pipelines/azureml/lightgbm_training.py
|
microsoft/lightgbm-benchmark
|
286668d698d9d166857f924ecb775d5de224d489
|
[
"MIT"
] | 13
|
2021-08-20T01:03:51.000Z
|
2022-02-12T05:34:46.000Z
|
src/pipelines/azureml/lightgbm_training.py
|
microsoft/lightgbm-benchmark
|
286668d698d9d166857f924ecb775d5de224d489
|
[
"MIT"
] | 199
|
2021-08-21T21:18:53.000Z
|
2022-03-27T23:08:44.000Z
|
src/pipelines/azureml/lightgbm_training.py
|
microsoft/lightgbm-benchmark
|
286668d698d9d166857f924ecb775d5de224d489
|
[
"MIT"
] | 4
|
2021-08-20T06:53:26.000Z
|
2022-01-24T22:22:39.000Z
|
"""
Runs LightGBM using distributed (mpi) training.
to execute:
> python src/pipelines/azureml/lightgbm_training.py --exp-config conf/experiments/lightgbm_training/cpu.yaml
"""
# pylint: disable=no-member
# NOTE: because it raises 'dict' has no 'outputs' member in dsl.pipeline construction
import os
import sys
import json
import logging
import argparse
# config management
from dataclasses import dataclass
from omegaconf import OmegaConf, MISSING
from typing import Optional, Any, List
# AzureML
from azure.ml.component import Component
from azure.ml.component import dsl
from azure.ml.component.environment import Docker
# when running this script directly, this path hack is needed to import common
LIGHTGBM_REPO_ROOT = os.path.abspath(os.path.join(os.path.dirname(__file__), '..', '..', '..'))
SCRIPTS_SOURCES_ROOT = os.path.join(LIGHTGBM_REPO_ROOT, 'src')
if SCRIPTS_SOURCES_ROOT not in sys.path:
logging.info(f"Adding {SCRIPTS_SOURCES_ROOT} to path")
sys.path.append(str(SCRIPTS_SOURCES_ROOT))
from common.tasks import training_task, training_variant
from common.sweep import SweepParameterParser
from common.aml import load_dataset_from_data_input_spec
from common.aml import apply_sweep_settings
from common.pipelines import (
parse_pipeline_config,
azureml_connect,
pipeline_submit,
COMPONENTS_ROOT
)
### CONFIG DATACLASS ###
# Step 1 : to configure your pipeline, add all your fields inside a
# properly defined dataclass, pipeline_cli_main will figure out how
# to read that config from a given yaml file + hydra override commands
@dataclass
class lightgbm_training_config: # pylint: disable=invalid-name
""" Config object constructed as a dataclass.
NOTE: the name of this class will be used as namespace in your config yaml file.
"""
# NOTE: all those values are REQUIRED in your yaml config file
benchmark_name: str = MISSING
# INPUT DATASETS
tasks: List[training_task] = MISSING
# TRAINING PARAMS
reference: training_variant = MISSING
# free changing parameters on top of reference
variants: Optional[Any] = None
### PIPELINE COMPONENTS ###
# Step 2 : your pipeline consists in assembling components
# load those components from local yaml specifications
# use COMPONENTS_ROOT as base folder
lightgbm_train_module = Component.from_yaml(yaml_file=os.path.join(COMPONENTS_ROOT, "training", "lightgbm_python", "spec.yaml"))
lightgbm_train_sweep_module = Component.from_yaml(yaml_file=os.path.join(COMPONENTS_ROOT, "training", "lightgbm_python", "sweep_spec.yaml"))
partition_data_module = Component.from_yaml(yaml_file=os.path.join(COMPONENTS_ROOT, "data_processing", "partition_data", "spec.yaml"))
lightgbm_data2bin_module = Component.from_yaml(yaml_file=os.path.join(COMPONENTS_ROOT, "data_processing", "lightgbm_data2bin", "spec.yaml"))
### PIPELINE SPECIFIC CODE ###
def process_sweep_parameters(params_dict, sweep_algorithm):
"""Parses config and spots sweepable paraneters
Args:
params_dict (dict): configuration object (see get_config_class())
sweep_algorithm (str): random, grid, bayesian
Returns:
tunable_params (dict)
"""
# the class below automates parsing of sweepable parameters
sweep_param_parser = SweepParameterParser(
tunable_parameters=[
# those are keys and their default values
"num_iterations",
"num_leaves",
"min_data_in_leaf",
"learning_rate",
"max_bin",
"feature_fraction"
],
cli_prefix=None, # this is not argparse
parameter_sampling=sweep_algorithm
)
# provide config as a dictionary to the parser
sweep_parameters = {
"num_iterations": params_dict['num_iterations'],
"num_leaves": params_dict['num_leaves'],
"min_data_in_leaf": params_dict['min_data_in_leaf'],
"learning_rate": params_dict['learning_rate'],
"max_bin": params_dict['max_bin'],
"feature_fraction": params_dict['feature_fraction'],
}
    # let the parser split tunable from fixed parameters
sweep_param_parser.parse_from_dict(sweep_parameters)
# and return params as we want them
tunable_params = sweep_param_parser.get_tunable_params()
fixed_params = sweep_param_parser.get_fixed_params()
# return dictionaries to fed as params into our pipeline
return tunable_params, fixed_params
### TRAINING PIPELINE ###
# Step 3: your pipeline consists in creating a python function
# decorated with @dsl.pipeline.
# You can create as many subgraphs as you want,
# but `pipeline_cli_main` will need one pipeline function
# taking a single config argument, not a pipeline parameter.
# Here you should create an instance of a pipeline function (using your custom config dataclass)
@dsl.pipeline(
name="lightgbm_training", # pythonic name
description="LightGBM distributed training (mpi)",
non_pipeline_parameters=['config', 'benchmark_custom_properties']
)
def lightgbm_training_pipeline_function(config,
benchmark_custom_properties,
train_dataset,
test_dataset):
"""Pipeline function for this graph.
Args:
TODO
Returns:
dict[str->PipelineOutputData]: a dictionary of your pipeline outputs
for instance to be consumed by other graphs
"""
# create list of all variants params
training_variants_params = [
config.lightgbm_training_config.reference
]
# if there's any variant specified
if config.lightgbm_training_config.variants:
# create distinct training params for each variant
for variant_index, training_variant_config in enumerate(config.lightgbm_training_config.variants):
# create a specific dict of params for the variant
variant_config = OmegaConf.merge(config.lightgbm_training_config.reference, training_variant_config)
training_variants_params.append(variant_config)
# for each variant, check if sweep needs to be applied
for variant_index, variant_params in enumerate(training_variants_params):
############
### DATA ###
############
# if we're using multinode, add partitioning
if variant_params.data.auto_partitioning and (variant_params.training.tree_learner == "data" or variant_params.training.tree_learner == "voting"):
# if using data parallel, train data has to be partitioned first
if (variant_params.runtime.nodes * variant_params.runtime.processes) > 1:
partition_data_step = partition_data_module(
input_data=train_dataset,
mode="roundrobin",
number=(variant_params.runtime.nodes * variant_params.runtime.processes),
header=variant_params.data.header,
verbose=variant_params.training.verbose
)
partition_data_step.runsettings.configure(target=config.compute.linux_cpu)
partitioned_train_data = partition_data_step.outputs.output_data
else:
# for other modes, train data has to be one file
partitioned_train_data = train_dataset
else:
# for other modes, train data has to be one file
partitioned_train_data = train_dataset
# convert into binary files
if variant_params.data.pre_convert_to_binary:
convert_data2bin_step = lightgbm_data2bin_module(
train=partitioned_train_data,
test=test_dataset,
header=variant_params.data.header,
label_column=variant_params.data.label_column,
group_column=variant_params.data.group_column,
max_bin=variant_params.training.max_bin,
custom_params=json.dumps(dict(variant_params.training.custom_params or {})),
verbose=variant_params.training.verbose
)
convert_data2bin_step.runsettings.configure(target=config.compute.linux_cpu)
prepared_train_data = convert_data2bin_step.outputs.output_train
prepared_test_data = convert_data2bin_step.outputs.output_test
else:
prepared_train_data = partitioned_train_data
prepared_test_data = test_dataset
################
### TRAINING ###
################
# copy params into dict for flexibility
training_params = dict(variant_params.training)
# add all data-related params
training_params['header'] = variant_params.data.header
training_params['label_column'] = variant_params.data.label_column
training_params['group_column'] = variant_params.data.group_column
# extract and construct "sweepable" params
if variant_params.sweep:
tunable_params, fixed_params = process_sweep_parameters(
variant_params.training,
variant_params.sweep.algorithm
)
# test if we have sweepable parameters in the learning params
if len(tunable_params) > 0:
use_sweep = True
training_params.update(tunable_params)
else:
use_sweep = False
else:
use_sweep = False
# create custom properties and serialize to pass as argument
variant_custom_properties = {
'variant_index': variant_index,
'framework': "lightgbm",
'framework_build': variant_params.runtime.build,
}
variant_custom_properties.update(benchmark_custom_properties)
training_params['custom_properties'] = json.dumps(variant_custom_properties)
# serialize custom_params to pass as argument
training_params['custom_params'] = json.dumps(dict(variant_params.training.custom_params or {}))
# some debug outputs to expose variant parameters
print(f"*** lightgbm variant#{variant_index}: {training_params}")
# figuring out target (cpu/gpu)
training_target = variant_params.runtime.target
if not training_target:
if (variant_params.training.device_type == 'gpu' or variant_params.training.device_type == 'cuda'):
training_target = config.compute.linux_gpu
else:
training_target = config.compute.linux_cpu
if use_sweep:
# sweep training
if variant_params.sweep.primary_metric is None:
variant_params.sweep.primary_metric=f"node_0/valid_0.{variant_params.training.metric}"
lightgbm_train_step = lightgbm_train_sweep_module(
train = prepared_train_data,
test = prepared_test_data,
**training_params
)
# apply runsettings
lightgbm_train_step.runsettings.target=training_target
lightgbm_train_step.runsettings.resource_layout.node_count = variant_params.runtime.nodes
lightgbm_train_step.runsettings.resource_layout.process_count_per_node = variant_params.runtime.processes
# apply settings from our custom yaml config
apply_sweep_settings(lightgbm_train_step, variant_params.sweep)
else:
# regular training, no sweep
lightgbm_train_step = lightgbm_train_module(
train = prepared_train_data,
test = prepared_test_data,
**training_params
)
# apply runsettings
lightgbm_train_step.runsettings.target=training_target
lightgbm_train_step.runsettings.resource_layout.node_count = variant_params.runtime.nodes
lightgbm_train_step.runsettings.resource_layout.process_count_per_node = variant_params.runtime.processes
###############
### RUNTIME ###
###############
        # optional: override docker (ex: to test custom builds)
if 'build' in variant_params.runtime and variant_params.runtime.build:
custom_docker = Docker(file=os.path.join(LIGHTGBM_REPO_ROOT, variant_params.runtime.build))
lightgbm_train_step.runsettings.environment.configure(
docker=custom_docker
)
##############
### OUTPUT ###
##############
# add some relevant comments on the component
lightgbm_train_step.comment = " -- ".join(
[
f"variant #{variant_index}",
# add more
]
)
# optional: save output model
if variant_params.output and variant_params.output.register_model:
# "{register_model_prefix}-{task_key}-{num_iterations}trees-{num_leaves}leaves-{register_model_suffix}"
model_basename = "{num_iterations}trees-{num_leaves}leaves".format(
num_iterations=variant_params.training.num_iterations,
num_leaves=variant_params.training.num_leaves
)
# prepend task_key if given
if benchmark_custom_properties.get('benchmark_task_key', None):
model_basename = benchmark_custom_properties['benchmark_task_key'] + "-" + model_basename
# prepend prefix if given
if variant_params.output.register_model_prefix:
model_basename = variant_params.output.register_model_prefix + "-" + model_basename
# append suffix if given
if variant_params.output.register_model_suffix:
model_basename += "-" + variant_params.output.register_model_suffix
print(f"*** Will output model at {model_basename}")
# auto-register output with model basename
lightgbm_train_step.outputs.model.register_as(
name=model_basename,
create_new_version=True
)
# return {key: output}'
return {}
# creating an overall pipeline using pipeline_function for each task given
@dsl.pipeline(
name="training_all_tasks",
non_pipeline_parameters=['workspace', 'config']
)
def training_all_tasks(workspace, config):
# loop on all training tasks
for training_task in config.lightgbm_training_config.tasks:
# load the given train dataset
train_data = load_dataset_from_data_input_spec(workspace, training_task.train)
test_data = load_dataset_from_data_input_spec(workspace, training_task.test)
# create custom properties for this task
# they will be passed on to each job as tags
benchmark_custom_properties = {
'benchmark_name' : config.lightgbm_training_config.benchmark_name,
'benchmark_task_key' : training_task.task_key
}
# call pipeline_function as a subgraph here
training_task_subgraph_step = lightgbm_training_pipeline_function(
# NOTE: benchmark_custom_properties is not an actual pipeline input, just passed to the python code
config=config,
benchmark_custom_properties=benchmark_custom_properties,
train_dataset=train_data,
test_dataset=test_data
)
# add some relevant comments on the subgraph
training_task_subgraph_step.comment = " -- ".join([
"LightGBM training pipeline",
f"benchmark name: {config.lightgbm_training_config.benchmark_name}",
f"benchmark task key: {training_task.task_key}"
])
### MAIN BLOCK ###
# Step 4: implement main block using helper functions
def main():
# use parse helper function to get arguments from CLI
config = parse_pipeline_config(lightgbm_training_config)
# you'll need a workspace object to connect
workspace = azureml_connect(config)
# run the pipeline function with the given arguments
pipeline_instance = training_all_tasks(workspace, config)
# generate a nice markdown description
experiment_description="\n".join([
"Training on all specified tasks (see yaml below).",
"```yaml""",
"data_generation_config:",
OmegaConf.to_yaml(config.lightgbm_training_config),
"```"
])
# validate/submit the pipeline (if run.submit=True)
pipeline_submit(
workspace,
config,
pipeline_instance,
experiment_description=experiment_description
)
if __name__ == "__main__":
main()
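The variant expansion above leans on OmegaConf.merge to overlay a sparse variant config onto the reference; in isolation, with toy keys:
from omegaconf import OmegaConf

reference = OmegaConf.create({"training": {"num_leaves": 31, "learning_rate": 0.1}})
variant = OmegaConf.create({"training": {"num_leaves": 255}})
merged = OmegaConf.merge(reference, variant)
print(merged.training.num_leaves, merged.training.learning_rate)  # 255 0.1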
| 39.961259
| 154
| 0.673534
| 1,882
| 16,504
| 5.6339
| 0.195537
| 0.058851
| 0.027728
| 0.023767
| 0.311421
| 0.229935
| 0.1874
| 0.161841
| 0.117514
| 0.117514
| 0
| 0.001209
| 0.248303
| 16,504
| 412
| 155
| 40.058252
| 0.853458
| 0.249636
| 0
| 0.134199
| 0
| 0
| 0.098739
| 0.021158
| 0
| 0
| 0
| 0.002427
| 0
| 1
| 0.017316
| false
| 0
| 0.069264
| 0
| 0.116883
| 0.008658
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
f5bf990b580312d748c5534bd056ce7638df5fe7
| 3,319
|
py
|
Python
|
twinfield/metadata.py
|
zypp-io/twinfield
|
b4306e79f514ae691584c2d47ce072a3619469b8
|
[
"Apache-2.0"
] | 4
|
2020-12-20T23:02:33.000Z
|
2022-01-13T19:40:13.000Z
|
twinfield/metadata.py
|
zypp-io/twinfield
|
b4306e79f514ae691584c2d47ce072a3619469b8
|
[
"Apache-2.0"
] | 9
|
2020-12-18T07:27:07.000Z
|
2022-02-17T09:23:51.000Z
|
twinfield/metadata.py
|
zypp-io/twinfield
|
b4306e79f514ae691584c2d47ce072a3619469b8
|
[
"Apache-2.0"
] | null | null | null |
from xml.etree import ElementTree as Et

import pandas as pd
import requests

from twinfield.core import Base
from twinfield.exceptions import ServerError
from twinfield.messages import METADATA_XML


class Metadata(Base):
    def __init__(self, access_token: str, code: str, company: str):
        """
        This class is for building the Browse SOAP requests for getting metadata of browse codes

        Parameters
        ----------
        access_token: str
            access token obtained from the TwinfieldLogin class.
        code: str
            the browse code for which we want to get the metadata
        company: str
            the office code of the request
        """
        super().__init__()
        self.browsecode = code
        self.access_token = access_token
        self.company = company

    def create_metadata_query(self) -> str:
        """
        Returns
        -------
        columns: str
            combination of fields and filters that together make up the <columns> section in
            the XML template.
        """
        metadata_request = f"""<read>
            <type>browse</type>
            <code>{self.browsecode}</code>
        </read>"""
        return metadata_request

    def body(self) -> str:
        """
        Returns
        -------
        body: str
            the full XML SOAP message for the request. The body is built from a base template,
            string-formatted with the access token, the company and the columns.
        """
        xml = self.create_metadata_query()
        body = METADATA_XML.format(self.access_token, self.company, xml)
        return body

    def parse_metadata_response(self, response: requests.Response) -> pd.DataFrame:
        """
        Parameters
        ----------
        response
            Response object containing the twinfield server response

        Returns
        -------
        df: pd.DataFrame
            dataframe of metadata
        """
        root = Et.fromstring(response.text)
        body = root.find("env:Body", self.namespaces)
        # explicit None check: a found Element with no children evaluates as falsy
        if body.find("env:Fault", self.namespaces) is not None:
            raise ServerError()
        data = body.find("tw:ProcessXmlStringResponse/tw:ProcessXmlStringResult", self.namespaces)
        data = Et.fromstring(data.text)
        col = data.find("columns")
        rec = []
        for records in col:
            ttl = {}
            for record in records:
                ttl[record.tag] = record.text
            rec.append(ttl)
        df = pd.DataFrame(rec)
        return df

    def send_request(self, cluster) -> pd.DataFrame:
        """
        Parameters
        ----------
        cluster: cluster obtained from TwinfieldApi class

        Returns
        -------
        df: pd.DataFrame
            dataframe containing the records.
        """
        body = self.body()
        response = requests.post(
            url=f"{cluster}/webservices/processxml.asmx?wsdl",
            headers={"Content-Type": "text/xml", "Accept-Charset": "utf-8"},
            data=body,
        )
        metadata = self.parse_metadata_response(response)
        # fall back to the field name when no label is provided
        metadata.loc[metadata.label.isna(), "label"] = metadata.field
        metadata.set_index("field", inplace=True)
        return metadata
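A self-contained sketch of the <columns> parsing pattern used in parse_metadata_response, run against a made-up XML snippet (no Twinfield credentials involved):

from xml.etree import ElementTree as Et
import pandas as pd

sample = """<metadata>
    <columns>
        <column><field>fin.trs.head.office</field><label>Office</label></column>
        <column><field>fin.trs.head.code</field><label>Code</label></column>
    </columns>
</metadata>"""

root = Et.fromstring(sample)
# each <column> child becomes one dict row, keyed by its child tags
rows = [{child.tag: child.text for child in column} for column in root.find("columns")]
print(pd.DataFrame(rows))  # two-row dataframe with 'field' and 'label' columns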
| 27.658333
| 98
| 0.577584
| 355
| 3,319
| 5.31831
| 0.377465
| 0.034958
| 0.023835
| 0.023305
| 0.03072
| 0
| 0
| 0
| 0
| 0
| 0
| 0.000446
| 0.323893
| 3,319
| 119
| 99
| 27.890756
| 0.840909
| 0.286231
| 0
| 0
| 0
| 0
| 0.133234
| 0.061912
| 0
| 0
| 0
| 0
| 0
| 1
| 0.102041
| false
| 0
| 0.122449
| 0
| 0.326531
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
f5c4f96d849731c4a186b3fef06e21bef4391f32
| 1,177
|
py
|
Python
|
test/device/test_brakes.py
|
uOstar/barista
|
ab62ec6320fb9b5e9c305f23be7fc7e828c25ab1
|
[
"MIT"
] | 4
|
2017-11-05T19:37:23.000Z
|
2018-06-18T13:18:11.000Z
|
test/device/test_brakes.py
|
uOstar/barista
|
ab62ec6320fb9b5e9c305f23be7fc7e828c25ab1
|
[
"MIT"
] | 24
|
2017-11-05T19:22:08.000Z
|
2018-06-14T13:50:39.000Z
|
test/device/test_brakes.py
|
uorocketry/barista
|
ab62ec6320fb9b5e9c305f23be7fc7e828c25ab1
|
[
"MIT"
] | 1
|
2022-03-25T04:01:25.000Z
|
2022-03-25T04:01:25.000Z
|
import pytest
from mock import patch

from app.device.brakes import Brakes
from app.utils.servo import Servo
from app.utils.exceptions import InvalidArguments


@patch.object(Servo, 'write')
@patch.object(Servo, '__init__')
def test_init_creates_servo_on_pin_21(servo_init_mock, servo_write_mock):
    servo_init_mock.return_value = None
    servo_write_mock.return_value = None
    brakes = Brakes()
    servo_init_mock.assert_called_once_with(21)
    servo_write_mock.assert_called_once_with(0)


@patch.object(Servo, 'write')
@patch.object(Servo, '__init__')
def test_write_full_close_is_20_percent(servo_init_mock, servo_write_mock):
    servo_init_mock.return_value = None
    servo_write_mock.return_value = None
    brakes = Brakes()
    brakes.deploy(0)
    servo_write_mock.assert_called_with(0.2)
    assert brakes.percentage == 0


@patch.object(Servo, 'write')
@patch.object(Servo, '__init__')
def test_write_full_open(servo_init_mock, servo_write_mock):
    servo_init_mock.return_value = None
    servo_write_mock.return_value = None
    brakes = Brakes()
    brakes.deploy(1.0)
    servo_write_mock.assert_called_with(1.0)
    assert brakes.percentage == 1.0
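Read together, the three tests pin down a small contract. A minimal Brakes sketch consistent with them could look like the following; the linear 0.2-to-1.0 servo mapping is an assumption, since only deploy(0) -> 0.2 and deploy(1.0) -> 1.0 are fixed by the suite.

from app.utils.servo import Servo


class Brakes(object):
    def __init__(self):
        self.percentage = 0
        self.servo = Servo(21)   # pin asserted by test_init_creates_servo_on_pin_21
        self.servo.write(0)      # initial write of 0, also asserted at init

    def deploy(self, percentage):
        self.percentage = percentage
        # map 0..1 onto 0.2..1.0: full close parks the servo at 20 percent
        self.servo.write(0.2 + 0.8 * percentage)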
| 28.02381
| 75
| 0.773152
| 177
| 1,177
| 4.717514
| 0.220339
| 0.143713
| 0.150898
| 0.136527
| 0.713772
| 0.644311
| 0.644311
| 0.57006
| 0.57006
| 0.57006
| 0
| 0.016683
| 0.13424
| 1,177
| 41
| 76
| 28.707317
| 0.802748
| 0
| 0
| 0.483871
| 0
| 0
| 0.033135
| 0
| 0
| 0
| 0
| 0
| 0.193548
| 1
| 0.096774
| false
| 0
| 0.16129
| 0
| 0.258065
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
f5c957427e5b93fcfc4229d7e7efbe7a5cf8ce25
| 601
|
py
|
Python
|
4 kyu/Most_frequently_used_words_in_a_text.py
|
jonathansnolan/Codewars
|
9d6a3fd10ffb2c61ae292961f384067cdede0470
|
[
"MIT"
] | null | null | null |
4 kyu/Most_frequently_used_words_in_a_text.py
|
jonathansnolan/Codewars
|
9d6a3fd10ffb2c61ae292961f384067cdede0470
|
[
"MIT"
] | null | null | null |
4 kyu/Most_frequently_used_words_in_a_text.py
|
jonathansnolan/Codewars
|
9d6a3fd10ffb2c61ae292961f384067cdede0470
|
[
"MIT"
] | null | null | null |
from collections import Counter


def top_3_words(text):
    text = text.lower()
    # first pass: accumulate runs of a-z and apostrophes into candidate words
    count = ""
    j = []
    for u in text:
        if (ord(u) > 96 and ord(u) < 123) or ord(u) == 39:
            count += u
        else:
            j.append(count)
            count = ""
    j.append(count)  # flush the trailing word, which was lost when the text ends on a letter
    # second pass: rebuild each word, dropping empty entries
    i = []
    for k in j:
        temp = ""
        for u in k:
            if (ord(u) > 96 and ord(u) < 123) or (ord(u) == 39 and len(k) > 3):
                temp += u
        if temp != "":
            i.append(temp)
    # count occurrences and return the three most frequent words
    u = dict(Counter(i))
    ans = sorted(u, key=u.get)
    ans = ans[::-1]
    return ans[:3]
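For comparison, a more idiomatic take on the same kata using a regex and Counter.most_common; tie-breaking among equal counts may differ from the character-loop version above.

import re
from collections import Counter


def top_3_words_regex(text):
    words = re.findall(r"[a-z']+", text.lower())
    words = [w for w in words if any(c.isalpha() for c in w)]  # drop apostrophe-only tokens
    return [w for w, _ in Counter(words).most_common(3)]


assert top_3_words_regex("a a a  b  c c  d d d d  e e e e e") == ['e', 'd', 'a']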
| 22.259259
| 75
| 0.425957
| 88
| 601
| 2.886364
| 0.397727
| 0.094488
| 0.047244
| 0.062992
| 0.204724
| 0.204724
| 0.204724
| 0.204724
| 0.204724
| 0.204724
| 0
| 0.052478
| 0.429285
| 601
| 26
| 76
| 23.115385
| 0.688047
| 0
| 0
| 0.083333
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.041667
| false
| 0
| 0.041667
| 0
| 0.125
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
f5d03f80ba9950414b41050d76a8ec9d43425ee6
| 656
|
py
|
Python
|
src/easy/plus_one_66.py
|
ahmet9cengiz/leetCode
|
9e9a61f059072d7791dd19706b7a3e0d0a446669
|
[
"MIT"
] | null | null | null |
src/easy/plus_one_66.py
|
ahmet9cengiz/leetCode
|
9e9a61f059072d7791dd19706b7a3e0d0a446669
|
[
"MIT"
] | null | null | null |
src/easy/plus_one_66.py
|
ahmet9cengiz/leetCode
|
9e9a61f059072d7791dd19706b7a3e0d0a446669
|
[
"MIT"
] | null | null | null |
class Solution(object):
    # Time Complexity: O(n)
    @staticmethod
    def plus_one(digits):
        keep_going = True
        # walk the digits right-to-left, propagating the carry
        for i, e in reversed(list(enumerate(digits))):
            if keep_going:
                if e == 9:
                    digits[i] = 0
                else:
                    digits[i] += 1
                    keep_going = False
            else:
                break
        # all digits were 9: prepend a leading 1 (e.g. [9, 9] -> [1, 0, 0])
        if keep_going:
            new_digits = [1]
            new_digits[1:] = [digits[i] for i in range(len(digits))]
            return new_digits
        return digits


if __name__ == '__main__':
    s = Solution()
    print(s.plus_one([9, 9, 9]))
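For contrast, an equivalent one-liner that round-trips through int; it sidesteps the explicit carry walk above and is fine at these input sizes.

def plus_one_int(digits):
    return [int(c) for c in str(int("".join(map(str, digits))) + 1)]


assert plus_one_int([9, 9, 9]) == [1, 0, 0, 0]
assert plus_one_int([1, 2, 3]) == [1, 2, 4]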
| 24.296296
| 69
| 0.464939
| 75
| 656
| 3.84
| 0.493333
| 0.125
| 0.076389
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.021505
| 0.432927
| 656
| 26
| 70
| 25.230769
| 0.752688
| 0.032012
| 0
| 0.190476
| 0
| 0
| 0.012638
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.047619
| false
| 0
| 0
| 0
| 0.190476
| 0.047619
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
f5d07d12c4b5747b9b1b9f630c617df1ba338e16
| 1,607
|
py
|
Python
|
timetracker/vms/test/models/test_client_admin_invite_model.py
|
comp523-jarvis/timetracker-web
|
af638f0b3aab8a69a974bdb9a18118198488657c
|
[
"Apache-2.0"
] | 1
|
2019-04-09T16:46:53.000Z
|
2019-04-09T16:46:53.000Z
|
timetracker/vms/test/models/test_client_admin_invite_model.py
|
comp523-jarvis/timetracker-web
|
af638f0b3aab8a69a974bdb9a18118198488657c
|
[
"Apache-2.0"
] | 105
|
2018-10-12T17:57:20.000Z
|
2020-06-05T19:35:21.000Z
|
timetracker/vms/test/models/test_client_admin_invite_model.py
|
comp523-jarvis/timetracker-web
|
af638f0b3aab8a69a974bdb9a18118198488657c
|
[
"Apache-2.0"
] | 1
|
2019-04-11T14:43:42.000Z
|
2019-04-11T14:43:42.000Z
|
from django.conf import settings
from django.template.loader import render_to_string

from vms import models


def test_accept(client_admin_invite_factory, user_factory):
    """
    Accepting the invitation should create a new client admin for the
    user who accepts.
    """
    invite = client_admin_invite_factory()
    user = user_factory()

    admin = invite.accept(user)

    assert admin.client == invite.client
    assert models.ClientAdminInvite.objects.count() == 0


def test_send(client_admin_invite_factory, request_factory, mailoutbox):
    """
    Sending the invitation should send an email to the email address
    attached to the invite.
    """
    request = request_factory.get('/')
    invite = client_admin_invite_factory()

    invite.send(request)

    context = {
        'accept_url': f'{request.get_host()}{invite.accept_url}',
        'client': invite.client,
    }
    expected_msg = render_to_string(
        'vms/emails/client-admin-invite.txt',
        context=context,
    )

    assert len(mailoutbox) == 1
    msg = mailoutbox[0]

    assert msg.body == expected_msg
    assert msg.from_email == settings.DEFAULT_FROM_EMAIL
    assert msg.subject == 'Client Administrator Invitation'
    assert msg.to == [invite.email]


def test_string_conversion(client_admin_invite_factory):
    """
    Converting an invite to a string should return a string containing
    the email it was sent to and the linked client.
    """
    invite = client_admin_invite_factory()
    expected = f'Admin invite for {invite.email} from {invite.client}'

    assert str(invite) == expected
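A minimal sketch of the model surface test_string_conversion pins down; the field types and the 'vms.Client' relation are assumptions, and accept(), send() and accept_url are omitted.

from django.db import models


class ClientAdminInvite(models.Model):
    email = models.EmailField()                                          # hypothetical field type
    client = models.ForeignKey('vms.Client', on_delete=models.CASCADE)   # hypothetical relation

    def __str__(self):
        # exactly the format asserted in test_string_conversion
        return f'Admin invite for {self.email} from {self.client}'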
| 27.706897
| 72
| 0.701929
| 205
| 1,607
| 5.317073
| 0.331707
| 0.090826
| 0.109174
| 0.13211
| 0.111927
| 0
| 0
| 0
| 0
| 0
| 0
| 0.002355
| 0.207218
| 1,607
| 57
| 73
| 28.192982
| 0.853218
| 0.178594
| 0
| 0.096774
| 0
| 0
| 0.137084
| 0.057845
| 0
| 0
| 0
| 0
| 0.258065
| 1
| 0.096774
| false
| 0
| 0.096774
| 0
| 0.193548
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
f5d0bd552a2206b2e1b134ade80b6b88f2ce3b53
| 3,489
|
py
|
Python
|
_from_pydot/lambdas/dev/pyppeteer.py
|
owasp-sbot/pbx-gs-python-utils
|
f448aa36c4448fc04d30c3a5b25640ea4d44a267
|
[
"Apache-2.0"
] | 3
|
2018-12-14T15:43:46.000Z
|
2019-04-25T07:44:58.000Z
|
_from_pydot/lambdas/dev/pyppeteer.py
|
owasp-sbot/pbx-gs-python-utils
|
f448aa36c4448fc04d30c3a5b25640ea4d44a267
|
[
"Apache-2.0"
] | 1
|
2019-05-11T14:19:37.000Z
|
2019-05-11T14:51:04.000Z
|
_from_pydot/lambdas/dev/pyppeteer.py
|
owasp-sbot/pbx-gs-python-utils
|
f448aa36c4448fc04d30c3a5b25640ea4d44a267
|
[
"Apache-2.0"
] | 4
|
2018-12-27T04:54:14.000Z
|
2019-05-11T14:07:47.000Z
|
import base64
import os
import asyncio

from pbx_gs_python_utils.utils.Process import Process
from osbot_aws.Dependencies import load_dependency


def run(event, context):
    # on first run this downloads a zip file from S3 to /tmp/lambdas-dependencies/pyppeteer/,
    # which contains the contents of `pip3 install pyppeteer -t pyppeteer` and the
    # headless_shell file created by https://github.com/sambaiz/puppeteer-lambda-starter-kit
    # It also adds /tmp/lambdas-dependencies/pyppeteer/ to sys.path
    load_dependency("pyppeteer")

    path_headless_shell = '/tmp/lambdas-dependencies/pyppeteer/headless_shell'  # path to headless_shell AWS Linux executable
    path_page_screenshot = '/tmp/screenshot.png'    # path to store the screenshot of the loaded url
    os.environ['PYPPETEER_HOME'] = '/tmp'           # tell pyppeteer to use this read-write path in AWS Lambda

    target_url = event.get('url')                   # get url to load from lambda params
    doc_type = event.get('doc_type')

    async def get_screenshot():                     # async method to run the request
        from pyppeteer import launch                # import pyppeteer dependency
        Process.run("chmod", ['+x', path_headless_shell])            # make headless_shell executable
        browser = await launch(executablePath=path_headless_shell,   # launch chrome (i.e. headless_shell)
                               args=['--no-sandbox', '--single-process'])  # two key settings or the requests will not work
        page = await browser.newPage()              # typical pyppeteer code, where we create a new Page object
        await page.goto(target_url)                 # - open an url
        await page.waitFor(2 * 1000)                # to remove
        # await page.waitForNavigation()  # not working
        if doc_type == 'pdf':
            await page.pdf({'path': path_page_screenshot})
        else:
            await page.screenshot({'path': path_page_screenshot})    # - take a screenshot of the page loaded and save it
        await browser.close()                       # - close the browser

    # event loop to run the async method, which opens the url provided in the
    # lambda params and saves it as a png
    asyncio.get_event_loop().run_until_complete(get_screenshot())

    with open(path_page_screenshot, "rb") as image_file:             # open path_page_screenshot file
        encoded_png = base64.b64encode(image_file.read()).decode()   # base64-encode it to make it easy to return

    return {"base64_data": encoded_png}             # return value to Lambda caller
| 67.096154
| 162
| 0.509888
| 355
| 3,489
| 4.88169
| 0.419718
| 0.060012
| 0.051933
| 0.053664
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.008547
| 0.429923
| 3,489
| 51
| 163
| 68.411765
| 0.862745
| 0.329321
| 0
| 0
| 0
| 0
| 0.071768
| 0.021617
| 0
| 0
| 0
| 0
| 0
| 1
| 0.034483
| false
| 0
| 0.206897
| 0
| 0.275862
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
f5d23a181d6fd76675487606efe26f43a22cb25e
| 2,757
|
py
|
Python
|
filter_plugins/net_textfsm_parse.py
|
iamroddo/ansible_helpers
|
420b9d7a1bb637f52209aeeea4cd424d03cf4eef
|
[
"Apache-2.0"
] | 44
|
2017-05-19T19:55:39.000Z
|
2022-02-08T17:21:22.000Z
|
filter_plugins/net_textfsm_parse.py
|
iamroddo/ansible_helpers
|
420b9d7a1bb637f52209aeeea4cd424d03cf4eef
|
[
"Apache-2.0"
] | 2
|
2017-07-17T14:28:23.000Z
|
2020-12-11T15:54:00.000Z
|
filter_plugins/net_textfsm_parse.py
|
iamroddo/ansible_helpers
|
420b9d7a1bb637f52209aeeea4cd424d03cf4eef
|
[
"Apache-2.0"
] | 18
|
2017-07-27T07:58:34.000Z
|
2021-06-06T04:06:33.000Z
|
"""
Filter to convert results from network device show commands obtained from ios_command,
eos_command, et cetera to structured data using TextFSM templates.
"""
from __future__ import unicode_literals
from __future__ import print_function
import os
from textfsm.clitable import CliTableError
import textfsm.clitable as clitable
def get_template_dir():
"""Find and return the ntc-templates/templates dir."""
try:
template_dir = os.environ['NET_TEXTFSM']
index = os.path.join(template_dir, 'index')
if not os.path.isfile(index):
# Assume only base ./ntc-templates specified
template_dir = os.path.join(template_dir, 'templates')
except KeyError:
# Construct path ~/ntc-templates/templates
home_dir = os.path.expanduser("~")
template_dir = os.path.join(home_dir, 'ntc-templates', 'templates')
index = os.path.join(template_dir, 'index')
if not os.path.isdir(template_dir) or not os.path.isfile(index):
msg = """
Valid ntc-templates not found, please install https://github.com/networktocode/ntc-templates
and then set the NET_TEXTFSM environment variable to point to the ./ntc-templates/templates
directory."""
raise ValueError(msg)
return template_dir
def get_structured_data(raw_output, platform, command):
"""Convert raw CLI output to structured data using TextFSM template."""
template_dir = get_template_dir()
index_file = os.path.join(template_dir, 'index')
textfsm_obj = clitable.CliTable(index_file, template_dir)
attrs = {'Command': command, 'Platform': platform}
try:
# Parse output through template
textfsm_obj.ParseCmd(raw_output, attrs)
return clitable_to_dict(textfsm_obj)
except CliTableError:
return raw_output
def clitable_to_dict(cli_table):
"""Converts TextFSM cli_table object to list of dictionaries."""
objs = []
for row in cli_table:
temp_dict = {}
for index, element in enumerate(row):
temp_dict[cli_table.header[index].lower()] = element
objs.append(temp_dict)
return objs
def net_textfsm_parse(output, platform, command):
"""Process config find interfaces using ip helper."""
try:
output = output['stdout'][0]
except (KeyError, IndexError, TypeError):
pass
return get_structured_data(output, platform, command)
class FilterModule(object):
"""Filter to convert results from network device show commands obtained from ios_command,
eos_command, et cetera to structured data using TextFSM templates."""
def filters(self):
return {
'net_textfsm_parse': net_textfsm_parse,
}
if __name__ == "__main__":
# Test code
pass
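A hypothetical direct call of the filter function above; it needs textfsm installed and an ntc-templates checkout reachable through NET_TEXTFSM, and the output string is made up.

raw = "GigabitEthernet0/1   10.1.10.1   YES manual up   up"   # made-up 'show ip interface brief' row
result = net_textfsm_parse({'stdout': [raw]}, platform='cisco_ios',
                           command='show ip interface brief')
# on a template hit: a list of dicts keyed by lower-cased template headers;
# on a CliTableError: the raw string is returned unchanged
print(result)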
| 32.821429
| 93
| 0.696772
| 351
| 2,757
| 5.273504
| 0.344729
| 0.077256
| 0.027012
| 0.038898
| 0.258239
| 0.197731
| 0.183684
| 0.183684
| 0.183684
| 0.183684
| 0
| 0.000459
| 0.210011
| 2,757
| 83
| 94
| 33.216867
| 0.849403
| 0.237214
| 0
| 0.132075
| 0
| 0
| 0.145419
| 0.012118
| 0
| 0
| 0
| 0
| 0
| 1
| 0.09434
| false
| 0.037736
| 0.09434
| 0.018868
| 0.320755
| 0.018868
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
f5d2d84344ef95aeed5c0f078a4e133508f0ccd9
| 5,705
|
py
|
Python
|
firebaseClient/firebaseClientGPIO.py
|
tabris2015/personCounter
|
0cd7f8698afefdd9e913a97820b9ff9c01752274
|
[
"MIT"
] | null | null | null |
firebaseClient/firebaseClientGPIO.py
|
tabris2015/personCounter
|
0cd7f8698afefdd9e913a97820b9ff9c01752274
|
[
"MIT"
] | null | null | null |
firebaseClient/firebaseClientGPIO.py
|
tabris2015/personCounter
|
0cd7f8698afefdd9e913a97820b9ff9c01752274
|
[
"MIT"
] | null | null | null |
#!/usr/bin/python
import threading
import Queue
import serial
import time
import sqlite3
from datetime import datetime, timedelta

from firebase import firebase
from gpiozero import Button, LED

# ///////////////////////////////////////////
import firebase_admin
from firebase_admin import credentials
from firebase_admin import firestore
# /////////////////////////////////////////////////

missed_events = []
DB_INTERVAL = 180

# #### pin definitions
FAULT = LED(5)
FALLA = False
IN1 = 13
OUT1 = 6
IN2 = 26
OUT2 = 19

in1_button = Button(IN1, pull_up=False)
out1_button = Button(OUT1, pull_up=False)
in2_button = Button(IN2, pull_up=False)
out2_button = Button(OUT2, pull_up=False)

eventQueue = Queue.Queue()
####
connected = False


def queue_get_all(q):
    # drain up to maxItemsToRetrieve items from the queue without blocking
    items = []
    maxItemsToRetrieve = 10000
    for _ in range(maxItemsToRetrieve):
        try:
            items.append(q.get_nowait())
        except Queue.Empty:
            break
    return items


def in1Event():
    print("in1!")
    event_dic = {}
    event_dic["tipo_marcado"] = 1
    event_dic["fecha"] = datetime.utcnow()
    event_dic["id_sensor"] = 1
    eventQueue.put(event_dic)


def out1Event():
    print("out1!")
    event_dic = {}
    event_dic["tipo_marcado"] = 0
    event_dic["fecha"] = datetime.utcnow()
    event_dic["id_sensor"] = 1
    eventQueue.put(event_dic)


def in2Event():
    print("in2!")
    event_dic = {}
    event_dic["tipo_marcado"] = 1
    event_dic["fecha"] = datetime.utcnow()
    event_dic["id_sensor"] = 2
    eventQueue.put(event_dic)


def out2Event():
    print("out2!")
    event_dic = {}
    event_dic["tipo_marcado"] = 0
    event_dic["fecha"] = datetime.utcnow()
    event_dic["id_sensor"] = 2
    eventQueue.put(event_dic)


def periodicDBInsert(key):
    insert_SQL = '''INSERT INTO personEvent(fecha, tipo_marcado, id_sensor) VALUES(?, ?, ?)'''
    db = sqlite3.connect('/home/pi/projects/personCounter/firebaseClient/local.db')
    c = db.cursor()
    global DB_INTERVAL
    global FALLA
    # ///////////////////
    global missed_events
    try:
        print("connecting to the DB...")
        cred = credentials.Certificate(key)
        firebase_admin.initialize_app(cred)
        dbFs = firestore.client()
        FAULT.off()
        FALLA = False
    except Exception:
        FAULT.on()
        FALLA = True
        return
    # for sqlite
    while True:
        if eventQueue.empty() and not missed_events:
            print("no events!")
        else:
            print("inserting events...")
            # for event in events:
            #     pushToLocalDB(db, event)
            # creating doc
            events = []
            if not eventQueue.empty():
                print("new events in queue: ", eventQueue.qsize())
                events = queue_get_all(eventQueue)
                eventQueue.task_done()
            try:
                print("missed events in queue: ", len(missed_events))
                total_events = events + missed_events
                print("accessing collection...")
                doc_data = {
                    'marcados': total_events,
                    'id_evento': 1,
                }
                ######
                events_sqlite = []
                for event in total_events:
                    events_sqlite.append(
                        (
                            event['fecha'],
                            event['tipo_marcado'],
                            event['id_sensor']
                        )
                    )
                c.executemany(insert_SQL, events_sqlite)
                print('inserting data into the local db...')
                db.commit()
                ######
                print('inserting data into the remote db...')
                doc_ref = dbFs.collection(u'marcados_eventos').document(unicode(datetime.now()))
                doc_ref.set(doc_data)
                ##################
                events = []
                missed_events = []
                FAULT.off()
                FALLA = False
                print('db update finished!')
            except Exception as e:
                print(e)
                print('saving data...')
                missed_events = events
                FAULT.on()
                FALLA = True
                # c.executemany(insert_SQL, events2)
                # db.commit()
                # select_last_events(db)
                events = []
        time.sleep(DB_INTERVAL)


if __name__ == '__main__':
    import argparse
    parser = argparse.ArgumentParser(description='person counter')
    parser.add_argument('-key', required=True, action='store', help='path to key for remote connection')
    args = parser.parse_args()
    keyPath = ""
    if args.key is not None:
        keyPath = args.key
    # first_event = False
    dbTh = threading.Thread(target=periodicDBInsert, args=(keyPath,))
    # dbTh = threading.Timer(5, periodicDBInsert, args=(db,))
    dbTh.daemon = True
    # -----
    dbTh.start()
    ###
    # firebase = firebase.FirebaseApplication(URL, authentication=authentication)
    in1_button.when_pressed = in1Event
    out1_button.when_pressed = out1Event
    in2_button.when_pressed = in2Event
    out2_button.when_pressed = out2Event

    # heartbeat loop: short blink when healthy, solid on when FALLA is set
    while True:
        if not FALLA:
            FAULT.on()
            time.sleep(0.1)
            FAULT.off()
            time.sleep(0.9)
        else:
            FAULT.on()
            time.sleep(1)
            FAULT.on()
            FAULT.on()
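For illustration, a Python 3 sketch of the non-blocking drain pattern that queue_get_all implements above (the standard queue module replaces the Python 2 Queue import; the event dicts are placeholders):

import queue

q = queue.Queue()
for n in range(3):
    q.put({"id_sensor": 1, "tipo_marcado": n % 2})

items = []
while True:
    try:
        items.append(q.get_nowait())
    except queue.Empty:
        break
print(items)  # all queued events, drained without blocking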
| 26.784038
| 103
| 0.540053
| 573
| 5,705
| 5.207679
| 0.331588
| 0.053619
| 0.014745
| 0.021448
| 0.144102
| 0.128686
| 0.128686
| 0.128686
| 0.128686
| 0.128686
| 0
| 0.016566
| 0.333392
| 5,705
| 212
| 104
| 26.910377
| 0.768078
| 0.076775
| 0
| 0.322581
| 0
| 0
| 0.118618
| 0.010557
| 0
| 0
| 0
| 0
| 0
| 1
| 0.03871
| false
| 0
| 0.083871
| 0
| 0.135484
| 0.096774
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|