Dataset schema (column: dtype), reconstructed from the flattened header:

hexsha: string | size: int64 | ext: string | lang: string
max_stars_repo_path: string | max_stars_repo_name: string | max_stars_repo_head_hexsha: string | max_stars_repo_licenses: list | max_stars_count: int64 | max_stars_repo_stars_event_min_datetime: string | max_stars_repo_stars_event_max_datetime: string
max_issues_repo_path: string | max_issues_repo_name: string | max_issues_repo_head_hexsha: string | max_issues_repo_licenses: list | max_issues_count: int64 | max_issues_repo_issues_event_min_datetime: string | max_issues_repo_issues_event_max_datetime: string
max_forks_repo_path: string | max_forks_repo_name: string | max_forks_repo_head_hexsha: string | max_forks_repo_licenses: list | max_forks_count: int64 | max_forks_repo_forks_event_min_datetime: string | max_forks_repo_forks_event_max_datetime: string
content: string | avg_line_length: float64 | max_line_length: int64 | alphanum_fraction: float64
41 quality-signal columns, float64 unless noted, in this order: qsc_code_num_words_quality_signal (int64), qsc_code_num_chars_quality_signal, qsc_code_mean_word_length_quality_signal, qsc_code_frac_words_unique_quality_signal, qsc_code_frac_chars_top_2grams_quality_signal, qsc_code_frac_chars_top_3grams_quality_signal, qsc_code_frac_chars_top_4grams_quality_signal, qsc_code_frac_chars_dupe_5grams_quality_signal through qsc_code_frac_chars_dupe_10grams_quality_signal, qsc_code_frac_chars_replacement_symbols_quality_signal, qsc_code_frac_chars_digital_quality_signal, qsc_code_frac_chars_whitespace_quality_signal, qsc_code_size_file_byte_quality_signal, qsc_code_num_lines_quality_signal, qsc_code_num_chars_line_max_quality_signal, qsc_code_num_chars_line_mean_quality_signal, qsc_code_frac_chars_alphabet_quality_signal, qsc_code_frac_chars_comments_quality_signal, qsc_code_cate_xml_start_quality_signal, qsc_code_frac_lines_dupe_lines_quality_signal, qsc_code_cate_autogen_quality_signal, qsc_code_frac_lines_long_string_quality_signal, qsc_code_frac_chars_string_length_quality_signal, qsc_code_frac_chars_long_word_length_quality_signal, qsc_code_frac_lines_string_concat_quality_signal, qsc_code_cate_encoded_data_quality_signal, qsc_code_frac_chars_hex_words_quality_signal, qsc_code_frac_lines_prompt_comments_quality_signal, qsc_code_frac_lines_assert_quality_signal, qsc_codepython_cate_ast_quality_signal, qsc_codepython_frac_lines_func_ratio_quality_signal, qsc_codepython_cate_var_zero_quality_signal (bool), qsc_codepython_frac_lines_pass_quality_signal, qsc_codepython_frac_lines_import_quality_signal, qsc_codepython_frac_lines_simplefunc_quality_signal, qsc_codepython_score_lines_no_logic_quality_signal, qsc_codepython_frac_lines_print_quality_signal
41 raw counterparts (the same names without the _quality_signal suffix): int64, except qsc_code_frac_words_unique and qsc_code_frac_lines_string_concat (null)
effective: string | hits: int64

Each record below is rendered as: repo metadata, the file content, then avg_line_length | max_line_length | alphanum_fraction, the 41 quality-signal values in the order above, the raw qsc_* block, and effective | hits.
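For orientation, a minimal pandas sketch for inspecting rows with this schema; the Parquet filename is a placeholder assumed here, not a path given by this dump:

```python
# Minimal sketch: load one split and sanity-check it against the schema above.
# "train.parquet" is a hypothetical local file, not part of this dataset dump.
import pandas as pd

df = pd.read_parquet("train.parquet")
print(df.dtypes)  # should line up with the column/dtype list above

# Most-starred files first, as one quick way to browse the records.
top = df.sort_values("max_stars_count", ascending=False)
print(top[["max_stars_repo_name", "max_stars_repo_path", "max_stars_count"]].head())
```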
hexsha: c8205acb89329008fc256d7baa124e1eca07ffcd | size: 1,521 | ext: py | lang: Python
max_stars: slybot/slybot/linkextractor/xml.py @ coolkunal64/ht (head b7c52d5604dd75ea4086a6ff92eaa2db85bb145c), licenses ["BSD-3-Clause"], count 1, events 2017-11-03T13:00:21.000Z to 2017-11-03T13:00:21.000Z
max_issues: same path/repo/head/licenses, count 2, events 2021-03-31T20:04:55.000Z to 2021-12-13T20:47:09.000Z
max_forks: same path/repo/head/licenses, count 2, events 2017-11-03T13:00:23.000Z to 2020-08-28T19:59:40.000Z
content:
"""
Link extraction for auto scraping
"""
from scrapy.link import Link
from scrapy.selector import Selector
from slybot.linkextractor.base import BaseLinkExtractor
class XmlLinkExtractor(BaseLinkExtractor):
"""Link extractor for XML sources"""
def __init__(self, xpath, **kwargs):
self.remove_namespaces = kwargs.pop('remove_namespaces', False)
super(XmlLinkExtractor, self).__init__(**kwargs)
self.xpath = xpath
def _extract_links(self, response):
type = 'html'
if response.body_as_unicode().strip().startswith('<?xml version='):
type = 'xml'
xxs = Selector(response, type=type)
if self.remove_namespaces:
xxs.remove_namespaces()
for url in xxs.xpath(self.xpath).extract():
yield Link(url.encode(response.encoding))
class RssLinkExtractor(XmlLinkExtractor):
"""Link extraction from RSS feeds"""
def __init__(self, **kwargs):
super(RssLinkExtractor, self).__init__("//item/link/text()", **kwargs)
class SitemapLinkExtractor(XmlLinkExtractor):
"""Link extraction for sitemap.xml feeds"""
def __init__(self, **kwargs):
kwargs['remove_namespaces'] = True
super(SitemapLinkExtractor, self).__init__("//urlset/url/loc/text() | //sitemapindex/sitemap/loc/text()", **kwargs)
class AtomLinkExtractor(XmlLinkExtractor):
def __init__(self, **kwargs):
kwargs['remove_namespaces'] = True
super(AtomLinkExtractor, self).__init__("//link/@href", **kwargs)
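For context, a self-contained sketch of the XPath that RssLinkExtractor feeds to XmlLinkExtractor, using a plain Scrapy Selector on an invented feed (the slybot BaseLinkExtractor plumbing is omitted):

```python
from scrapy.selector import Selector

# Tiny fabricated RSS document, just to exercise the XPath.
rss = "<rss><channel><item><link>http://example.com/a</link></item></channel></rss>"
sel = Selector(text=rss, type="xml")

# Same expression RssLinkExtractor passes up to XmlLinkExtractor:
print(sel.xpath("//item/link/text()").extract())  # ['http://example.com/a']
```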
avg_line_length: 37.097561 | max_line_length: 123 | alphanum_fraction: 0.680473
quality signals (schema order): 162 | 1,521 | 6.12963 | 0.364198 | 0.096677 | 0.04431 | 0.05136 | 0.123867 | 0.096677 | 0.096677 | 0.096677 | 0.096677 | 0 | 0 | 0 | 0.187377 | 1,521 | 40 | 124 | 38.025 | 0.803398 | 0.087442 | 0 | 0.178571 | 0 | 0 | 0.117949 | 0.041026 | 0 | 0 | 0 | 0 | 0 | 1 | 0.178571 | false | 0 | 0.107143 | 0 | 0.428571 | 0
raw qsc_* block: all 0 (null for qsc_code_frac_words_unique and qsc_code_frac_lines_string_concat) | effective: 1 | hits: 0
hexsha: c821ed2774a2669777a45f15bf9913ade184edde | size: 1,319 | ext: py | lang: Python
max_stars: questions/construct-the-rectangle/Solution.py @ marcus-aurelianus/leetcode-solutions (head 8b43e72fe1f51c84abc3e89b181ca51f09dc7ca6), licenses ["MIT"], count 141, events 2017-12-12T21:45:53.000Z to 2022-03-25T07:03:39.000Z
max_issues: same path/repo/head/licenses, count 32, events 2015-10-05T14:09:52.000Z to 2021-05-30T10:28:41.000Z
max_forks: same path/repo/head/licenses, count 56, events 2015-09-30T05:23:28.000Z to 2022-03-08T07:57:11.000Z
content:
"""
A web developer needs to know how to design a web page's size. So, given a specific rectangular web page’s area, your job by now is to design a rectangular web page, whose length L and width W satisfy the following requirements:
The area of the rectangular web page you designed must equal the given target area.
The width W should not be larger than the length L, which means L >= W.
The difference between length L and width W should be as small as possible.
Return an array [L, W] where L and W are the length and width of the web page you designed in sequence.
Example 1:
Input: area = 4
Output: [2,2]
Explanation: The target area is 4, and all the possible ways to construct it are [1,4], [2,2], [4,1].
But according to requirement 2, [1,4] is illegal; according to requirement 3, [4,1] is not optimal compared to [2,2]. So the length L is 2, and the width W is 2.
Example 2:
Input: area = 37
Output: [37,1]
Example 3:
Input: area = 122122
Output: [427,286]
Constraints:
1 <= area <= 10^7
"""
class Solution(object):
def constructRectangle(self, area):
"""
:type area: int
:rtype: List[int]
"""
w = int(area ** 0.5)
while w >= 1:
l, r = divmod(area, w)
if r == 0:
return [l, w]
w -= 1
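A quick sanity check of the Solution above, reusing the inputs from the docstring examples:

```python
# Expected outputs come straight from the docstring examples.
s = Solution()
print(s.constructRectangle(4))       # [2, 2]
print(s.constructRectangle(37))      # [37, 1]
print(s.constructRectangle(122122))  # [427, 286]
```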
avg_line_length: 27.479167 | max_line_length: 228 | alphanum_fraction: 0.644428
quality signals (schema order): 228 | 1,319 | 3.72807 | 0.421053 | 0.041176 | 0.063529 | 0.035294 | 0.037647 | 0 | 0 | 0 | 0 | 0 | 0 | 0.050568 | 0.265353 | 1,319 | 48 | 229 | 27.479167 | 0.826625 | 0.791509 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.125 | false | 0 | 0 | 0 | 0.375 | 0
raw qsc_* block: all 0 (null for qsc_code_frac_words_unique and qsc_code_frac_lines_string_concat) | effective: 1 | hits: 0
hexsha: c825a6df3c14933bdcbd115b36ca8c69f6c6f233 | size: 2,434 | ext: py | lang: Python
max_stars: limiter/rate_limiter.py @ sousa-andre/requests-limiter (head ad3a5982a40e88111eca63b258e1226e15a8befa), licenses ["MIT"], count 4, events 2020-11-14T18:13:27.000Z to 2021-01-03T19:13:39.000Z
max_issues: same path/repo/head/licenses, count null, events null
max_forks: same path/repo/head/licenses, count 2, events 2021-01-03T19:13:46.000Z to 2021-01-31T12:24:23.000Z
content:
from functools import wraps
from time import sleep
from typing import List
from .rate_limit import RateLimit
from .exceptions import RateLimitHit
class OnHitAction:
raise_exception = 0
wait = 1
class RateLimiter:
def __init__(self, storage=RateLimit, *, action=OnHitAction.raise_exception):
self._limits = []
self._storage = storage
self.action = action
def _create_single_limiter(self, name, callback, defaults=None):
if defaults is None:
defaults = [(), (), ()]
self._limits.append(self._storage(name, callback, defaults))
def create_limiter(self, names, callback, defaults=None):
if isinstance(names, list):
for name in names:
self._create_single_limiter(name, callback, defaults)
elif isinstance(names, str):
self._create_single_limiter(names, callback, defaults)
else:
raise ValueError("names parameter must be either a string or an iterable")
@staticmethod
def can_request(limits):
for limit in limits:
print(limit, limit.can_request())
if not limit.can_request():
return [False, limit]
return [True, None]
@staticmethod
def is_initialized(limits):
for limit in limits:
if not limit.is_initialized():
return False
return True
@staticmethod
def register_request(limits, rt):
for limit in limits:
limit.register_request(rt)
def use(self, *limits_names):
def request_wrapper(func):
limits: List[RateLimit] = [limit for limit in self._limits if limit.name in limits_names]
@wraps(func)
def func_wrapper():
rl = RateLimiter.can_request(limits)
if rl[0]:
ret = func()
RateLimiter.register_request(limits, ret)
return ret
else:
if self.action == OnHitAction.raise_exception:
raise RateLimitHit(rl[1])
elif self.action == OnHitAction.wait:
sleep(rl[1].time_until_new_request_is_possible)
ret = func()
RateLimiter.register_request(limits, ret)
return ret
return func_wrapper
return request_wrapper
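A minimal wiring sketch for RateLimiter; the FakeLimit storage below is a stand-in invented here, since the real RateLimit semantics live in limiter.rate_limit:

```python
# Sketch only: FakeLimit mimics the storage interface RateLimiter calls
# (name attribute, can_request, is_initialized, register_request).
class FakeLimit:
    def __init__(self, name, callback, defaults):
        self.name = name

    def can_request(self):
        return True  # always allow, for illustration

    def is_initialized(self):
        return True

    def register_request(self, rt):
        pass  # a real RateLimit would record the request here

limiter = RateLimiter(storage=FakeLimit, action=OnHitAction.wait)
limiter.create_limiter("app-rate", callback=None)

@limiter.use("app-rate")
def call_api():
    return "response"

print(call_api())  # "response"; a denying limit would trigger wait/raise instead
```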
avg_line_length: 32.453333 | max_line_length: 101 | alphanum_fraction: 0.582991
quality signals (schema order): 259 | 2,434 | 5.30888 | 0.277992 | 0.058182 | 0.029091 | 0.034909 | 0.106182 | 0.074182 | 0.074182 | 0.074182 | 0.074182 | 0 | 0 | 0.003117 | 0.341002 | 2,434 | 74 | 102 | 32.891892 | 0.854115 | 0 | 0 | 0.225806 | 0 | 0 | 0.021775 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.145161 | false | 0 | 0.080645 | 0 | 0.419355 | 0.016129
raw qsc_* block: all 0 (null for qsc_code_frac_words_unique and qsc_code_frac_lines_string_concat) | effective: 1 | hits: 0
hexsha: c82642bd0188daaa561a06de4c6541a12f22393f | size: 2,081 | ext: py | lang: Python
max_stars: pymod/amsexceptions.py @ kevangel79/argo-ams-library (head 6824b1f6f577e688575d8f2f67f747126a856fcb), licenses ["Apache-2.0"], count null, events null
max_issues: same path/repo/head/licenses, count 1, events 2021-06-25T15:35:46.000Z to 2021-06-25T15:35:46.000Z
max_forks: same path/repo/head/licenses, count null, events null
content:
import json
class AmsException(Exception):
"""Base exception class for all Argo Messaging service related errors"""
def __init__(self, *args, **kwargs):
super(AmsException, self).__init__(*args, **kwargs)
class AmsServiceException(AmsException):
"""Exception for Argo Messaging Service API errors"""
def __init__(self, json, request):
errord = dict()
self.msg = "While trying the [{0}]: {1}".format(request, json['error']['message'])
errord.update(error=self.msg)
if json['error'].get('code'):
self.code = json['error']['code']
errord.update(status_code=self.code)
if json['error'].get('status'):
self.status = json['error']['status']
errord.update(status=self.status)
super(AmsServiceException, self).__init__(errord)
class AmsBalancerException(AmsServiceException):
"""Exception for load balancer Argo Messaging Service errors"""
def __init__(self, json, request):
super(AmsBalancerException, self).__init__(json, request)
class AmsTimeoutException(AmsServiceException):
"""Exception for timeouts errors
Timeouts can be generated by the Argo Messaging Service if a message was
not acknowledged in the desired time frame (ackDeadlineSeconds). Also, 408
timeouts can come from load balancer for partial requests that were not
completed in required time frame.
"""
def __init__(self, json, request):
super(AmsTimeoutException, self).__init__(json, request)
class AmsConnectionException(AmsException):
"""Exception for connection related problems catched from requests library"""
def __init__(self, exp, request):
self.msg = "While trying the [{0}]: {1}".format(request, repr(exp))
super(AmsConnectionException, self).__init__(self.msg)
class AmsMessageException(AmsException):
"""Exception that indicate problems with constructing message"""
def __init__(self, msg):
self.msg = msg
super(AmsMessageException, self).__init__(self.msg)
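A short illustration of how AmsServiceException formats an error; the payload below is fabricated in the shape the AMS API is assumed to return:

```python
# Fabricated error payload, matching the dict shape __init__ reads from.
error_payload = {"error": {"message": "topic not found",
                           "code": 404, "status": "NOT_FOUND"}}
try:
    raise AmsServiceException(json=error_payload, request="topic_fetch")
except AmsException as e:  # AmsServiceException subclasses AmsException
    print(e.msg)             # While trying the [topic_fetch]: topic not found
    print(e.code, e.status)  # 404 NOT_FOUND
```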
avg_line_length: 33.031746 | max_line_length: 90 | alphanum_fraction: 0.683325
quality signals (schema order): 231 | 2,081 | 5.943723 | 0.329004 | 0.046613 | 0.04807 | 0.037145 | 0.151493 | 0.116533 | 0.05244 | 0.05244 | 0.05244 | 0 | 0 | 0.004225 | 0.203748 | 2,081 | 62 | 91 | 33.564516 | 0.824381 | 0.280154 | 0 | 0.1 | 0 | 0 | 0.073919 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.2 | false | 0 | 0.033333 | 0 | 0.433333 | 0
raw qsc_* block: all 0 (null for qsc_code_frac_words_unique and qsc_code_frac_lines_string_concat) | effective: 1 | hits: 0
hexsha: c8284b2ce3b5bfcda541a3e925afc518ce46735a | size: 18,871 | ext: py | lang: Python
max_stars: tests/fixtures/__init__.py @ Lunga001/pmg-cms-2 (head 10cea3979711716817b0ba2a41987df73f2c7642), licenses ["Apache-2.0"], count 2, events 2019-06-11T20:46:43.000Z to 2020-08-27T22:50:32.000Z
max_issues: same path/repo/head/licenses, count 70, events 2017-05-26T14:04:06.000Z to 2021-06-30T10:21:58.000Z
max_forks: tests/fixtures/__init__.py @ OpenUpSA/pmg-cms-2 (head ec5f259dae81674ac7a8cdb80f124a8b0f167780), licenses ["Apache-2.0"], count 4, events 2017-08-29T10:09:30.000Z to 2021-05-25T11:29:03.000Z
content:
import pytz
import datetime
from fixture import DataSet, NamedDataStyle, SQLAlchemyFixture
from pmg.models import (
db,
House,
Committee,
CommitteeMeeting,
Bill,
BillType,
Province,
Party,
CommitteeMeetingAttendance,
Member,
CallForComment,
TabledCommitteeReport,
CommitteeQuestion,
Minister,
Event,
Featured,
Page,
BillStatus,
Post,
User,
Role,
Membership,
MembershipType,
EmailTemplate,
DailySchedule,
Organisation,
)
THIS_YEAR = datetime.datetime.today().year
class HouseData(DataSet):
class joint:
id = 1
name = "Joint (NA + NCOP)"
name_short = "Joint"
sphere = "national"
class ncop:
id = 2
name = "National Council of Provinces"
name_short = "NCOP"
sphere = "national"
class na:
id = 3
name = "National Assembly"
name_short = "NA"
sphere = "national"
class president:
id = 4
name = ("The President's Office",)
name_short = "President"
sphere = "national"
class western_cape:
id = 5
name = "Western Cape"
name_short = "western_cape"
sphere = "provincial"
class MinisterData(DataSet):
class minister_of_arts:
id = 1
name = "Minister of Sports, Arts and Culture"
class minister_of_transport:
id = 2
name = "Minister of Transport "
class president:
id = 3
name = "President"
class minister_in_presidency_for_women:
id = 4
name = (
"Minister in The Presidency for Women, Youth and Persons with Disabilities"
)
class minister_of_public_works:
id = 5
name = "Minister of Public Works and Infrastructure"
class CommitteeData(DataSet):
class communications:
name = "Communications"
house = HouseData.na
premium = True
class arts:
name = "Arts and Culture"
house = HouseData.na
minister = MinisterData.minister_of_arts
class constitutional_review:
name = "Constitutional Review Committee"
house = HouseData.joint
active = False
class western_cape_budget:
name = "Budget (WCPP)"
house = HouseData.western_cape
active = False
class CommitteeMeetingData(DataSet):
class arts_meeting_one:
date = datetime.datetime(2019, 1, 1, 0, 0, 0, tzinfo=pytz.utc)
title = "Public meeting One"
committee = CommitteeData.arts
class arts_meeting_two:
date = datetime.datetime(2019, 8, 1, 0, 0, 0, tzinfo=pytz.utc)
title = "Public meeting Two"
committee = CommitteeData.arts
featured = True
class arts_future_meeting_one:
date = datetime.datetime(2020, 1, 1, 0, 0, 0, tzinfo=pytz.utc)
title = "Public meeting 2020 one"
committee = CommitteeData.arts
class arts_future_meeting_two:
date = datetime.datetime(2020, 5, 1, 0, 0, 0, tzinfo=pytz.utc)
title = "Public meeting 2020 two"
committee = CommitteeData.arts
class premium_recent:
date = datetime.datetime(
THIS_YEAR, 11, 5, 0, 0, 0, tzinfo=pytz.FixedOffset(120)
)
title = "Premium meeting recent"
committee = CommitteeData.communications
class premium_old:
date = datetime.datetime(THIS_YEAR - 2, 11, 5, 0, 0, 0, tzinfo=pytz.utc)
title = "Premium meeting old"
committee = CommitteeData.communications
class BillTypeData(DataSet):
class section_74:
name = "Section 74"
prefix = "B"
description = "Section 74"
class section_75:
name = "Section 75"
prefix = "B"
description = "Ordinary Bills not affecting the provinces"
class section_77:
name = "Section 77"
prefix = "B"
description = "Section 77"
class private_member_bill_74:
name = "Private Member Bill: S74"
prefix = "PMB"
description = "Private Member Bill: Section 74"
class private_member_bill_77:
name = "Private Member Bill: S77"
prefix = "PMB"
description = "Private Member Bill: Section 77"
class draft:
name = "Draft"
prefix = "D"
description = "Draft bill"
class BillStatusData(DataSet):
class current:
name = "na"
description = "current"
class assent:
name = "assent"
description = "assent"
class president:
name = "president"
description = "president"
class BillData(DataSet):
"""
Enter various types of bills
"""
class food:
year = 2019
title = "Food and Health Bill"
type = BillTypeData.section_74
introduced_by = "Minister of Finance"
date_of_introduction = datetime.datetime(2019, 1, 1, 0, 0, 0, tzinfo=pytz.utc)
status = BillStatusData.current
class farm:
year = 2019
title = "Farm and Agricultural Bill"
type = BillTypeData.section_77
status = BillStatusData.president
class public:
year = 2019
title = "Public Investment Corporation Amendment Bill"
type = BillTypeData.private_member_bill_74
status = BillStatusData.assent
class child:
year = 2019
title = "Children's Amendment Bill"
type = BillTypeData.private_member_bill_77
class bill_with_none_number:
year = 2019
number = None
title = "Bill with None number"
type = BillTypeData.section_75
class sport:
year = 2019
number = 1
title = "2010 FIFA World Cup South Africa Special Measures Bill"
type = BillTypeData.section_75
class draft:
year = 2019
title = "Test Draft Bill"
type = BillTypeData.draft
class identical_date_events:
year = 2019
title = "Bill with multiple events"
type = BillTypeData.section_74
introduced_by = "Minister of sorting"
date_of_introduction = datetime.datetime(2019, 1, 1, 0, 0, 0, tzinfo=pytz.utc)
status = BillStatusData.current
class CallForCommentData(DataSet):
class arts_call_for_comment_one:
date = datetime.datetime(2019, 1, 1, 0, 0, 0, tzinfo=pytz.utc)
title = "Feeds and Pet Food Bill - draft"
committee = CommitteeData.arts
start_date = datetime.datetime(2019, 1, 30, 0, 0, 0, tzinfo=pytz.utc)
end_date = datetime.datetime(2019, 4, 30, 0, 0, 0, tzinfo=pytz.utc)
body = "The Bill seeks to provide for: - regulation of feed and pet food, - regulation of feed ingredients used in the manufacturing of feed and pet food,"
summary = "The Department of Agriculture, Forestry and Fisheries has published the draft Feeds and Pet Food Bill, and is asking you to comment."
class communications_call_for_comment_one:
date = datetime.datetime(2020, 2, 14, 0, 0, 0, tzinfo=pytz.utc)
title = "Public Procurement Bill"
committee = CommitteeData.communications
start_date = datetime.datetime(2020, 1, 30, 0, 0, 0, tzinfo=pytz.utc)
body = "The draft Bill aims to create a single regulatory framework for public procurement"
class TabledCommitteeReportData(DataSet):
class arts_tabled_committee_report_one:
title = "ATC190710: Report of the Portfolio Committee on Agriculture, Land Reform and Rural Development on the 2019/20 Annual Performance Plan and the Budget of the Department of Agriculture, Forestry and Fisheries (Vote 24) and its Entities, dated 10 July 2019."
start_date = datetime.datetime(2019, 1, 1, 0, 0, 0, tzinfo=pytz.utc)
committee = CommitteeData.arts
end_date = datetime.datetime(2019, 4, 30, 0, 0, 0, tzinfo=pytz.utc)
body = "The Portfolio Committee on Agriculture, Land Reform and Rural Development (hereinafter referred to as the Committee) examined Budget Vote 24: Agriculture, Forestry and Fisheries including the Annual Performance Plan of the Department of Agriculture, Forestry and Fisheries (hereinafter referred to as DAFF or the Department) for the 2019/20 financial year and budget projections for the Medium Term Expenditure Framework (MTEF) period ending in 2021/22."
class PartyData(DataSet):
class da:
name = "Democratic Alliance (DA)"
class anc:
name = "African National Congress (ANC)"
class ProvinceData(DataSet):
class western_cape:
name = "Western Cape"
class gauteng:
name = "Gauteng"
class MemberData(DataSet):
class veronica:
name = "Ms Veronica Van Dyk"
profile_pic_url = "https://www.pa.org.za/media_root/cache/02/93/0293cce7701daf86fa88fe02e1db9c58.jpg"
bio = "Ms Veronica van Dyk is the Deputy Shadow Minister for Communications in the DA, since June 2014. She is a former Ward Councillor of the Nama Khoi Local Municipality."
house = HouseData.na
party = PartyData.da
province = ProvinceData.western_cape
start_date = datetime.datetime(2019, 1, 1, 0, 0, 0, tzinfo=pytz.utc)
pa_link = "http://www.pa.org.za"
current = True
class not_current_member:
name = "Phoebe Noxolo Abraham"
house = HouseData.na
party = PartyData.anc
start_date = datetime.datetime(2019, 1, 1, 0, 0, 0, tzinfo=pytz.utc)
current = False
class laetitia:
name = "Laetitia Heloise Arries"
house = HouseData.joint
party = PartyData.anc
start_date = datetime.datetime(2019, 1, 1, 0, 0, 0, tzinfo=pytz.utc)
current = True
class CommitteeMeetingAttendanceData(DataSet):
class arts_meeting_attendance_one:
date = datetime.datetime(2019, 1, 1, 0, 0, 0, tzinfo=pytz.utc)
attendance = "P"
meeting = CommitteeMeetingData.arts_meeting_two
member = MemberData.laetitia
class arts_meeting_attendance_two:
date = datetime.datetime(2019, 1, 1, 0, 0, 0, tzinfo=pytz.utc)
attendance = "A"
meeting = CommitteeMeetingData.arts_meeting_two
member = MemberData.veronica
class arts_future_meeting_attendance_one:
date = datetime.datetime(2019, 1, 1, 0, 0, 0, tzinfo=pytz.utc)
attendance = "P"
meeting = CommitteeMeetingData.arts_future_meeting_one
member = MemberData.laetitia
class arts_future_meeting_attendance_two:
date = datetime.datetime(2019, 1, 1, 0, 0, 0, tzinfo=pytz.utc)
attendance = "A"
meeting = CommitteeMeetingData.arts_future_meeting_two
member = MemberData.veronica
class CommitteeQuestionData(DataSet):
class arts_committee_question_one:
minister = MinisterData.minister_of_arts
code = "NA1"
question_number = 1
house = HouseData.na
written_number = 1
oral_number = 1
answer_type = "oral"
date = datetime.datetime(2019, 1, 1, 0, 0, 0, tzinfo=pytz.utc)
year = 2019
question = "What programmes that promote the languages, culture and heritage of the Khoi and San has the Government implemented in each province in each of the past five years"
answer = "Through possible funding and strategic partnerships between PanSALB and my Department, PanSALB was able to initiate and support the following programmes."
question_to_name = "Minister of Sports, Arts and Culture"
intro = "Van Dyk, Ms V to ask the Minister of Sports, Arts and Culture:"
asked_by_name = "Van Dyk, Ms V"
asked_by_member = MemberData.veronica
class arts_committee_question_two:
minister = MinisterData.minister_of_arts
code = "NA1"
question_number = 2
house = HouseData.na
written_number = 2
oral_number = 2
answer_type = "oral"
date = datetime.datetime(2018, 1, 1, 0, 0, 0, tzinfo=pytz.utc)
year = 2018
question = "What has he found were the reasons for not reporting on the 2018-19 Fourth Quarter expenditure?"
answer = "During the Fourth Quarter of the 2018-19 financial year there were no expenditure incurred on the development of the Rail Safety Bill and therefore there was no reporting."
question_to_name = "Minister of Sports, Arts and Culture"
intro = "Van Dyk, Ms V to ask the Minister of Sports, Arts and Culture:"
asked_by_name = "Van Dyk, Ms V"
asked_by_member = MemberData.veronica
class EventData(DataSet):
class arts_bill_event_one:
date = datetime.datetime(2019, 1, 1, 0, 0, 0, tzinfo=pytz.utc)
title = "2010 FIFA World Cup South Africa Special Measures Bill [B13-2006]: Department briefing"
type = "committee-meeting"
committee = CommitteeData.arts
house = HouseData.na
bills = [BillData.public, BillData.food]
class food_bill_hansard_event:
date = datetime.datetime(2020, 1, 1, 0, 0, 0, tzinfo=pytz.utc)
title = "Hansard event"
type = "plenary"
house = HouseData.na
bills = [BillData.food]
class identical_date_bill_event1:
date = datetime.datetime(2020, 1, 1, 0, 0, 0, tzinfo=pytz.utc)
title = "Hansard event 2"
type = "bill-signed"
house = HouseData.na
bills = [BillData.identical_date_events]
class identical_date_bill_event2:
date = datetime.datetime(2020, 1, 1, 0, 0, 0, tzinfo=pytz.utc)
title = "Hansard event 2"
type = "bill-introduced"
house = HouseData.na
bills = [BillData.identical_date_events]
class FeaturedData(DataSet):
class the_week_ahead:
title = "The Week Ahead: End of the First Term"
link = "https://pmg.org.za/blog/The%20Week%20Ahead:%20End%20of%20the%20First%20Term"
start_date = datetime.datetime(2019, 1, 1, 0, 0, 0, tzinfo=pytz.utc)
class current_bills:
title = "Current Bills"
start_date = datetime.datetime(2019, 1, 1, 0, 0, 0, tzinfo=pytz.utc)
link = "https://pmg.org.za/bills/current/"
class PageData(DataSet):
class section_25_review_process:
title = "Section 25 review process"
slug = "Section25reviewprocess"
body = "In February 2018, the National Assembly adopted a motion proposed by the EFF, with amendments by the ANC that Parliament's Constitutional Review Committee investigates mechanisms through which land can be expropriated without compensation."
date = datetime.datetime(2019, 1, 1, 0, 0, 0, tzinfo=pytz.utc)
featured = True
class un_featured_page:
title = "Unfeatured page"
slug = "unfeaturedpage"
date = datetime.datetime(2019, 1, 1, 0, 0, 0, tzinfo=pytz.utc)
featured = False
class PostData(DataSet):
class the_week_ahead:
title = "The Week Ahead: End of the First Term"
slug = "theweekahead"
featured = True
body = "A lot was packed into the first term of the Sixth Parliament."
date = datetime.datetime(2019, 1, 1, 0, 0, 0, tzinfo=pytz.utc)
class first_term_review:
title = "First Term Review: Sixth Parliament"
slug = "FirstTermReview"
featured = True
body = "Parliaments first term ended last week. According to the programme, the term was 11 weeks but the main thrust of the work was compressed into the final 5 weeks of the quarter."
date = datetime.datetime(2019, 2, 17, 0, 0, 0, tzinfo=pytz.utc)
class brief_explainer:
title = "BRIEF EXPLAINER: LAPSED BILLS IN PARLIAMENT"
slug = "BriefExplainer"
featured = True
body = "There were 39 unfinished bills when the Fifth Parliament ended."
date = datetime.datetime(2019, 2, 17, 12, 0, 0, tzinfo=pytz.utc)
class government_priorities:
title = "Government's legislative priorities"
slug = "GovernmentPriorities"
featured = True
body = "The Constitution of South Africa empowers the Executive to prepare and initiate legislation. Similarly, Parliament (through its committees) and individual MPs also have initiating power but the vast majority of legislation (92%) is introduced by the Executive."
date = datetime.datetime(2018, 8, 17, 0, 0, 0, tzinfo=pytz.utc)
class RoleData(DataSet):
class admin:
name = "user-admin"
description = "user-admin"
class editor:
name = "editor"
description = "editor"
class UserData(DataSet):
class admin:
email = "admin@pmg.org.za"
name = "Admin User"
active = True
roles = [RoleData.admin, RoleData.editor]
current_login_at = datetime.datetime.utcnow()
confirmed = True
confirmed_at = datetime.datetime.utcnow()
committee_alerts = [CommitteeData.arts]
class editor:
email = "editor@pmg.org.za"
name = "Editor User"
active = True
roles = [RoleData.editor]
current_login_at = datetime.datetime.utcnow()
confirmed = True
confirmed_at = datetime.datetime.utcnow()
committee_alerts = [CommitteeData.arts]
class inactive:
email = "inactive@pmg.org.za"
name = "Inactive User"
active = False
roles = [RoleData.editor]
current_login_at = datetime.datetime.utcnow()
confirmed = True
confirmed_at = datetime.datetime.utcnow()
committee_alerts = [CommitteeData.arts]
class OrganisationData(DataSet):
class pmg:
name = "PMG"
domain = "PMG Domain"
paid_subscriber = True
expiry = datetime.datetime.utcnow() + datetime.timedelta(days=365)
contact = "pmg@pmg.com"
subscriptions = [CommitteeData.arts]
users = [UserData.admin]
class MembershipTypeData(DataSet):
class member:
name = "Member"
class MembershipData(DataSet):
class arts_membership_one:
type = MembershipTypeData.member
committee = CommitteeData.arts
member = MemberData.veronica
class EmailTemplateData(DataSet):
class template_one:
name = "Template One"
description = "Template One Description"
subject = "Template One Subject"
body = "Template One Body"
class DailyScheduleData(DataSet):
class schedule_provincial:
title = "Schedule provincial"
start_date = datetime.datetime(2019, 1, 1, 0, 0, 0, tzinfo=pytz.utc)
body = "Schedule provincial"
house = HouseData.western_cape
class schedule_ncop:
title = "Schedule NCOP"
start_date = datetime.datetime(2019, 1, 1, 0, 0, 0, tzinfo=pytz.utc)
body = "Schedule NCOP body"
house = HouseData.ncop
dbfixture = SQLAlchemyFixture(
env=globals(), style=NamedDataStyle(), engine=db.engine, scoped_session=db.Session
)
avg_line_length: 33.578292 | max_line_length: 470 | alphanum_fraction: 0.647237
quality signals (schema order): 2,272 | 18,871 | 5.285211 | 0.209507 | 0.012492 | 0.025316 | 0.037975 | 0.397235 | 0.342688 | 0.31962 | 0.286059 | 0.262492 | 0.240257 | 0 | 0.042352 | 0.271793 | 18,871 | 561 | 471 | 33.638146 | 0.831466 | 0.001484 | 0 | 0.358407 | 0 | 0.028761 | 0.273118 | 0.001169 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.00885 | 0 | 0.232301 | 0
raw qsc_* block: all 0 (null for qsc_code_frac_words_unique and qsc_code_frac_lines_string_concat) | effective: 1 | hits: 0
hexsha: c828bd04e92dcf2b104e584217bad8d4f09ebabf | size: 455 | ext: py | lang: Python
max_stars: Google Search/GoogleSearch.py @ cclauss/Browser-Automation (head 7baca74d40ac850f9570d7e40a47021dc0e8e387), licenses ["Apache-2.0"], count 35, events 2016-07-16T07:05:24.000Z to 2021-07-07T15:18:55.000Z
max_issues: same path/repo/head/licenses, count null, events null
max_forks: same path/repo/head/licenses, count 7, events 2016-07-27T10:25:10.000Z to 2019-12-06T08:45:03.000Z
content:
from selenium import webdriver
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.common.desired_capabilities import DesiredCapabilities
driver = webdriver.Chrome(r"D:\chromedriver\chromedriver")
driver.get("http://www.google.com")
if not "Google" in driver.title:
raise Exception("Unable to load google page!")
elem = driver.find_element_by_name("q")
elem.send_keys("selenium")
elem.submit()
print(driver.title)
driver.quit()
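For comparison, a sketch of the same flow on Selenium 4+, where find_element_by_name has been removed; the driver setup via Selenium Manager is an assumption here, not part of the original script:

```python
# Selenium 4+ variant of the script above (sketch).
from selenium import webdriver
from selenium.webdriver.common.by import By

driver = webdriver.Chrome()  # Selenium Manager resolves the chromedriver binary
driver.get("http://www.google.com")
assert "Google" in driver.title, "Unable to load google page!"
elem = driver.find_element(By.NAME, "q")  # replaces find_element_by_name("q")
elem.send_keys("selenium")
elem.submit()
print(driver.title)
driver.quit()
```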
avg_line_length: 28.4375 | max_line_length: 78 | alphanum_fraction: 0.789011
quality signals (schema order): 62 | 455 | 5.709677 | 0.612903 | 0.101695 | 0.118644 | 0.152542 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.092308 | 455 | 15 | 79 | 30.333333 | 0.857143 | 0 | 0 | 0 | 0 | 0 | 0.2 | 0.061538 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.25 | 0 | 0.25 | 0.083333
raw qsc_* block: all 0 (null for qsc_code_frac_words_unique and qsc_code_frac_lines_string_concat) | effective: 1 | hits: 0
hexsha: c82a3f71eb898781a7532e4f8e200f17688bdd99 | size: 2,265 | ext: py | lang: Python
max_stars: PuppeteerLibrary/puppeteer/async_keywords/puppeteer_formelement.py @ qahive/robotframework-puppeteer (head 6377156c2e5b3a4d3841c33a2d3ff9ab0b38854a), licenses ["Apache-2.0"], count 37, events 2019-10-28T01:35:43.000Z to 2022-03-31T04:11:49.000Z
max_issues: same path/repo/head/licenses, count 61, events 2020-07-16T00:18:22.000Z to 2022-03-24T07:12:05.000Z
max_forks: same path/repo/head/licenses, count 10, events 2020-03-03T05:28:05.000Z to 2022-02-14T10:03:44.000Z
content:
from PuppeteerLibrary.utils.coverter import str2bool, str2str
import os
import glob
import shutil
import time
from PuppeteerLibrary.ikeywords.iformelement_async import iFormElementAsync
class PuppeteerFormElement(iFormElementAsync):
def __init__(self, library_ctx):
super().__init__(library_ctx)
async def input_text(self, locator: str, text: str, clear=True):
text = str2str(text)
clear = str2bool(clear)
if clear:
await self._clear_input_text(locator)
await self.library_ctx.get_current_page().type_with_selenium_locator(locator, text)
async def input_password(self, locator: str, text: str, clear=True):
text = str2str(text)
clear = str2bool(clear)
await self.input_text(locator, text, clear)
async def clear_element_text(self, locator: str):
await self._clear_input_text(locator)
async def download_file(self, locator: str, timeout=None):
path = os.path.join(os.getcwd(), 'tmp-download')
try:
shutil.rmtree(path)
except OSError:
self.info('Cannot cleanup the tmp download folder.')
page = self.library_ctx.get_current_page().get_page()
await page._client.send('Page.setDownloadBehavior', {
'behavior': 'allow',
'downloadPath': path
})
await self.library_ctx.get_current_page().click_with_selenium_locator(locator)
timeout = self.timestr_to_secs_for_default_timeout(timeout)
max_time = time.time() + timeout
file = None
while time.time() < max_time:
time.sleep(1)
files = glob.glob(os.path.join(path, '*'))
if len(files) == 1:
file = files[0]
break
return file
async def upload_file(self, locator: str, file_path: str):
element = await self.library_ctx.get_current_page().querySelector_with_selenium_locator(locator)
return await element.uploadFile(file_path)
async def _clear_input_text(self, selenium_locator):
await self.library_ctx.get_current_page().click_with_selenium_locator(selenium_locator, {'clickCount': 3})
await self.library_ctx.get_current_page().get_page().keyboard.press('Backspace')
avg_line_length: 38.389831 | max_line_length: 114 | alphanum_fraction: 0.666225
quality signals (schema order): 274 | 2,265 | 5.248175 | 0.306569 | 0.055633 | 0.06815 | 0.070932 | 0.311544 | 0.311544 | 0.274687 | 0.248261 | 0.166898 | 0.166898 | 0 | 0.00577 | 0.234879 | 2,265 | 58 | 115 | 39.051724 | 0.824005 | 0 | 0 | 0.122449 | 0 | 0 | 0.05298 | 0.010596 | 0 | 0 | 0 | 0 | 0 | 1 | 0.020408 | false | 0.020408 | 0.122449 | 0 | 0.204082 | 0
raw qsc_* block: all 0 (null for qsc_code_frac_words_unique and qsc_code_frac_lines_string_concat) | effective: 1 | hits: 0
hexsha: c82ab1a64645a1b9f4d0449b2c09332ab3971afe | size: 7,187 | ext: py | lang: Python
max_stars: cnns/nnlib/pytorch_architecture/resnet1d.py @ anonymous-user-commits/perturb-net (head 66fc7c4a1234fa34b92bcc85751f0a6e23d80a23), licenses ["MIT"], count 1, events 2018-03-25T13:19:46.000Z to 2018-03-25T13:19:46.000Z
max_issues: same path/repo/head/licenses, count null, events null
max_forks: same path/repo/head/licenses, count null, events null
content:
import shutil, os, csv, itertools, glob
import math
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Variable
from torch.utils.data import Dataset, DataLoader
import torch.optim as optim
from sklearn.metrics import confusion_matrix
import pandas as pd
import pickle as pk
cuda = torch.cuda.is_available()
print("is conv1D_cuda available: ", cuda)
# Utils
def load_pickle(filename):
try:
p = open(filename, 'rb')
except IOError:
print("Pickle file cannot be opened.")
return None
try:
picklelicious = pk.load(p)
except ValueError:
print('load_pickle failed once, trying again')
p.close()
p = open(filename, 'rb')
picklelicious = pk.load(p)
p.close()
return picklelicious
def save_pickle(data_object, filename):
pickle_file = open(filename, 'wb')
pk.dump(data_object, pickle_file)
pickle_file.close()
def read_data(filename):
print("Loading Data...")
df = pd.read_csv(filename, header=None)
data = df.values
return data
def read_line(csvfile, line):
with open(csvfile, 'r') as f:
data = next(itertools.islice(csv.reader(f), line, None))
return data
## 1D Variant of ResNet taking in 200 dimensional fixed time series inputs
class BasicBlock(nn.Module):
expansion = 1
def __init__(self, inplanes, planes, stride=1, downsample=None):
super(BasicBlock, self).__init__()
self.conv1 = nn.Conv1d(inplanes, planes, kernel_size=3, padding=1,
stride=stride, bias=False)
self.bn1 = nn.BatchNorm1d(planes)
self.relu = nn.ReLU(inplace=True)
self.conv2 = nn.Conv1d(planes, planes, kernel_size=3, padding=1,
stride=stride, bias=False)
self.bn2 = nn.BatchNorm1d(planes)
self.downsample = downsample
self.stride = stride
def forward(self, x):
residual = x
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.bn2(out)
if self.downsample is not None:
residual = self.downsample(x)
# print('out', out.size(), 'res', residual.size(), self.downsample)
out += residual
out = self.relu(out)
return out
class Bottleneck(nn.Module):
expansion = 4
def __init__(self, inplanes, planes, stride=1, downsample=None):
super(Bottleneck, self).__init__()
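# NOTE: padding=1 with kernel_size=1 lengthens the sequence by 2 at each
# conv below, so "out += residual" in forward() will shape-mismatch; only
# the BasicBlock path (resnet18/34) runs cleanly as written.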
self.conv1 = nn.Conv1d(inplanes, planes, kernel_size=1, padding=1,
stride=stride, bias=False)
self.bn1 = nn.BatchNorm1d(planes)
self.conv2 = nn.Conv1d(planes, planes, kernel_size=1, padding=1,
stride=stride, bias=False)
self.bn2 = nn.BatchNorm1d(planes)
self.conv3 = nn.Conv1d(planes, planes * 4, kernel_size=1, padding=1,
stride=stride, bias=False)
self.bn3 = nn.BatchNorm1d(planes * 4)
self.relu = nn.ReLU(inplace=True)
self.downsample = downsample
self.stride = stride
def forward(self, x):
residual = x
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.bn2(out)
out = self.relu(out)
out = self.conv3(out)
out = self.bn3(out)
if self.downsample is not None:
residual = self.downsample(x)
out += residual
out = self.relu(out)
return out
class ResNet(nn.Module):
def __init__(self, block, layers, num_classes, arch):
self.inplanes = 64
super(ResNet, self).__init__()
self.conv1 = nn.Conv1d(1, 64, kernel_size=7, stride=2, padding=3,
bias=False)
self.bn1 = nn.BatchNorm1d(64)
self.relu = nn.ReLU(inplace=True)
self.maxpool = nn.MaxPool1d(kernel_size=3, stride=2, padding=1)
self.layer1 = self._make_layer(block, 64, layers[0])
self.layer2 = self._make_layer(block, 128, layers[1]) # , stride=2)
self.layer3 = self._make_layer(block, 256, layers[2]) # , stride=2)
self.layer4 = self._make_layer(block, 512, layers[3]) # , stride=2)
self.avgpool = nn.AvgPool1d(7, stride=1)
self.fc = nn.Linear(22528, num_classes) # 512 * block.expansion
self.arch = arch
for m in self.modules():
if isinstance(m, nn.Conv1d):
n = m.kernel_size[0] * m.out_channels
m.weight.data.normal_(0, math.sqrt(2. / n))
elif isinstance(m, nn.BatchNorm1d):
m.weight.data.fill_(1)
m.bias.data.zero_()
def _make_layer(self, block, planes, blocks, stride=1):
downsample = None
if stride != 1 or self.inplanes != planes * block.expansion:
downsample = nn.Sequential(
nn.Conv1d(self.inplanes, planes * block.expansion,
kernel_size=1, stride=stride, bias=False),
nn.BatchNorm1d(planes * block.expansion),
)
layers = []
layers.append(block(self.inplanes, planes, stride, downsample))
self.inplanes = planes * block.expansion
for i in range(1, blocks):
layers.append(block(self.inplanes, planes))
return nn.Sequential(*layers)
def forward(self, x):
x = self.conv1(x)
x = self.bn1(x)
x = self.relu(x)
x = self.maxpool(x)
x = self.layer1(x)
x = self.layer2(x)
x = self.layer3(x)
x = self.layer4(x)
x = self.avgpool(x)
x = x.view(x.size(0), -1)
# print(x.size())
x = self.fc(x)
return x
def resnet18(pretrained=False, **kwargs):
"""Constructs a ResNet-18 model.
Arguments:
pretrained (bool): If True, returns a model pre-trained on ImageNet
"""
model = ResNet(BasicBlock, [2, 2, 2, 2], arch='resnet18', **kwargs)
return model
def resnet34(pretrained=False, **kwargs):
"""Constructs a ResNet-34 model.
Arguments:
pretrained (bool): If True, returns a model pre-trained on ImageNet
"""
model = ResNet(BasicBlock, [3, 4, 6, 3], arch='resnet34', **kwargs)
return model
def resnet50(pretrained=False, **kwargs):
"""Constructs a ResNet-50 model.
Arguments:
pretrained (bool): If True, returns a model pre-trained on ImageNet
"""
model = ResNet(Bottleneck, [3, 4, 6, 3], arch='resnet50', **kwargs)
return model
def resnet101(pretrained=False, **kwargs):
"""Constructs a ResNet-101 model.
Arguments:
pretrained (bool): If True, returns a model pre-trained on ImageNet
"""
model = ResNet(Bottleneck, [3, 4, 23, 3], arch='resnet101', **kwargs)
return model
def resnet152(pretrained=False, **kwargs):
"""Constructs a ResNet-152 model.
Arguments:
pretrained (bool): If True, returns a model pre-trained on ImageNet
"""
model = ResNet(Bottleneck, [3, 8, 36, 3], arch='resnet152', **kwargs)
return model
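A shape-check sketch for the 1D ResNet above; an input length of 200 matches the fixed-length comment in the source, since 512 * 44 = 22528 features reach the fc layer after the average pool:

```python
# Sketch: verify the fixed input length the fc layer (22528 features) implies.
import torch

model = resnet18(num_classes=5)
x = torch.randn(8, 1, 200)   # (batch, channels, length): 200-sample series
print(model(x).shape)        # torch.Size([8, 5])
```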
avg_line_length: 28.863454 | max_line_length: 76 | alphanum_fraction: 0.594128
quality signals (schema order): 918 | 7,187 | 4.582789 | 0.208061 | 0.024958 | 0.021393 | 0.024245 | 0.47112 | 0.439743 | 0.365106 | 0.342287 | 0.332303 | 0.313763 | 0 | 0.03506 | 0.285655 | 7,187 | 248 | 77 | 28.979839 | 0.784379 | 0.109225 | 0 | 0.361963 | 0 | 0 | 0.024278 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.09816 | false | 0 | 0.07362 | 0 | 0.282209 | 0.02454
raw qsc_* block: all 0 (null for qsc_code_frac_words_unique and qsc_code_frac_lines_string_concat) | effective: 1 | hits: 0
hexsha: c82fbb8e27137ecf71edbf4cda57e644ec71cfa9 | size: 1,398 | ext: py | lang: Python
max_stars: other_models/AAN/adaptive-aggregation-networks/dataloaders/cifar100_dirmap.py @ kreimanlab/AugMem (head cb0e8d39eb0c469da46c7c550c19229927a2bec5), licenses ["MIT"], count 6, events 2021-04-07T15:17:24.000Z to 2021-07-07T04:37:29.000Z
max_issues: other_models/Remind/image_classification_experiments/dataloaders/cifar100_dirmap.py @ kreimanlab/AugMem (same head/licenses), count null, events null
max_forks: same path/repo/head/licenses as max_issues, count null, events null
content:
import os
import sys
import pandas as pd
# USAGE: python cifar100_dirmap.py <path to cifar100 dataset directory>
# Organized cifar100 directory can be created using cifar2png: https://github.com/knjcode/cifar2png
if len(sys.argv) > 1:
DATA_DIR = sys.argv[1]
else:
DATA_DIR = "./../data/cifar100"
# Get class names
class_names = [
file for file in os.listdir(os.path.join(DATA_DIR, "train"))
if os.path.isdir(os.path.join(DATA_DIR, "train", file))
]
class_names.sort()
class_dicts = [{"class": class_names[i], "label": i} for i in range(len(class_names))]
pd.DataFrame(class_dicts).to_csv("cifar100_classes.csv", index=False)
image_list = []
for train_test_idx, train_test in enumerate(["train", "test"]):
for img_class in class_names:
img_files = [f for f in os.listdir(os.path.join(DATA_DIR, train_test, img_class)) if f.endswith(".png")]
for fname in img_files:
image_list.append({
"class": img_class,
"object": 0,
"session": train_test_idx,
"im_path": os.path.join(train_test, img_class, fname),
})
img_df = pd.DataFrame(image_list)
img_df = img_df.sort_values(by=["class", "object", "session", "im_path"], ignore_index=True)
img_df["im_num"] = img_df.groupby(["class", "object", "session"]).cumcount() + 1
img_df.to_csv("cifar100_dirmap.csv")
print(img_df.head())
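A follow-on sketch, assuming the script above has just been run in the current directory, for checking the expected 500 train / 100 test images per CIFAR-100 class:

```python
# Read the dirmap back (to_csv above wrote the index as column 0).
import pandas as pd

dirmap = pd.read_csv("cifar100_dirmap.csv", index_col=0)
# session 0 = train, 1 = test; expect 500 and 100 rows per class respectively.
print(dirmap.groupby(["class", "session"]).size().head())
```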
avg_line_length: 34.95 | max_line_length: 112 | alphanum_fraction: 0.666667
quality signals (schema order): 213 | 1,398 | 4.164319 | 0.366197 | 0.039459 | 0.045096 | 0.047351 | 0.099211 | 0.099211 | 0.074408 | 0.074408 | 0.074408 | 0 | 0 | 0.021053 | 0.184549 | 1,398 | 39 | 113 | 35.846154 | 0.757018 | 0.130901 | 0 | 0 | 0 | 0 | 0.135425 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.1 | 0 | 0.1 | 0.033333
raw qsc_* block: all 0 (null for qsc_code_frac_words_unique and qsc_code_frac_lines_string_concat) | effective: 1 | hits: 0
hexsha: c82fd0f2d54b784533c3c8e4ad5838457eb0383a | size: 5,616 | ext: py | lang: Python
max_stars: tests/integration_tests/data_steward/cdr_cleaner/cleaning_rules/remove_participant_data_past_deactivation_date_test.py @ lrwb-aou/curation (head e80447e56d269dc2c9c8bc79e78218d4b0dc504c), licenses ["MIT"], count 16, events 2017-06-30T20:05:05.000Z to 2022-03-08T21:03:19.000Z
max_issues: same path/repo/head/licenses, count 342, events 2017-06-23T21:37:40.000Z to 2022-03-30T16:44:16.000Z
max_forks: same path/repo/head/licenses, count 33, events 2017-07-01T00:12:20.000Z to 2022-01-26T18:06:53.000Z
content:
"""
Ensures there is no data past the deactivation date for deactivated participants.
Original Issue: DC-686
The intent is to sandbox and drop records dated after the date of deactivation for participants
who have deactivated from the Program
This test will mock calling the PS API and provide a returned value. Everything
within the bounds of our team will be tested.
"""
# Python imports
import mock
import os
# Third party imports
import pandas as pd
# Project imports
from app_identity import PROJECT_ID
from common import OBSERVATION
from cdr_cleaner.cleaning_rules.remove_participant_data_past_deactivation_date import (
RemoveParticipantDataPastDeactivationDate)
from constants.retraction.retract_deactivated_pids import DEACTIVATED_PARTICIPANTS
from tests.integration_tests.data_steward.cdr_cleaner.cleaning_rules.bigquery_tests_base import BaseTest
class RemoveParticipantDataPastDeactivationDateTest(
BaseTest.CleaningRulesTestBase):
@classmethod
def setUpClass(cls):
print('**************************************************************')
print(cls.__name__)
print('**************************************************************')
super().initialize_class_vars()
# set the test project identifier
project_id = os.environ.get(PROJECT_ID)
cls.project_id = project_id
# set the expected test datasets
dataset_id = os.environ.get('COMBINED_DATASET_ID')
cls.dataset_id = dataset_id
sandbox_id = f"{dataset_id}_sandbox"
cls.sandbox_id = sandbox_id
cls.kwargs = {
'table_namer': 'bar_ds',
'api_project_id': 'foo-project-id'
}
cls.rule_instance = RemoveParticipantDataPastDeactivationDate(
project_id, dataset_id, sandbox_id, **cls.kwargs)
sb_table_names = cls.rule_instance.get_sandbox_tablenames()
cls.fq_sandbox_table_names = [
f'{project_id}.{sandbox_id}.{table_name}'
for table_name in sb_table_names
]
# append table name here to ensure proper cleanup
cls.fq_sandbox_table_names.append(
f"{project_id}.{sandbox_id}.{DEACTIVATED_PARTICIPANTS}")
cls.fq_table_names = [
f"{project_id}.{dataset_id}.{tablename}"
for tablename in cls.rule_instance.affected_tables
]
cls.fq_obs_table = [
table for table in cls.fq_table_names if 'observation' in table
][0]
# call super to set up the client, create datasets, and create
# empty test tables
# NOTE: does not create empty sandbox tables.
super().setUpClass()
def setUp(self):
"""
Add data to the tables for the rule to run on.
"""
insert_fake_data_tmpls = [
self.jinja_env.from_string("""
INSERT INTO `{{fq_table_name}}`
(observation_id, person_id, observation_concept_id, observation_date,
observation_type_concept_id, observation_source_concept_id)
VALUES
-- Values to exist after running the cleaning rule --
-- 801 is before the user deactivates --
-- 802, the user doesn't deactivate --
(801, 1, 1585899, date('2019-05-01'), 45905771, 111111),
(802, 2, 1585899, date('2019-05-01'), 45905771, 222222),
-- Values that should be removed by the cleaning rule --
-- 804 is after person 1 deactivates --
-- 805 is after user 3 deactivates --
(804, 1, 1585899, date('2020-05-01'), 45905771, null),
(805, 3, 1585899, date('2020-05-01'), 45905771, 45)
""")
]
self.load_statements = []
# create the string(s) to load the data
for tmpl in insert_fake_data_tmpls:
query = tmpl.render(fq_table_name=self.fq_obs_table)
self.load_statements.append(query)
super().setUp()
@mock.patch(
'utils.participant_summary_requests.get_deactivated_participants')
@mock.patch('retraction.retract_utils.is_deid_label_or_id')
def test_removing_data_past_deactivated_date(self, mock_deid, mock_func):
"""
Validate deactivated participant records are dropped via cleaning rule.
Validates pre-conditions, test execution and post conditions based on
the load statements and the tables_and_counts variable. Uses a mock to
return a staged data frame object for this test instead of calling
the PS API.
"""
columns = ['deactivated_date', 'person_id', 'suspension_status']
values = [
['2020-01-01', 1, 'NO_CONTACT'], # corresponds with record 804
['2020-01-01', 3, 'NO_CONTACT'] # corresponds with record 805
]
deactivated_df = pd.DataFrame(values, columns=columns)
mock_func.return_value = deactivated_df
mock_deid.return_value = False
self.load_test_data(self.load_statements)
# Using the 0 position because there is only one sandbox table and
# one affected OMOP table
obs_sandbox = [
table for table in self.fq_sandbox_table_names
if 'observation' in table
][0]
tables_and_counts = [{
'name': 'observation',
'fq_table_name': self.fq_obs_table,
'fq_sandbox_table_name': obs_sandbox,
'fields': ['observation_id'],
'loaded_ids': [801, 802, 804, 805],
'sandboxed_ids': [804, 805],
'cleaned_values': [(801,), (802,)]
}]
self.default_test(tables_and_counts)
avg_line_length: 37.44 | max_line_length: 104 | alphanum_fraction: 0.639067
quality signals (schema order): 679 | 5,616 | 5.055965 | 0.343152 | 0.028838 | 0.016021 | 0.016604 | 0.131372 | 0.064084 | 0.032625 | 0 | 0 | 0 | 0 | 0.045487 | 0.256232 | 5,616 | 149 | 105 | 37.691275 | 0.776395 | 0.209758 | 0 | 0.042105 | 0 | 0 | 0.332104 | 0.106015 | 0 | 0 | 0 | 0 | 0 | 1 | 0.031579 | false | 0 | 0.084211 | 0 | 0.126316 | 0.031579
raw qsc_* block: all 0 (null for qsc_code_frac_words_unique and qsc_code_frac_lines_string_concat) | effective: 1 | hits: 0
hexsha: c834981294e35ab677847178ee1ed2e7e3411bb0 | size: 2,476 | ext: py | lang: Python
max_stars: charlie2/tools/trial.py @ sammosummo/Charlie2 (head e856b9bfc83c11e57a63d487fa14a63764e3f6ae), licenses ["MIT"], count 5, events 2019-10-10T08:22:29.000Z to 2021-04-09T02:34:13.000Z
max_issues: same path/repo/head/licenses, count 20, events 2018-06-20T21:15:48.000Z to 2018-09-06T17:13:46.000Z
max_forks: same path/repo/head/licenses, count 3, events 2019-11-24T04:10:40.000Z to 2020-04-04T07:50:57.000Z
content:
"""Defines the trial class.
"""
from datetime import datetime
from logging import getLogger
logger = getLogger(__name__)
class Trial(dict):
def __init__(self, *args, **kwds) -> None:
"""Create a trial object.
Trial objects are fancy dictionaries whose items are also attributes. They are
initialised exactly like dictionaries except that the resulting object must
contain the attribute `'trial_number'`. Trials typically contain several other
attributes in addition to those listed below. Trials from the same experiment
should contain the same attributes.
"""
super(Trial, self).__init__(*args, **kwds)
logger.debug(f"initialised {type(self)}")
self.__dict__ = self
defaults = {
"block_number": 0,
"status": "pending",
"practice": False,
"resumed_from_here": False,
"started_timestamp": datetime.now(),
"correct": None,
"reason_skipped": "not skipped",
"finished_timestamp": None,
# "_remaining_trials_in_block": None,
# "_remaining_trials_in_test": None,
}
self.__dict__.update({**defaults, **self.__dict__})
assert "trial_number" in self.__dict__, "must contain trial_number"
assert isinstance(self.trial_number, int), "trial_number must be an int"
if self.block_number == 0:
self.__dict__["first_block"] = True
else:
self.__dict__["first_block"] = False
if self.trial_number == 0:
self.__dict__["first_trial_in_block"] = True
else:
self.__dict__["first_trial_in_block"] = False
if self.first_block and self.first_trial_in_block:
self.__dict__["first_trial_in_test"] = True
else:
self.__dict__["first_trial_in_test"] = False
# rtib = self._remaining_trials_in_block
# if rtib is not None:
# if len(rtib) == 0:
# self.__dict__["last_trial_in_block"] = True
# else:
# self.__dict__["last_trial_in_block"] = False
# rtit = self._remaining_trials_in_test
# if rtit is not None:
# if len(rtit) == 0:
# self.__dict__["last_trial_in_test"] = True
# else:
# self.__dict__["last_trial_in_test"] = False
logger.debug("finished constructing trial object")
avg_line_length: 35.884058 | max_line_length: 87 | alphanum_fraction: 0.600969
quality signals (schema order): 282 | 2,476 | 4.826241 | 0.329787 | 0.082292 | 0.057311 | 0.05878 | 0.225569 | 0.185158 | 0.114622 | 0 | 0 | 0 | 0 | 0.002885 | 0.300081 | 2,476 | 68 | 88 | 36.411765 | 0.782458 | 0.348142 | 0 | 0.088235 | 0 | 0 | 0.220416 | 0 | 0 | 0 | 0 | 0 | 0.058824 | 1 | 0.029412 | false | 0 | 0.058824 | 0 | 0.117647 | 0
raw qsc_* block: all 0 (null for qsc_code_frac_words_unique and qsc_code_frac_lines_string_concat) | effective: 1 | hits: 0
hexsha: c835c38f6e541b5231eac621e19ad8646fab5eb5 | size: 1,839 | ext: py | lang: Python
max_stars: categorize_reviews.py @ curtislb/ReviewTranslation (head b2d14d349b6016d275fa22532eae6b67af243a55), licenses ["Apache-2.0"], count null, events null
max_issues: same path/repo/head/licenses, count null, events null
max_forks: same path/repo/head/licenses, count null, events null
content:
#!/usr/bin/env python
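# NOTE: written for Python 2 (uses xrange; replace with range on Python 3).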
import ast
import sys
import nltk
import numpy as np
from review_data import read_reviews
###############################################################################
def main():
low = 3.0
high = 4.0
target_language = u"english"
topics = []
with open(sys.argv[1]) as infile:
for topic in infile:
topics.append(ast.literal_eval(topic))
outfiles = []
prefix = sys.argv[3]
for i in xrange(len(topics)):
outfile = []
outfile.append(open(prefix + str(i) + "-.json" ,"w"))
outfile.append(open(prefix + str(i) + "=.json" ,"w"))
outfile.append(open(prefix + str(i) + "+.json" ,"w"))
outfiles.append(outfile)
counter = 0
for review in read_reviews(sys.argv[2]):
if review['lang'] != target_language:
continue
text = review['text']
tokens = nltk.word_tokenize(text)
        best_value = [0] * len(topics)
        for token in tokens:
            for i in range(len(topics)):  # was xrange (Python 2 only)
                if token in topics[i]:
                    best_value[i] += topics[i][token]
rating = review['rating']
del review['lang']
del review['rating']
if rating < low:
outfiles[np.argmax(best_value)][0].write(str(review) + '\n')
elif rating > high:
outfiles[np.argmax(best_value)][2].write(str(review) + '\n')
else:
outfiles[np.argmax(best_value)][1].write(str(review) + '\n')
        counter += 1
        if counter % 10000 == 0:
for outfile in outfiles:
for ofile in outfile:
ofile.flush()
for outfile in outfiles:
for ofile in outfile:
ofile.close()
if __name__ == '__main__':
main()
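
# Assumed invocation (argument roles inferred from the sys.argv usage above):
#     python categorize_reviews.py <topics_file> <reviews_file> <output_prefix>
# Each review is routed to <prefix><best_topic>-/=/+.json depending on whether
# its rating falls below `low`, above `high`, or in between.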
| 26.271429
| 79
| 0.504622
| 215
| 1,839
| 4.223256
| 0.334884
| 0.049559
| 0.056167
| 0.075991
| 0.327093
| 0.244493
| 0.198238
| 0.198238
| 0.198238
| 0.105727
| 0
| 0.015335
| 0.326264
| 1,839
| 69
| 80
| 26.652174
| 0.717514
| 0.010875
| 0
| 0.117647
| 0
| 0
| 0.037953
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.019608
| false
| 0
| 0.098039
| 0
| 0.117647
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
c83adde56479731d1abd45b0e5be159767406e09
| 1,063
|
py
|
Python
|
Two_Sum_1.py
|
JazzikPeng/Algorithm-in-Python
|
915135b1cdd02a6bb8d7068a54b2f497b2ec31d4
|
[
"MIT"
] | 3
|
2018-02-05T06:15:57.000Z
|
2019-04-07T23:33:07.000Z
|
Two_Sum_1.py
|
JazzikPeng/Algorithm-in-Python
|
915135b1cdd02a6bb8d7068a54b2f497b2ec31d4
|
[
"MIT"
] | null | null | null |
Two_Sum_1.py
|
JazzikPeng/Algorithm-in-Python
|
915135b1cdd02a6bb8d7068a54b2f497b2ec31d4
|
[
"MIT"
] | null | null | null |
class Solution:
def twoSum(self, nums, target):
"""
:type nums: List[int]
:type target: int
:rtype: List[int]
"""
        # s holds the complements still being sought; d maps each seen value to its index
        s = {target - nums[0]}
        d = {nums[0]: 0}
for i in range(1, len(nums)):
if nums[i] in s:
return [d[target-nums[i]], i]
else:
s.add(target-nums[i])
d[nums[i]] = i
return None
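# NOTE: the second definition below shadows the first Solution class on import.
# It is an alternative O(n^2) approach that destructively overwrites positions
# in `nums` with sentinel strings ('current', 'marked', 'other', 'visited').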
class Solution(object):
def twoSum(self, nums, target):
"""
:type nums: List[int]
:type target: int
:rtype: List[int]
"""
for i in nums:
nums[nums.index(i)] = 'current'
if (target - i) in nums and nums[nums.index(target - i)]!='current':
print(i, nums.index('current'), nums)
nums[nums.index('current')] = 'marked'
nums[nums.index(target - i)] = 'other'
return [nums.index('marked'), nums.index('other')]
nums[nums.index('current')] = 'visited'
| 30.371429
| 80
| 0.464722
| 127
| 1,063
| 3.889764
| 0.291339
| 0.145749
| 0.131579
| 0.068826
| 0.336032
| 0.255061
| 0.255061
| 0.255061
| 0.255061
| 0.255061
| 0
| 0.006088
| 0.381938
| 1,063
| 34
| 81
| 31.264706
| 0.745814
| 0.129821
| 0
| 0.095238
| 0
| 0
| 0.07565
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.095238
| false
| 0
| 0
| 0
| 0.333333
| 0.047619
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
c83f4c51440116a7f88bf4d5e46dda85c09f8606
| 2,069
|
py
|
Python
|
shortest_path_revisit_and_NP/week1/apsp_johnsons.py
|
liaoaoyuan97/standford_algorithms_specialization
|
2914fdd397ce895d986ac855e78afd7a51ceff68
|
[
"MIT"
] | null | null | null |
shortest_path_revisit_and_NP/week1/apsp_johnsons.py
|
liaoaoyuan97/standford_algorithms_specialization
|
2914fdd397ce895d986ac855e78afd7a51ceff68
|
[
"MIT"
] | null | null | null |
shortest_path_revisit_and_NP/week1/apsp_johnsons.py
|
liaoaoyuan97/standford_algorithms_specialization
|
2914fdd397ce895d986ac855e78afd7a51ceff68
|
[
"MIT"
] | 1
|
2021-01-18T19:35:48.000Z
|
2021-01-18T19:35:48.000Z
|
import time
import numpy as np
from os import path
def read_graph(filename):
i = 0
with open(path.join('.', filename), 'r') as f:
for row in f.readlines():
if i == 0:
_list = row.strip("\n").split(' ')
n_vertex, n_edge = int(_list[0]), int(_list[1])
shortest_paths = np.ones((n_vertex + 1, n_vertex + 1, n_vertex + 1)) * float('inf')
i += 1
else:
_list = row.strip("\n").split(' ')
shortest_paths[int(_list[0])][int(_list[1])][0] = float(_list[2])
for i in range(1, n_vertex + 1):
shortest_paths[i][i][0] = 0
return n_vertex, shortest_paths
def compute_apsp(n_vertex, shortest_paths):
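    # Floyd-Warshall dynamic program (despite the filename mentioning Johnson's):
    #   shortest_paths[i][j][k] = min(shortest_paths[i][j][k-1],
    #                                 shortest_paths[i][k][k-1] + shortest_paths[k][j][k-1])
    # A negative diagonal entry afterwards signals a negative cycle (return None).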
for k in range(1, n_vertex + 1):
for i in range(1, n_vertex + 1):
for j in range(1, n_vertex + 1):
if shortest_paths[i][j][k - 1] > (shortest_paths[i][k][k - 1] + shortest_paths[k][j][k - 1]):
shortest_paths[i][j][k] = shortest_paths[i][k][k - 1] + shortest_paths[k][j][k - 1]
else:
shortest_paths[i][j][k] = shortest_paths[i][j][k - 1]
for i in range(1, n_vertex + 1):
if shortest_paths[i][i][n_vertex] < 0:
return None
m = shortest_paths[1][2][n_vertex]
for i in range(1, n_vertex + 1):
for j in range(1, n_vertex + 1):
if i != j and shortest_paths[i][j][n_vertex] < m:
m = shortest_paths[i][j][n_vertex]
return m
if __name__ == "__main__":
    for fname in ("grh1.txt", "grh2.txt", "grh3.txt"):
        time_start = time.time()
        n_vertex, shortest_paths = read_graph(fname)
        print(compute_apsp(n_vertex, shortest_paths))
        print(time.time() - time_start)
| 32.84127
| 109
| 0.564524
| 316
| 2,069
| 3.455696
| 0.177215
| 0.147436
| 0.07326
| 0.074176
| 0.712454
| 0.679487
| 0.554945
| 0.541209
| 0.480769
| 0.475275
| 0
| 0.027064
| 0.285645
| 2,069
| 62
| 110
| 33.370968
| 0.711773
| 0
| 0
| 0.395833
| 0
| 0
| 0.020783
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.041667
| false
| 0
| 0.0625
| 0
| 0.166667
| 0.125
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
c84122fcd1573afd525866c481ac3a9686f3174d
| 2,448
|
py
|
Python
|
Cardio-Monitor-main/visualization.py
|
jrderek/computer-vision-exercises
|
e9735394220f8120453de70b58596ef9e87df926
|
[
"MIT"
] | null | null | null |
Cardio-Monitor-main/visualization.py
|
jrderek/computer-vision-exercises
|
e9735394220f8120453de70b58596ef9e87df926
|
[
"MIT"
] | null | null | null |
Cardio-Monitor-main/visualization.py
|
jrderek/computer-vision-exercises
|
e9735394220f8120453de70b58596ef9e87df926
|
[
"MIT"
] | null | null | null |
from matplotlib.backends.backend_agg import FigureCanvasAgg as FigureCanvas
from matplotlib.figure import Figure
import io
import random
import numpy as np
def visualizationpreprocess(age,sex,cp,trestbps,restecg,chol,fbs,thalach,exang,oldpeak,slope,ca,thal,result):
if sex=="male":
sex=1
else: sex=0
if cp=="Typical angina":
cp=0
elif cp=="Atypical angina":
cp=1
elif cp=="Non-anginal pain":
cp=2
elif cp=="Asymptomatic":
cp=2
if exang=="Yes":
exang=1
elif exang=="No":
exang=0
if fbs=="Yes":
fbs=1
elif fbs=="No":
fbs=0
if slope=="Upsloping: better heart rate with excercise(uncommon)":
slope=0
elif slope=="Flatsloping: minimal change(typical healthy heart)":
slope=1
elif slope=="Downsloping: signs of unhealthy heart":
slope=2
if thal=="fixed defect: used to be defect but ok now":
thal=2
elif thal=="reversable defect: no proper blood movement when excercising":
thal=3
elif thal=="normal":
thal=1
if restecg=="Nothing to note":
restecg=0
elif restecg=="ST-T Wave abnormality":
restecg=1
elif restecg=="Possible or definite left ventricular hypertrophy":
restecg=2
#final_list=[int(cp),int(trestbps),int(restecg),int(chol),int(fbs),int(thalach),int(exang),float(oldpeak),int(slope),int(ca),int(thal)]
normal_value1=[0.478261,0.159420,0.449275,0.550725,1.585507,1.166667,1.166667,2.543478]
user_value1=[float(cp),float(fbs),float(restecg),float(exang),float(oldpeak),float(slope),float(ca),float(thal)]
normal_value2=[134.398551,251.086957,139.101449]
user_value2=[float(trestbps),float(chol),float(thalach)]
list1=[normal_value1,user_value1]
list2=[normal_value2,user_value2]
return list1,list2
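# Hypothetical call (argument values are illustrative, not from the original):
#     list1, list2 = visualizationpreprocess(
#         63, "male", "Typical angina", 145, "Nothing to note", 233, "Yes",
#         150, "No", 2.3, "Downsloping: signs of unhealthy heart", 0, "normal", 1)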
# def create_figure1(data1):
# fig = plt.figure()
# axis = fig.add_axes([0,0,1,1])
# y1 = data1[0]
# y2 = data1[1]
# width = 0.30
# x=np.arange(8)
# axis.bar(x-0.3, y1, width, color='cyan')
# axis.bar(x, y2, width, color='orange')
# # axis.bar(xs, ys)
# # axis.xticks(x, ['cp','chol','fbs','exang','oldpeak','slope','ca','thal'])
# # axis.xlabel("Heart health defining attributes")
# axis.set_ylabel("values")
# # axis.legend(["Normal", "Yours"])
# axis.set_title('Your data corresponding to normal data')
# return fig
| 31.792208
| 139
| 0.635212
| 348
| 2,448
| 4.428161
| 0.422414
| 0.016223
| 0.022064
| 0.024659
| 0.029851
| 0
| 0
| 0
| 0
| 0
| 0
| 0.069466
| 0.21201
| 2,448
| 76
| 140
| 32.210526
| 0.729393
| 0.276552
| 0
| 0.04
| 0
| 0
| 0.231386
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.02
| false
| 0
| 0.1
| 0
| 0.14
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
c8413d24c21af2dd79f48f95f23cc0565affc86b
| 6,006
|
py
|
Python
|
at_tmp/model/util/TMP_DB_OPT__.py
|
zuoleilei3253/zuoleilei
|
e188b15a0aa4a9fde00dba15e8300e4b87973e2d
|
[
"Apache-2.0"
] | null | null | null |
at_tmp/model/util/TMP_DB_OPT__.py
|
zuoleilei3253/zuoleilei
|
e188b15a0aa4a9fde00dba15e8300e4b87973e2d
|
[
"Apache-2.0"
] | null | null | null |
at_tmp/model/util/TMP_DB_OPT__.py
|
zuoleilei3253/zuoleilei
|
e188b15a0aa4a9fde00dba15e8300e4b87973e2d
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time : 2018/7/15 16:36
# @Author : bxf
# @File : P_DB_OPT.py
# @Software: PyCharm
import pymysql
import json
from datetime import date, datetime
from model.util import md_Config
from model.util.PUB_LOG import *
'''
Provides create/read/update/delete (CRUD) operations for the database:
'''
class DB_CONN():
def __init__(self):
        '''
        Initialise the database connection and log each step of the connection.
        '''
try:
conn = pymysql.Connect(
host=md_Config.getConfig("DATABASE1", "IP"),
port=int(md_Config.getConfig("DATABASE1", "port")),
user=md_Config.getConfig("DATABASE1", "user"),
passwd=md_Config.getConfig("DATABASE1", "password"),
db=md_Config.getConfig("DATABASE1", "db"),
charset=md_Config.getConfig("DATABASE1", "charset")
)
            exeLog(
                "Database [" + md_Config.getConfig("DATABASE1", "db")
                + "] connected successfully! Database host: "
                + md_Config.getConfig("DATABASE1", "IP"))
self.conn = conn
except Exception as e:
dataOptLog("***数据库:【 " + md_Config.getConfig("DATABASE1",
"db") + "】 连接失败,请检查连接参数!错误信息:%s" % e + "数据库环境为:" + md_Config.getConfig(
"DATABASE1", "IP"))
def db_Query_Json(self, sql):
        '''
        Get a cursor whose rows are dicts (JSON-style); consume it with
        fetchall(), fetchone() or fetchmany().
        :param sql: the query to execute
        :return: a dict-style cursor (use fetchall()/fetchone()/fetchmany())
        '''
cur = self.conn.cursor(cursor=pymysql.cursors.DictCursor)
try:
cur.execute(sql)
exeLog("***查询获取游标成功!查询语句为:" + sql)
return cur
except Exception as e:
            dataOptLog('***Query failed, please check the data! Error: %s' % e + " Query: " + sql)
        finally:
            # NOTE: the cursor and connection are closed here, before the caller
            # fetches; the returned cursor only remains usable for rows that
            # pymysql has already buffered, and the connection cannot be reused.
            cur.close()
            self.conn.close()
#
def db_Query_tuple(self, sql):
        '''
        Get a cursor whose rows are tuples; consume it with
        fetchall(), fetchone() or fetchmany().
        :param sql: the query to execute
        :return: a tuple-style cursor (use fetchall()/fetchone()/fetchmany())
        '''
cur = self.conn.cursor()
try:
cur.execute(sql)
exeLog("***查询获取游标成功!查询语句为:" + sql)
return cur
except Exception as e:
            dataOptLog('***Query failed, please check the data! Error: %s' % e + " Query: " + sql)
finally:
cur.close()
self.conn.close()
    # Database insert
def db_Insert(self, sql, params):
        '''
        Insert into the database.
        :param sql: the INSERT statement
        :param params: the values to insert
        :return: number of rows inserted
        '''
cur = self.conn.cursor()
try:
data_counts = cur.execute(sql, params)
self.conn.commit()
exeLog("***数据插入成功!执行语句为:" + sql)
return data_counts
except Exception as e:
self.conn.rollback()
            dataOptLog('***Insert failed, please check the data! Error: %s' % e + " Statement: " + sql)
finally:
cur.close()
self.conn.close()
    # Database update
def db_Update(self, sql):
        '''
        Execute an UPDATE (or other data-modifying) statement.
        :param sql: the statement to execute
        :return: number of rows affected
        '''
cur = self.conn.cursor()
try:
data_counts = cur.execute(sql)
self.conn.commit()
exeLog("***更新数据成功!更新语句为:" + sql)
return data_counts
except Exception as e:
self.conn.rollback()
            dataOptLog('***Update failed, please check the data! Error: %s' % e + " Statement: " + sql)
finally:
cur.close()
self.conn.close()
# Converts database datetime values for JSON output; pass cls=MyEncoder to the json call that returns the data
class MyEncoder(json.JSONEncoder):
def default(self, obj):
        '''
        Conversion for datetime-typed values.
        :param obj: the value being serialised
        :return: a JSON-serialisable representation
        '''
try:
# if isinstance(obj, datetime.datetime):
# return int(mktime(obj.timetuple()))
if isinstance(obj, datetime):
return obj.strftime('%Y-%m-%d %H:%M:%S')
elif isinstance(obj, date):
return obj.strftime('%Y-%m-%d')
else:
return json.JSONEncoder.default(self, obj)
except Exception as e:
return False
# Return database rows directly in JSON form; returns False when there is no data
def getJsonFromDatabase(sql):
cur = DB_CONN().db_Query_Json(sql)
    if cur.rowcount == 0:
        exeLog("***No rows in the database")
        return False
    else:
        exeLog("***Returned JSON data successfully")
        return cur.fetchall()
def getTupleFromDatabase(sql):
cur = DB_CONN().db_Query_tuple(sql)
    if cur.rowcount == 0:
        exeLog("***No rows in the database")
        return False
    else:
        exeLog("***Returned tuple data successfully")
        return cur.fetchall()
def insertToDatabase(table,data,**kwargs):
    '''
    :param table: table name
    :param data: the values to insert
    :return: number of rows inserted
    '''
col_list=dict()
# print(type(data))
# print(type(kwargs))
col_list.update(data)
col_list.update(kwargs)
col_lists=col_list.keys()
col=''
for j in col_lists:
col=col+j+','
val=[]
for i in col_lists:
val_one=col_list[i]
val.append(val_one)
var_lists=tuple(val)
sql='INSERT INTO '+table +' ( '+ col[:-1] +' ) VALUE '+str(var_lists)
exeLog("******生成添加语句成功!~~***")
result=DB_CONN().db_Update(sql)
exeLog("******记录新增成功******")
return result
def updateToDatabase(table, data, col, val):
    '''
    Update a record.
    :param table: table name
    :param data: columns and values to update
    :param col: column used to locate the row
    :param val: value of the locating column
    :return: the generated UPDATE statement (note: it is returned, not executed)
    '''
col_lists = tuple(data.keys())
list_one = ""
for i in col_lists:
val_one = data[i]
list_one = list_one + i + '= "' + str(val_one) + '",'
sql = "UPDATE " + table + ' SET ' + list_one[:-1] + ' WHERE ' + col + ' = "' + str(val) + '"'
exeLog("生成更新语句成功!")
return sql
| 28.330189
| 129
| 0.494006
| 604
| 6,006
| 4.817881
| 0.293046
| 0.035739
| 0.058419
| 0.089347
| 0.434364
| 0.391065
| 0.295533
| 0.248797
| 0.248797
| 0.248797
| 0
| 0.006849
| 0.367965
| 6,006
| 211
| 130
| 28.464455
| 0.759747
| 0.141692
| 0
| 0.452381
| 0
| 0
| 0.116274
| 0.019451
| 0
| 0
| 0
| 0
| 0
| 1
| 0.079365
| false
| 0.007937
| 0.039683
| 0
| 0.246032
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
c8422d03ff6c162a7a235c164df43ce7fd4202c5
| 1,025
|
py
|
Python
|
setup.py
|
drougge/wellpapp-pyclient
|
43d66a1e2a122ac87e477905c5e2460e11be3c26
|
[
"MIT"
] | null | null | null |
setup.py
|
drougge/wellpapp-pyclient
|
43d66a1e2a122ac87e477905c5e2460e11be3c26
|
[
"MIT"
] | null | null | null |
setup.py
|
drougge/wellpapp-pyclient
|
43d66a1e2a122ac87e477905c5e2460e11be3c26
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
from setuptools import setup
fuse_reqs = [
'fuse-python >= 0.3.1; python_version < "3"',
'fuse-python >= 1.0.0; python_version > "3"',
]
with open('README.md', 'r') as f:
    readme = f.read()
readme = readme.replace(
'(FUSE.md)',
'(https://github.com/drougge/wellpapp-pyclient/blob/master/FUSE.md)'
)
setup(
name='wellpapp',
version='CHANGEME.dev', # set this for each release
packages=[
'wellpapp',
'wellpapp.shell',
],
entry_points={
'console_scripts': [
'wp = wellpapp.__main__:main',
],
},
install_requires=[
'Pillow >= 3.1.2',
'PyGObject >= 3.20',
],
extras_require={
'fuse': fuse_reqs,
'all': fuse_reqs,
},
python_requires='>=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*',
author='Carl Drougge',
author_email='bearded@longhaired.org',
url='https://github.com/drougge/wellpapp-pyclient',
license='MIT',
description='Client library and application for the wellpapp image tagging system.',
long_description=readme,
long_description_content_type='text/markdown',
)
| 21.808511
| 85
| 0.659512
| 138
| 1,025
| 4.76087
| 0.572464
| 0.03653
| 0.009132
| 0.063927
| 0.112633
| 0.112633
| 0
| 0
| 0
| 0
| 0
| 0.03044
| 0.134634
| 1,025
| 46
| 86
| 22.282609
| 0.710259
| 0.045854
| 0
| 0.076923
| 0
| 0.051282
| 0.517418
| 0.045082
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.025641
| 0
| 0.025641
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
c8427460e2bcf42333ee94274c805a7a6ae2d6ab
| 715
|
py
|
Python
|
students/K33421/Zmievskiy_Danil/Lr1/Task04/server.py
|
DanilZmievskiy/ITMO_ICT_WebDevelopment_2020-2021
|
8bb6e90e6592c04f4b959184310e0890aaa24e16
|
[
"MIT"
] | null | null | null |
students/K33421/Zmievskiy_Danil/Lr1/Task04/server.py
|
DanilZmievskiy/ITMO_ICT_WebDevelopment_2020-2021
|
8bb6e90e6592c04f4b959184310e0890aaa24e16
|
[
"MIT"
] | null | null | null |
students/K33421/Zmievskiy_Danil/Lr1/Task04/server.py
|
DanilZmievskiy/ITMO_ICT_WebDevelopment_2020-2021
|
8bb6e90e6592c04f4b959184310e0890aaa24e16
|
[
"MIT"
] | null | null | null |
import socket
import threading
conn = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
conn.bind(('', 7070))
conn.listen()
clients = []
print('Start Server')
def new_client():
    while True:
        clientsocket, address = conn.accept()
        if clientsocket not in clients:
            clients.append(clientsocket)
        threading.Thread(target=chat, args=[clientsocket, address]).start()
def chat(clientsocket, address):
    print(address[0], address[1])
while True:
try:
data = clientsocket.recv(1024)
for client in clients:
if client == clientsocket:
continue
client.send(data)
except Exception:
clients.remove(clientsocket)
clientsocket.close()
threading.Thread(target=new_client).start()  # was target=new_client(), which called it synchronously instead of in a thread
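
# A minimal client sketch for manual testing (hypothetical, not in the original):
#     import socket
#     s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
#     s.connect(('localhost', 7070))
#     s.send(b'hello')        # relayed by the server to every other client
#     print(s.recv(1024))     # messages from other clients arrive here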
| 21.029412
| 74
| 0.711888
| 87
| 715
| 5.804598
| 0.505747
| 0.112871
| 0.083168
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.016694
| 0.162238
| 715
| 34
| 75
| 21.029412
| 0.826377
| 0
| 0
| 0.076923
| 0
| 0
| 0.01676
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.076923
| false
| 0
| 0.076923
| 0
| 0.153846
| 0.076923
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
c843403db7b167cca6757d1608c2ce426ef07684
| 1,563
|
py
|
Python
|
cutelog/pop_in_dialog.py
|
CS-GSI/cutelog
|
faca7a7bfd16559973178d3c87cb3b0c6667d4d3
|
[
"MIT"
] | 125
|
2018-07-27T15:23:35.000Z
|
2022-03-09T18:18:08.000Z
|
cutelog/pop_in_dialog.py
|
CS-GSI/cutelog
|
faca7a7bfd16559973178d3c87cb3b0c6667d4d3
|
[
"MIT"
] | 12
|
2019-02-02T01:02:59.000Z
|
2022-03-14T08:31:41.000Z
|
cutelog/pop_in_dialog.py
|
CS-GSI/cutelog
|
faca7a7bfd16559973178d3c87cb3b0c6667d4d3
|
[
"MIT"
] | 26
|
2018-08-24T23:49:58.000Z
|
2022-01-27T12:29:38.000Z
|
from qtpy.QtCore import Signal
from qtpy.QtWidgets import QDialog, QDialogButtonBox, QListWidget, QVBoxLayout
class PopInDialog(QDialog):
pop_in_tabs = Signal(list)
def __init__(self, parent, loggers):
super().__init__(parent)
self.loggers = loggers
self.setupUi()
def setupUi(self):
self.resize(200, 320)
self.vbox = QVBoxLayout(self)
self.listWidget = QListWidget(self)
self.listWidget.setSelectionMode(self.listWidget.MultiSelection)
self.listWidget.selectionModel().reset()
self.vbox.addWidget(self.listWidget)
self.buttonBox = QDialogButtonBox(QDialogButtonBox.Ok | QDialogButtonBox.Cancel, self)
self.vbox.addWidget(self.buttonBox)
self.buttonBox.accepted.connect(self.accept)
self.listWidget.doubleClicked.connect(self.accept)
self.buttonBox.rejected.connect(self.reject)
self.fill_logger_list()
def fill_logger_list(self):
for logger in self.loggers:
if logger.popped_out:
self.listWidget.addItem(logger.name)
self.listWidget.setCurrentRow(0)
def accept(self, index=None):
names = []
if index is not None:
item = self.listWidget.itemFromIndex(index)
names.append(item.text())
else:
for item in self.listWidget.selectedItems():
names.append(item.text())
if len(names) > 0:
self.pop_in_tabs.emit(names)
self.done(0)
def reject(self):
self.done(0)
| 31.26
| 94
| 0.643634
| 172
| 1,563
| 5.75
| 0.383721
| 0.141557
| 0.0182
| 0.042467
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.008591
| 0.255278
| 1,563
| 49
| 95
| 31.897959
| 0.841065
| 0
| 0
| 0.102564
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.128205
| false
| 0
| 0.051282
| 0
| 0.230769
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
c84378df20614229cbb5ed8f3fb0fb2de32e4ad3
| 6,730
|
py
|
Python
|
MineSweeper/minesweeper.py
|
Ratnesh4193/Amazing-Python-Scripts
|
0652a6066a3eeaf31830d7235da209699c45f779
|
[
"MIT"
] | 1
|
2021-04-17T08:33:25.000Z
|
2021-04-17T08:33:25.000Z
|
MineSweeper/minesweeper.py
|
Ratnesh4193/Amazing-Python-Scripts
|
0652a6066a3eeaf31830d7235da209699c45f779
|
[
"MIT"
] | null | null | null |
MineSweeper/minesweeper.py
|
Ratnesh4193/Amazing-Python-Scripts
|
0652a6066a3eeaf31830d7235da209699c45f779
|
[
"MIT"
] | 1
|
2021-07-22T07:06:09.000Z
|
2021-07-22T07:06:09.000Z
|
# Importing required libraries
from tkinter import *
from tkinter import messagebox as mb
from tkinter import ttk
import random
# function to create screen for the game
def board():
global value,w
# initialising screen
root=Tk()
root.geometry("320x335")
root.title("MineSweeper")
root.resizable(False,False)
root.eval('tk::PlaceWindow . center')
# creating label
w = Label(root, text="Start Playing!",bg='yellow',fg='red')
    # creating the 5x5 grid of buttons; each one calls game() with itself
    butlist = []
    for r in range(1, 6):
        for c in range(1, 6):
            but = Button(root, bg="grey", text="", padx=7.5, pady=5, bd=4,
                         font="digifacewide 18", height=1, width=2)
            but.config(command=lambda b=but: game(b, root))
            but.grid(row=r, column=c)
            butlist.append(but)
    # adding label to the screen
    w.grid(row=0, column=0, columnspan=6)
    # creating values for each cell: the digits 1-4 plus "b" for bomb
    vallist = ['1', '2', '3', '4'] * 5 + ['b'] * 5
    value = {}
    random.shuffle(vallist)  # shuffle for randomness
    for i in range(25):
        value[butlist[i]] = vallist[i]  # assigning values to buttons
    root.mainloop()
def game(b,tk):
if value[b]=='b': # if bomb is clicked
bomb_clicked(b,tk)
else: # if number is clicked
number_clicked(b,int(value[b]),tk)
total = 0
# function called when a bomb is clicked
def bomb_clicked(b, tk):
    # making changes to the clicked cell
    b['text'] = "\U0001f600"
    b['relief'] = SUNKEN
    b['bg'] = "orange"
    global value, total
    # displaying message and asking for replay
    a = mb.askquestion("YOU LOSE", " Your score : " + str(total) + "\nDo you want to play again??")
    tk.destroy()  # exiting current board
    if a == 'yes':
        total = 0
        board()
def number_clicked(b, n, tk):
    global value, total
    if n != 0 and b['text'] == "":
        # making changes to the cell and updating the score
        b['text'] = n
        total += n
        value[b] = '0'
        w['text'] = "Your Score : " + str(total)
        if total >= 50:  # the player wins on reaching a score of 50
            b['text'] = "\U0001f600"
            b['relief'] = SUNKEN
            b['bg'] = "orange"
            # displaying message and asking for replay
            a = mb.askquestion("YOU WON", " Your score : " + str(total) + "\nDo you want to play again??")
            tk.destroy()  # exiting current board
            if a == 'yes':
                total = 0
                board()
            # showinfo("YOU WON", "YOUR SCORE : " + str(total))
            tk.destroy()  # NOTE: redundant second destroy; raises TclError once a replayed board closes
board()
| 52.170543
| 136
| 0.649034
| 1,129
| 6,730
| 3.865368
| 0.135518
| 0.041705
| 0.068744
| 0.091659
| 0.558662
| 0.558662
| 0.552704
| 0.552704
| 0.552704
| 0.536205
| 0
| 0.088632
| 0.151709
| 6,730
| 128
| 137
| 52.578125
| 0.675775
| 0.093314
| 0
| 0.17757
| 0
| 0
| 0.126151
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.037383
| false
| 0
| 0.037383
| 0
| 0.074766
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
c84571d0d767e8fe81786eba5dfb74f8e16357fc
| 1,240
|
py
|
Python
|
tests/test_session.py
|
Streetwise/streetwise-app
|
13c1649077766e0e20d6903adcd057ae3c07cc9c
|
[
"MIT"
] | 1
|
2020-05-28T06:50:01.000Z
|
2020-05-28T06:50:01.000Z
|
tests/test_session.py
|
Streetwise/streetwise-app
|
13c1649077766e0e20d6903adcd057ae3c07cc9c
|
[
"MIT"
] | 72
|
2020-05-01T11:11:17.000Z
|
2022-02-14T09:01:50.000Z
|
tests/test_session.py
|
Streetwise/streetwise-app
|
13c1649077766e0e20d6903adcd057ae3c07cc9c
|
[
"MIT"
] | 3
|
2020-05-06T20:35:32.000Z
|
2020-05-07T15:00:51.000Z
|
""" Python unit tests """
import pytest, json
from streetwise.models import Campaign
from . import app, app_context, db
@pytest.fixture(scope="module")
def client():
app.config['TESTING'] = True
return app.test_client()
def test_campaign_all(client):
with app_context:
campaign1 = Campaign()
campaign2 = Campaign()
db.session.add(campaign1)
db.session.add(campaign2)
db.session.commit()
resp = client.get('/api/campaign/all')
assert resp.status_code == 200
result = json.loads(resp.data)
assert len(result)>1
def test_campaign_sequence(client):
with app_context:
resp = client.get('/api/campaign/next')
result1 = json.loads(resp.data)
resp = client.get('/api/campaign/next')
result2 = json.loads(resp.data)
assert result1['id'] != result2['id']
def test_campaign_post(client):
with app_context:
resp = client.post('/api/campaign/next', json={'campaign_id':None})
result1 = json.loads(resp.data)
id1 = result1['id']
resp = client.post('/api/campaign/next', json={'campaign_id':id1})
result2 = json.loads(resp.data)
assert result1['id'] != result2['id']
| 28.837209
| 75
| 0.629839
| 155
| 1,240
| 4.948387
| 0.322581
| 0.065189
| 0.084746
| 0.110821
| 0.48631
| 0.362451
| 0.237288
| 0.237288
| 0.237288
| 0.125163
| 0
| 0.019874
| 0.229032
| 1,240
| 42
| 76
| 29.52381
| 0.782427
| 0.01371
| 0
| 0.333333
| 0
| 0
| 0.110288
| 0
| 0
| 0
| 0
| 0
| 0.121212
| 1
| 0.121212
| false
| 0
| 0.090909
| 0
| 0.242424
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
c84abb9eac74cceda1f9caab92fdc8319c29f197
| 4,900
|
py
|
Python
|
crawler/spiders/weighted_index_spider.py
|
ChuangYuMing/futures_spread_analysis
|
71540671eed7ea3abba0a9a5af45f49dcf662ce3
|
[
"MIT"
] | 2
|
2019-09-19T05:11:00.000Z
|
2020-07-23T07:26:03.000Z
|
crawler/spiders/weighted_index_spider.py
|
ChuangYuMing/futures_spread_analysis
|
71540671eed7ea3abba0a9a5af45f49dcf662ce3
|
[
"MIT"
] | 11
|
2020-07-14T10:42:59.000Z
|
2022-03-02T14:54:10.000Z
|
crawler/spiders/weighted_index_spider.py
|
ChuangYuMing/futures_spread_analysis
|
71540671eed7ea3abba0a9a5af45f49dcf662ce3
|
[
"MIT"
] | null | null | null |
# encoding: utf-8
# pylint: disable=E1101
# Weighted Index (TAIEX)
# https://www.twse.com.tw/zh/page/trading/indices/MI_5MINS_HIST.html
import scrapy
from scrapy import signals, Spider
from urllib.parse import urlencode
import time
from random import randint
import datetime
import logging
from copy import copy
from dateutil.relativedelta import relativedelta
import collections
import json
from zoneinfo import ZoneInfo
# for cloud function call && scrapy crawl command call
# softlink package folder to root
try:
from package.tools import is_settle, format_number
from package.storage import Storage
except:
from spiders.package.tools import is_settle, format_number
from spiders.package.storage import Storage
class WeightedIndexSpider(scrapy.Spider):
name = 'weighted_index'
def __init__(self, category=None, *args, **kwargs):
super(WeightedIndexSpider, self).__init__(*args, **kwargs)
self.dataStorage = Storage(self.name)
self.data = collections.OrderedDict()
self.today = datetime.datetime.now(ZoneInfo("Asia/Taipei"))
self.url = 'https://www.twse.com.tw/indicesReport/MI_5MINS_HIST'
self.params = {
'response': 'json',
'date': '20110101'
}
self.startDate = getattr(self, 'start', self.getFormatDate(self.today))
self.endDate = getattr(self, 'end', self.getFormatDate(self.today))
self.startObj = self.parseDate(self.startDate)
self.endObj = self.parseDate(self.endDate)
def parseDate(self, dateString):
year = int(dateString[0:4])
month = int(dateString[4:6])
day = int(dateString[6:8])
return {
'year': year,
'month': month,
'day': day,
'datetime': datetime.date(year, month, day)
}
def getFormatDate(self, date):
year = str(date.year)
month = str(date.month) if len(str(date.month)) != 1 else "0" + str(date.month)
day = '01'
return year + month + day
    # Convert a Minguo/ROC-calendar date to the Gregorian (AD) calendar
def format_ad_date(self, date):
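        # e.g. ROC date '110/01/05' -> '2021/01/05' (ROC year 110 + 1911 = 2021)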
date_arr = date.split('/')
year = date_arr[0]
month = date_arr[1]
day = date_arr[2]
year = str(int(year) + 1911)
return year + '/' + month + '/' + day
def start_requests(self):
print('start request - %s' % self.name)
targetDateObj = copy(self.startObj)
while(targetDateObj['datetime'] <= self.endObj['datetime']):
self.params['date'] = self.getFormatDate(targetDateObj['datetime'])
url = self.url + '?' + urlencode(self.params)
yield scrapy.Request(
url=url,
callback=self.parse,
cb_kwargs=dict(targetDateObj=copy(targetDateObj)),
errback=self.handle_failure)
targetDateObj['datetime'] = targetDateObj['datetime'] + relativedelta(months=1)
targetDateObj['year'] = targetDateObj['datetime'].year
targetDateObj['month'] = targetDateObj['datetime'].month
def handle_failure(self, failure):
self.log(failure, level=logging.ERROR)
# try with a new proxy
self.log('restart from the failed url {}'.format(failure.request.url))
time.sleep(120)
yield scrapy.Request(
url=failure.request.url,
callback=self.parse,
cb_kwargs=failure.request.cb_kwargs,
errback=self.handle_failure)
def parse(self, response, targetDateObj):
print(targetDateObj['datetime'])
result = json.loads(response.text)
data = result['data']
year = targetDateObj['year']
for item in data:
datestart = self.format_ad_date(item[0])
if year not in self.data:
self.data[year] = {}
self.data[year][datestart] = {}
self.data[year][datestart]["open"] = format_number(item[1].split(".")[0]) # 開盤
self.data[year][datestart]["high"] = format_number(item[2].split(".")[0]) # 最高
self.data[year][datestart]["low"] = format_number(item[3].split(".")[0]) # 最低
self.data[year][datestart]["w_index"] = format_number(item[4].split(".")[0]) # 收盤
self.data[year][datestart]["is_settle"] = is_settle(datestart, '/')
@classmethod
def from_crawler(cls, crawler, *args, **kwargs):
spider = super(WeightedIndexSpider, cls).from_crawler(crawler, *args, **kwargs)
crawler.signals.connect(spider.spider_closed, signal=signals.spider_closed)
return spider
def spider_closed(self, spider):
for year in self.data:
newData = self.data[year]
data = dict()
try:
data = self.dataStorage.getOldData(year)
except:
pass
data.update(newData)
self.dataStorage.saveData(year, data)
| 35.507246
| 93
| 0.610408
| 559
| 4,900
| 5.271914
| 0.300537
| 0.029861
| 0.032576
| 0.042755
| 0.079403
| 0.047506
| 0.028504
| 0.028504
| 0
| 0
| 0
| 0.012469
| 0.263469
| 4,900
| 137
| 94
| 35.766423
| 0.804101
| 0.046939
| 0
| 0.091743
| 0
| 0
| 0.064433
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.082569
| false
| 0.009174
| 0.146789
| 0
| 0.284404
| 0.018349
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
c84ea79f1edbb49a2816dca5b35662a00efd9c2f
| 1,198
|
py
|
Python
|
modules/tensorflow/keras/datasets/gaussian_mixture.py
|
avogel88/compare-VAE-GAE
|
aa3419c41a58ca6c1a9c1031c0aed7e07c3d4f90
|
[
"MIT"
] | null | null | null |
modules/tensorflow/keras/datasets/gaussian_mixture.py
|
avogel88/compare-VAE-GAE
|
aa3419c41a58ca6c1a9c1031c0aed7e07c3d4f90
|
[
"MIT"
] | null | null | null |
modules/tensorflow/keras/datasets/gaussian_mixture.py
|
avogel88/compare-VAE-GAE
|
aa3419c41a58ca6c1a9c1031c0aed7e07c3d4f90
|
[
"MIT"
] | null | null | null |
import numpy as np
import os
from os.path import dirname, join
from modules.numpy import covmix, varroll
from modules.pandas import DesignMatrix
from modules.scipy.stats import gaussian_mixture
def gaussian_mixture_generate(file, train=60000, test=60000, validate=10000):
dim_x, dim_z = 784, 10
# distributions
π = [.2, .3, .5]
K, N, D = len(π), dim_z, dim_x
µ = np.zeros((K, D))
Σ = covmix(varroll(range(K), (N, D - N), (10, .1)))
x_dist = gaussian_mixture(weights=π, mean=µ, cov=Σ)
# sampling
x = DesignMatrix(x_dist.rvs(train))
y = DesignMatrix(x_dist.rvs(test))
z = DesignMatrix(x_dist.rvs(validate))
# save distribution
os.makedirs(dirname(file), exist_ok=True)
x_dist.save(file)
# save
x.to_csv(join(dirname(file), 'train.csv'))
y.to_csv(join(dirname(file), 'test.csv'))
z.to_csv(join(dirname(file), 'validate.csv'))
def gaussian_mixture_load(path):
x_dist = gaussian_mixture.load(path)
x = DesignMatrix.read_csv(join(dirname(path), 'train.csv'))
y = DesignMatrix.read_csv(join(dirname(path), 'test.csv'))
z = DesignMatrix.read_csv(join(dirname(path), 'validate.csv'))
return x_dist, x, y, z
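# Hypothetical usage (the path is illustrative; helpers come from the local modules above):
#     gaussian_mixture_generate('data/gm/dist')   # writes the distribution plus train/test/validate CSVs
#     x_dist, x, y, z = gaussian_mixture_load('data/gm/dist')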
| 29.95
| 77
| 0.674457
| 187
| 1,198
| 4.187166
| 0.336898
| 0.0447
| 0.10728
| 0.076628
| 0.268199
| 0.130268
| 0
| 0
| 0
| 0
| 0
| 0.026396
| 0.177796
| 1,198
| 39
| 78
| 30.717949
| 0.768528
| 0.037563
| 0
| 0
| 0
| 0
| 0.050523
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.074074
| false
| 0
| 0.222222
| 0
| 0.333333
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
c85276ff92552a878b6545824f777a6c37822c3a
| 7,860
|
py
|
Python
|
Desktop Assistant.py
|
PRASUNR0Y/Desktop-Assistant
|
6f07cd3bc50bfca3d3f243d9e01d1bb0ef2e9029
|
[
"MIT"
] | 15
|
2020-07-21T09:54:16.000Z
|
2022-02-08T15:34:25.000Z
|
Desktop Assistant.py
|
RisingStar522/Desktop-Assistant
|
6f07cd3bc50bfca3d3f243d9e01d1bb0ef2e9029
|
[
"MIT"
] | 1
|
2020-11-26T15:47:23.000Z
|
2020-11-26T15:47:23.000Z
|
Desktop Assistant.py
|
RisingStar522/Desktop-Assistant
|
6f07cd3bc50bfca3d3f243d9e01d1bb0ef2e9029
|
[
"MIT"
] | 16
|
2020-08-04T10:47:45.000Z
|
2022-01-14T19:29:35.000Z
|
import pyttsx3 #pip install pyttsx3
import speech_recognition as sr #pip install speechRecognition
import datetime
import wikipedia #pip install wikipedia
import webbrowser
import os
import smtplib
import random
engine = pyttsx3.init('sapi5')
voices = engine.getProperty('voices')
# print(voices[0].id)
engine.setProperty('voice', voices[1].id)
def speak(audio):
engine.say(audio)
engine.runAndWait()
def wishMe():
hour = int(datetime.datetime.now().hour)
if hour>=0 and hour<12:
speak("Good Morning! ")
elif hour>=12 and hour<17:
speak("Good Afternoon! ")
elif hour>=17 and hour<19 :
speak("Good Evening! ")
else:
speak("Good Night! ")
speak("I am your Vertual Assistant Suzi. Please tell me how may I help you")
def takeCommand():
#It takes microphone input from the user and returns string output
rr = sr.Recognizer()
with sr.Microphone() as source:
print("Listening...")
rr.pause_threshold = 1
audio = rr.listen(source)
try:
print("Recognizing...")
query = rr.recognize_google(audio, language='en-in')
print(f"User said: {query}\n")
except Exception as e:
# print(e)
print("Say that again please...")
speak("Connection error")
return "None"
return query
def sendEmail(to, content):
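    # NOTE: Gmail SMTP typically requires an app password or similar setup;
    # 'youremail@gmail.com' / 'your-password' below are placeholders to replace.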
server = smtplib.SMTP('smtp.gmail.com', 587)
server.ehlo()
server.starttls()
server.login('youremail@gmail.com', 'your-password')
server.sendmail('youremail@gmail.com', to, content)
server.close()
if __name__ == "__main__":
wishMe()
while True:
# if 1:
query = takeCommand().lower()
# Logic for executing tasks based on query
if 'wikipedia' in query:
speak('Searching Wikipedia...')
query = query.replace("wikipedia", "")
results = wikipedia.summary(query, sentences=2)
speak("According to Wikipedia")
print(results)
speak(results)
elif "hello" in query or "hello Suzi" in query:
hello1 = "Hello ! How May i Help you.."
print(hello1)
speak(hello1)
elif "who are you" in query or "about you" in query or "your details" in query:
who_are_you = "I am Suzi an A I based computer program but i can help you lot like a your assistant ! try me to give simple command !"
print(who_are_you)
speak(who_are_you)
elif 'who make you' in query or 'who made you' in query or 'who created you' in query or 'who develop you' in query:
speak(" For your information Prasun Roy Created me ! I can show you his Linked In profile if you want to see. Yes or no .....")
ans_from_user_who_made_you = takeCommand()
if 'yes' in ans_from_user_who_made_you or 'ok' in ans_from_user_who_made_you or 'yeah' in ans_from_user_who_made_you:
webbrowser.open("https://www.linkedin.com/in/prasun-roy-")
speak('opening his profile...... please wait')
elif 'no' in ans_from_user_who_made_you or 'no thanks' in ans_from_user_who_made_you or 'not' in ans_from_user_who_made_you:
speak("All right ! OK...")
else :
speak("I can't understand. Please say that again !")
elif 'open youtube' in query:
webbrowser.open("www.youtube.com")
speak("opening youtube")
elif 'open github' in query:
webbrowser.open("https://www.github.com")
speak("opening github")
elif 'open facebook' in query:
webbrowser.open("https://www.facebook.com")
speak("opening facebook")
elif 'open instagram' in query:
webbrowser.open("https://www.instagram.com")
speak("opening instagram")
elif 'open google' in query:
webbrowser.open("google.com")
speak("opening google")
elif 'open stackoverflow' in query:
webbrowser.open("stackoverflow.com")
speak("opening stackoverflow")
elif 'open yahoo' in query:
webbrowser.open("https://www.yahoo.com")
speak("opening yahoo")
elif 'open gmail' in query:
webbrowser.open("https://mail.google.com")
speak("opening google mail")
elif 'open snapdeal' in query:
webbrowser.open("https://www.snapdeal.com")
speak("opening snapdeal")
elif 'open amazon' in query or 'shop online' in query:
webbrowser.open("https://www.amazon.com")
speak("opening amazon")
elif 'open flipkart' in query:
webbrowser.open("https://www.flipkart.com")
speak("opening flipkart")
elif 'play music' in query:
speak("ok i am playing music")
music_dir = 'E:\\My MUSIC'
songs = os.listdir(music_dir)
print(songs)
os.startfile(os.path.join(music_dir, songs[0]))
elif 'video from pc' in query or "video" in query:
speak("ok i am playing videos")
            video_dir = 'E:\\My Videos'  # fixed stray backslash in the original path literal
Videos = os.listdir(video_dir)
print(Videos)
os.startfile(os.path.join(video_dir,Videos[0]))
elif 'good bye' in query:
speak("good bye")
exit()
elif "shutdown" in query:
speak("shutting down")
os.system('shutdown -s')
elif "your name" in query or "sweat name" in query:
naa_mme = "Thanks for Asking my self ! Suzi"
print(naa_mme)
speak(naa_mme)
elif "you feeling" in query:
print("feeling Very happy to help you")
speak("feeling Very happy to help you")
elif query == 'none':
continue
elif 'exit' in query or 'stop' in query or 'quit' in query :
exx_exit = 'See you soon. Bye'
speak(exx_exit)
exit()
elif 'the time' in query:
strTime = datetime.datetime.now().strftime("%H:%M:%S")
speak(f"the time is {strTime}")
elif 'open code' in query:
codePath = "D:\\vs\\Microsoft VS Code\\Code.exe"
os.startfile(codePath)
speak("opening visual studio code")
elif 'email to prasun' in query:
try:
speak("What should I say?")
content = takeCommand()
to = "prasunroy988@gmail.com"
sendEmail(to, content)
speak("Email has been sent!")
except Exception as e:
print(e)
speak("Sorry.... I am not able to send this email")
elif 'how are you' in query:
setMsgs = ['Just doing my thing!', 'I am fine!', 'Nice!']
ans_qus = random.choice(setMsgs)
speak(ans_qus)
speak(" How are you'")
ans_from_user_how_are_you = takeCommand()
if 'fine' in ans_from_user_how_are_you or 'happy' in ans_from_user_how_are_you or 'okey' in ans_from_user_how_are_you:
speak('Great')
elif 'not' in ans_from_user_how_are_you or 'sad' in ans_from_user_how_are_you or 'upset' in ans_from_user_how_are_you:
speak('Tell me how can i make you happy')
else :
speak("I can't understand. Please say that again !")
else:
tempp = query.replace(' ','+')
prasun_url="https://www.google.com/search?q="
            res_prasun = "Sorry! I can't understand, but I will search the internet for your answer!"
print(res_prasun)
speak(res_prasun)
webbrowser.open(prasun_url+tempp)
| 34.933333
| 146
| 0.575318
| 1,001
| 7,860
| 4.412587
| 0.26973
| 0.058637
| 0.034865
| 0.035318
| 0.219832
| 0.174326
| 0.096898
| 0.075617
| 0.019017
| 0.019017
| 0
| 0.005785
| 0.318193
| 7,860
| 225
| 147
| 34.933333
| 0.818436
| 0.026718
| 0
| 0.068182
| 0
| 0.011364
| 0.293602
| 0.002878
| 0
| 0
| 0
| 0
| 0
| 1
| 0.022727
| false
| 0.005682
| 0.045455
| 0
| 0.079545
| 0.073864
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
c8551c2705d7c8211e2870c34856750e96ab7d03
| 11,467
|
py
|
Python
|
exif_processing.py
|
Strubbl/upload-scripts
|
da2f73a322490c0ca572dcc21bc8ba7f68f76734
|
[
"MIT"
] | null | null | null |
exif_processing.py
|
Strubbl/upload-scripts
|
da2f73a322490c0ca572dcc21bc8ba7f68f76734
|
[
"MIT"
] | 1
|
2020-08-05T18:37:15.000Z
|
2020-08-07T14:12:56.000Z
|
exif_processing.py
|
Strubbl/upload-scripts
|
da2f73a322490c0ca572dcc21bc8ba7f68f76734
|
[
"MIT"
] | 1
|
2020-08-05T16:23:51.000Z
|
2020-08-05T16:23:51.000Z
|
"""Module responsible to parse Exif information from a image"""
import math
import datetime
from enum import Enum
from typing import Optional
# third party
import exifread
import piexif
MPH_TO_KMH_FACTOR = 1.60934
"""miles per hour to kilometers per hour conversion factor"""
KNOTS_TO_KMH_FACTOR = 1.852
"""knots to kilometers per hour conversion factor"""
class ExifTags(Enum):
"""This is a enumeration of exif tags. More info here
http://owl.phy.queensu.ca/~phil/exiftool/TagNames/GPS.html """
DATE_TIME_ORIGINAL = "EXIF DateTimeOriginal"
DATE_Time_DIGITIZED = "EXIF DateTimeDigitized"
# latitude
GPS_LATITUDE = "GPS GPSLatitude"
GPS_LATITUDE_REF = "GPS GPSLatitudeRef"
# longitude
GPS_LONGITUDE = "GPS GPSLongitude"
GPS_LONGITUDE_REF = "GPS GPSLongitudeRef"
# altitude
GPS_ALTITUDE_REF = "GPS GPSAltitudeRef"
GPS_ALTITUDE = "GPS GPSAltitude"
# timestamp
GPS_TIMESTAMP = "GPS GPSTimeStamp"
GPS_DATE_STAMP = "GPS GPSDateStamp"
GPS_DATE = "GPS GPSDate"
# speed
GPS_SPEED_REF = "GPS GPSSpeedRef"
GPS_SPEED = "GPS GPSSpeed"
# direction
GPS_DIRECTION_REF = "GPS GPSImgDirectionRef"
GPS_DIRECTION = "GPS GPSImgDirection"
class CardinalDirection(Enum):
"""Exif Enum with all cardinal directions"""
N = "N"
S = "S"
E = "E"
W = "W"
TrueNorth = "T"
MagneticNorth = "M"
class SeaLevel(Enum):
"""Exif Enum
If the reference is sea level and the
altitude is above sea level, 0 is given.
If the altitude is below sea level, a value of 1 is given and
the altitude is indicated as an absolute value in the GPSAltitude tag.
The reference unit is meters. Note that this tag is BYTE type,
unlike other reference tags."""
ABOVE = 0
BELOW = 1
class SpeedUnit(Enum):
"""Exif speed unit enum"""
KMH = "K"
MPH = "M"
KNOTS = "N"
@classmethod
def convert_mph_to_kmh(cls, mph) -> float:
"""This method converts from miles per hour to kilometers per hour"""
return mph * MPH_TO_KMH_FACTOR
@classmethod
def convert_knots_to_kmh(cls, knots) -> float:
"""This method converts from knots to kilometers per hour"""
return knots * KNOTS_TO_KMH_FACTOR
def all_tags(path) -> {str: str}:
    """Method to return Exif tags"""
    with open(path, "rb") as file:  # context manager so the handle is closed
        tags = exifread.process_file(file, details=False)
    return tags
def __dms_to_dd(dms_value) -> float:
"""DMS is Degrees Minutes Seconds, DD is Decimal Degrees.
A typical format would be dd/1,mm/1,ss/1.
When degrees and minutes are used and, for example,
fractions of minutes are given up to two decimal places,
the format would be dd/1,mmmm/100,0/1 """
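    # Worked example (illustrative): 45 deg 30' 36" -> 45 + 30/60 + 36/3600 = 45.51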
# degrees
degrees_nominator = dms_value.values[0].num
degrees_denominator = dms_value.values[0].den
degrees = float(degrees_nominator) / float(degrees_denominator)
# minutes
minutes_nominator = dms_value.values[1].num
minutes_denominator = dms_value.values[1].den
minutes = float(minutes_nominator) / float(minutes_denominator)
# seconds
seconds_nominator = dms_value.values[2].num
seconds_denominator = dms_value.values[2].den
seconds = float(seconds_nominator) / float(seconds_denominator)
# decimal degrees
return degrees + (minutes / 60.0) + (seconds / 3600.0)
def gps_latitude(gps_data: {str: str}) -> Optional[float]:
"""Exif latitude from gps_data represented by gps tags found in image exif"""
if ExifTags.GPS_LATITUDE.value in gps_data:
# latitude exists
dms_values = gps_data[ExifTags.GPS_LATITUDE.value]
_latitude = __dms_to_dd(dms_values)
if ExifTags.GPS_LATITUDE_REF.value in gps_data and \
(str(gps_data[ExifTags.GPS_LATITUDE_REF.value]) == str(CardinalDirection.S.value)):
# cardinal direction is S so the latitude should be negative
_latitude = -1 * _latitude
return _latitude
# no latitude info found
return None
def gps_longitude(gps_data: {str: str}) -> Optional[float]:
"""Exif longitude from gps_data represented by gps tags found in image exif"""
if ExifTags.GPS_LONGITUDE.value in gps_data:
# longitude exists
dms_values = gps_data[ExifTags.GPS_LONGITUDE.value]
_longitude = __dms_to_dd(dms_values)
if ExifTags.GPS_LONGITUDE_REF.value in gps_data and \
str(gps_data[ExifTags.GPS_LONGITUDE_REF.value]) == str(CardinalDirection.W.value):
# cardinal direction is W so the longitude should be negative
_longitude = -1 * _longitude
return _longitude
# no longitude info found
return None
def gps_compass(gps_data: {str: str}) -> Optional[float]:
"""Exif compass from gps_data represented by gps tags found in image exif.
reference relative to true north"""
if ExifTags.GPS_DIRECTION.value in gps_data:
# compass exists
compass_ratio = gps_data[ExifTags.GPS_DIRECTION.value].values[0]
if ExifTags.GPS_DIRECTION_REF.value in gps_data and \
gps_data[ExifTags.GPS_DIRECTION_REF.value] == CardinalDirection.MagneticNorth:
# if we find magnetic north then we don't consider a valid compass
return None
return compass_ratio.num / compass_ratio.den
# no compass found
return None
def gps_timestamp(gps_data: {str: str}) -> Optional[float]:
"""Exif gps time from gps_data represented by gps tags found in image exif.
In exif there are values giving the hour, minute, and second.
This is UTC time"""
if ExifTags.GPS_TIMESTAMP.value in gps_data:
# timestamp exists
_timestamp = gps_data[ExifTags.GPS_TIMESTAMP.value]
hours: exifread.Ratio = _timestamp.values[0]
minutes: exifread.Ratio = _timestamp.values[1]
seconds: exifread.Ratio = _timestamp.values[2]
day_timestamp = hours.num / hours.den * 3600 + \
minutes.num / minutes.den * 60 + \
seconds.num / seconds.den
if ExifTags.GPS_DATE_STAMP.value in gps_data:
# this tag is the one present in the exif documentation
# but from experience ExifTags.GPS_DATE is replacing this tag
gps_date = gps_data[ExifTags.GPS_DATE_STAMP.value].values
date_timestamp = datetime.datetime.strptime(gps_date, "%Y:%m:%d").timestamp()
return day_timestamp + date_timestamp
if ExifTags.GPS_DATE.value in gps_data:
# this tag is a replacement for ExifTags.GPS_DATE_STAMP
gps_date = gps_data[ExifTags.GPS_DATE.value].values
date_timestamp = datetime.datetime.strptime(gps_date, "%Y:%m:%d").timestamp()
return day_timestamp + date_timestamp
# no date information only hour minutes second of day -> no valid gps timestamp
return None
# no gps timestamp found
return None
def timestamp(tags: {str: str}) -> Optional[float]:
"""Original timestamp determined by the digital still camera. This is timezone corrected."""
if ExifTags.DATE_TIME_ORIGINAL.value in tags:
date_taken = tags[ExifTags.DATE_TIME_ORIGINAL.value].values
_timestamp = datetime.datetime.strptime(date_taken, "%Y:%m:%d %H:%M:%S").timestamp()
return _timestamp
if ExifTags.DATE_Time_DIGITIZED.value in tags:
date_taken = tags[ExifTags.DATE_Time_DIGITIZED.value].values
_timestamp = datetime.datetime.strptime(date_taken, "%Y:%m:%d %H:%M:%S").timestamp()
return _timestamp
# no timestamp information found
return None
def gps_altitude(gps_tags: {str: str}) -> Optional[float]:
"""GPS altitude form exif """
if ExifTags.GPS_ALTITUDE.value in gps_tags:
# altitude exists
altitude_ratio = gps_tags[ExifTags.GPS_ALTITUDE.value].values[0]
altitude = altitude_ratio.num / altitude_ratio.den
if ExifTags.GPS_ALTITUDE_REF.value in gps_tags and \
gps_tags[ExifTags.GPS_ALTITUDE_REF.value] == SeaLevel.BELOW.value:
altitude = -1 * altitude
return altitude
return None
def gps_speed(gps_tags: {str: str}) -> Optional[float]:
"""Returns GPS speed from exif in km per hour or None if no gps speed tag found"""
if ExifTags.GPS_SPEED.value in gps_tags:
# gps speed exist
speed_ratio = gps_tags[ExifTags.GPS_SPEED.value].values[0]
speed = speed_ratio.num / speed_ratio.den
if ExifTags.GPS_SPEED_REF.value in gps_tags:
if gps_tags[ExifTags.GPS_SPEED_REF.value] == SpeedUnit.MPH.value:
speed = SpeedUnit.convert_mph_to_kmh(speed)
if gps_tags[ExifTags.GPS_SPEED_REF.value] == SpeedUnit.KNOTS.value:
speed = SpeedUnit.convert_knots_to_kmh(speed)
return speed
# no gps speed tag found
return None
def add_gps_tags(path: str, gps_tags: {str: any}):
"""This method will add gps tags to the photo found at path"""
exif_dict = piexif.load(path)
for tag, tag_value in gps_tags.items():
exif_dict["GPS"][tag] = tag_value
exif_bytes = piexif.dump(exif_dict)
piexif.insert(exif_bytes, path)
def create_required_gps_tags(timestamp_gps: float,
latitude: float,
longitude: float) -> {str: any}:
"""This method will creates gps required tags """
exif_gps = {}
dms_latitude = __dd_to_dms(latitude)
dms_longitude = __dd_to_dms(longitude)
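    # Decompose the epoch timestamp into a day anchor plus H/M/S within that day,
    # e.g. timestamp_gps = 90061 -> day = 86400, hour = 1, minutes = 1, seconds = 1.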
day = int(timestamp_gps / 86400) * 86400
hour = int((timestamp_gps - day) / 3600)
minutes = int((timestamp_gps - day - hour * 3600) / 60)
seconds = int(timestamp_gps - day - hour * 3600 - minutes * 60)
day_timestamp_str = datetime.date.fromtimestamp(day).strftime("%Y:%m:%d")
exif_gps[piexif.GPSIFD.GPSTimeStamp] = [(hour, 1),
(minutes, 1),
(seconds, 1)]
exif_gps[piexif.GPSIFD.GPSDateStamp] = day_timestamp_str
exif_gps[piexif.GPSIFD.GPSLatitudeRef] = "S" if latitude < 0 else "N"
exif_gps[piexif.GPSIFD.GPSLatitude] = dms_latitude
exif_gps[piexif.GPSIFD.GPSLongitudeRef] = "W" if longitude < 0 else "E"
exif_gps[piexif.GPSIFD.GPSLongitude] = dms_longitude
return exif_gps
def add_optional_gps_tags(exif_gps: {str: any},
speed: float,
altitude: float,
compass: float) -> {str: any}:
"""This method will append optional tags to exif_gps tags dictionary"""
if speed:
exif_gps[piexif.GPSIFD.GPSSpeed] = (speed, 1)
exif_gps[piexif.GPSIFD.GPSSpeedRef] = SpeedUnit.KMH.value
if altitude:
exif_gps[piexif.GPSIFD.GPSAltitude] = (altitude, 1)
sea_level = SeaLevel.BELOW.value if altitude < 0 else SeaLevel.ABOVE.value
exif_gps[piexif.GPSIFD.GPSAltitudeRef] = sea_level
if compass:
exif_gps[piexif.GPSIFD.GPSImgDirection] = (compass, 1)
exif_gps[piexif.GPSIFD.GPSImgDirectionRef] = CardinalDirection.TrueNorth.value
def __dd_to_dms(decimal_degree) -> [(float, int)]:
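    # Inverse of __dms_to_dd; e.g. 45.51 -> [(45, 1), (30, 1), (3600, 100)], i.e. 36.00".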
decimal_degree_abs = abs(decimal_degree)
degrees = math.floor(decimal_degree_abs)
minute_float = (decimal_degree_abs - degrees) * 60
minute = math.floor(minute_float)
seconds = round((minute_float - minute) * 60 * 100)
return [(degrees, 1), (minute, 1), (seconds, 100)]
| 38.871186
| 99
| 0.669574
| 1,523
| 11,467
| 4.851609
| 0.160867
| 0.043172
| 0.018947
| 0.030857
| 0.302747
| 0.203952
| 0.166734
| 0.119096
| 0.111246
| 0.089051
| 0
| 0.011338
| 0.238511
| 11,467
| 294
| 100
| 39.003401
| 0.83486
| 0.215052
| 0
| 0.104972
| 0
| 0
| 0.038033
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.082873
| false
| 0.033149
| 0.033149
| 0
| 0.414365
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
c8559b8c4871bf63b43c48e1fd50163d6997b0b7
| 1,505
|
py
|
Python
|
NPTEL - 2017 PDSA/Nptel_EX_5.py
|
Siddharth2016/PYTHON3_prog
|
9dfa258d87f5b00779d39d9de9a49c1c6cea06be
|
[
"MIT"
] | 2
|
2019-02-26T14:06:53.000Z
|
2019-02-27T17:13:01.000Z
|
NPTEL - 2017 PDSA/Nptel_EX_5.py
|
Siddharth2016/PYTHON3_prog
|
9dfa258d87f5b00779d39d9de9a49c1c6cea06be
|
[
"MIT"
] | null | null | null |
NPTEL - 2017 PDSA/Nptel_EX_5.py
|
Siddharth2016/PYTHON3_prog
|
9dfa258d87f5b00779d39d9de9a49c1c6cea06be
|
[
"MIT"
] | 2
|
2017-12-26T07:59:57.000Z
|
2018-06-24T03:35:05.000Z
|
# NPTEL EXERCISE 5
courses = {}
students = []
grades = {}
f = 0
while(True):
S = input()
if S=="EndOfInput":
break
if S=='Courses':
f = 1
continue
elif S=='Students':
f = 2
continue
elif S=='Grades':
f = 3
continue
if f==1 :
S = S.split("~")
courses[S[0]] = S[2:]
elif f==2:
S = S.split("~")
students += [S]
elif f==3:
S = S.split("~")
        try:
            grades[S[0]].append(S[1:])
        except KeyError:  # first grade entry seen for this course
            grades[S[0]] = [S[1:]]
#print(courses)
#print(students)
#print(grades)
students.sort()
for stud in students:
roll = stud[0]
gpa = 0
count = 0
for key in grades.keys():
for res in grades[key]:
if roll==res[2]:
count += 1
                # letter grade -> grade points (unrecognised grades add 0)
                gpa += {'A': 10, 'AB': 9, 'B': 8, 'BC': 7,
                        'C': 6, 'CD': 5, 'D': 4}.get(res[3], 0)
if gpa!=0:
gpa = (gpa/count)
ans = "~".join(stud) + "~" + "{0:3.1f}".format(gpa)
else:
ans = "~".join(stud) + "~" + str(gpa)
print(ans)
| 22.462687
| 60
| 0.348173
| 171
| 1,505
| 3.064327
| 0.315789
| 0.053435
| 0.091603
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.049479
| 0.489701
| 1,505
| 66
| 61
| 22.80303
| 0.632813
| 0.038538
| 0
| 0.103448
| 0
| 0
| 0.040698
| 0
| 0.017241
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0.017241
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
c8587f2977c7befab3e26288435a9698c942b8e4
| 2,719
|
py
|
Python
|
ultron8/api/api_v1/endpoints/loggers.py
|
bossjones/ultron8
|
45db73d32542a844570d44bc83defa935e15803f
|
[
"Apache-2.0",
"MIT"
] | null | null | null |
ultron8/api/api_v1/endpoints/loggers.py
|
bossjones/ultron8
|
45db73d32542a844570d44bc83defa935e15803f
|
[
"Apache-2.0",
"MIT"
] | 43
|
2019-06-01T23:08:32.000Z
|
2022-02-07T22:24:53.000Z
|
ultron8/api/api_v1/endpoints/loggers.py
|
bossjones/ultron8
|
45db73d32542a844570d44bc83defa935e15803f
|
[
"Apache-2.0",
"MIT"
] | null | null | null |
from __future__ import annotations
# SOURCE: https://blog.bartab.fr/fastapi-logging-on-the-fly/
import logging
from typing import Optional
from fastapi import APIRouter, HTTPException
from ultron8.api.models.loggers import LoggerModel, LoggerPatch
LOG_LEVELS = {
"critical": logging.CRITICAL,
"error": logging.ERROR,
"warning": logging.WARNING,
"info": logging.INFO,
"debug": logging.DEBUG,
}
LOGGER = logging.getLogger(__name__)
router = APIRouter()
def get_lm_from_tree(loggertree: LoggerModel, find_me: str) -> Optional[LoggerModel]:
if find_me == loggertree.name:
LOGGER.debug("Found")
return loggertree
else:
for ch in loggertree.children:
LOGGER.debug(f"Looking in: {ch.name}")
i = get_lm_from_tree(ch, find_me)
if i:
return i
def generate_tree() -> LoggerModel:
# adapted from logging_tree package https://github.com/brandon-rhodes/logging_tree
rootm = LoggerModel(
name="root", level=logging.getLogger().getEffectiveLevel(), children=[]
)
nodesm = {}
items = list(logging.root.manager.loggerDict.items()) # type: ignore
items.sort()
for name, loggeritem in items:
if isinstance(loggeritem, logging.PlaceHolder):
nodesm[name] = nodem = LoggerModel(name=name, children=[])
else:
nodesm[name] = nodem = LoggerModel(
name=name, level=loggeritem.getEffectiveLevel(), children=[]
)
i = name.rfind(".", 0, len(name) - 1) # same formula used in `logging`
if i == -1:
parentm = rootm
else:
parentm = nodesm[name[:i]]
parentm.children.append(nodem)
return rootm
# Multiple RecursionErrors with self-referencing models
# https://github.com/samuelcolvin/pydantic/issues/524
# https://github.com/samuelcolvin/pydantic/issues/531
@router.get("/{logger_name}", response_model=LoggerModel)
def logger_get(logger_name: str):
LOGGER.debug(f"getting logger {logger_name}")
rootm = generate_tree()
lm = get_lm_from_tree(rootm, logger_name)
if lm is None:
raise HTTPException(status_code=404, detail=f"Logger {logger_name} not found")
return lm
@router.patch("/")
def logger_patch(loggerpatch: LoggerPatch):
rootm = generate_tree()
    lm = get_lm_from_tree(rootm, loggerpatch.name)
    if lm is None:
        raise HTTPException(status_code=404, detail=f"Logger {loggerpatch.name} not found")
    LOGGER.debug(f"Actual level of {lm.name} is {lm.level}")
LOGGER.debug(f"Setting {loggerpatch.name} to {loggerpatch.level}")
logging.getLogger(loggerpatch.name).setLevel(LOG_LEVELS[loggerpatch.level])
return loggerpatch
@router.get("/", response_model=LoggerModel)
def loggers_list():
rootm = generate_tree()
LOGGER.debug(rootm)
return rootm
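# A usage sketch, assuming this router is mounted under a "/loggers" prefix
# on the main FastAPI app (the prefix is an assumption, not shown here):
#
#   GET   /loggers/          -> the full logger tree as a LoggerModel
#   GET   /loggers/uvicorn   -> the subtree for the "uvicorn" logger
#   PATCH /loggers/  with {"name": "uvicorn", "level": "debug"}
#                            -> switches that logger to logging.DEBUG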
| 31.252874
| 86
| 0.670099
| 325
| 2,719
| 5.476923
| 0.341538
| 0.037079
| 0.020225
| 0.029213
| 0.124719
| 0.124719
| 0.041573
| 0.041573
| 0.041573
| 0
| 0
| 0.006044
| 0.2089
| 2,719
| 86
| 87
| 31.616279
| 0.821478
| 0.125414
| 0
| 0.123077
| 0
| 0
| 0.093671
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.076923
| false
| 0
| 0.061538
| 0
| 0.230769
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
c85898f206e8cc65031cd08af9075a430861ba23
| 422
|
py
|
Python
|
exception6.py
|
PRASAD-DANGARE/PYTHON
|
36214f7dc3762d327e5a29e40752edeb098249c8
|
[
"MIT"
] | 1
|
2021-06-07T07:55:28.000Z
|
2021-06-07T07:55:28.000Z
|
exception6.py
|
PRASAD-DANGARE/PYTHON
|
36214f7dc3762d327e5a29e40752edeb098249c8
|
[
"MIT"
] | null | null | null |
exception6.py
|
PRASAD-DANGARE/PYTHON
|
36214f7dc3762d327e5a29e40752edeb098249c8
|
[
"MIT"
] | null | null | null |
# Python Program To Understand The Usage Of try With finally Blocks
'''
Function Name : Usage Of try With finally Blocks
Function Date : 23 Sep 2020
Function Author : Prasad Dangare
Input : String
Output : String
'''
try:
x = int(input('Enter A Number : '))
y = 1 / x
finally:
print("We Are Not Catching The Exception.")
print("The Inverse Is : ", y)
| 22.210526
| 68
| 0.592417
| 55
| 422
| 4.545455
| 0.690909
| 0.056
| 0.08
| 0.112
| 0.28
| 0.28
| 0.28
| 0
| 0
| 0
| 0
| 0.024561
| 0.324645
| 422
| 18
| 69
| 23.444444
| 0.852632
| 0.56872
| 0
| 0
| 0
| 0
| 0.435897
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0.333333
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
c85a1c9c9f35a67fa594c9e1e36235e098af53be
| 4,036
|
py
|
Python
|
V1/GliderScienceSet_Plots.py
|
NOAA-PMEL/EcoFOCI_OculusGlider
|
5655c0e173432768706416932c94a089a3e7993f
|
[
"Unlicense"
] | 2
|
2018-04-12T19:49:05.000Z
|
2020-10-01T11:46:48.000Z
|
V1/GliderScienceSet_Plots.py
|
NOAA-PMEL/EcoFOCI_OculusGlider
|
5655c0e173432768706416932c94a089a3e7993f
|
[
"Unlicense"
] | null | null | null |
V1/GliderScienceSet_Plots.py
|
NOAA-PMEL/EcoFOCI_OculusGlider
|
5655c0e173432768706416932c94a089a3e7993f
|
[
"Unlicense"
] | null | null | null |
#!/usr/bin/env python
"""
Background:
--------
GliderScienceSet_Plots.py
Purpose:
--------
History:
--------
"""
import argparse
import os
from io_utils import ConfigParserLocal
import numpy as np
import xarray as xa
import seawater as sw  # sw.dens0 is used below but was not imported in the original
# Visual Stack
import matplotlib as mpl
import matplotlib.pyplot as plt
def plot_ts(salt, temp, press, srange=[31,33], trange=[-2,10], ptitle="",labels=True, label_color='k', bydepth=False):
plt.style.use('ggplot')
    # Figure out boundaries (mins and maxes)
smin = srange[0]
smax = srange[1]
tmin = trange[0]
tmax = trange[1]
# Calculate how many gridcells we need in the x and y dimensions
xdim = int(round((smax-smin)/0.1+1,0))
ydim = int(round((tmax-tmin)+1,0))
#print 'ydim: ' + str(ydim) + ' xdim: ' + str(xdim) + ' \n'
if (xdim > 10000) or (ydim > 10000):
        print('Too many dimensions for grid in file. Likely missing data \n')
return
# Create empty grid of zeros
dens = np.zeros((ydim,xdim))
    # Create temp and salt vectors of appropriate dimensions
ti = np.linspace(0,ydim-1,ydim)+tmin
si = np.linspace(0,xdim-1,xdim)*0.1+smin
# Loop to fill in grid with densities
for j in range(0,int(ydim)):
for i in range(0, int(xdim)):
dens[j,i]=sw.dens0(si[i],ti[j])
# Substract 1000 to convert to sigma-t
dens = dens - 1000
# Plot data ***********************************************
    ax1 = fig.add_subplot(111)  # relies on the module-level fig created in the main section below
if labels:
CS = plt.contour(si,ti,dens, linestyles='dashed', colors='k')
if labels:
plt.clabel(CS, fontsize=12, inline=1, fmt='%1.1f') # Label every second level
if bydepth:
ts = ax1.scatter(salt,temp, c=press, cmap='gray', s=10)
else:
ts = ax1.scatter(salt,temp,s=10,c=label_color)
plt.ylim(tmin,tmax)
plt.xlim(smin,smax)
if labels:
if bydepth:
plt.colorbar(ts )
ax1.set_xlabel('Salinity (PSU)')
ax1.set_ylabel('Temperature (C)')
t = fig.suptitle(ptitle, fontsize=12, fontweight='bold')
t.set_y(1.08)
return fig
"""-------------------------------- Main -----------------------------------------------"""
parser = argparse.ArgumentParser(description='Plot archived NetCDF glider data and Science Data')
parser.add_argument('ofilepath', metavar='ofilepath', type=str,
help='path to directory with UW initial Oculus netcdf data')
parser.add_argument('sfilepath', metavar='sfilepath', type=str,
help='path to directory with Oculus Science Data netcdf data')
parser.add_argument('profileid',metavar='profileid', type=str,
help='divenumber - eg p4010260')
args = parser.parse_args()
isUW, ismerged, isup, isdown = True, True, True, True
# There are potentially three files - original UW file, a merged file and an upcast/downcast file
filein = args.ofilepath + args.profileid + '.nc'
try:
df = xa.open_dataset(filein, autoclose=True)
except IOError:
isUW = False
filein_m = args.sfilepath + args.profileid + '_m.nc'
ismerged = True
try:
df_m = xa.open_dataset(filein_m, autoclose=True)
except IOError:
ismerged = False
filein_u = args.sfilepath + args.profileid + '_u.nc'
try:
df_u = xa.open_dataset(filein_u, autoclose=True)
except IOError:
isup = False
filein_d = args.sfilepath + args.profileid + '_d.nc'
try:
df_d = xa.open_dataset(filein_d, autoclose=True)
except IOError:
isdown = False
fig = plt.figure(figsize=(6, 6))
if isUW:
fig = plot_ts(df.salinity,df.temperature,df.depth,labels=True,label_color='g')
print("Added original data")
if ismerged:
fig = plot_ts(df_m.Salinity,df_m.Temperature,df_m.Pressure,labels=False,label_color='k')
print("Added merged data")
if isup:
fig = plot_ts(df_u.Salinity,df_u.Temperature,df_u.Pressure,labels=False,label_color='b')
print("Added binned upcast data")
if isdown:
fig = plot_ts(df_d.Salinity,df_d.Temperature,df_d.Pressure,labels=False,label_color='r')
print("Added binned downcast data")
| 27.834483
| 119
| 0.640981
| 582
| 4,036
| 4.365979
| 0.369416
| 0.023613
| 0.020464
| 0.029909
| 0.094845
| 0.023613
| 0.023613
| 0
| 0
| 0
| 0
| 0.023148
| 0.197225
| 4,036
| 144
| 120
| 28.027778
| 0.761111
| 0.15114
| 0
| 0.149425
| 0
| 0
| 0.138662
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.011494
| false
| 0
| 0.08046
| 0
| 0.114943
| 0.057471
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
c85aba6739f248fb55a041a97d59cbb716b417c3
| 17,416
|
py
|
Python
|
manager/users/models.py
|
jlbrewe/hub
|
c737669e6493ad17536eaa240bed3394b20c6b7d
|
[
"Apache-2.0"
] | 30
|
2016-03-26T12:08:04.000Z
|
2021-12-24T14:48:32.000Z
|
manager/users/models.py
|
jlbrewe/hub
|
c737669e6493ad17536eaa240bed3394b20c6b7d
|
[
"Apache-2.0"
] | 1,250
|
2016-03-23T04:56:50.000Z
|
2022-03-28T02:27:58.000Z
|
manager/users/models.py
|
jlbrewe/hub
|
c737669e6493ad17536eaa240bed3394b20c6b7d
|
[
"Apache-2.0"
] | 11
|
2016-07-14T17:04:20.000Z
|
2021-07-01T16:19:09.000Z
|
"""
Define models used in this app.
This module only serves to provide some consistency across the
`users`, `accounts` , `projects` etc apps so that you can
`from users.models import Users`, just like you can for
`from projects.models import Projects` and instead of having to remember
to do the following.
"""
from typing import Dict, Optional
import django.contrib.auth.models
import shortuuid
from django.contrib.auth import get_user_model
from django.contrib.contenttypes.fields import GenericForeignKey
from django.contrib.contenttypes.models import ContentType
from django.db import connection, models
from django.db.models import Count, F, Max, Q
from django.db.models.expressions import RawSQL
from django.http import HttpRequest
from django.shortcuts import reverse
from django.utils import timezone
from invitations.adapters import get_invitations_adapter
from invitations.models import Invitation
from rest_framework.exceptions import ValidationError
from waffle.models import AbstractUserFlag
# Needed to ensure signals are loaded
import users.signals # noqa
from manager.helpers import EnumChoice
User: django.contrib.auth.models.User = get_user_model()
def get_email(user: User) -> Optional[str]:
"""
Get the best email address for a user.
The "best" email is the verified primary email,
falling back to verified if none marked as primary,
falling back to the first if none is verified,
falling back to `user.email`, falling back to
their public email.
"""
best = None
emails = user.emailaddress_set.all()
for email in emails:
if (email.primary and email.verified) or (not best and email.verified):
best = email.email
if not best and len(emails) > 0:
best = emails[0].email
if not best:
best = user.email
if not best and user.personal_account:
best = user.personal_account.email
# Avoid returning an empty string, return None instead
return best or None
def get_name(user: User) -> Optional[str]:
"""
Get the best name to display for a user.
The "best" name is their account's display name,
falling back to first_name + last_name,
falling back to username.
"""
if user.personal_account and user.personal_account.display_name:
return user.personal_account.display_name
if user.first_name or user.last_name:
return f"{user.first_name} {user.last_name}".strip()
return user.username
def get_attributes(user: User) -> Dict:
"""
Get a dictionary of user attributes.
Used for updating external services with current
    values of user attributes, e.g. the number of projects.
Flattens various other summary dictionaries e.g `get_projects_summary`
into a single dictionary.
"""
return {
**dict(
(f"feature_{name}", value)
for name, value in get_feature_flags(user).items()
),
**dict(
(f"orgs_{name}", value) for name, value in get_orgs_summary(user).items()
),
**dict(
(f"projects_{name}", value)
for name, value in get_projects_summary(user).items()
),
}
def get_orgs(user: User):
"""
Get all organizational accounts that a user is a member of.
"""
from accounts.models import Account
return Account.objects.filter(user__isnull=True, users__user=user).annotate(
role=F("users__role")
)
def get_orgs_summary(user: User) -> Dict:
"""
Get a summary of organizational accounts the user is a member of.
"""
from accounts.models import AccountRole
zero_by_role = dict([(role.name.lower(), 0) for role in AccountRole])
orgs = get_orgs(user)
orgs_summary = orgs.values("role").annotate(count=Count("id"), tier=Max("tier"))
orgs_by_role = dict([(row["role"].lower(), row["count"]) for row in orgs_summary])
return {
"max_tier": max(row["tier"] for row in orgs_summary) if orgs_summary else None,
"total": sum(orgs_by_role.values()),
**zero_by_role,
**orgs_by_role,
}
def get_projects(user: User, include_public=True):
"""
Get a queryset of projects for the user.
For authenticated users, each project is annotated with the
role of the user for the project.
"""
from projects.models.projects import Project
if user.is_authenticated:
# Annotate the queryset with the role of the user
# Role is the "greater" of the project role and the
# account role (for the account that owns the project).
# Authenticated users can see public projects and those in
# which they have a role
return Project.objects.annotate(
role=RawSQL(
"""
SELECT
CASE account_role.role
WHEN 'OWNER' THEN 'OWNER'
WHEN 'MANAGER' THEN
CASE project_role.role
WHEN 'OWNER' THEN 'OWNER'
ELSE 'MANAGER' END
ELSE project_role.role END AS "role"
FROM projects_project AS project
LEFT JOIN
(SELECT project_id, "role" FROM projects_projectagent WHERE user_id = %s) AS project_role
ON project.id = project_role.project_id
LEFT JOIN
(SELECT account_id, "role" FROM accounts_accountuser WHERE user_id = %s) AS account_role
ON project.account_id = account_role.account_id
WHERE project.id = projects_project.id""",
[user.id, user.id],
)
).filter((Q(public=True) if include_public else Q()) | Q(role__isnull=False))
else:
# Unauthenticated users can only see public projects
return Project.objects.filter(public=True).extra(select={"role": "NULL"})
def get_projects_summary(user: User) -> Dict:
"""
Get a summary of project memberships for a user.
"""
from projects.models.projects import ProjectRole
zero_by_role = dict([(role.name.lower(), 0) for role in ProjectRole])
projects = get_projects(user, include_public=False)
projects_by_role = dict(
[
(row["role"].lower(), row["count"])
for row in projects.values("role").annotate(count=Count("id"))
]
)
return {
"total": sum(projects_by_role.values()),
**zero_by_role,
**projects_by_role,
}
def get_feature_flags(user: User) -> Dict[str, str]:
"""
Get the feature flag settings for a user.
"""
with connection.cursor() as cursor:
cursor.execute(
"""
SELECT "name", "default", "user_id"
FROM users_flag
LEFT JOIN (
SELECT *
FROM users_flag_users
WHERE user_id = %s
) AS subquery ON users_flag.id = subquery.flag_id
WHERE users_flag.settable
""",
[user.id],
)
rows = cursor.fetchall()
features = {}
for row in rows:
name, default, has_flag = row
if has_flag:
features[name] = "off" if default == "on" else "on"
else:
features[name] = default
return features
def generate_anonuser_id():
"""
Generate a unique id for an anonymous user.
"""
return shortuuid.ShortUUID().random(length=32)
class AnonUser(models.Model):
"""
A model to store anonymous users when necessary.
Used to associate unauthenticated users with objects,
for example, so that the same session job can be provided
to them on multiple page refreshes.
"""
id = models.CharField(
primary_key=True,
max_length=64,
default=generate_anonuser_id,
help_text="The unique id of the anonymous user.",
)
created = models.DateTimeField(
auto_now_add=True, help_text="The time the anon user was created."
)
@staticmethod
def get_id(request: HttpRequest) -> Optional[str]:
"""
Get the id of the anonymous user, if any.
"""
if request.user.is_anonymous:
return request.session.get("user", {}).get("id")
return None
@staticmethod
def get_or_create(request: HttpRequest) -> "AnonUser":
"""
Create an instance in the database.
        Only use this when necessary, e.g. when you need
        to associate an anonymous user with another object.
"""
id = AnonUser.get_id(request)
if id:
anon_user, created = AnonUser.objects.get_or_create(id=id)
return anon_user
else:
anon_user = AnonUser.objects.create()
request.session["user"] = {"anon": True, "id": anon_user.id}
return anon_user
class Flag(AbstractUserFlag):
"""
Custom feature flag model.
Adds fields to allow users to turn features on/off themselves.
In the future, fields may be
added to allow flags to be set based on the account (in addition to, or instead
of, only the user).
See https://waffle.readthedocs.io/en/stable/types/flag.html#custom-flag-models
"""
label = models.CharField(
max_length=128,
null=True,
blank=True,
help_text="A label for the feature to display to users.",
)
default = models.CharField(
max_length=3,
choices=[("on", "On"), ("off", "Off")],
default="on",
help_text='If the default is "on" then when the flag is active, '
'the feature should be considered "off" and vice versa.',
)
settable = models.BooleanField(
default=False, help_text="User can turn this flag on/off for themselves."
)
def is_active_for_user(self, user) -> bool:
"""
Is the feature "on" for a user.
Changes the underlying behaviour of Waffle flags based on
the `default` field for the flag.
"""
is_active = super().is_active_for_user(user)
return is_active if self.default == "off" else not is_active
def generate_invite_key():
"""
Generate a unique invite key.
    This is a separate function to avoid new AlterField migrations
    being created, as happens when `default=shortuuid.uuid`.
"""
return shortuuid.ShortUUID().random(length=32)
class InviteAction(EnumChoice):
"""
Actions to take when a user has accepted an invite.
"""
join_account = "join_account"
join_team = "join_team"
join_project = "join_project"
take_tour = "take_tour"
@staticmethod
def as_choices():
"""Return as a list of field choices."""
return [
(InviteAction.join_account.name, "Join account"),
(InviteAction.join_team.name, "Join team"),
(InviteAction.join_project.name, "Join project"),
(InviteAction.take_tour.name, "Take tour"),
]
class Invite(models.Model):
"""
An extension of the default invitation model.
Allows for different types of invitations, with actions
after success.
Re-implements the interface of `invitations.Invitation`
instead of extending it so that some fields can be redefined
    e.g. a shorter case-sensitive `key`, and avoiding the unique constraint
on `email` (because of actions, a single email address could
be invited more than once).
The methods for each action should use API view sets
with synthetic requests having the `inviter` as the
request user. This reduces code and provides consistency
in permissions checking, thereby reducing errors.
Adds `subject_object` `GenericForeignKey` to allow
querying from other models
"""
key = models.CharField(
max_length=64,
unique=True,
default=generate_invite_key,
help_text="The key for the invite.",
)
inviter = models.ForeignKey(
User,
null=True,
blank=True,
on_delete=models.CASCADE,
related_name="invites",
help_text="The user who created the invite.",
)
email = models.EmailField(
max_length=2048, help_text="The email address of the person you are inviting."
)
message = models.TextField(
null=True, blank=True, help_text="An optional message to send to the invitee."
)
created = models.DateTimeField(
auto_now_add=True, help_text="When the invite was created."
)
sent = models.DateTimeField(
null=True, blank=True, help_text="When the invite was sent."
)
accepted = models.BooleanField(
default=False,
help_text="Whether the invite has been accepted. "
"Will only be true if the user has clicked on the invitation AND authenticated.",
)
completed = models.DateTimeField(
null=True, blank=True, help_text="When the invite action was completed",
)
action = models.CharField(
max_length=64,
null=True,
blank=True,
choices=InviteAction.as_choices(),
help_text="The action to perform when the invitee signs up.",
)
subject_type = models.ForeignKey(
ContentType,
null=True,
blank=True,
on_delete=models.CASCADE,
help_text="The type of the target of the action. e.g Team, Account",
)
subject_id = models.IntegerField(
null=True, blank=True, help_text="The id of the target of the action.",
)
subject_object = GenericForeignKey("subject_type", "subject_id")
arguments = models.JSONField(
null=True,
blank=True,
help_text="Any additional arguments to pass to the action.",
)
# These methods need to be implemented for the `invitations` API
key_expired = Invitation.key_expired
def send_invitation(self, request):
"""Extend method to add the invite object to the template context."""
context = dict(
inviter=self.inviter,
inviter_name=self.inviter.get_full_name() or self.inviter.username,
invite_message=self.message,
invite_url=request.build_absolute_uri(
reverse("ui-users-invites-accept", args=[self.key])
),
reason_for_sending="This email was sent by user '{0}' to invite you to "
"collaborate with them on Stencila Hub.".format(self.inviter.username),
)
get_invitations_adapter().send_mail(
"invitations/email/email_invite", self.email, context
)
self.sent = timezone.now()
self.save()
def __str__(self):
return "Invite {0} {1}".format(self.action, self.email)
# These methods implement invitation actions
def redirect_url(self) -> str:
"""
Get the URL to redirect the user to after the invite has been accepted.
"""
if self.action == "join_account":
return reverse("ui-accounts-retrieve", args=[self.arguments["account"]])
elif self.action == "join_team":
return reverse(
"ui-accounts-teams-retrieve",
args=[self.arguments["account"], self.arguments["team"]],
)
elif self.action == "join_project":
return reverse(
"ui-projects-retrieve",
args=[self.arguments["account"], self.arguments["project"]],
)
elif self.action == "take_tour":
return self.arguments["page"] + "?tour=" + self.arguments["tour"]
else:
return "/"
def create_request(self, data) -> HttpRequest:
"""
Create a synthetic request to pass to view sets.
"""
request = HttpRequest()
request.data = data
request.user = self.inviter
return request
def perform_action(self, request, user=None):
"""
Perform the action (if any) registered for this invitation.
"""
# Accept and save in case the action fails below
self.accepted = True
self.save()
if self.action:
method = getattr(self, self.action)
if not method:
raise RuntimeError("No such action {0}".format(self.action))
method(user or request.user)
self.completed = timezone.now()
self.save()
def join_account(self, invitee):
"""
Add invitee to account with a particular role.
"""
from accounts.api.views import AccountsUsersViewSet
self.arguments["id"] = invitee.id
request = self.create_request(data=self.arguments)
viewset = AccountsUsersViewSet.init(
"create", request, args=[], kwargs=self.arguments
)
viewset.create(request, **self.arguments)
def join_project(self, invitee):
"""
Add invitee to project with a particular role.
If the user already has a project role, then the
invite is ignored.
"""
from projects.api.views.projects import ProjectsAgentsViewSet
self.arguments["type"] = "user"
self.arguments["agent"] = invitee.id
request = self.create_request(data=self.arguments)
viewset = ProjectsAgentsViewSet.init(
"create", request, args=[], kwargs=self.arguments
)
try:
viewset.create(request, **self.arguments)
except ValidationError as exc:
if "Already has a project role" not in str(exc):
raise exc
def take_tour(self, invitee):
"""
Nothing needs to be done here. User is redirected to tour URL.
"""
pass
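# A minimal usage sketch for the invite flow (every literal below is
# hypothetical; `request` is an authenticated HttpRequest):
#
#   invite = Invite.objects.create(
#       inviter=request.user,
#       email="colleague@example.org",
#       action=InviteAction.join_project.name,
#       arguments={"account": "acme", "project": "demo-project"},
#   )
#   invite.send_invitation(request)           # emails the invite key
#   # ...once the invitee has accepted and authenticated:
#   invite.perform_action(request, user=invitee)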
| 30.824779
| 93
| 0.634933
| 2,202
| 17,416
| 4.920981
| 0.193915
| 0.012551
| 0.010797
| 0.01412
| 0.199982
| 0.140181
| 0.10179
| 0.063861
| 0.050941
| 0.034884
| 0
| 0.002049
| 0.271302
| 17,416
| 564
| 94
| 30.879433
| 0.851785
| 0.233751
| 0
| 0.167763
| 0
| 0
| 0.126566
| 0.006688
| 0
| 0
| 0
| 0
| 0
| 1
| 0.072368
| false
| 0.006579
| 0.078947
| 0.003289
| 0.325658
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
c85bff69906cd84ddfe9e581be8b49ceea14621c
| 6,075
|
py
|
Python
|
scripts/automation/trex_control_plane/interactive/trex/emu/emu_plugins/emu_plugin_dhcpsrv.py
|
GabrielGanne/trex-core
|
688a0fe0adb890964691473723d70ffa98e00dd3
|
[
"Apache-2.0"
] | 956
|
2015-06-24T15:04:55.000Z
|
2022-03-30T06:25:04.000Z
|
scripts/automation/trex_control_plane/interactive/trex/emu/emu_plugins/emu_plugin_dhcpsrv.py
|
hjat2005/trex-core
|
400f03c86c844a0096dff3f6b13e58a808aaefff
|
[
"Apache-2.0"
] | 782
|
2015-09-20T15:19:00.000Z
|
2022-03-31T23:52:05.000Z
|
scripts/automation/trex_control_plane/interactive/trex/emu/emu_plugins/emu_plugin_dhcpsrv.py
|
hjat2005/trex-core
|
400f03c86c844a0096dff3f6b13e58a808aaefff
|
[
"Apache-2.0"
] | 429
|
2015-06-27T19:34:21.000Z
|
2022-03-23T11:02:51.000Z
|
from trex.emu.api import *
from trex.emu.emu_plugins.emu_plugin_base import *
import trex.utils.parsing_opts as parsing_opts
class DHCPSRVPlugin(EMUPluginBase):
"""
Defines DHCP Server plugin based on `DHCP <https://en.wikipedia.org/wiki/Dynamic_Host_Configuration_Protocol>`_
Implemented based on `RFC 2131 Server <https://datatracker.ietf.org/doc/html/rfc2131>`_
"""
plugin_name = 'DHCPSRV'
INIT_JSON_NS = {'dhcpsrv': {}}
"""
:parameters:
Empty.
"""
INIT_JSON_CLIENT = {'dhcpsrv': "Pointer to INIT_JSON_NS below"}
"""
:parameters:
default_lease: uint32
Default lease time in seconds to offer to DHCP clients. Defaults to 300 seconds, 5 mins.
max_lease: uint32
            Maximal lease time in seconds that the server is willing to offer in case the client requests a specific lease.
If `default_lease` is provided and greater than an unprovided `max_lease`, then `max_lease` will be overridden
by `default_lease`. Defaults to 600 seconds, 10 mins.
min_lease: uint32
            Minimal lease time in seconds that the server is willing to offer in case the client requests a specific lease.
If `default_lease` is provided and less than an unprovided `min_lease`, then `min_lease` will be overridden
by `default_lease`. Defaults to 60 seconds, 1 min.
next_server_ip: str
IPv4 address of the next server as a field. In case you provide it, the server will write the IPv4 as the next server IPv4
in the packets it sends. Defaults to 0.0.0.0.
pools: list
            List of dictionaries that represent IPv4 pools, otherwise known as scopes. At least one pool must be provided.
Each dictionary is composed of:
:min: str
Minimal IPv4 address of the pool. If this happens to be the Network Id, this address will be skipped.
:max: str
Maximal IPv4 address of the pool. If this happens to be the Broadcast Id, this address will be skipped.
:prefix: uint8
                Subnet Mask represented as a prefix: an unsigned integer strictly between 0 and 32.
:exclude: list
List of IPv4 strings that are excluded from the pool and can't be offered to the client.
.. note:: Two different pools cannot be in the same subnet. If two pools share the same subnet, with the current implementation we will always offer an IP from the first pool in the list.
.. highlight:: python
.. code-block:: python
"pools": [
{
"min": "192.168.0.0",
"max": "192.168.0.100",
"prefix": 24,
"exclude": ["192.168.0.1", "192.168.0.2"]
},
{
"min": "10.0.0.2",
"max": "10.0.255.255",
"prefix": 8
}
]
options: dict
Dictionary that contains DHCP Options. There are three keys possible: `offer`, `ack` and `nak`.
Each key represents a DHCP Response that the server can send.
Each key's value is a list. The list is composed by dictionaries, where each dictionary represents a DHCP option.
Options are represented by their type (byte), and their value (byte list).
In the following example, we add the following options to `offer` and `ack` responses.
Type: 6 (DNS Server) -> Value (8.8.8.8)
Type: 15 (Domain Name) -> Value cisco.com
.. highlight:: python
.. code-block:: python
"options": {
"offer": [
{
"type": 6,
"data": [8, 8, 8, 8]
},
{
"type": 15,
"data": [99, 105, 115, 99, 111, 46, 99, 111, 109]
}
                ],
"ack": [
{
"type": 6,
"data": [8, 8, 8, 8]
},
{
"type": 15,
"data": [99, 105, 115, 99, 111, 46, 99, 111, 109]
}
]
}
"""
def __init__(self, emu_client):
super(DHCPSRVPlugin, self).__init__(emu_client, client_cnt_rpc_cmd='dhcpsrv_c_cnt')
# API methods
@client_api('getter', True)
@update_docstring(EMUPluginBase._get_client_counters.__doc__.replace("$PLUGIN_NAME", plugin_name))
def get_counters(self, c_key, cnt_filter=None, zero=True, verbose=True):
return self._get_client_counters(c_key, cnt_filter, zero, verbose)
@client_api('command', True)
@update_docstring(EMUPluginBase._clear_client_counters.__doc__.replace("$PLUGIN_NAME", plugin_name))
def clear_counters(self, c_key):
return self._clear_client_counters(c_key)
# Plugins methods
@plugin_api('dhcpsrv_show_counters', 'emu')
def dhcpsrv_show_counters_line(self, line):
'''Show DHCP Server counters.\n'''
parser = parsing_opts.gen_parser(self,
"show_counters_dhcpsrv",
self.dhcpsrv_show_counters_line.__doc__,
parsing_opts.EMU_SHOW_CNT_GROUP,
parsing_opts.EMU_NS_GROUP,
parsing_opts.EMU_CLIENT_GROUP,
parsing_opts.EMU_DUMPS_OPT
)
opts = parser.parse_args(line.split())
self.emu_c._base_show_counters(self.client_data_cnt, opts, req_ns = True)
return True
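# A minimal namespace/client init sketch assembled from the documented
# parameters above (all values are illustrative):
#
#   init_json = {'dhcpsrv': {
#       'default_lease': 300,
#       'pools': [
#           {'min': '192.168.0.2', 'max': '192.168.0.100', 'prefix': 24},
#       ],
#   }}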
| 39.967105
| 200
| 0.539095
| 714
| 6,075
| 4.420168
| 0.331933
| 0.005703
| 0.005703
| 0.01711
| 0.223067
| 0.204056
| 0.184411
| 0.184411
| 0.184411
| 0.126109
| 0
| 0.04222
| 0.380082
| 6,075
| 151
| 201
| 40.231788
| 0.795805
| 0.042469
| 0
| 0
| 0
| 0
| 0.083622
| 0.024221
| 0
| 0
| 0
| 0
| 0
| 1
| 0.133333
| false
| 0
| 0.1
| 0.066667
| 0.466667
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
c86191051fc7c1834649eb4ef9230e67b31da3c1
| 2,683
|
py
|
Python
|
lenet-chinese_mnist/generate.py
|
leonwanghui/mindspore-jina-apps
|
e2912d9a93689c69005345758e3b7a2f8ba6133e
|
[
"Apache-2.0"
] | null | null | null |
lenet-chinese_mnist/generate.py
|
leonwanghui/mindspore-jina-apps
|
e2912d9a93689c69005345758e3b7a2f8ba6133e
|
[
"Apache-2.0"
] | null | null | null |
lenet-chinese_mnist/generate.py
|
leonwanghui/mindspore-jina-apps
|
e2912d9a93689c69005345758e3b7a2f8ba6133e
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2020 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
import os
import struct
import argparse
import numpy as np
from PIL import Image
def load_mnist(dir_path, kind='train'):
"""Load MNIST Dataset from the given path"""
labels_path = os.path.join(dir_path, '%s-labels-idx1-ubyte' % kind)
images_path = os.path.join(dir_path, '%s-images-idx3-ubyte' % kind)
with open(labels_path, 'rb') as labels_file:
magic, num = struct.unpack('>II', labels_file.read(8))
labels = np.fromfile(labels_file, dtype=np.uint8)
with open(images_path, 'rb') as images_file:
magic, num, rows, cols = struct.unpack(">IIII", images_file.read(16))
images = np.fromfile(images_file, dtype=np.uint8)
return images, labels, num
def save_mnist_to_jpg(images, labels, save_dir, kind, num):
"""Convert and save the MNIST dataset to.jpg image format"""
one_pic_pixels = 28 * 28
for i in range(num):
img = images[i * one_pic_pixels:(i + 1) * one_pic_pixels]
img_np = np.array(img, dtype=np.uint8).reshape(28, 28)
label_val = labels[i]
jpg_name = os.path.join(save_dir, '{}_{}_{}.jpg'.format(kind, i, label_val))
Image.fromarray(img_np).save(jpg_name)
print('{} ==> {}_{}_{}.jpg'.format(i, kind, i, label_val))
if __name__ == '__main__':
parser = argparse.ArgumentParser(description="MNIST Dataset Operations")
parser.add_argument('--data_dir', type=str, default='/root/jina/chinese-mnist', help='MNIST dataset dir')
parser.add_argument('--kind', type=str, default='train', help='MNIST dataset: train or t10k')
parser.add_argument('--save_dir', type=str, default='/root/jina/chinese-mnist/jpg', help='used to save mnist jpg')
args = parser.parse_args()
if not os.path.exists(args.data_dir):
os.makedirs(args.data_dir)
images_np, labels_np, kind_num = load_mnist(args.data_dir, args.kind)
if not os.path.exists(args.save_dir):
os.makedirs(args.save_dir)
save_mnist_to_jpg(images_np, labels_np, args.save_dir, args.kind, kind_num)
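# Example invocation (the paths shown are just the argparse defaults above):
#
#   python generate.py --data_dir /root/jina/chinese-mnist \
#       --kind train --save_dir /root/jina/chinese-mnist/jpg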
| 43.274194
| 118
| 0.6776
| 402
| 2,683
| 4.358209
| 0.363184
| 0.034247
| 0.017123
| 0.018265
| 0.114155
| 0.091324
| 0.067352
| 0.042237
| 0
| 0
| 0
| 0.012108
| 0.168841
| 2,683
| 61
| 119
| 43.983607
| 0.773543
| 0.273202
| 0
| 0
| 0
| 0
| 0.140187
| 0.026999
| 0
| 0
| 0
| 0
| 0
| 1
| 0.055556
| false
| 0
| 0.138889
| 0
| 0.222222
| 0.027778
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
c862ff0586dafe12df4bfd251af96f7087dbad08
| 900
|
py
|
Python
|
app/api/v1/routes.py
|
kwanj-k/storemanager-API
|
e51511545a717341a7b1eb100eb3eab625a8b011
|
[
"MIT"
] | 1
|
2019-05-08T08:39:08.000Z
|
2019-05-08T08:39:08.000Z
|
app/api/v1/routes.py
|
kwanj-k/storemanager-API
|
e51511545a717341a7b1eb100eb3eab625a8b011
|
[
"MIT"
] | 2
|
2019-10-21T17:56:01.000Z
|
2019-10-29T07:36:39.000Z
|
app/api/v1/routes.py
|
kwanj-k/storemanager-API
|
e51511545a717341a7b1eb100eb3eab625a8b011
|
[
"MIT"
] | null | null | null |
"""
This file contains all the version one routes
"""
# Third party imports
from flask import Blueprint, request
from flask_restplus import Api, Resource, fields
# Local application imports
from .views.products_views import v1 as pro_routes
from .views.sales_views import v1 as sales_routes
from .views.stores_views import v1 as stores_routes
from .views.auth import v1 as auth_routes
authorizations = {
'apikey': {
'type': 'apiKey',
'in': 'header',
'name': 'Authorization'
}}
v_1 = Blueprint('v_1', __name__, url_prefix="/api/v1")
api = Api(v_1)
v1 = api.namespace(
'v1',
    description='Store manager API without persistent data storage',
authorizations=authorizations)
api.add_namespace(pro_routes, path="/products/")
api.add_namespace(sales_routes, path="/sales")
api.add_namespace(stores_routes, path="/stores")
api.add_namespace(auth_routes, path="/")
| 26.470588
| 67
| 0.73
| 124
| 900
| 5.104839
| 0.41129
| 0.056872
| 0.063191
| 0.07109
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.013141
| 0.154444
| 900
| 33
| 68
| 27.272727
| 0.81866
| 0.102222
| 0
| 0
| 0
| 0
| 0.156446
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.272727
| 0
| 0.272727
| 0.090909
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
c8643e92fdc3cc522b5000bf37f329ece9e89e82
| 5,605
|
py
|
Python
|
tests/test_region_aggregation.py
|
IAMconsortium/nomenclature
|
15973d86d91e38424fe30719d44a1f23526c6eea
|
[
"Apache-2.0"
] | 9
|
2021-06-10T15:11:23.000Z
|
2022-02-02T16:22:01.000Z
|
tests/test_region_aggregation.py
|
IAMconsortium/nomenclature
|
15973d86d91e38424fe30719d44a1f23526c6eea
|
[
"Apache-2.0"
] | 83
|
2021-06-22T09:04:29.000Z
|
2022-03-21T16:29:54.000Z
|
tests/test_region_aggregation.py
|
IAMconsortium/nomenclature
|
15973d86d91e38424fe30719d44a1f23526c6eea
|
[
"Apache-2.0"
] | 3
|
2021-06-17T10:44:48.000Z
|
2021-09-16T15:30:03.000Z
|
from pathlib import Path
import jsonschema
import pydantic
import pytest
from nomenclature.processor.region import (
ModelMappingCollisionError,
RegionAggregationMapping,
RegionProcessor,
)
from conftest import TEST_DATA_DIR
TEST_FOLDER_REGION_MAPPING = TEST_DATA_DIR / "region_aggregation"
def test_mapping():
mapping_file = "working_mapping.yaml"
# Test that the file is read and represented correctly
obs = RegionAggregationMapping.from_file(TEST_FOLDER_REGION_MAPPING / mapping_file)
exp = {
"model": "model_a",
"file": (TEST_FOLDER_REGION_MAPPING / mapping_file).relative_to(Path.cwd()),
"native_regions": [
{"name": "region_a", "rename": "alternative_name_a"},
{"name": "region_b", "rename": "alternative_name_b"},
{"name": "region_c", "rename": None},
],
"common_regions": [
{
"name": "common_region_1",
"constituent_regions": ["region_a", "region_b"],
},
{
"name": "common_region_2",
"constituent_regions": ["region_c"],
},
],
}
assert obs.dict() == exp
@pytest.mark.parametrize(
"file, error_type, error_msg_pattern",
[
(
"illegal_mapping_invalid_format_dict.yaml",
jsonschema.ValidationError,
".*common_region_1.*not.*'array'.*",
),
(
"illegal_mapping_illegal_attribute.yaml",
jsonschema.ValidationError,
"Additional properties are not allowed.*",
),
(
"illegal_mapping_conflict_regions.yaml",
pydantic.ValidationError,
".*Name collision in native and common regions.*common_region_1.*",
),
(
"illegal_mapping_duplicate_native.yaml",
pydantic.ValidationError,
".*Name collision in native regions.*alternative_name_a.*",
),
(
"illegal_mapping_duplicate_native_rename.yaml",
pydantic.ValidationError,
".*Name collision in native regions.*alternative_name_a.*",
),
(
"illegal_mapping_duplicate_common.yaml",
pydantic.ValidationError,
".*Name collision in common regions.*common_region_1.*",
),
(
"illegal_mapping_model_only.yaml",
pydantic.ValidationError,
".*one of the two: 'native_regions', 'common_regions'.*",
),
],
)
def test_illegal_mappings(file, error_type, error_msg_pattern):
# This is to test a few different failure conditions
with pytest.raises(error_type, match=f"{error_msg_pattern}{file}.*"):
RegionAggregationMapping.from_file(TEST_FOLDER_REGION_MAPPING / file)
@pytest.mark.parametrize(
"region_processor_path",
[
TEST_DATA_DIR / "regionprocessor_working",
(TEST_DATA_DIR / "regionprocessor_working").relative_to(Path.cwd()),
],
)
def test_region_processor_working(region_processor_path):
obs = RegionProcessor.from_directory(region_processor_path)
exp_data = [
{
"model": "model_a",
"file": (
TEST_DATA_DIR / "regionprocessor_working/mapping_1.yaml"
).relative_to(Path.cwd()),
"native_regions": [
{"name": "World", "rename": None},
],
"common_regions": None,
},
{
"model": "model_b",
"file": (
TEST_DATA_DIR / "regionprocessor_working/mapping_2.yaml"
).relative_to(Path.cwd()),
"native_regions": None,
"common_regions": [
{
"name": "World",
"constituent_regions": ["region_a", "region_b"],
}
],
},
]
exp_models = {value["model"] for value in exp_data}
exp_dict = {value["model"]: value for value in exp_data}
assert exp_models == set(obs.mappings.keys())
assert all(exp_dict[m] == obs.mappings[m].dict() for m in exp_models)
def test_region_processor_not_defined(simple_definition):
# Test a RegionProcessor with regions that are not defined in the data structure
# definition
error_msg = (
"model_(a|b)\n.*region_a.*mapping_(1|2).yaml.*value_error.region_not_defined."
"*\n.*model_(a|b)\n.*region_a.*mapping_(1|2).yaml.*value_error."
"region_not_defined"
)
with pytest.raises(pydantic.ValidationError, match=error_msg):
RegionProcessor.from_directory(
TEST_DATA_DIR / "regionprocessor_not_defined"
).validate_mappings(simple_definition)
def test_region_processor_duplicate_model_mapping():
error_msg = ".*model_a.*mapping_(1|2).yaml.*mapping_(1|2).yaml"
with pytest.raises(ModelMappingCollisionError, match=error_msg):
RegionProcessor.from_directory(TEST_DATA_DIR / "regionprocessor_duplicate")
def test_region_processor_wrong_args():
# Test if pydantic correctly type checks the input of RegionProcessor.from_directory
# Test with an integer
with pytest.raises(pydantic.ValidationError, match=".*path\n.*not a valid path.*"):
RegionProcessor.from_directory(123)
# Test with a file, a path pointing to a directory is required
with pytest.raises(
pydantic.ValidationError,
match=".*path\n.*does not point to a directory.*",
):
RegionProcessor.from_directory(
TEST_DATA_DIR / "regionprocessor_working/mapping_1.yaml"
)
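# These tests are run with the standard pytest invocation from the repository
# root (so that conftest.py and TEST_DATA_DIR resolve):
#   pytest tests/test_region_aggregation.py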
| 33.562874
| 88
| 0.615165
| 586
| 5,605
| 5.568259
| 0.196246
| 0.022066
| 0.03034
| 0.055777
| 0.435182
| 0.393503
| 0.331903
| 0.189396
| 0.131781
| 0.131781
| 0
| 0.004658
| 0.272257
| 5,605
| 166
| 89
| 33.76506
| 0.795293
| 0.063872
| 0
| 0.294964
| 0
| 0.014388
| 0.308647
| 0.163008
| 0
| 0
| 0
| 0
| 0.021583
| 1
| 0.043165
| false
| 0
| 0.043165
| 0
| 0.086331
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
c8657a8c0a88d1cd1bd12e0d16b56dc5546e1b6c
| 2,300
|
py
|
Python
|
render_object.py
|
VanGy-code/3D-House-Blender
|
8a9d91b1f3cc3988c0dcd7079223f2e541f9ec71
|
[
"MIT"
] | null | null | null |
render_object.py
|
VanGy-code/3D-House-Blender
|
8a9d91b1f3cc3988c0dcd7079223f2e541f9ec71
|
[
"MIT"
] | null | null | null |
render_object.py
|
VanGy-code/3D-House-Blender
|
8a9d91b1f3cc3988c0dcd7079223f2e541f9ec71
|
[
"MIT"
] | 1
|
2021-11-22T00:50:45.000Z
|
2021-11-22T00:50:45.000Z
|
import bpy
import os
import json
import numpy as np
from decimal import Decimal
from mathutils import Vector, Matrix
import argparse
import sys
sys.path.append(os.path.dirname(__file__))
sys.path.append(os.path.dirname(__file__)+'/tools')
from tools.utils import *
from tools.blender_interface import BlenderInterface
if __name__ == '__main__':
p = argparse.ArgumentParser(description='Renders given obj file by rotation a camera around it.')
    p.add_argument('--mesh_fpath', type=str, required=True, help='Path to the mesh (.obj) file to render.')
    p.add_argument('--output_dir', type=str, required=True, help='The path the output will be dumped to.')
    p.add_argument('--num_observations', type=int, required=True, help='Number of camera observations to render.')
    p.add_argument('--sphere_radius', type=float, required=True, help='Radius of the camera sphere around the object.')
p.add_argument('--mode', type=str, required=True, help='Options: train and test')
argv = sys.argv
argv = sys.argv[sys.argv.index("--") + 1:]
opt = p.parse_args(argv)
instance_name = opt.mesh_fpath.split('/')[-3]
instance_dir = os.path.join(opt.output_dir, instance_name)
# Start Render
renderer = BlenderInterface(resolution=128)
if opt.mode == 'train':
cam_locations = sample_spherical(opt.num_observations, opt.sphere_radius)
elif opt.mode == 'test':
cam_locations = get_archimedean_spiral(opt.sphere_radius, opt.num_observations)
obj_location = np.zeros((1,3))
cv_poses = look_at(cam_locations, obj_location)
blender_poses = [cv_cam2world_to_bcam2world(m) for m in cv_poses]
shapenet_rotation_mat = np.array([[1.0000000e+00, 0.0000000e+00, 0.0000000e+00],
[0.0000000e+00, -1.0000000e+00, -1.2246468e-16],
[0.0000000e+00, 1.2246468e-16, -1.0000000e+00]])
rot_mat = np.eye(3)
hom_coords = np.array([[0., 0., 0., 1.]]).reshape(1, 4)
obj_pose = np.concatenate((rot_mat, obj_location.reshape(3,1)), axis=-1)
obj_pose = np.concatenate((obj_pose, hom_coords), axis=0)
renderer.import_mesh(opt.mesh_fpath, scale=1., object_world_matrix=obj_pose)
renderer.render(instance_dir, blender_poses, write_cam_params=True)
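# Typical invocation (Blender consumes everything before the standalone "--",
# which is why sys.argv is sliced at it above; all paths/values are examples):
#
#   blender --background --python render_object.py -- \
#       --mesh_fpath /data/model/models/model_normalized.obj \
#       --output_dir ./renders --num_observations 50 \
#       --sphere_radius 1.2 --mode train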
| 41.818182
| 112
| 0.694348
| 341
| 2,300
| 4.478006
| 0.354839
| 0.045842
| 0.039293
| 0.049771
| 0.27112
| 0.228553
| 0.228553
| 0.18926
| 0.1611
| 0.1611
| 0
| 0.058452
| 0.174348
| 2,300
| 55
| 113
| 41.818182
| 0.745656
| 0.005217
| 0
| 0.047619
| 0
| 0
| 0.139047
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.285714
| 0
| 0.285714
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
c86659332f0223beeafc6e01030a75e258e463d5
| 2,717
|
py
|
Python
|
mapel/elections/features/clustering.py
|
kaszperro/mapel
|
d4e6486ee97f5d5a5a737c581ba3f9f874ebcef3
|
[
"MIT"
] | null | null | null |
mapel/elections/features/clustering.py
|
kaszperro/mapel
|
d4e6486ee97f5d5a5a737c581ba3f9f874ebcef3
|
[
"MIT"
] | null | null | null |
mapel/elections/features/clustering.py
|
kaszperro/mapel
|
d4e6486ee97f5d5a5a737c581ba3f9f874ebcef3
|
[
"MIT"
] | null | null | null |
import numpy as np
def clustering_v1(experiment, num_clusters=20):
from scipy.cluster.hierarchy import dendrogram, linkage, fcluster
import scipy.spatial.distance as ssd
# skip the paths
SKIP = ['UNID', 'ANID', 'STID', 'ANUN', 'STUN', 'STAN',
'Mallows',
'Urn',
'Identity', 'Uniformity', 'Antagonism', 'Stratification',
]
new_names = []
for i, a in enumerate(list(experiment.distances)):
if not any(tmp in a for tmp in SKIP):
new_names.append(a)
print(len(new_names))
distMatrix = np.zeros([len(new_names), len(new_names)])
for i, a in enumerate(new_names):
for j, b in enumerate(new_names):
if a != b:
distMatrix[i][j] = experiment.distances[a][b]
# Zd = linkage(ssd.squareform(distMatrix), method="complete")
# cld = fcluster(Zd, 500, criterion='distance').reshape(len(new_names), 1)
Zd = linkage(ssd.squareform(distMatrix), method="complete")
    cld = fcluster(Zd, 12, criterion='maxclust').reshape(len(new_names), 1)  # NOTE: hardcoded to 12 clusters; the num_clusters argument is ignored here
clusters = {}
for i, name in enumerate(new_names):
clusters[name] = cld[i][0]
for name in experiment.coordinates:
if name not in clusters:
clusters[name] = 0
return {'value': clusters}
def clustering_kmeans(experiment, num_clusters=20):
from sklearn.cluster import KMeans
points = list(experiment.coordinates.values())
kmeans = KMeans(n_clusters=num_clusters)
kmeans.fit(points)
y_km = kmeans.fit_predict(points)
# plt.scatter(points[y_km == 0, 0], points[y_km == 0, 1], s=100, c='red')
# plt.scatter(points[y_km == 1, 0], points[y_km == 1, 1], s=100, c='black')
# plt.scatter(points[y_km == 2, 0], points[y_km == 2, 1], s=100, c='blue')
# plt.scatter(points[y_km == 3, 0], points[y_km == 3, 1], s=100, c='cyan')
# all_distances = []
# for a,b in combinations(experiment.distances, 2):
# all_distances.append([a, b, experiment.distances[a][b]])
# all_distances.sort(key=lambda x: x[2])
#
# clusters = {a: None for a in experiment.distances}
# num_clusters = 0
# for a,b,dist in all_distances:
# if clusters[a] is None and clusters[b] is None:
# clusters[a] = num_clusters
# clusters[b] = num_clusters
# num_clusters += 1
# elif clusters[a] is None and clusters[b] is not None:
# clusters[a] = clusters[b]
# elif clusters[a] is not None and clusters[b] is None:
# clusters[b] = clusters[a]
clusters = {}
for i, name in enumerate(experiment.coordinates):
clusters[name] = y_km[i]
return {'value': clusters}
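# Usage sketch (the experiment object is assumed to expose .distances, a
# dict-of-dicts of pairwise distances, and .coordinates, a dict of 2D points,
# as implied by the code above):
#
#   clusters = clustering_kmeans(experiment, num_clusters=20)['value']
#   # clusters maps each election name to its cluster id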
| 33.54321
| 79
| 0.606183
| 375
| 2,717
| 4.298667
| 0.274667
| 0.049628
| 0.050248
| 0.042184
| 0.30273
| 0.198511
| 0.165012
| 0.109181
| 0.073201
| 0.073201
| 0
| 0.022582
| 0.250276
| 2,717
| 80
| 80
| 33.9625
| 0.768778
| 0.382407
| 0
| 0.105263
| 0
| 0
| 0.061743
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.052632
| false
| 0
| 0.105263
| 0
| 0.210526
| 0.026316
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
c86693ef8ab98f83a2f7c7800edbe9c593122043
| 561
|
py
|
Python
|
day15-1.py
|
kenleung5e28/advent-of-code-2021
|
f6de211f0d4f3bafa19572bf28e3407f0fab6d58
|
[
"MIT"
] | null | null | null |
day15-1.py
|
kenleung5e28/advent-of-code-2021
|
f6de211f0d4f3bafa19572bf28e3407f0fab6d58
|
[
"MIT"
] | null | null | null |
day15-1.py
|
kenleung5e28/advent-of-code-2021
|
f6de211f0d4f3bafa19572bf28e3407f0fab6d58
|
[
"MIT"
] | null | null | null |
import math
grid = []
with open('input-day15.txt') as file:
for line in file:
line = line.rstrip()
grid.append([int(s) for s in line])
n = len(grid)
costs = [[math.inf] * n for _ in range(n)]
costs[0][0] = 0
queue = [(0, 0)]
while len(queue) > 0:
x1, y1 = queue.pop(0)
for dx, dy in [(1, 0), (0, 1), (-1, 0), (0, -1)]:
x, y = x1 + dx, y1 + dy
if x >= 0 and y >= 0 and x < n and y < n:
cost = costs[x1][y1] + grid[x][y]
if cost < costs[x][y]:
costs[x][y] = cost
queue.append((x, y))
print(costs[n - 1][n - 1])
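# The loop above is a FIFO relaxation (SPFA-style): it finds the correct
# minimal total risk but may revisit cells. An equivalent heap-based Dijkstra
# over the same grid/costs/n would be (commented sketch):
#
#   import heapq
#   heap = [(0, 0, 0)]  # (cost, x, y)
#   while heap:
#       c, x1, y1 = heapq.heappop(heap)
#       if c > costs[x1][y1]:
#           continue
#       for dx, dy in [(1, 0), (0, 1), (-1, 0), (0, -1)]:
#           x, y = x1 + dx, y1 + dy
#           if 0 <= x < n and 0 <= y < n and c + grid[x][y] < costs[x][y]:
#               costs[x][y] = c + grid[x][y]
#               heapq.heappush(heap, (costs[x][y], x, y))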
| 24.391304
| 51
| 0.504456
| 108
| 561
| 2.611111
| 0.342593
| 0.035461
| 0.021277
| 0.028369
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.066667
| 0.278075
| 561
| 23
| 52
| 24.391304
| 0.62963
| 0
| 0
| 0
| 0
| 0
| 0.02669
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.05
| 0
| 0.05
| 0.05
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
c86731656ffa6ef2b38ba405b2722abcba4b7c94
| 1,217
|
py
|
Python
|
Algorithms/Sorting and Searching/sorting/merge sort/merge-sort-return-list.py
|
bulentsiyah/Python-Basics-Algorithms-Data-Structures-Object-Oriented-Programming-Job-Interview-Questions
|
3a67bdac1525495e6874c5bde61882848f60381d
|
[
"MIT"
] | 14
|
2021-01-23T11:28:16.000Z
|
2021-12-07T16:08:23.000Z
|
Algorithms/Sorting and Searching/sorting/merge sort/merge-sort-return-list.py
|
bulentsiyah/Python-Basics-Algorithms-Data-Structures-Object-Oriented-Programming-Job-Interview-Questions
|
3a67bdac1525495e6874c5bde61882848f60381d
|
[
"MIT"
] | null | null | null |
Algorithms/Sorting and Searching/sorting/merge sort/merge-sort-return-list.py
|
bulentsiyah/Python-Basics-Algorithms-Data-Structures-Object-Oriented-Programming-Job-Interview-Questions
|
3a67bdac1525495e6874c5bde61882848f60381d
|
[
"MIT"
] | 2
|
2021-02-03T12:28:19.000Z
|
2021-09-14T09:50:08.000Z
|
arr: list = [54,26,93,17,77,31,44,55,20]
def merge_sort(arr: list):
result: list = helper(arr, 0, len(arr) - 1)
for i in range(len(arr)):
arr[i] = result[i]
def helper(arr: list, start: int, end: int) -> list:
if start > end:
return []
elif start == end:
return [arr[start]]
else:
midpoint: int = start + (end - start) // 2
leftList = helper(arr, start, midpoint)
rightList = helper(arr, midpoint + 1, end)
return mergelists(leftList, rightList)
def mergelists(leftList: list, rightList: list) -> list:
arr: list = [None] * (len(leftList) + len(rightList))
i = j = k = 0
while i < len(leftList) and j < len(rightList):
if leftList[i] < rightList[j]:
arr[k] = leftList[i]
i += 1
else:
arr[k] = rightList[j]
j += 1
k += 1
while i < len(leftList):
arr[k] = leftList[i]
i += 1
k += 1
while j < len(rightList):
arr[k] = rightList[j]
j += 1
k += 1
return arr
print(arr)
merge_sort(arr)
print(arr)
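# Expected behaviour for the sample list above:
#   before: [54, 26, 93, 17, 77, 31, 44, 55, 20]
#   after:  [17, 20, 26, 31, 44, 54, 55, 77, 93]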
| 24.836735
| 61
| 0.474117
| 154
| 1,217
| 3.733766
| 0.266234
| 0.048696
| 0.015652
| 0.05913
| 0.114783
| 0.114783
| 0.062609
| 0.062609
| 0
| 0
| 0
| 0.040486
| 0.391126
| 1,217
| 48
| 62
| 25.354167
| 0.735493
| 0
| 0
| 0.394737
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.078947
| false
| 0
| 0
| 0
| 0.184211
| 0.052632
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
c867bd2b7a6b9e73aa95e644913f2d2ac179784c
| 3,406
|
py
|
Python
|
cve-manager/cve_manager/handler/task_handler/callback/cve_scan.py
|
seandong37tt4qu/jeszhengq
|
32b3737ab45e89e8c5b71cdce871cefd2c938fa8
|
[
"MulanPSL-1.0"
] | null | null | null |
cve-manager/cve_manager/handler/task_handler/callback/cve_scan.py
|
seandong37tt4qu/jeszhengq
|
32b3737ab45e89e8c5b71cdce871cefd2c938fa8
|
[
"MulanPSL-1.0"
] | null | null | null |
cve-manager/cve_manager/handler/task_handler/callback/cve_scan.py
|
seandong37tt4qu/jeszhengq
|
32b3737ab45e89e8c5b71cdce871cefd2c938fa8
|
[
"MulanPSL-1.0"
] | null | null | null |
#!/usr/bin/python3
# ******************************************************************************
# Copyright (c) Huawei Technologies Co., Ltd. 2021-2022. All rights reserved.
# licensed under the Mulan PSL v2.
# You can use this software according to the terms and conditions of the Mulan PSL v2.
# You may obtain a copy of Mulan PSL v2 at:
# http://license.coscl.org.cn/MulanPSL2
# THIS SOFTWARE IS PROVIDED ON AN 'AS IS' BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR FIT FOR A PARTICULAR
# PURPOSE.
# See the Mulan PSL v2 for more details.
# ******************************************************************************/
"""
Time:
Author:
Description: callback function of the cve scanning task.
"""
from aops_utils.log.log import LOGGER
from cve_manager.handler.task_handler.callback import TaskCallback
from cve_manager.conf.constant import ANSIBLE_TASK_STATUS, CVE_SCAN_STATUS
class CveScanCallback(TaskCallback):
"""
Callback function for cve scanning.
"""
def __init__(self, user, proxy, host_info):
"""
Args:
            user (str): the user whom the scanned hosts belong to.
proxy (object): database proxy
host_info (list): host info, e.g. hostname, ip, etc.
"""
self.user = user
task_info = {}
for info in host_info:
host_name = info.get('host_name')
task_info[host_name] = info
super().__init__(None, proxy, task_info)
def v2_runner_on_unreachable(self, result):
host_name, result_info, task_name = self._get_info(result)
self.result[host_name][task_name] = {
"info": result_info['msg'], "status": ANSIBLE_TASK_STATUS.UNREACHABLE}
LOGGER.debug("task name: %s, user: %s, host name: %s, result: %s",
task_name, self.user, host_name, ANSIBLE_TASK_STATUS.UNREACHABLE)
self.save_to_db(task_name, host_name, CVE_SCAN_STATUS.DONE)
def v2_runner_on_ok(self, result):
host_name, result_info, task_name = self._get_info(result)
self.result[host_name][task_name] = {
"info": result_info['stdout'], "status": ANSIBLE_TASK_STATUS.SUCCEED}
LOGGER.debug("task name: %s, user: %s, host name: %s, result: %s",
task_name, self.user, host_name, ANSIBLE_TASK_STATUS.SUCCEED)
self.save_to_db(task_name, host_name, CVE_SCAN_STATUS.DONE)
def v2_runner_on_failed(self, result, ignore_errors=False):
host_name, result_info, task_name = self._get_info(result)
self.result[host_name][task_name] = {
"info": result_info['stderr'], "status": ANSIBLE_TASK_STATUS.FAIL}
LOGGER.debug("task name: %s, user: %s, host name: %s, result: %s",
task_name, self.user, host_name, ANSIBLE_TASK_STATUS.FAIL)
self.save_to_db(task_name, host_name, CVE_SCAN_STATUS.DONE)
def save_to_db(self, task_name, host_name, status):
"""
Set the status of the host to database.
Args:
task_name (str): task name in playbook.
host_name (str)
status (str)
"""
host_id = self.task_info[host_name]['host_id']
self.proxy.update_scan_status([host_id])
LOGGER.debug("task name: %s, host_id: %s, status: %s", task_name, host_id, status)
| 40.547619
| 98
| 0.625954
| 458
| 3,406
| 4.412664
| 0.310044
| 0.083127
| 0.058882
| 0.044532
| 0.361702
| 0.335972
| 0.335972
| 0.335972
| 0.335972
| 0.335972
| 0
| 0.006442
| 0.225191
| 3,406
| 83
| 99
| 41.036145
| 0.759379
| 0.3165
| 0
| 0.333333
| 0
| 0.083333
| 0.113595
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.138889
| false
| 0
| 0.083333
| 0
| 0.25
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
c86aa619ebc8f014032a97d24de5e8f90b466d18
| 2,416
|
py
|
Python
|
tests/result/test_gatling.py
|
LaudateCorpus1/perfsize
|
710d6a5ae0918002e736f3aba8cd5cacb2b11326
|
[
"Apache-2.0"
] | 5
|
2021-08-02T22:44:32.000Z
|
2022-01-07T20:53:48.000Z
|
tests/result/test_gatling.py
|
intuit/perfsize
|
710d6a5ae0918002e736f3aba8cd5cacb2b11326
|
[
"Apache-2.0"
] | 1
|
2022-02-24T08:05:51.000Z
|
2022-02-24T08:05:51.000Z
|
tests/result/test_gatling.py
|
LaudateCorpus1/perfsize
|
710d6a5ae0918002e736f3aba8cd5cacb2b11326
|
[
"Apache-2.0"
] | 1
|
2022-02-24T08:05:41.000Z
|
2022-02-24T08:05:41.000Z
|
from datetime import datetime
from decimal import Decimal
from perfsize.perfsize import (
lt,
lte,
gt,
gte,
eq,
neq,
Condition,
Result,
Run,
Config,
Plan,
StepManager,
EnvironmentManager,
LoadManager,
ResultManager,
Reporter,
Workflow,
)
from perfsize.environment.mock import MockEnvironmentManager
from perfsize.load.mock import MockLoadManager
from perfsize.reporter.mock import MockReporter
from perfsize.result.mock import MockResultManager
from perfsize.result.gatling import Metric, GatlingResultManager
from perfsize.step.mock import MockStepManager
from pprint import pprint
import pytest
from unittest.mock import patch
class TestGatlingResultManager:
def test_gatling_result_manager(self) -> None:
# A plan would define the various configs possible for testing.
# A step manager would pick the next config to test.
# This test is starting with a given Config and an associated Run.
config = Config(
parameters={
"endpoint_name": "LEARNING-model-sim-public-c-1",
"endpoint_config_name": "LEARNING-model-sim-public-c-1-0",
"model_name": "model-sim-public",
"instance_type": "ml.t2.medium",
"initial_instance_count": "1",
"ramp_start_tps": "0",
"ramp_minutes": "0",
"steady_state_tps": "1",
"steady_state_minutes": "1",
},
requirements={
Metric.latency_success_p99: [
Condition(lt(Decimal("200")), "value < 200"),
Condition(gte(Decimal("0")), "value >= 0"),
],
Metric.percent_fail: [
Condition(lt(Decimal("0.01")), "value < 0.01"),
Condition(gte(Decimal("0")), "value >= 0"),
],
},
)
run = Run(
id="test_run_tag",
start=datetime.fromisoformat("2021-04-01T00:00:00"),
end=datetime.fromisoformat("2021-04-01T01:00:00"),
results=[],
)
# GatlingResultManager will parse simulation.log and populate results
result_manager = GatlingResultManager(
results_path="examples/perfsize-results-root"
)
result_manager.query(config, run)
pprint(run.results)
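A hedged way to exercise this test from the repo root, assuming the examples/perfsize-results-root fixture with a Gatling simulation.log is present:

# run just this module through pytest programmatically
import pytest
pytest.main(["-q", "tests/result/test_gatling.py"])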
file stats: avg_line_length 33.10 | max_line_length 77 | alphanum_fraction 0.591 | qsc: 250 words, 2416 chars, 72 lines (remaining qsc_* quality-signal columns omitted)

=== record c86bfc31df7a20be6ab83d39b12b217359bfd5df ===
size: 3904 | ext: py | lang: Python | path: __main__.py
max_stars_repo: GbaCretin/dmf2mlm @ 8a0d3d219aecb9aa14a66537e2deb02651bdfe7d | licenses: ["MIT"] | stars: 2 (2021-06-13 to 2021-09-14)
max_issues_repo: GbaCretin/dmf2mlm | issues: 6 (2022-03-22 to 2022-03-31)
max_forks_repo: GbaCretin/dmf2mlm | forks: null
from src import dmf,mzs,utils,sfx
from pathlib import Path
import argparse
def print_info(mlm_sdata):
if len(mlm_sdata.songs) <= 0: return
for i in range(len(mlm_sdata.songs[0].channels)):
channel = mlm_sdata.songs[0].channels[i]
print("\n================[ {0:01X} ]================".format(i))
if channel is None:
print("Empty")
continue
for event in channel.events:
print(event)
if isinstance(event, mzs.SongComJumpToSubEL):
sub_el = mlm_sdata.songs[0].sub_event_lists[i][event.sub_el_idx]
sub_el.print()
print("\t--------")
def print_df_info(mod, channels: list):
for ch in channels:
print("|####[${0:02X}]####".format(ch), end='')
print("|")
for i in range(mod.pattern_matrix.rows_in_pattern_matrix):
for ch in channels:
subel_idx = mod.pattern_matrix.matrix[ch][i]
print("|====(${0:02X})====".format(subel_idx), end='')
print("|")
for j in range(mod.pattern_matrix.rows_per_pattern):
for ch in channels:
pat_idx = mod.pattern_matrix.matrix[ch][i]
row = mod.patterns[ch][pat_idx].rows[j]
note_lbl = "--"
oct_lbl = "-"
vol_lbl = "--"
inst_lbl = "--"
fx0_lbl = "----"
if row.octave is not None:
oct_lbl = str(row.octave)
if row.note == dmf.Note.NOTE_OFF:
note_lbl = "~~"
oct_lbl = "~"
elif row.note is not None:
note_lbl = row.note.name.ljust(2, '-').replace('S', '#')
if row.volume is not None:
vol_lbl = "{:02X}".format(row.volume)
if row.instrument is not None:
inst_lbl = "{:02X}".format(row.instrument)
if len(row.effects) > 0:
fx0 = row.effects[0]
if fx0.code == dmf.EffectCode.EMPTY:
fx0_lbl = "--"
else:
fx0_lbl = "{:02X}".format(fx0.code.value)
if fx0.value is None:
fx0_lbl += "--"
else:
fx0_lbl += "{:02X}".format(fx0.value)
print("|{0}{1} {2}{3} {4}".format(note_lbl, oct_lbl, vol_lbl, inst_lbl, fx0_lbl), end='')
print("|")
parser = argparse.ArgumentParser(description='Convert DMF modules and SFX to an MLM driver compatible format')
parser.add_argument('dmf_module_paths', type=str, nargs='*', help="The paths to the input DMF files")
parser.add_argument('--sfx-directory', type=Path, help="Path to folder containing .raw files (Only absolute paths; Must be 18500Hz 16bit mono)")
parser.add_argument('--sfx-header', type=Path, help="Where to save the generated SFX c header (Only absolute paths)")
args = parser.parse_args()
dmf_modules = []
sfx_samples = None
if args.sfx_directory is not None:
print("Parsing SFX... ", end='', flush=True)
sfx_samples = sfx.SFXSamples(args.sfx_directory)
print("OK")
if args.sfx_header is not None:
print("Generating SFX Header... ", end='', flush=True)
c_header = sfx_samples.generate_c_header()
print("OK")
print(f"Saving SFX Header as '{args.sfx_header}'... ", end='', flush=True)
with open(args.sfx_header, "w") as file:
file.write(c_header)
print("OK")
for i in range(len(args.dmf_module_paths)):
with open(args.dmf_module_paths[i], "rb") as file:
print(f"Parsing '{args.dmf_module_paths[i]}'... ", end='', flush=True)
mod = dmf.Module(file.read())
print("OK")
print(f"Optimizing '{args.dmf_module_paths[i]}'... ", end='', flush=True)
mod.patch_for_mzs()
mod.optimize()
print("OK")
dmf_modules.append(mod)
mlm_sdata = mzs.SoundData()
print(f"Converting DMFs... ", end='', flush=True)
mlm_sdata.add_dmfs(dmf_modules)
print("OK")
if sfx_samples is not None:
print(f"Converting SFX... ", end='', flush=True)
mlm_sdata.add_sfx(sfx_samples, False)
print("OK")
#print_df_info(dmf_modules[0], [0, 4, 7])
#print_info(mlm_sdata)
print(f"Compiling... ", end='', flush=True)
mlm_compiled_sdata = mlm_sdata.compile_sdata()
mlm_compiled_vrom = mlm_sdata.compile_vrom()
print("OK")
with open("m1_sdata.bin", "wb") as file:
file.write(mlm_compiled_sdata)
with open("vrom.bin", "wb") as file:
file.write(mlm_compiled_vrom)
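For reference, a minimal programmatic sketch of the same conversion flow, reusing only calls that appear above ("song.dmf" is a placeholder path):

from src import dmf, mzs

with open("song.dmf", "rb") as f:
    mod = dmf.Module(f.read())
mod.patch_for_mzs()
mod.optimize()

sdata = mzs.SoundData()
sdata.add_dmfs([mod])
with open("m1_sdata.bin", "wb") as f:
    f.write(sdata.compile_sdata())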
file stats: avg_line_length 30.98 | max_line_length 144 | alphanum_fraction 0.650 | qsc: 592 words, 3904 chars, 126 lines (remaining qsc_* quality-signal columns omitted)

=== record c06f05eaa2d985c3d75a5edbcfcca422b525cddf ===
size: 2630 | ext: py | lang: Python | path: python/zephyr/models/__init__.py
max_stars_repo: r-pad/zephyr @ c8f45e207c11bfc2b21df169db65a7df892d2848 | licenses: ["MIT"] | stars: 18 (2021-05-27 to 2022-02-08)
max_issues_repo: r-pad/zephyr | issues: null
max_forks_repo: r-pad/zephyr | forks: 2 (2021-11-07 to 2022-03-01)
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
import time
from functools import partial
from .linear import MLP, LogReg
from .pointnet import PointNet
from .pointnet2 import PointNet2SSG
from .pointnet3 import PointNet3SSG
from .dgcnn import DGCNN
# from .masked_conv import ConvolutionalPoseModel
from .point_mlp import PointMLP
from pytorch_lightning.core.lightning import LightningModule
def getModel(model_name, args, mode="train"):
    if args.resume_path is None or mode == 'train':
        if model_name == 'mlp':
            model = MLP(args.dim_agg, args)
        elif model_name == "pmlp":  # was a separate `if`, which sent 'mlp' into the final `else` and raised
            model = PointMLP(args.dim_point, args)
        elif model_name[:2] == 'lg':
            model = LogReg(args.dim_agg, args)
        elif model_name == "pn":
            model = PointNet(args.dim_point, args)
        elif model_name == "pn2":
            model = PointNet2SSG(args.dim_point, args, num_class=1)
        elif model_name == "pn3":
            model = PointNet3SSG(args.dim_point, args, num_class=1)
        elif model_name == "dgcnn":
            model = DGCNN(args.dim_point, args, num_class=1)
        # elif model_name == "maskconv":
        #     model = ConvolutionalPoseModel(args)
        else:
            raise Exception("Unknown model name:", model_name)
    else:
        if model_name == 'mlp':
            model = MLP.load_from_checkpoint(args.resume_path, args.dim_agg, args)
        elif model_name == "pmlp":
            model = PointMLP.load_from_checkpoint(args.resume_path, args.dim_point, args)
        elif model_name[:2] == 'lg':
            model = LogReg.load_from_checkpoint(args.resume_path, args.dim_agg, args)
        elif model_name == "pn":
            model = PointNet.load_from_checkpoint(args.resume_path, args.dim_point, args)
        elif model_name == "pn2":
            model = PointNet2SSG.load_from_checkpoint(args.resume_path, args.dim_point, args, num_class=1)
        elif model_name == "pn3":
            model = PointNet3SSG.load_from_checkpoint(args.resume_path, args.dim_point, args, num_class=1)
        elif model_name == "dgcnn":
            model = DGCNN.load_from_checkpoint(args.resume_path, args.dim_point, args, num_class=1)
        # elif model_name == "maskconv":
        #     model = ConvolutionalPoseModel.load_from_checkpoint(args.resume_path, args)
        else:
            raise Exception("Unknown model name:", model_name)
    if args.pretrained_pnfeat is not None:
        model.loadPretrainedFeat(args.pretrained_pnfeat)
    return model
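A hedged example of calling getModel with an argparse-style namespace; the dimension values below are made up, and the real training script defines many more fields:

from argparse import Namespace

args = Namespace(resume_path=None, pretrained_pnfeat=None,
                 dim_point=39, dim_agg=26)
model = getModel("pn", args, mode="train")  # builds PointNet(args.dim_point, args)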
file stats: avg_line_length 43.11 | max_line_length 107 | alphanum_fraction 0.646 | qsc: 330 words, 2630 chars, 60 lines (remaining qsc_* quality-signal columns omitted)

=== record c07358522633a4b5223edee437652e807e46cb27 ===
size: 1054 | ext: py | lang: Python | path: timer.py
max_stars_repo: ryanleesmith/race-timer @ 3a058e3689c9435751b06909d5b7a14db618d2da | licenses: ["MIT"] | stars: null
max_issues_repo: ryanleesmith/race-timer | issues: null
max_forks_repo: ryanleesmith/race-timer | forks: null
from gps import *
import math
import time
import json
import threading
gpsd = None
poller = None
class Poller(threading.Thread):
    def __init__(self):
        threading.Thread.__init__(self)
        global gpsd
        gpsd = gps(mode=WATCH_ENABLE | WATCH_NEWSTYLE)
        self.current_value = None
        self.running = True

    def run(self):
        global gpsd, poller
        while poller.running:
            gpsd.next()

def timer():
    global gpsd, poller
    poller = Poller()
    try:
        poller.start()
        while True:
            speed = gpsd.fix.speed
            if math.isnan(speed):
                speed = 0
            #print(speed)
            #print(gpsd.fix.mode)
            #print(gpsd.satellites)
            dump = json.dumps({'x': int(round(time.time() * 1000)), 'y': speed})
            yield 'event: SPEED\ndata: {}\n\n'.format(dump)
            time.sleep(0.1)
    except (KeyboardInterrupt, SystemExit):
        print("\nKilling Thread...")
        poller.running = False
        poller.join()
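Example consumer of the generator (requires a running gpsd daemon with a GPS fix; without one, gpsd.fix.speed stays NaN and the yielded speed is 0):

if __name__ == '__main__':
    for event in timer():
        print(event, end='')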
file stats: avg_line_length 23.42 | max_line_length 80 | alphanum_fraction 0.555 | qsc: 118 words, 1054 chars, 44 lines (remaining qsc_* quality-signal columns omitted)

=== record c078ff18aa77981230542dee77a093f9d2cdb667 ===
size: 13841 | ext: py | lang: Python | path: layer_manager/models.py
max_stars_repo: lueho/BRIT @ 1eae630c4da6f072aa4e2139bc406db4f4756391 | licenses: ["MIT"] | stars: null
max_issues_repo: lueho/BRIT | issues: 4 (2022-03-29)
max_forks_repo: lueho/BRIT | forks: null
import django.contrib.gis.db.models as gis_models
from django.apps import apps
from django.db import models, connection
from django.urls import reverse
from distributions.models import TemporalDistribution, Timestep
from inventories.models import Scenario, InventoryAlgorithm
from materials.models import SampleSeries, MaterialComponent
from .exceptions import InvalidGeometryType, NoFeaturesProvided, TableAlreadyExists
class LayerField(models.Model):
"""
Holds all field definitions of GIS layers. Used to recreate a dynamically created model in case it is lost from
the apps registry.
"""
field_name = models.CharField(max_length=63)
data_type = models.CharField(max_length=10)
def data_type_object(self):
if self.data_type == 'float':
return models.FloatField()
elif self.data_type == 'int':
return models.IntegerField()
@staticmethod
def model_field_type(data_type: str):
if data_type == 'float':
return models.FloatField(blank=True, null=True)
elif data_type == 'int':
return models.IntegerField(blank=True, null=True)
elif data_type == 'str':
return models.CharField(blank=True, null=True, max_length=200)
class LayerManager(models.Manager):
supported_geometry_types = ['Point', 'MultiPoint', 'LineString', 'MultiLineString', 'Polygon', 'MultiPolygon', ]
def create_or_replace(self, **kwargs):
results = kwargs.pop('results')
if 'features' not in results or len(results['features']) == 0:
raise NoFeaturesProvided(results)
else:
features = results['features']
fields = {}
# The data types of the fields are detected from their content. Any column that has only null values
# will be omitted completely
if features:
fields_with_unknown_datatype = list(features[0].keys())
for feature in features:
if not fields_with_unknown_datatype:
break
for key, value in feature.items():
if feature[key] and key in fields_with_unknown_datatype:
fields[key] = type(value).__name__
fields_with_unknown_datatype.remove(key)
# At this point there might be fields left out because they contained only null values, from which the
# data type could not be detected. They should be omitted, but this information should be logged.
# TODO: add omitted columns info to log
kwargs['geom_type'] = fields.pop('geom')
if kwargs['geom_type'] not in self.supported_geometry_types:
raise InvalidGeometryType(kwargs['geom_type'])
kwargs['table_name'] = 'result_of_scenario_' + \
str(kwargs['scenario'].id) + '_algorithm_' + \
str(kwargs['algorithm'].id) + '_feedstock_' + \
str(kwargs['feedstock'].id)
layer, created = super().get_or_create(table_name=kwargs['table_name'], defaults=kwargs)
if created:
layer.add_layer_fields(fields)
feature_collection = layer.update_or_create_feature_collection()
layer.create_feature_table()
else:
if layer.is_defined_by(fields=fields, **kwargs):
feature_collection = layer.get_feature_collection()
feature_collection.objects.all().delete()
else:
layer.delete()
layer = super().create(**kwargs)
layer.add_layer_fields(fields)
feature_collection = layer.update_or_create_feature_collection()
layer.create_feature_table()
layer.delete_aggregated_values()
for feature in features:
feature_collection.objects.create(**feature)
if 'aggregated_values' in results:
layer.add_aggregated_values(results['aggregated_values'])
if 'aggregated_distributions' in results:
layer.add_aggregated_distributions(results['aggregated_distributions'])
return layer, feature_collection
class Layer(models.Model):
"""
Registry of all created layers. This main model holds all meta information about each layer. When a new layer record
is created, another custom model named "features collection" is automatically generated, preserving the original
shape of the gis source dataset as much as required. The feature collection can be used to manage the actual
features of the layer. It will create a separate database table with the name given in "table_name" to store the
features.
"""
name = models.CharField(max_length=56)
geom_type = models.CharField(max_length=20)
table_name = models.CharField(max_length=200)
scenario = models.ForeignKey(Scenario, on_delete=models.CASCADE)
feedstock = models.ForeignKey(SampleSeries, on_delete=models.CASCADE)
algorithm = models.ForeignKey(InventoryAlgorithm, on_delete=models.CASCADE)
layer_fields = models.ManyToManyField(LayerField)
objects = LayerManager()
class Meta:
constraints = [
models.UniqueConstraint(fields=['table_name'], name='unique table_name')
]
def add_aggregated_values(self, aggregates: list):
for aggregate in aggregates:
LayerAggregatedValue.objects.create(name=aggregate['name'],
value=aggregate['value'],
unit=aggregate['unit'],
layer=self)
def add_aggregated_distributions(self, distributions):
for distribution in distributions:
dist = TemporalDistribution.objects.get(id=distribution['distribution'])
aggdist = LayerAggregatedDistribution.objects.create(name=distribution['name'],
distribution=dist,
layer=self)
for dset in distribution['sets']:
distset = DistributionSet.objects.create(
aggregated_distribution=aggdist,
timestep_id=dset['timestep']
)
for share in dset['shares']:
DistributionShare.objects.create(
component_id=share['component'],
average=share['average'],
standard_deviation=0.0, # TODO
distribution_set=distset
)
def add_layer_fields(self, fields: dict):
for field_name, data_type in fields.items():
field, created = LayerField.objects.get_or_create(field_name=field_name, data_type=data_type)
self.layer_fields.add(field)
def as_dict(self):
return {
'name': self.name,
'geom_type': self.geom_type,
'table_name': self.table_name,
'scenario': self.scenario,
'feedstock': self.feedstock,
'inventory_algorithm': self.algorithm,
'layer_fields': [field for field in self.layer_fields.all()],
'aggregated_results': [
{'name': aggregate.name,
'value': int(aggregate.value),
'unit': aggregate.unit}
for aggregate in self.layeraggregatedvalue_set.all()
]
}
def update_or_create_feature_collection(self):
"""
Dynamically creates model connected to this layer instance that is used to handle its features and store them
in a separate custom database table.
"""
# Empty app registry from any previous version of this model
model_name = self.table_name
if model_name in apps.all_models['layer_manager']:
del apps.all_models['layer_manager'][model_name]
attrs = {
'__module__': 'layer_manager.models',
'geom': getattr(gis_models, self.geom_type + 'Field')(srid=4326)
}
# Add all custom columns to model
for field in self.layer_fields.all():
attrs[field.field_name] = LayerField.model_field_type(field.data_type)
# Create model class and assign table_name
model = type(model_name, (models.Model,), attrs)
model._meta.layer = self
model._meta.db_table = self.table_name
return model
def create_feature_table(self):
"""
Creates a new table with all given fields from a model
:return:
"""
feature_collection = self.get_feature_collection()
# Check if any table of the name already exists
with connection.cursor() as cursor:
cursor.execute(f"SELECT to_regclass('{feature_collection._meta.db_table}')")
if cursor.fetchone()[0]:
raise TableAlreadyExists
# After cleanup, now create the new version of the result table
with connection.schema_editor() as schema_editor:
schema_editor.create_model(feature_collection)
def feature_table_url(self):
return reverse('scenario_result_map', kwargs={'pk': self.scenario.id, 'algo_pk': self.algorithm.id})
def delete(self, **kwargs):
self.delete_feature_table()
del apps.all_models['layer_manager'][self.table_name]
super().delete()
def delete_feature_table(self):
"""
Deletes a table from a given model
:return:
"""
feature_collection = self.get_feature_collection()
with connection.cursor() as cursor:
cursor.execute(f"SELECT to_regclass('{feature_collection._meta.db_table}')")
if cursor.fetchone()[0] is None:
return
with connection.schema_editor() as schema_editor:
schema_editor.delete_model(feature_collection)
def delete_aggregated_values(self):
LayerAggregatedValue.objects.filter(layer=self).delete()
def get_feature_collection(self):
"""
Returns the feature collection model that is used to manage the features connected to this layer.
"""
# If the model is already registered, return original model
if self.table_name in apps.all_models['layer_manager']:
return apps.all_models['layer_manager'][self.table_name]
else:
return self.update_or_create_feature_collection()
def is_defined_by(self, **kwargs):
fields = {field.field_name: field.data_type for field in self.layer_fields.all()}
comparisons = [
self.table_name == kwargs['table_name'],
self.geom_type == kwargs['geom_type'],
self.scenario == kwargs['scenario'],
self.algorithm == kwargs['algorithm'],
fields == kwargs['fields']
]
return all(comparisons)
class LayerAggregatedValue(models.Model):
"""
Class to hold all aggregated results from a result layer
"""
name = models.CharField(max_length=63)
value = models.FloatField()
unit = models.CharField(max_length=15, blank=True, null=True, default='')
layer = models.ForeignKey(Layer, on_delete=models.CASCADE)
DISTRIBUTION_TYPES = (
('seasonal', 'seasonal'), # Assumes array with length 12 for each month of the year
)
class LayerAggregatedDistribution(models.Model):
"""
Holds desired aggregated distributions for a layer. Intended for seasonal distributions broken down to feedstock
components but any other distribution works as well.
"""
name = models.CharField(max_length=255, null=True)
type = models.CharField(max_length=255, choices=DISTRIBUTION_TYPES, null=True)
distribution = models.ForeignKey(TemporalDistribution, on_delete=models.CASCADE, null=True)
layer = models.ForeignKey(Layer, on_delete=models.CASCADE, null=True)
@property
def shares(self):
return DistributionShare.objects.filter(distribution_set__aggregated_distribution=self)
@property
def components(self):
return MaterialComponent.objects.filter(
id__in=[share['component'] for share in self.shares.values('component').distinct()]
)
@property
def serialized(self):
dist = []
for component in self.components:
component_dist = {
'label': component.name,
'data': {},
'unit': 'Mg/a'
}
# data = {}
for timestep in self.distribution.timestep_set.all():
try: # TODO: find better way to deal with the fact that there is not a value for every component/timestep combination
share = self.shares.get(component=component, distribution_set__timestep=timestep)
component_dist['data'][timestep.name] = share.average
except DistributionShare.DoesNotExist:
pass
# component_dist['data'].append(data)
dist.append(component_dist)
return dist
class DistributionSet(models.Model):
timestep = models.ForeignKey(Timestep, on_delete=models.CASCADE, null=True)
aggregated_distribution = models.ForeignKey(LayerAggregatedDistribution, on_delete=models.CASCADE, null=True)
class DistributionShare(models.Model):
distribution_set = models.ForeignKey(DistributionSet, on_delete=models.CASCADE)
component = models.ForeignKey(MaterialComponent, on_delete=models.CASCADE, null=True)
average = models.FloatField()
standard_deviation = models.DecimalField(decimal_places=2, max_digits=5)
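A hedged sketch of feeding LayerManager.create_or_replace; the 'yield_kg' field is illustrative, and scenario, feedstock, and algorithm are assumed to be previously saved model instances. Note that field types are detected from the feature values, so 'geom' must be a GEOS geometry whose type name is in supported_geometry_types:

from django.contrib.gis.geos import Point

results = {
    'features': [
        {'geom': Point(9.99, 53.55), 'yield_kg': 120.5},  # type names drive field detection
    ],
}
layer, collection = Layer.objects.create_or_replace(
    name='residues', results=results,
    scenario=scenario, feedstock=feedstock, algorithm=algorithm,
)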
file stats: avg_line_length 40.59 | max_line_length 134 | alphanum_fraction 0.628 | qsc: 1502 words, 13841 chars, 340 lines (remaining qsc_* quality-signal columns omitted)

=== record c07ca44e33380193eabc6f8bec1ebe24f8d013c9 ===
size: 8212 | ext: py | lang: Python | path: bin/CAD/Abaqus/AbaqusGeometry.py
max_stars_repo: lefevre-fraser/openmeta-mms @ 08f3115e76498df1f8d70641d71f5c52cab4ce5f | licenses: ["MIT"] | stars: null
max_issues_repo: lefevre-fraser/openmeta-mms | issues: null
max_forks_repo: lefevre-fraser/openmeta-mms | forks: null
"""
AbaqusGeometry.py
For use with Abaqus 6.13-1 (Python 2.6.2).
Created by Ozgur Yapar <oyapar@isis.vanderbilt.edu>
Robert Boyles <rboyles@isis.vanderbilt.edu>
- Includes modules which take care of geometrical operations
in the part and assembly level.
"""
import re
import math
from numpy import array, cross, transpose, vstack, dot
from abaqusConstants import *
import numpy.linalg as LA
import string as STR
def regexFriendly(inString):
""" Clean up coordinates read from STEP file, prior to applying regular expressions. """
outString = STR.replace(inString, '\'', '%')
outString = STR.replace(outString, '(', '')
outString = STR.replace(outString, ')', ',')
return outString
def coordinate(stepString):
""" Extract tuple of cartesian coordinates from STEP coordinate string. """
e = re.compile(',\S+,,') # regular expression
coordFind = e.search(stepString) # extract substring containing coordinates
coordList = coordFind.group(0).strip(',').split(',') # separate x, y, and z coordinates by commas
coords = (float(coordList[0]), float(coordList[1]),
float(coordList[2])) # convert coordinate strings to a tuple of floats
return coords # return the coordinate tuple
def get3DTransformArray(fromDir1, fromDir2, toDir1, toDir2):
""" Calculate transformation matrix between two coordinate systems as defined in STEP. """
fromDir1 = array(fromDir1) # convert u1 vector to an array object
fromDir2 = array(fromDir2) # convert u2 vector to an array object
fromDir3 = cross(fromDir1, fromDir2) # extrapolate u3 vector from u1 and u2
toDir1 = array(toDir1) # convert v1 vector to an array object
toDir2 = array(toDir2) # convert v2 vector to an array object
toDir3 = cross(toDir1, toDir2) # extrapolate v3 vector from v1 and v2
inva = LA.inv(transpose(vstack([fromDir1, fromDir2, fromDir3])))
b = transpose(vstack([toDir1, toDir2, toDir3]))
transformArray = dot(b, inva)
return transformArray
def unv(center, planarA, planarB):
""" Use vector operations to get unit normal vector, given a center coordinate and two planar coordinates. """
center = array(center)
planarA = array(planarA)
planarB = array(planarB)
vA = planarA - center
vB = planarB - center
xV = cross(vA, vB)
return xV/LA.norm(xV)
def transCoord(fromCoord, transformArray, translationVector):
""" Transform/translate a cartesian point from one coordinate system to another. """
vprod = dot(transformArray, fromCoord)
vprod = vprod + translationVector
toCoord = tuple(vprod)
return toCoord
def asmRecursion(asm, subAsms, asmParts):
""" Recursively identifies parts in sub-assemblies, in the order they are imported from STEP. """
parts = []
try:
for child in subAsms[asm]:
if child in subAsms:
parts.extend(asmRecursion(child, subAsms, asmParts))
else:
parts.extend(asmParts[child])
except KeyError:
pass
if asm in asmParts:
parts.extend(asmParts[asm])
return parts
def coordTransform(localTMs, localTVs, asm, subAsms, asmParts, localCoords):
"""
Iterate through sub-assemblies and top-level parts to transform/translate
every datum point to assembly coordinates; uses transCoord()
Note: Ignores top-level datums in highest assembly, which will not exist
in a CyPhy assembly anyway
"""
globalCoords = {} # create dictionary object to hold new point library
if asm in subAsms: # if assembly has sub-assemblies:
for subAsm in subAsms[asm]: # for each sub-assembly in the assembly:
subCoords = coordTransform(localTMs, localTVs, subAsm, # get point library local to sub-assembly
subAsms, asmParts, localCoords)
for part in subCoords.keys(): # for each component in chosen sub-assembly:
globalCoords.update([[part, {}]]) # create new entry in globalCoords
for (point, coord) in subCoords[part].iteritems(): # for each point in part/sub-sub-assembly:
globalCoords[part].update([[point.upper(), transCoord( # translate/transform point to globalCoords
array(coord), localTMs[subAsm], localTVs[subAsm])]])
globalCoords.update([[subAsm, {}]]) # create entry for sub-assembly in globalCoords
for (point, coord) in localCoords[subAsm].iteritems():
# for each point specified at top level of that sub-assembly:
globalCoords[subAsm].update([[point.upper(), transCoord( # translate/transform point to globalCoords
array(coord), localTMs[subAsm], localTVs[subAsm])]])
if asm in asmParts: # if assembly has top-level parts:
for part in asmParts[asm]: # for each top-level part:
globalCoords.update([[part, {}]]) # create new entry in globalCoords
for (point, coord) in localCoords[part].iteritems(): # for each point in part:
globalCoords[part].update([[point.upper(), transCoord( # translate/transform point to globalCoords
array(coord), localTMs[part], localTVs[part])]])
return globalCoords
def myMask(idnums):
""" Produce mask string for getSequenceFromMask(...) from a feature ID or set of IDs. """
try:
idnums = tuple(idnums) # make the input a tuple!
except TypeError: # if input is not iterable:
idnums = (idnums,) # make it a tuple anyway!
powersum = 0 # integer to hold mask number
for num in idnums: # iterating through input IDs:
powersum += 2**num # add 2**ID to powersum
rawmask = hex(powersum)[2:] # convert powermask to hexadecimal
rawmask = STR.rstrip(rawmask, 'L') # strip "long" character, if necessary
if max(idnums) < 32: # if hex number is 8 digits or less:
mask = '[#' + rawmask + ' ]' # create mask
else: # if hex number is >8 digits:
maskpieces = [] # container for fragments of hex string
piececount = int(math.ceil(len(rawmask)/8)) # number of times to split hex string
for i in range(piececount): # for each split needed:
maskpieces.append(rawmask[-8:]) # append last 8 characters of hex string to fragment list
rawmask = rawmask[:-8] # trim last 8 characters from hex string
maskpieces.append(rawmask) # append remaining hex string to fragment list
mask = '[#' + STR.join(maskpieces, ' #') + ' ]' # join fragments, using the correct delimiters, to create mask
return mask
def toBC(constraint):
""" Translates a degree of freedom as read from the XML to the appropriate SymbolicConstant. """
if constraint == 'FIXED':
return 0
elif constraint == 'FREE':
return UNSET
else:
return float(constraint)
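A quick sanity check of get3DTransformArray using numpy only (with the abaqusConstants import stubbed out, since that module exists only inside an Abaqus session): mapping the standard basis onto itself should produce the identity matrix.

if __name__ == '__main__':
    T = get3DTransformArray((1, 0, 0), (0, 1, 0), (1, 0, 0), (0, 1, 0))
    print(T)  # expect the 3x3 identity matrix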
file stats: avg_line_length 53.67 | max_line_length 121 | alphanum_fraction 0.565 | qsc: 853 words, 8212 chars, 153 lines (remaining qsc_* quality-signal columns omitted)

=== record c07dacc643d713f89a754dcc9e2a89ae590b2576 ===
size: 2143 | ext: py | lang: Python | path: analysis/11-compress-jacobians.py
max_stars_repo: lmjohns3/cube-experiment @ ab6d1a9df95efebc369d184ab1c748d73d5c3313 | licenses: ["MIT"] | stars: null
max_issues_repo: lmjohns3/cube-experiment | issues: null
max_forks_repo: lmjohns3/cube-experiment | forks: null
import climate
import glob
import gzip
import io
import lmj.cubes
import logging
import numpy as np
import os
import pandas as pd
import pickle
import theanets
def compress(source, k, activation, **kwargs):
fns = sorted(glob.glob(os.path.join(source, '*', '*_jac.csv.gz')))
logging.info('%s: found %d jacobians', source, len(fns))
# the clipping operation affects about 2% of jacobian values.
dfs = [np.clip(pd.read_csv(fn, index_col='time').dropna(), -10, 10)
for fn in fns]
B, N = 128, dfs[0].shape[1]
logging.info('loaded %s rows of %d-D data from %d files',
sum(len(df) for df in dfs), N, len(dfs))
def batch():
batch = np.zeros((B, N), 'f')
for b in range(B):
a = np.random.randint(len(dfs))
batch[b] = dfs[a].iloc[np.random.randint(len(dfs[a])), :]
return [batch]
pca = theanets.Autoencoder([N, (k, activation), (N, 'tied')])
pca.train(batch, **kwargs)
key = '{}_k{}'.format(activation, k)
if 'hidden_l1' in kwargs:
key += '_s{hidden_l1:.4f}'.format(**kwargs)
for df, fn in zip(dfs, fns):
df = pd.DataFrame(pca.encode(df.values.astype('f')), index=df.index)
s = io.StringIO()
df.to_csv(s, index_label='time')
out = fn.replace('_jac', '_jac_' + key)
with gzip.open(out, 'wb') as handle:
handle.write(s.getvalue().encode('utf-8'))
logging.info('%s: saved %s', out, df.shape)
out = os.path.join(source, 'pca_{}.pkl'.format(key))
pickle.dump(pca, open(out, 'wb'))
@climate.annotate(
root='load data files from subject directories in this path',
k=('compress to this many dimensions', 'option', None, int),
activation=('use this activation function', 'option'),
)
def main(root, k=1000, activation='relu'):
for subject in lmj.cubes.Experiment(root).subjects:
compress(subject.root, k, activation,
momentum=0.9,
hidden_l1=0.01,
weight_l1=0.01,
monitors={'hid1:out': (0.01, 0.1, 1, 10)})
if __name__ == '__main__':
climate.call(main)
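compress() can also be called directly, bypassing the climate CLI wrapper; the path and hyperparameters below are placeholders (extra keyword arguments are forwarded to the autoencoder's train call, as in main above):

compress('/data/subject01', k=256, activation='relu',
         momentum=0.9, hidden_l1=0.01, weight_l1=0.01)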
file stats: avg_line_length 30.61 | max_line_length 76 | alphanum_fraction 0.591 | qsc: 314 words, 2143 chars, 69 lines (remaining qsc_* quality-signal columns omitted)

=== record c07f103a6a6e92a6245209f932b8d90c064fd018 ===
size: 21369 | ext: py | lang: Python | path: commerce/views.py
max_stars_repo: zlkca/ehetuan-api @ da84cd4429bd33e8fe191327ec267bf105f41453 | licenses: ["MIT"] | stars: 1 (2020-05-27)
max_issues_repo: zlkca/ehetuan-api | issues: 6 (2020-06-05 to 2021-09-07)
max_forks_repo: zlkca/ehetuan-api | forks: null
import json
import os
import logging
from datetime import datetime
from django.db.models import Q,Count
from django.http import JsonResponse
from django.views.generic import View
from django.views.decorators.csrf import csrf_exempt
from django.utils.decorators import method_decorator
from django.conf import settings
from rest_framework_jwt.settings import api_settings
from django.core.exceptions import ObjectDoesNotExist#EmptyResultSet, MultipleObjectsReturned
from django.contrib.auth import get_user_model
from commerce.models import Restaurant, Picture, Product, Category, Order, OrderItem, Style, PriceRange, FavoriteProduct
from account.models import Province, City, Address
from utils import to_json, obj_to_json, get_data_from_token
logger = logging.getLogger(__name__)
def processPictures(product, pictures):
# product --- Product model object
# pictures --- list of dicts passed from the front end
reindex = False
pic = None
for picture in pictures:
try:
pic = Picture.objects.get(product_id=product.id, index=picture['index'])
except:
pic = None
if pic:
if picture['status'] == 'removed':
reindex = True
rmPicture(pic)
elif picture['status'] == 'changed':
savePicture(product, pic, picture)
pic.save()
else:# new
pic = Picture()
savePicture(product, pic, picture)
if reindex:
reindexPicture(product.id)
def savePicture(product, pic, picture):
# product --- Product model object
# pic --- Picture model object
# picture --- dict from front end
pic.index = picture['index']
pic.name = picture['name']
pic.product = product
pic.image.save(picture['image'].name, picture['image'].file, True)
pic.save()
def getDefaultPicture(pictures):
if pictures.count() == 0:
return ''
else:
if pictures.count()>0 and pictures[0].image.name:
return pictures[0].image.name
else:
return ''
def rmPicture(pic):
try:
os.remove(pic.image.path)
except:
print('remove image failed')
pic.image.delete()
pic.delete()
def reindexPicture(pid):
# pid --- product id
pics = Picture.objects.filter(product_id=pid).order_by('index')
i = 0
for pic in pics:
pic.index = i
i = i + 1
pic.save()
def saveProduct(params):
_id = params.get('id')
if _id:
item = Product.objects.get(id=_id)
else:
item = Product()
item.name = params.get('name')
item.description = params.get('description')
item.price = params.get('price')
item.currency = params.get('currency')
restaurant_id = params.get('restaurant_id')
try:
item.restaurant = Restaurant.objects.get(id=restaurant_id)
except:
item.restaurant = None
#item.category = category
item.save()
# item.categories.clear()
# Assume there is only one image
# n_pics = int(params.get('n_pictures'))
# pictures = []
# for i in range(n_pics):
# name = params.get('name%s'%i)
# status = params.get('image_status%s'%i)
# image = req.FILES.get('image%s'%i)
# pictures.append({'index':i,'name':name, 'status':status, 'image':image})
#
# self.processPictures(item, pictures)
#
# # select default picture
# pics = Picture.objects.filter(product_id=item.id)
# item.fpath = self.getDefaultPicture(pics)
# item.save()
return item
def find_restaurants_by_location(lat, lng, distance):
query = """SELECT *,
(
3959 *
acos(cos(radians(%s)) *
cos(radians(lat)) *
cos(radians(lng) -
radians(%s)) +
sin(radians(%s)) *
sin(radians(lat )))
) AS distance
FROM commerce_restaurant
HAVING distance < %s
ORDER BY distance LIMIT 0, 20;"""%(lat, lng, lat, distance)
try:
return Restaurant.objects.raw(query)
except:
return None
@method_decorator(csrf_exempt, name='dispatch')
class RestaurantView(View):
def getList(self, req):
lat = req.GET.get('lat')
lng = req.GET.get('lng')
distance = 25 # search radius; note the 3959 factor in the SQL computes distance in miles, not km
restaurants = []
admin_id = req.GET.get('admin_id')
if admin_id: # need address
try:
item = Restaurant.objects.get(admin_id=admin_id)
restaurant = to_json(item)
restaurant['address'] = self.getAddress(item)
return JsonResponse({'data':[restaurant]})
except Exception:
return JsonResponse({'data':[]})
elif lat and lng: # do not need address
restaurants = find_restaurants_by_location(lat, lng, distance)
else:
try:
restaurants = Restaurant.objects.all()#.annotate(n_products=Count('product'))
except Exception:
return JsonResponse({'data':[]})
rs =[]
for r in restaurants:
rs.append(to_json(r))
return JsonResponse({'data': rs })
def getAddress(self, restaurant):
addr_id = restaurant.address.id
item = None
try:
item = Address.objects.get(id=addr_id)
except:
item = None
return to_json(item)
def get(self, req, *args, **kwargs):
pid = kwargs.get('id')
if pid:
try:
item = Restaurant.objects.get(id=int(pid))
p = obj_to_json(item, False)
p['address'] = self.getAddress(item)
return JsonResponse({'data':p})
except Exception as e:
print(e)
return JsonResponse({'data':''})
else: # get list
return self.getList(req)#JsonResponse({'data':''})
def delete(self, req, *args, **kwargs):
pid = int(kwargs.get('id'))
if pid:
instance = Restaurant.objects.get(id=pid)
instance.delete()
items = Restaurant.objects.filter().order_by('-updated')
return JsonResponse({'data':to_json(items)})
return JsonResponse({'data':[]})
def post(self, req, *args, **kwargs):
params = req.POST
authorization = req.META['HTTP_AUTHORIZATION']
token = authorization.replace("Bearer ", "")
data = get_data_from_token(token)
# if data and data['username']=='admin':
_id = params.get('id')
if _id:
item = Restaurant.objects.get(id=_id)
else:
item = Restaurant()
item.name = params.get('name')
item.description = params.get('description')
item.lat = float(params.get('lat'))
item.lng = float(params.get('lng'))
item.created = item.created if item.created else datetime.now()
addr_id = params.get('address_id')
if addr_id:
addr = Address.objects.get(id=addr_id)
self.saveAddress(addr, params)
item.address = addr
else:
addr = Address()
self.saveAddress(addr, params)
item.address = addr
item.save()
image_status = params.get('image_status')
if image_status == 'changed':
self.rmPicture(item)
image = req.FILES.get("image")
item.image.save(image.name, image.file, True)
item.save()
return JsonResponse({'data':to_json(item)})
def saveAddress(self, addr1, params):
addr1.street = params.get('street')
addr1.sub_locality = params.get('sub_locality')
addr1.postal_code = params.get('postal_code')
addr1.lat = params.get('lat')
addr1.lng = params.get('lng')
addr1.province = params.get('province')
addr1.city = params.get('city')
addr1.save()
def rmPicture(self, item):
try:
os.remove(item.image.path)
except:
print('remove image failed')
item.image.delete()
@method_decorator(csrf_exempt, name='dispatch')
class CategoryView(View):
def getList(self):
categories = []
try:
categories = Category.objects.all()#.annotate(n_products=Count('product'))
except Exception as e:
logger.error('Get category Exception:%s'%e)
return JsonResponse({'data':[]})
return JsonResponse({'data': to_json(categories)})
def get(self, req, *args, **kwargs):
cid = kwargs.get('id')
if cid:
cid = int(cid)
try:
item = Category.objects.get(id=cid)
return JsonResponse({'data':to_json(item)})
except Exception as e:
return JsonResponse({'data':''})
else:
return self.getList()
def delete(self, req, *args, **kwargs):
pid = int(kwargs.get('id'))
if pid:
instance = Category.objects.get(id=pid)
instance.delete()
items = Category.objects.filter().order_by('-updated')
return JsonResponse({'data':to_json(items)})
return JsonResponse({'data':[]})
def post(self, req, *args, **kwargs):
ubody = req.body.decode('utf-8')
params = json.loads(ubody)
_id = params.get('id')
if _id:
item = Category.objects.get(id=_id)
else:
item = Category()
item.name = params.get('name')
item.description = params.get('description')
# item.status = params.get('status')
item.save()
return JsonResponse({'data':to_json(item)})
@method_decorator(csrf_exempt, name='dispatch')
class ProductListView(View):
def get(self, req, *args, **kwargs):
''' get product list
'''
products = []
cats = req.GET.get('cats')
restaurants = req.GET.get('ms')
colors = req.GET.get('colors')
keyword = req.GET.get('keyword')
kwargs = {}
q = None
if cats:
q = Q(categories__id__in=cats.split(','))
if restaurants:
if q:
q = q | Q(restaurant__id__in=restaurants.split(','))
else:
q = Q(restaurant__id__in=restaurants.split(','))
if colors:
if q:
q = q | Q(color__id__in=colors.split(','))
else:
q = Q(restaurant__id__in=restaurants.split(','))
restaurant_id = req.GET.get('restaurant_id')
category_id = req.GET.get('category_id')
if restaurant_id:
products = Product.objects.filter(restaurant_id=restaurant_id).annotate(n_likes=Count('favoriteproduct'))
elif category_id:
products = Product.objects.filter(category_id=category_id).annotate(n_likes=Count('favoriteproduct'))
elif cats or restaurants or colors:
if keyword:
products = Product.objects.filter(q).filter(Q(name__icontains=keyword)
|Q(categories__name__icontains=keyword)
|Q(restaurant__name__icontains=keyword)
|Q(color__name__icontains=keyword))
else:
products = Product.objects.filter(q)
else:
if keyword:
products = Product.objects.filter(Q(name__icontains=keyword)
|Q(categories__name__icontains=keyword)
|Q(restaurant__name__icontains=keyword)
|Q(color__name__icontains=keyword))
else:
products = Product.objects.filter().annotate(n_likes=Count('favoriteproduct'))
ps = to_json(products)
for p in ps:
try:
pics = Picture.objects.filter(product_id=p['id'])
except:
pics = None
if pics:
p['pictures'] = to_json(pics)
#s = []
# for product in products:
# items = Item.objects.filter(product_id=product.id)
# p = product.to_json()
# p['n_likes'] = product.n_likes
# p['n_items'] = len(items)
# p['items'] = [items[0].to_json()]
# fp = None
# try:
# fp = FavoriteProduct.objects.get(user_id=uid)
# except:
# pass
#
# p['like'] = fp.status if fp else False
# s.append(p)
return JsonResponse({'data':ps})
def post(self, req, *args, **kwargs):
authorization = req.META['HTTP_AUTHORIZATION']
token = authorization.replace("Bearer ", "")
data = get_data_from_token(token)
for key in req.POST:
params = json.loads(req.POST[key])
index = int(key.replace('info_', ''))
product = saveProduct(params)
image_status = params.get('image_status')
if image_status == 'unchange':
pass
elif image_status == 'changed' or image_status == 'add':
pictures = []
image = req.FILES.get('image%s'%index)
pictures.append({'index':0,'name':'', 'status':image_status, 'image':image})
processPictures(product, pictures)
# select default picture
pics = Picture.objects.filter(product_id=product.id)
product.fpath = getDefaultPicture(pics)
product.save()
return JsonResponse({'data':[]})
@method_decorator(csrf_exempt, name='dispatch')
class ProductFilterView(View):
def get(self, req, *args, **kwargs):
categories = to_json(Category.objects.all())
styles = to_json(Style.objects.all())
price_ranges = to_json(PriceRange.objects.all())
return JsonResponse({'categories': categories, 'styles': styles, 'price_ranges': price_ranges})
@method_decorator(csrf_exempt, name='dispatch')
class ProductView(View):
def get(self, req, *args, **kwargs):
''' get product detail with multiple items
'''
pid = int(kwargs.get('id'))
if pid:
try:
products = Product.objects.filter(id=pid)
except Exception as e:
return JsonResponse({'product':''})
else:
return JsonResponse({'product':''})
product = products[0]
pics = Picture.objects.filter(product_id=product.id)
ps = []
for pic in pics:
ps.append(to_json(pic))
p = to_json(product)
p['pictures'] = ps
return JsonResponse({'data':p})
def delete(self, req, *args, **kwargs):
pid = int(kwargs.get('id'))
if pid:
instance = Product.objects.get(id=pid)
instance.delete()
items = Product.objects.filter().order_by('-updated')
return JsonResponse({'data':to_json(items)})
return JsonResponse({'data':[]})
def post(self, req, *args, **kwargs):
params = req.POST
authorization = req.META['HTTP_AUTHORIZATION']
token = authorization.replace("Bearer ", "")
data = get_data_from_token(token)
if data and (data['username'] == 'admin' or data['utype'] == 'business'):
item = saveProduct(params)
item.categories.clear()
categories = params.get('categories').split(',')
for cat_id in categories:
try:
category = Category.objects.get(id=cat_id)
except:
category = None
item.categories.add(category)
n_pics = int(params.get('n_pictures'))
pictures = []
for i in range(n_pics):
name = params.get('name%s'%i)
status = params.get('image_status%s'%i)
image = req.FILES.get('image%s'%i)
pictures.append({'index':i,'name':name, 'status':status, 'image':image})
processPictures(item, pictures)
# select default picture
pics = Picture.objects.filter(product_id=item.id)
item.fpath = getDefaultPicture(pics)
item.save()
return JsonResponse({'tokenValid': True,'data':to_json(item)})
return JsonResponse({'tokenValid':False, 'data':''})
@method_decorator(csrf_exempt, name='dispatch')
class OrderView(View):
def getList(self, rid=None):
orders = []
try:
if rid:
orders = Order.objects.filter(restaurant_id=rid).order_by('created')
else:
orders = Order.objects.all().order_by('created')#.annotate(n_products=Count('product'))
r = to_json(orders)
for order in orders:
items = OrderItem.objects.filter(order_id=order.id)
ri = next((x for x in r if x['id'] == order.id), None)
ri['items'] = to_json(items)
ri['user']['username'] = order.user.username
except Exception as e:
logger.error('Get Order Exception:%s'%e)
return JsonResponse({'data':[]})
return JsonResponse({'data': r})
def get(self, req, *args, **kwargs):
cid = kwargs.get('id')
if cid:
cid = int(cid)
try:
item = Order.objects.get(id=cid)
return JsonResponse({'data':to_json(item)})
except Exception as e:
return JsonResponse({'data':''})
else:
rid = req.GET.get('restaurant_id')
return self.getList(rid)
def post(self, req, *args, **kwargs):
authorization = req.META['HTTP_AUTHORIZATION']
token = authorization.replace("Bearer ", "")
data = get_data_from_token(token)
if data:
uid = data['id']
ubody = req.body.decode('utf-8')
d = json.loads(ubody)
# dict: {'orders': [{'restaurant_id': 2, 'items': [{'pid': 1, 'name': '土豆排骨', 'price': '12.000', 'restaurant_id':
#2, 'quantity': 4}, {'pid': 2, 'name': '泡椒豆腐', 'price': '12.000', 'restaurant_id': 2, 'quantity': 2}]}],
#'user_id': 7}
orders = d.get("orders")
for data in orders:
rid = data['restaurant_id']
items = data['items']
order = Order()
try:
restaurant = Restaurant.objects.get(id=rid)
user = get_user_model().objects.get(id=uid)
order.restaurant = restaurant
order.user = user
order.save()
except Exception as e:
print(e)
if order.id:
for item in items:
orderItem = OrderItem()
orderItem.order = order
orderItem.product = Product.objects.get(id=item['pid'])
orderItem.quantity = item['quantity']
orderItem.product_name = orderItem.product.name
orderItem.price = orderItem.product.price
orderItem.save()
return JsonResponse({'success': True})
return JsonResponse({'success':False})
@method_decorator(csrf_exempt, name='dispatch')
class FavoriteProductView(View):
def get(self, req, *args, **kwargs):
uid = req.GET.get('user_id')
ps = Product.objects.annotate(n_likes=Count('favoriteproduct'))
favorites = []
for p in ps:
product = p.to_json()
product['n_likes'] = p.n_likes
fp = None
try:
fp = FavoriteProduct.objects.get(user_id=uid)
except:
pass
product['favorate'] = fp.status if fp else False
favorites.append(product)
return JsonResponse({'favorites':favorites})
def post(self, req, *args, **kwargs):
ubody = req.body.decode('utf-8')
d = json.loads(ubody)
uid = d.get("user_id")
pid = d.get("product_id")
try:
like = FavoriteProduct.objects.get(user_id=uid, product_id=pid)
like.delete()
except ObjectDoesNotExist:
like = FavoriteProduct()
like.product = Product.objects.get(id=pid)
like.user = get_user_model().objects.get(id=uid)
like.status = True
like.save()
return JsonResponse({'success':'true'})
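Hypothetical URL wiring for these views (route names are illustrative; the views read kwargs.get('id'), so the converter must be named id):

from django.urls import path
from commerce.views import RestaurantView, ProductView, ProductListView, OrderView

urlpatterns = [
    path('restaurants/', RestaurantView.as_view()),
    path('restaurants/<int:id>', RestaurantView.as_view()),
    path('products/', ProductListView.as_view()),
    path('products/<int:id>', ProductView.as_view()),
    path('orders/', OrderView.as_view()),
]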
file stats: avg_line_length 36.16 | max_line_length 126 | alphanum_fraction 0.531 | qsc: 2240 words, 21369 chars, 590 lines (remaining qsc_* quality-signal columns omitted)

=== record c07fe33cae576add35e02a5f464a4a05467459e8 ===
size: 5666 | ext: py | lang: Python | path: api/views.py
max_stars_repo: huatxu/erasmusbackend @ d8f86ee857a292a133106e75e9c920b905b5b10d | licenses: ["MIT"] | stars: null
max_issues_repo: huatxu/erasmusbackend | issues: null
max_forks_repo: huatxu/erasmusbackend | forks: null
from django.shortcuts import render
from api.models import Comida, Cerveza, Titulo, TipoComida
from django.http import Http404
from rest_framework.views import APIView
from rest_framework.response import Response
from rest_framework import serializers
import csv
import os
class CervezaSerializer(serializers.ModelSerializer):
class Meta:
model = Cerveza
fields = ['id', 'nombre', 'estilo', 'pais', 'pais_ingles', 'alcohol', 'color', 'amargor', 'descripcion', 'descripcion_ingles', 'disponible', 'imagen', 'artesanal', 'tipo', 'recomendada', 'formato', 'precio', 'formato_2', 'precio_2', 'formato_3', 'precio_3', 'sin_gluten', 'aparece', 'barril']
class ComidaList(APIView):
"""
List all snippets, or create a new snippet.
"""
def get(self, request, format=None):
comidas = Comida.objects.filter(disponible=True, tipo__aparece=True).order_by('tipo__orden', 'orden', 'nombre')
serializer = ComidaSerializer(comidas, many=True)
return Response(serializer.data)
class ComidaSerializer(serializers.ModelSerializer):
tipo = serializers.SerializerMethodField('get_tipo')
def get_tipo(self, obj):
return obj.tipo.nombre + '-' + obj.tipo.nombre_ingles
class Meta:
model = Comida
fields = ('id', 'nombre', 'nombre_ingles', 'descripcion', 'descripcion_ingles', 'tipo', 'precio', 'precio_2', 'altramuces', 'apio', 'cacahuete', 'crustaceo', 'gluten', 'huevo', 'lacteos', 'moluscos', 'mostaza', 'nueces', 'pescado', 'sesamo', 'soja', 'sulfitos', 'disponible')
class TituloSerializer(serializers.ModelSerializer):
class Meta:
model = Titulo
fields = ['titulo_1', 'titulo_1_ingles', 'titulo_2', 'titulo_2_ingles']
class CervezaList(APIView):
"""
List all snippets, or create a new snippet.
"""
def get(self, request, format=None):
cervezas = Cerveza.objects.all()
serializer = CervezaSerializer(cervezas, many=True)
titulos = Titulo.objects.first()
titulosSerializer = TituloSerializer(titulos)
return Response({"titulos": titulosSerializer.data, "cervezas": serializer.data})
def cast_bool(entry):
try:
if not entry:
return False
trues = ['sí', 'si']
return entry.lower() in trues
except Exception:
return False
def cast_price(entry):
result = entry
result = result.replace('€', '')
result = result.replace(',', '.')
result = result.strip()
if result:
return float(result)
return 0.0
def load_csv():
with open(f'{os.path.dirname(os.path.abspath(__file__))}/carta-cervezas.csv', newline='') as csvfile:
reader = csv.DictReader(csvfile)
for row in reader:
try:
cerveza = Cerveza.objects.create(
nombre=row['Nombre'],
estilo = row['Estilo'],
pais = row['País'],
pais_ingles = row['País Ingles'],
alcohol = row['Alcohol'],
color = row['Color'],
amargor = row['Amargor'],
descripcion = row['Descripcion'],
descripcion_ingles = row['Descripcion ingles'],
disponible = cast_bool(row['Disponible']),
imagen = row['Imagen'],
artesanal = cast_bool(row['Artesanal']),
tipo = row['Tipo'],
recomendada = cast_bool(row['Recomendada']),
formato = row['Formato'],
precio = cast_price(row['Precio']),
formato_2 = row['formato 2'],
precio_2 = cast_price(row['precio 2']),
formato_3 = row['formato 3'],
precio_3 = cast_price(row['precio 3']),
sin_gluten = cast_bool(row['Sin gluten']),
aparece = cast_bool(row['Aparece']),
barril = cast_bool(row['Barril'])
)
cerveza.save()
except Exception:
pass
with open(f'{os.path.dirname(os.path.abspath(__file__))}/carta-comida.csv', newline='') as csvfile:
reader = csv.DictReader(csvfile)
for row in reader:
try:
comida = Comida.objects.create(
nombre=row['Nombre'],
nombre_ingles=row['Nombre ingles'],
descripcion=row['Descripcion'],
descripcion_ingles=row['Descripcion ingles'],
tipo=row['Tipo'],
precio=cast_price(row['Precio']),
precio_2=cast_price(row['precio 2']),
altramuces=cast_bool(row['Altramuces']),
apio=cast_bool(row['Apio']),
cacahuete=cast_bool(row['Cacahuete']),
crustaceo=cast_bool(row['Crustaceo']),
gluten=cast_bool(row['Gluten']),
huevo=cast_bool(row['Huevo']),
lacteos=cast_bool(row['Lacteos']),
moluscos=cast_bool(row['Moluscos']),
mostaza=cast_bool(row['Mostaza']),
nueces=cast_bool(row['Nueces']),
pescado=cast_bool(row['Pescado']),
sesamo=cast_bool(row['Sesamo']),
soja=cast_bool(row['Soja']),
sulfitos=cast_bool(row['Sulfitos']),
disponible=cast_bool(row['Disponible'])
)
comida.save()
except Exception as exc:
pass
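The CSV casting helpers above are easy to sanity-check in isolation:

assert cast_bool('Sí') is True
assert cast_bool('no') is False
assert cast_price(' 3,50 € ') == 3.5
assert cast_price('') == 0.0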
file stats: avg_line_length 39.90 | max_line_length 300 | alphanum_fraction 0.557 | qsc: 563 words, 5666 chars, 142 lines (remaining qsc_* quality-signal columns omitted)

=== record c08a254cca4494b2d1aa73495456b23d2cb83ea5 ===
size: 390 | ext: py | lang: Python | path: 1_Ejemplo_practico_ECG/utils.py
max_stars_repo: IEEE-UPIBI/Comunicacion-Serial-Python-Arduino @ 806916a5d47e8d29933e1402296e2ca6d5d5a79e | licenses: ["MIT"] | stars: null
max_issues_repo: IEEE-UPIBI/Comunicacion-Serial-Python-Arduino | issues: 1 (2021-04-23)
max_forks_repo path: 2_Ejemplo_practico_SensorMPU6050/utils.py | max_forks_repo: IEEE-UPIBI/Comunicacion-Serial-Python-Arduino | forks: null
import serial
import time


### FUNCTIONS ####

#### SERIAL COMMUNICATION ####
def arduino_communication(COM="COM5", BAUDRATE=9600, TIMEOUT=1):
    """ Initializes the connection with the Arduino board; returns None on failure. """
    arduino = None  # ensure the name exists even if opening the port fails
    try:
        arduino = serial.Serial(COM, BAUDRATE, timeout=TIMEOUT)
        time.sleep(2)  # give the board time to reset after the port opens
    except serial.SerialException:
        print("Error connecting to the serial port")
    return arduino
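Example reader loop (the port name and baud rate must match the Arduino sketch):

arduino = arduino_communication(COM="COM5", BAUDRATE=9600)
if arduino is not None:
    for _ in range(5):
        print(arduino.readline().decode(errors='ignore').strip())
    arduino.close()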
file stats: avg_line_length 16.96 | max_line_length 64 | alphanum_fraction 0.628 | qsc: 42 words, 390 chars, 22 lines (remaining qsc_* quality-signal columns omitted)

=== record c08b025b2f074208a6371fa035f6cf38f392405a ===
size: 3595 | ext: py | lang: Python | path: trav_lib/visualize.py
max_stars_repo: thwhitfield/trav_lib @ 46185f5545d958eba1538c769a98d07908dd0d19 | licenses: ["MIT"] | stars: null
max_issues_repo: thwhitfield/trav_lib | issues: null
max_forks_repo: thwhitfield/trav_lib | forks: null
"""Classes and functions used for data visualization"""
import pandas as pd
import numpy as np
import seaborn as sns
import matplotlib.pyplot as plt
def plot_correlation_matrix_heat_map(df,label,qty_fields=10):
df = pd.concat([df[label],df.drop(label,axis=1)],axis=1)
correlation_matrix = df.corr()
index = correlation_matrix.sort_values(label, ascending=False).index
correlation_matrix = correlation_matrix[index].sort_values(label,ascending=False)
fig,ax = plt.subplots()
fig.set_size_inches((10,10))
sns.heatmap(correlation_matrix.iloc[:qty_fields,:qty_fields],annot=True,fmt='.2f',ax=ax)
# Code added due to bug in matplotlib 3.1.1
bottom, top = ax.get_ylim()
ax.set_ylim(bottom + .5, top - .5)
return(fig,ax)
def plot_log_hist(s,bin_factor=1,min_exp=None):
"""Plot 2 histograms with log x scales, one for positive values & one for negative values.
Bin_factor is used to scale how many bins to use (1 is default and corresponds to
one bin per order of magnitude. Higher than 1 will skew the bins away from even powers of
10).
Parameters
----------
s: pandas series (generally using df[col])
Series or column of dataframe to analyze
bin_factor: int
Default 1, used to scale how many bins to use
min_exp: int
The minimum exponent to use in creating bins & plotting.
This can be set manually for cases where you want a specific
minimum value to be shown.
Returns
-------
fig, (ax1,ax2): matplotlib fig and ax objects
"""
# Split series into positive & negative components
s_pos = s[s >= 0]
s_neg = s[s < 0].abs()
# Not the best way to deal with this, but this was the easiest solution for now.
# TODO Fix this code to deal with no negative values or no positive values more appropriately
if s_neg.shape[0] == 0:
s_neg.loc[0] = 1
if s_pos.shape[0] == 0:
s_pos.loc[0] = 1
# Calculate an appropriate min_exp if none provided
if min_exp is None:
threshold = s_pos.shape[0] - (s_pos==0).sum()
for i in range(10):
n_betw = s_pos[s_pos!=0].between(0,10**-i).sum()
if not (n_betw / threshold) > .1:
min_exp = -i
break
# Clip values to the 10**min_exp so that they are included in the histograms (if
# this isn't done then values which are 0 will be excluded from the histogram)
s_pos = s_pos.clip(lower=10**min_exp)
s_neg = s_neg.clip(lower=10**min_exp)
# Calculate the lowest integer exponent which encompasses all the positive and negative values
pos_max = int(np.ceil(np.log10(max(s_pos))))
neg_max = int(np.ceil(np.log10(max(s_neg))))
# Use that for both negative & positive values
plot_max = max(pos_max,neg_max)
# Create the bins (bin spacing is logarithmic)
bins = np.logspace(min_exp,plot_max,(plot_max+1)*bin_factor)
fig,(ax1,ax2) = plt.subplots(nrows=1,ncols=2,sharey=True)
fig.set_size_inches((10,5))
s_neg.hist(bins=bins,ax=ax1)
ax1.set_xscale('log')
ax1.set_title('Distribution of Negative Values')
ax1.set_xlabel('Negative values')
s_pos.hist(bins=bins,ax=ax2)
ax2.set_xscale('log')
ax2.set_title('Distribution of Positive Values')
ax2.set_xlabel('Positive Values')
# Invert axis so that values are increasingly negative from right to left.
# Decrease the spacing between the two subplots
ax1.invert_xaxis()
plt.subplots_adjust(wspace=.02)
return(fig,(ax1,ax2))
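# Hedged usage sketch (added): exercise plot_log_hist on a synthetic series
# containing both signs; only pandas/numpy/matplotlib from the imports above
# are required.
#   s = pd.Series(np.concatenate([10 ** np.random.uniform(-2, 3, 500),
#                                 -(10 ** np.random.uniform(-1, 2, 200))]))
#   fig, (ax1, ax2) = plot_log_hist(s)
#   plt.show()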
| 35.594059
| 97
| 0.662309
| 574
| 3,595
| 4.026132
| 0.364112
| 0.019039
| 0.011683
| 0.02077
| 0.098659
| 0.043271
| 0.043271
| 0.043271
| 0
| 0
| 0
| 0.026899
| 0.234771
| 3,595
| 101
| 98
| 35.594059
| 0.813159
| 0.41363
| 0
| 0
| 0
| 0
| 0.050324
| 0
| 0
| 0
| 0
| 0.009901
| 0
| 1
| 0.041667
| false
| 0
| 0.083333
| 0
| 0.125
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
c08cb3b6fdb628373adc1c5e8da4f386b0294fba
| 1,828
|
py
|
Python
|
test/test_configeditor.py
|
ta-assistant/Admin-CLI
|
1c03ede0e09d8ddc270646937aa7af463c55f1f5
|
[
"MIT"
] | 1
|
2021-07-22T15:43:02.000Z
|
2021-07-22T15:43:02.000Z
|
test/test_configeditor.py
|
ta-assistant/Admin-CLI
|
1c03ede0e09d8ddc270646937aa7af463c55f1f5
|
[
"MIT"
] | 28
|
2021-05-15T08:18:21.000Z
|
2021-08-02T06:12:30.000Z
|
test/test_configeditor.py
|
ta-assistant/TA-CLI
|
1c03ede0e09d8ddc270646937aa7af463c55f1f5
|
[
"MIT"
] | null | null | null |
import unittest
import os, sys, inspect, json
currentdir = os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe())))
parentdir = os.path.dirname(currentdir)
sys.path.insert(0, parentdir)
from lib.file_management.configeditor import ConfigEditor
from lib.file_management.file_management_lib import DirManagement
class TestSendData(unittest.TestCase):
def setUp(self) -> None:
self.path = os.path.join(parentdir,"ta")
DirManagement.create_dir(self.path)
workdata = {
"workDraft": {
"outputDraft": [
"ID",
"param1",
"param2",
"comment"
],
"fileDraft": "{ID}_test.py"
},
"scores": [
{
"ID": "6310545000",
"param1": "100",
"param2": "print('hello')",
"comment": "good"
}]
}
with open(os.path.join(self.path, "work.json"), "w") as create:
json.dump(workdata, create)
self.con = ConfigEditor('testWork2', parentdir)
self.con.writeconfig()
return super().setUp()
def test_writeconfig(self):
"""
return None
"""
self.assertIsNone(self.con.writeconfig())
def test_readconfig(self):
"""
return str
"""
self.assertIs(type(self.con.readconfig()), dict)
def test_ishaveconfig(self):
"""
return None
"""
self.assertIsNone(self.con.ishaveconfig())
def test_checkdata(self):
"""
return None
"""
self.assertIsNone(self.con.checkdata())
def tearDown(self) -> None:
"""
return None
"""
DirManagement.remove_dir(os.path.join(parentdir,"ta"))
return super().tearDown()
if __name__ == '__main__':
unittest.main()
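# Hedged note (added): given the sys.path manipulation above, these tests are
# meant to be run from the repository checkout, e.g.
#   python -m unittest test.test_configeditor
#   python test/test_configeditor.py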
| 25.041096
| 86
| 0.561816
| 180
| 1,828
| 5.6
| 0.405556
| 0.035714
| 0.029762
| 0.053571
| 0.151786
| 0.110119
| 0.110119
| 0
| 0
| 0
| 0
| 0.014786
| 0.297046
| 1,828
| 73
| 87
| 25.041096
| 0.76965
| 0.031729
| 0
| 0
| 0
| 0
| 0.091239
| 0
| 0
| 0
| 0
| 0
| 0.085106
| 1
| 0.12766
| false
| 0
| 0.085106
| 0
| 0.276596
| 0.021277
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
c08e8f408c1440f68bb49f4c21e145acaad7cc8e
| 3,466
|
py
|
Python
|
TwitterCode/crawler.py
|
aghriss/CS5502_project
|
68403f38ef26067360cb22404cdabe0d0543097a
|
[
"MIT"
] | null | null | null |
TwitterCode/crawler.py
|
aghriss/CS5502_project
|
68403f38ef26067360cb22404cdabe0d0543097a
|
[
"MIT"
] | null | null | null |
TwitterCode/crawler.py
|
aghriss/CS5502_project
|
68403f38ef26067360cb22404cdabe0d0543097a
|
[
"MIT"
] | null | null | null |
'''
Twitter Crawler to get tweets and user data
'''
import tweepy
import json
import os
import time
def get_counts_quantile(tweets):
"""Incomplete in the original source: append() was called with no argument.
Collecting a per-tweet engagement count is a plausible intent (assumption)."""
counts = []
for t in tweets:
counts.append(getattr(t, "retweet_count", 0))  # assumed field
return counts
def save_tweet(result):
"""Function to save tweepy result status"""
pass
def save_user(result_set):
"""Function to save a tweepy set of result statuses"""
pass
class TweetCrawler():
def __init__(self, credentials_path, save_path, location_id=None):
assert os.path.exists(save_path)
assert os.path.exists(credentials_path)
self.save_path = save_path
self.location_id = 23424977  # default WOEID (United States)
try:
with open(credentials_path,"r") as f:
creds = json.load(f)
f.close()
self.api = tweepy.API(tweepy.AppAuthHandler(creds['API_KEY'], creds['SECRET_KEY']))
except Exception as exc:
raise RuntimeError("Auth error, check credentials and connection") from exc
if location_id:
self.location_id = location_id
def crawl(self):
location, trends = self.get_trends()
for trend in trends:
query = trend['query']
trending = self.get_trending_tweets(query)
non_trending = self.get_untrending_tweets(query)
self.store(trending, trending=True)
self.store(non_trending, trending=False)  # NOTE: store() is not defined anywhere in this class
def get_trends(self,):
trends_dict = self.api.trends_place(self.location_id)[0]
location_name = trends_dict['locations'][0]['name']
non_empty_trends = list(filter(lambda x: x['tweet_volume'] is not None,
trends_dict['trends']))
print("Retrieved %i for location: %s"%(len(non_empty_trends), location_name))
return location_name, non_empty_trends
def get_trending_tweets(self, query):
popular_tweets = self.api.search(query, count=500, result_type="popular")
tuples = []
for popular in popular_tweets:
user_timeline = self.api.user_timeline(popular.author.id, count=200)
tuples.append([popular, user_timeline])
return tuples
def get_untrending_tweets(self, query):
popular_tweets = self.api.search(query, count=500, result_type="recent")
tuples = []
for popular in popular_tweets:
user_timeline = self.get_user(popular.author.id)
tuples.append([popular, user_timeline])
return tuples
def get_user(self, user_id):
time.sleep(0.1)
return self.api.user_timeline(user_id, count=200)
def save_user(self, user):
print("Saving user %s"%user.id_str)
json.dump(user._json, open(os.path.join(self.save_path, "user_"+user.id_str+".json"), 'w'))
def save_tweet(self, tweet):
print("Saving tweet %s"%tweet.id_str)
json.dump(tweet._json, open(os.path.join(self.save_path, "tweet_"+ tweet.id_str+".json"), 'w'))
def rate_status(self):
state = self.api.rate_limit_status()
limits = state['resources']['statuses']
return {'tweet':limits['/statuses/show/:id']['remaining'],
'users': limits['/statuses/user_timeline']['remaining']}
def get_tweet(self, tweet_id):
time.sleep(0.1)
return self.api.get_status(tweet_id)
#crawler = TweetCrawler("twitter_credentials.json", './data')
#self=crawler
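# Hedged usage sketch (added): paths are assumptions; crawl() will consume API
# rate limits quickly at count=500, so check rate_status() first.
#   crawler = TweetCrawler("twitter_credentials.json", "./data")
#   print(crawler.rate_status())
#   crawler.crawl()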
| 32.698113
| 103
| 0.611656
| 430
| 3,466
| 4.727907
| 0.269767
| 0.027546
| 0.017708
| 0.019675
| 0.225283
| 0.212494
| 0.212494
| 0.212494
| 0.157403
| 0.062961
| 0
| 0.010313
| 0.272649
| 3,466
| 105
| 104
| 33.009524
| 0.796113
| 0.057992
| 0
| 0.164384
| 0
| 0
| 0.08567
| 0.007088
| 0
| 0
| 0
| 0
| 0.027397
| 1
| 0.178082
| false
| 0.027397
| 0.054795
| 0
| 0.328767
| 0.041096
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
c08e9da0f8073946d9eb1f38656fc0912b347134
| 2,206
|
py
|
Python
|
instruments/swap.py
|
neoyung/IrLib
|
942793c49a477c9f5747410be74daf868391f289
|
[
"MIT"
] | 1
|
2021-10-04T03:15:50.000Z
|
2021-10-04T03:15:50.000Z
|
instruments/swap.py
|
neoyung/IrLib
|
942793c49a477c9f5747410be74daf868391f289
|
[
"MIT"
] | null | null | null |
instruments/swap.py
|
neoyung/IrLib
|
942793c49a477c9f5747410be74daf868391f289
|
[
"MIT"
] | null | null | null |
from irLib.instruments.instrument import instrument
from irLib.helpers.schedule import period
from irLib.instruments.legs import fixLeg, floatLeg
class swap(instrument):
def __init__(self, tradeDate, spotLag=period(0, 'day'), position='long', *legs):
super().__init__(tradeDate, spotLag, position)
self.legs = legs
def setPricingEngine(self, discountCurve):
self.discountCurve = discountCurve
self.pricingEngine = self.discountCurve
for leg in self.legs:
leg.setPricingEngine(discountCurve)
def calculateNPV(self, day):
super().calculateNPV()
NPV = 0
for leg in self.legs:
NPV += leg.calculateNPV(day)
return NPV * self.longShort
def isExpired(self, day):
return all([leg.isExpired(day) for leg in self.legs])
class vanillaSwap(swap):
def __init__(self, tradeDate, payer, fixSchedule, floatSchedule, floatingCurve, discountCurve=None, spotLag=period(0, 'day'), swapRate=None):
assert payer in ('payer', 'receiver'), 'payer or receiver?'
self.payer = payer
self.position = 'long' if self.payer == 'payer' else 'short'
self.floatLeg = floatLeg(
tradeDate, floatingCurve, floatSchedule, spotLag)
self.fixLeg = fixLeg(tradeDate, 1., fixSchedule, spotLag)
super().__init__(tradeDate, spotLag, self.position, self.fixLeg, self.floatLeg)
if swapRate is None:
assert discountCurve is not None, 'need discount curve to determine swap rate'
super().setPricingEngine(discountCurve)
self.dayCount = self.discountCurve.dayCount
self.tenor = self.dayCount.getYearFrac(min(self.floatLeg.schedule.startDate, self.fixLeg.schedule.startDate),
max(self.floatLeg.schedule.terminationDate, self.fixLeg.schedule.terminationDate))
self.swapRate = self.floatLeg.calculateNPV(
self.tradeDate) / self.fixLeg.calculateNPV(self.tradeDate)
else:
self.swapRate = swapRate
self.fixLeg.rate = self.swapRate
self.fixLeg.position = 'short'
self.fixLeg.longShort = -1
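# Hedged note (added): with the fixed leg priced at a unit rate, the par rate
# computed above is the standard ratio S = PV_float / PV_annuity, i.e. the
# fixed rate that sets the swap's NPV to zero at the trade date.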
| 41.622642
| 145
| 0.655938
| 234
| 2,206
| 6.115385
| 0.273504
| 0.055905
| 0.016771
| 0.025157
| 0.033543
| 0
| 0
| 0
| 0
| 0
| 0
| 0.002998
| 0.24388
| 2,206
| 52
| 146
| 42.423077
| 0.854916
| 0
| 0
| 0.047619
| 0
| 0
| 0.046238
| 0
| 0
| 0
| 0
| 0
| 0.047619
| 1
| 0.119048
| false
| 0
| 0.071429
| 0.02381
| 0.285714
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
c08f4ab3e25ce0f369e7d00947095aeb1fb9b437
| 21,083
|
py
|
Python
|
skhubness/neighbors/lsh.py
|
VarIr/scikit-hubness
|
6eaeedda2c4b52bb7bf2553b3c5b04a076287ae3
|
[
"BSD-3-Clause"
] | 33
|
2019-08-05T12:29:19.000Z
|
2022-03-08T18:48:28.000Z
|
skhubness/neighbors/lsh.py
|
AndreasPhilippi/scikit-hubness
|
6eaeedda2c4b52bb7bf2553b3c5b04a076287ae3
|
[
"BSD-3-Clause"
] | 84
|
2019-07-12T09:05:42.000Z
|
2022-03-31T08:50:15.000Z
|
skhubness/neighbors/lsh.py
|
AndreasPhilippi/scikit-hubness
|
6eaeedda2c4b52bb7bf2553b3c5b04a076287ae3
|
[
"BSD-3-Clause"
] | 9
|
2019-09-26T11:03:04.000Z
|
2021-07-01T08:43:11.000Z
|
# -*- coding: utf-8 -*-
# SPDX-License-Identifier: BSD-3-Clause
# PEP 563: Postponed Evaluation of Annotations
from __future__ import annotations
from functools import partial
import multiprocessing as mp
from typing import Tuple, Union
import warnings
import numpy as np
from sklearn.base import BaseEstimator
from sklearn.metrics import euclidean_distances, pairwise_distances
from sklearn.metrics.pairwise import cosine_distances
from sklearn.utils.validation import check_is_fitted, check_array, check_X_y
try:
import puffinn
except ImportError:
puffinn = None # pragma: no cover
try:
import falconn
except ImportError:
falconn = None # pragma: no cover
from tqdm.auto import tqdm
from .approximate_neighbors import ApproximateNearestNeighbor
from ..utils.check import check_n_candidates
__all__ = ['FalconnLSH', 'PuffinnLSH', ]
class PuffinnLSH(BaseEstimator, ApproximateNearestNeighbor):
""" Wrap Puffinn LSH for scikit-learn compatibility.
Parameters
----------
n_candidates: int, default = 5
Number of neighbors to retrieve
metric: str, default = 'euclidean'
Distance metric, allowed are "angular", "jaccard".
Other metrics are partially supported, such as 'euclidean', 'sqeuclidean'.
In these cases, 'angular' distances are used to find the candidate set
of neighbors with LSH among all indexed objects, and (squared) Euclidean
distances are subsequently only computed for the candidates.
memory: int, default = None
Max memory usage. If None, determined heuristically.
recall: float, default = 0.90
Probability of finding the true nearest neighbors among the candidates
n_jobs: int, default = 1
Number of parallel jobs
verbose: int, default = 0
Verbosity level. If verbose > 0, show tqdm progress bar on indexing and querying.
Attributes
----------
valid_metrics:
List of valid distance metrics/measures
"""
valid_metrics = ["angular", "cosine", "euclidean", "sqeuclidean", "minkowski",
"jaccard",
]
metric_map = {'euclidean': 'angular',
'sqeuclidean': 'angular',
'minkowski': 'angular',
'cosine': 'angular',
}
def __init__(self, n_candidates: int = 5,
metric: str = 'euclidean',
memory: int = None,
recall: float = 0.9,
n_jobs: int = 1,
verbose: int = 0,
):
if puffinn is None: # pragma: no cover
raise ImportError(f'Please install the `puffinn` package, before using this class:\n'
f'$ git clone https://github.com/puffinn/puffinn.git\n'
f'$ cd puffinn\n'
f'$ python3 setup.py build\n'
f'$ pip install .\n') from None
super().__init__(n_candidates=n_candidates,
metric=metric,
n_jobs=n_jobs,
verbose=verbose,
)
self.memory = memory
self.recall = recall
def fit(self, X, y=None) -> PuffinnLSH:
""" Build the puffinn LSH index and insert data from X.
Parameters
----------
X: np.array
Data to be indexed
y: any
Ignored
Returns
-------
self: Puffinn
An instance of Puffinn with a built index
"""
if y is None:
X = check_array(X)
else:
X, y = check_X_y(X, y)
self.y_train_ = y
if self.metric not in self.valid_metrics:
warnings.warn(f'Invalid metric "{self.metric}". Using "euclidean" instead')
self.metric = 'euclidean'
try:
self._effective_metric = self.metric_map[self.metric]
except KeyError:
self._effective_metric = self.metric
# Larger memory means many iterations (time-recall trade-off)
memory = max(np.multiply(*X.shape) * 8 * 500, 1024**2)
if self.memory is not None:
memory = max(self.memory, memory)
# Construct the index
index = puffinn.Index(self._effective_metric,
X.shape[1],
memory,
)
disable_tqdm = not self.verbose
for v in tqdm(X, desc='Indexing', disable=disable_tqdm):
index.insert(v.tolist())
index.rebuild()
self.index_ = index
self.n_indexed_ = X.shape[0]
self.X_indexed_norm_ = np.linalg.norm(X, ord=2, axis=1).reshape(-1, 1)
return self
def kneighbors(self, X=None, n_candidates=None, return_distance=True) -> Union[Tuple[np.array, np.array], np.array]:
""" Retrieve k nearest neighbors.
Parameters
----------
X: np.array or None, optional, default = None
Query objects. If None, search among the indexed objects.
n_candidates: int or None, optional, default = None
Number of neighbors to retrieve.
If None, use the value passed during construction.
return_distance: bool, default = True
If return_distance, will return distances and indices to neighbors.
Else, only return the indices.
"""
check_is_fitted(self, 'index_')
index = self.index_
if n_candidates is None:
n_candidates = self.n_candidates
n_candidates = check_n_candidates(n_candidates)
# For compatibility reasons, as each sample is considered as its own
# neighbor, one extra neighbor will be computed.
if X is None:
n_query = self.n_indexed_
X = np.array([index.get(i) for i in range(n_query)])
search_from_index = True
else:
X = check_array(X)
n_query = X.shape[0]
search_from_index = False
dtype = X.dtype
# If chosen metric is not among the natively supported ones, reorder the neighbors
reorder = self.metric not in ('angular', 'cosine', 'jaccard')
# If fewer candidates than required are found for a query,
# we save index=-1 and distance=NaN
neigh_ind = -np.ones((n_query, n_candidates),
dtype=np.int32)
if return_distance or reorder:
neigh_dist = np.empty_like(neigh_ind,
dtype=dtype) * np.nan
metric = 'cosine' if self.metric == 'angular' else self.metric
disable_tqdm = not self.verbose
if search_from_index: # search indexed against indexed
for i in tqdm(range(n_query),
desc='Querying',
disable=disable_tqdm,
):
# Find the approximate nearest neighbors.
# Each of the true `n_candidates` nearest neighbors
# has at least `recall` chance of being found.
ind = index.search_from_index(i, n_candidates, self.recall, )
neigh_ind[i, :len(ind)] = ind
if return_distance or reorder:
X_neigh_denormalized = \
X[ind] * self.X_indexed_norm_[ind].reshape(len(ind), -1)
neigh_dist[i, :len(ind)] = pairwise_distances(X[i:i+1, :] * self.X_indexed_norm_[i],
X_neigh_denormalized,
metric=metric,
)
else: # search new query against indexed
for i, x in tqdm(enumerate(X),
desc='Querying',
disable=disable_tqdm,
):
# Find the approximate nearest neighbors.
# Each of the true `n_candidates` nearest neighbors
# has at least `recall` chance of being found.
ind = index.search(x.tolist(),
n_candidates,
self.recall,
)
neigh_ind[i, :len(ind)] = ind
if return_distance or reorder:
X_neigh_denormalized =\
np.array([index.get(i) for i in ind]) * self.X_indexed_norm_[ind].reshape(len(ind), -1)
neigh_dist[i, :len(ind)] = pairwise_distances(x.reshape(1, -1),
X_neigh_denormalized,
metric=metric,
)
if reorder:
sort = np.argsort(neigh_dist, axis=1)
neigh_dist = np.take_along_axis(neigh_dist, sort, axis=1)
neigh_ind = np.take_along_axis(neigh_ind, sort, axis=1)
if return_distance:
return neigh_dist, neigh_ind
else:
return neigh_ind
class FalconnLSH(ApproximateNearestNeighbor):
"""Wrapper for using falconn LSH
Falconn is an approximate nearest neighbor library,
that uses multiprobe locality-sensitive hashing.
Parameters
----------
n_candidates: int, default = 5
Number of neighbors to retrieve
radius: float or None, optional, default = None
Retrieve neighbors within this radius.
Can be negative: See Notes.
metric: str, default = 'euclidean'
Distance metric, allowed are "angular", "euclidean", "manhattan", "hamming", "dot"
num_probes: int, default = 50
The number of buckets the query algorithm probes.
The higher number of probes is, the better accuracy one gets,
but the slower queries are.
n_jobs: int, default = 1
Number of parallel jobs
verbose: int, default = 0
Verbosity level. If verbose > 0, show tqdm progress bar on indexing and querying.
Attributes
----------
valid_metrics:
List of valid distance metrics/measures
Notes
-----
From the falconn docs: radius can be negative, and for the distance function
'negative_inner_product' it actually makes sense.
"""
valid_metrics = ['euclidean', 'l2', 'minkowski', 'squared_euclidean', 'sqeuclidean',
'cosine', 'neg_inner', 'NegativeInnerProduct']
def __init__(self, n_candidates: int = 5, radius: float = 1., metric: str = 'euclidean', num_probes: int = 50,
n_jobs: int = 1, verbose: int = 0):
if falconn is None: # pragma: no cover
raise ImportError(f'Please install the `falconn` package, before using this class:\n'
f'$ pip install falconn') from None
super().__init__(n_candidates=n_candidates,
metric=metric,
n_jobs=n_jobs,
verbose=verbose,
)
self.num_probes = num_probes
self.radius = radius
def fit(self, X: np.ndarray, y: np.ndarray = None) -> FalconnLSH:
""" Setup the LSH index from training data.
Parameters
----------
X: np.array
Data to be indexed
y: any
Ignored
Returns
-------
self: FalconnLSH
An instance of LSH with a built index
"""
X = check_array(X, dtype=[np.float32, np.float64])
if self.metric in ['euclidean', 'l2', 'minkowski']:
self.metric = 'euclidean'
distance = falconn.DistanceFunction.EuclideanSquared
elif self.metric in ['squared_euclidean', 'sqeuclidean']:
self.metric = 'sqeuclidean'
distance = falconn.DistanceFunction.EuclideanSquared
elif self.metric in ['cosine', 'NegativeInnerProduct', 'neg_inner']:
self.metric = 'cosine'
distance = falconn.DistanceFunction.NegativeInnerProduct
else:
warnings.warn(f'Invalid metric "{self.metric}". Using "euclidean" instead')
self.metric = 'euclidean'
distance = falconn.DistanceFunction.EuclideanSquared
# Set up the LSH index
lsh_construction_params = falconn.get_default_parameters(*X.shape,
distance=distance)
lsh_index = falconn.LSHIndex(lsh_construction_params)
lsh_index.setup(X)
self.X_train_ = X
self.y_train_ = y
self.index_ = lsh_index
return self
def kneighbors(self, X: np.ndarray = None,
n_candidates: int = None,
return_distance: bool = True) -> Union[Tuple[np.array, np.array], np.array]:
""" Retrieve k nearest neighbors.
Parameters
----------
X: np.array or None, optional, default = None
Query objects. If None, search among the indexed objects.
n_candidates: int or None, optional, default = None
Number of neighbors to retrieve.
If None, use the value passed during construction.
return_distance: bool, default = True
If return_distance, will return distances and indices to neighbors.
Else, only return the indices.
"""
check_is_fitted(self, ["index_", 'X_train_'])
# Check the n_neighbors parameter
if n_candidates is None:
n_candidates = self.n_candidates
elif n_candidates <= 0:
raise ValueError(f"Expected n_neighbors > 0. Got {n_candidates:d}")
else:
if not np.issubdtype(type(n_candidates), np.integer):
raise TypeError(f"n_neighbors does not take {type(n_candidates)} value, enter integer value")
if X is not None:
X = check_array(X, dtype=self.X_train_.dtype)
query_is_train = False
X = check_array(X, accept_sparse='csr')
n_retrieve = n_candidates
else:
query_is_train = True
X = self.X_train_
# Include an extra neighbor to account for the sample itself being
# returned, which is removed later
n_retrieve = n_candidates + 1
# Configure the LSH query objects (one per parallel worker)
query = self.index_.construct_query_pool(num_probes=self.num_probes,
num_query_objects=self.n_jobs)
if return_distance:
if self.metric == 'euclidean':
distances = partial(euclidean_distances, squared=False)
elif self.metric == 'sqeuclidean':
distances = partial(euclidean_distances, squared=True)
elif self.metric == 'cosine':
distances = cosine_distances
else:
raise ValueError(f'Internal error: unrecognized metric "{self.metric}"')
# Allocate memory for neighbor indices (and distances)
n_objects = X.shape[0]
neigh_ind = np.empty((n_objects, n_candidates), dtype=np.int32)
if return_distance:
neigh_dist = np.empty_like(neigh_ind, dtype=X.dtype)
# If verbose, show progress bar on the search loop
disable_tqdm = not self.verbose
if self.n_jobs > 1:
def pquery(ix):
i, x = ix
return i, np.array(query.find_k_nearest_neighbors(x, k=n_retrieve))
with mp.pool.ThreadPool(processes=self.n_jobs) as pool:
i_knn = list(tqdm(pool.imap_unordered(func=pquery,
iterable=enumerate(X),
chunksize=10),
disable=False if self.verbose else True,
total=X.shape[0],
unit='vectors',
desc='LSH query'))
for i, knn in tqdm(i_knn, desc='Collecting results', disable=disable_tqdm):
if query_is_train:
knn = knn[1:]
neigh_ind[i, :knn.size] = knn
if return_distance:
neigh_dist[i, :knn.size] = distances(X[i].reshape(1, -1), self.X_train_[knn])
# LSH may yield fewer neighbors than n_neighbors.
# We set distances to NaN, and indices to -1
if knn.size < n_candidates:
neigh_ind[i, knn.size:] = -1
if return_distance:
neigh_dist[i, knn.size:] = np.nan
else:
for i, x in tqdm(enumerate(X),
desc='LSH',
disable=disable_tqdm,
):
knn = np.array(query.find_k_nearest_neighbors(x, k=n_retrieve))
if query_is_train:
knn = knn[1:]
neigh_ind[i, :knn.size] = knn
if return_distance:
neigh_dist[i, :knn.size] = distances(x.reshape(1, -1), self.X_train_[knn])
# LSH may yield fewer neighbors than n_neighbors.
# We set distances to NaN, and indices to -1
if knn.size < n_candidates:
neigh_ind[i, knn.size:] = -1
if return_distance:
neigh_dist[i, knn.size:] = np.nan
if return_distance:
return neigh_dist, neigh_ind
else:
return neigh_ind
def radius_neighbors(self, X: np.ndarray = None,
radius: float = None,
return_distance: bool = True) -> Union[Tuple[np.array, np.array], np.array]:
""" Retrieve neighbors within a certain radius.
Parameters
----------
X: np.array or None, optional, default = None
Query objects. If None, search among the indexed objects.
radius: float or None, optional, default = None
Retrieve neighbors within this radius.
Can be negative: See Notes.
return_distance: bool, default = True
If return_distance, will return distances and indices to neighbors.
Else, only return the indices.
Notes
-----
From the falconn docs: radius can be negative, and for the distance function
'negative_inner_product' it actually makes sense.
"""
check_is_fitted(self, ["index_", 'X_train_'])
# Constructing a query object
query = self.index_.construct_query_object()
query.set_num_probes(self.num_probes)
if return_distance:
if self.metric == 'euclidean':
distances = partial(euclidean_distances, squared=False)
elif self.metric == 'sqeuclidean':
distances = partial(euclidean_distances, squared=True)
elif self.metric == 'cosine':
distances = cosine_distances
else:
raise ValueError(f'Internal error: unrecognized metric "{self.metric}"')
if X is not None:
query_is_train = False
X = check_array(X, accept_sparse='csr', dtype=self.X_train_.dtype)
else:
query_is_train = True
X = self.X_train_
if radius is None:
radius = self.radius
# LSH uses squared Euclidean internally
if self.metric == 'euclidean':
radius *= radius
# Add a small number to imitate <= threshold
radius += 1e-7
# Allocate memory for neighbor indices (and distances)
n_objects = X.shape[0]
neigh_ind = np.empty(n_objects, dtype='object')
if return_distance:
neigh_dist = np.empty_like(neigh_ind)
# If verbose, show progress bar on the search loop
disable_tqdm = not self.verbose
for i, x in tqdm(enumerate(X),
desc='LSH',
disable=disable_tqdm,
):
knn = np.array(query.find_near_neighbors(x, threshold=radius))
if len(knn) == 0:
knn = np.array([], dtype=int)
else:
if query_is_train:
knn = knn[1:]
neigh_ind[i] = knn
if return_distance:
if len(knn):
neigh_dist[i] = distances(x.reshape(1, -1), self.X_train_[knn]).ravel()
else:
neigh_dist[i] = np.array([])
if return_distance:
return neigh_dist, neigh_ind
else:
return neigh_ind
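# Hedged usage sketch (added; requires the optional `puffinn` package): index
# random vectors and query approximate neighbours through the sklearn-style API.
#   import numpy as np
#   X = np.random.rand(1000, 32).astype(np.float32)
#   lsh = PuffinnLSH(n_candidates=5, metric="euclidean")
#   dist, ind = lsh.fit(X).kneighbors(X[:3], return_distance=True)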
| 39.481273
| 120
| 0.54774
| 2,343
| 21,083
| 4.784464
| 0.160905
| 0.035326
| 0.025691
| 0.013113
| 0.572792
| 0.532649
| 0.525335
| 0.506601
| 0.476717
| 0.451115
| 0
| 0.006763
| 0.368828
| 21,083
| 533
| 121
| 39.555347
| 0.83565
| 0.258834
| 0
| 0.449511
| 0
| 0
| 0.077839
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.026059
| false
| 0
| 0.061889
| 0
| 0.13355
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
c09039628dfca0497559485ef917b2eee5612ab1
| 11,859
|
py
|
Python
|
virtualreality/calibration/manual_color_mask_calibration.py
|
sahasam/hobo_vr
|
0cf5824c91719055156ec23cf8dda2d921be948a
|
[
"MIT"
] | null | null | null |
virtualreality/calibration/manual_color_mask_calibration.py
|
sahasam/hobo_vr
|
0cf5824c91719055156ec23cf8dda2d921be948a
|
[
"MIT"
] | null | null | null |
virtualreality/calibration/manual_color_mask_calibration.py
|
sahasam/hobo_vr
|
0cf5824c91719055156ec23cf8dda2d921be948a
|
[
"MIT"
] | null | null | null |
"""
pyvr calibrate.
Usage:
pyvr calibrate [options]
Options:
-h, --help
-c, --camera <camera> Source of the camera to use for calibration [default: 0]
-r, --resolution <res> Input resolution in width and height [default: -1x-1]
-n, --n_masks <n_masks> Number of masks to calibrate [default: 1]
-l, --load_from_file <file> Load previous calibration settings [default: ranges.pickle]
-s, --save <file> Save calibration settings to a file [default: ranges.pickle]
"""
import logging
import pickle
import sys
from copy import copy
from pathlib import Path
from typing import Optional, List
import cv2
from docopt import docopt
from virtualreality import __version__
class ColorRange(object):
def __init__(self,
color_num,
hue_center=0,
hue_range=180,
sat_center=0,
sat_range=180,
val_center=0,
val_range=180
):
self.color_num = color_num
self.hue_center = hue_center
self.hue_range = hue_range
self.sat_center = sat_center
self.sat_range = sat_range
self.val_center = val_center
self.val_range = val_range
class CalibrationData(object):
def __init__(self, width=1, height=1, auto_exposure=0.25, exposure=0, saturation=50, num_colors=4):
self.width = width
self.height = height
self.exposure = exposure
self.saturation = saturation
self.num_colors = num_colors
self.color_ranges: List[ColorRange] = []
color_dist = 180 // num_colors
for color in range(num_colors):
self.color_ranges.append(ColorRange(color, *[color * color_dist, color_dist] * 3))
@classmethod
def load_from_file(cls, load_file: str = str(Path(__file__).parent / "ranges.pickle")) -> Optional['CalibrationData']:
"""Load the calibration data from a file."""
try:
with open(load_file, "rb") as file:
ranges = pickle.load(file)
return ranges
except FileNotFoundError:
logging.warning(f"Could not load calibration file '{load_file}'.")
return None
def save_to_file(self, save_file: str = str(Path(__file__).parent / "ranges.pickle")) -> None:
with open(save_file, "wb") as file:
pickle.dump(self, file)
def colordata_to_blob(colordata, mapdata):
'''
translates CalibrationData object to BlobTracker format masks
:colordata: CalibrationData object
:mapdata: a map dict with key representing the mask name and value representing the mask number
'''
out = {}
for key, clr_range_index in mapdata.items():
temp = colordata.color_ranges[clr_range_index]
out[key] = {
'h':(temp.hue_center, temp.hue_range),
's':(temp.sat_center, temp.sat_range),
'v':(temp.val_center, temp.val_range),
}
return out
def load_mapdata_from_file(path):
'''
loads mapdata from file, for use in colordata_to_blob
'''
with open(path, 'rb') as file:
return pickle.load(file)
def save_mapdata_to_file(path, mapdata):
'''
save mapdata to file, for use in colordata_to_blob
'''
with open(path, "wb") as file:
pickle.dump(mapdata, file)
def list_supported_capture_properties(cap: cv2.VideoCapture):
"""List the properties supported by the capture device."""
# thanks: https://stackoverflow.com/q/47935846/782170
supported = list()
for attr in dir(cv2):
if attr.startswith("CAP_PROP") and cap.get(getattr(cv2, attr)) != -1:
supported.append(attr)
return supported
def get_color_mask(hsv, color_range: ColorRange):
color_low = [
color_range.hue_center - color_range.hue_range,
color_range.sat_center - color_range.sat_range,
color_range.val_center - color_range.val_range,
]
color_high = [
color_range.hue_center + color_range.hue_range,
color_range.sat_center + color_range.sat_range,
color_range.val_center + color_range.val_range,
]
color_low_neg = copy(color_low)
color_high_neg = copy(color_high)
for c in range(3):
if c==0:
c_max = 180
else:
c_max = 255
if color_low_neg[c] < 0:
color_low_neg[c] = c_max + color_low_neg[c]
color_high_neg[c] = c_max
color_low[c] = 0
elif color_high_neg[c] > c_max:
color_low_neg[c] = 0
color_high_neg[c] = color_high_neg[c] - c_max
color_high[c] = c_max
mask1 = cv2.inRange(hsv, tuple(color_low), tuple(color_high))
mask2 = cv2.inRange(hsv, tuple(color_low_neg), tuple(color_high_neg))
mask = cv2.bitwise_or(mask1, mask2)
return mask
def _set_default_camera_properties(vs, cam, vs_supported, frame_width, frame_height):
if "CAP_PROP_FOURCC" not in vs_supported:
logging.warning(f"Camera {cam} does not support setting video codec.")
else:
vs.set(cv2.CAP_PROP_FOURCC, cv2.CAP_OPENCV_MJPEG)
if "CAP_PROP_AUTO_EXPOSURE" not in vs_supported:
logging.warning(f"Camera {cam} does not support turning on/off auto exposure.")
else:
vs.set(cv2.CAP_PROP_AUTO_EXPOSURE, 0.25)
if "CAP_PROP_EXPOSURE" not in vs_supported:
logging.warning(f"Camera {cam} does not support directly setting exposure.")
else:
vs.set(cv2.CAP_PROP_EXPOSURE, -7)
if "CAP_PROP_FRAME_HEIGHT" not in vs_supported:
logging.warning(f"Camera {cam} does not support requesting frame height.")
else:
vs.set(cv2.CAP_PROP_FRAME_HEIGHT, frame_height)
if "CAP_PROP_FRAME_WIDTH" not in vs_supported:
logging.warning(f"Camera {cam} does not support requesting frame width.")
else:
vs.set(cv2.CAP_PROP_FRAME_WIDTH, frame_width)
def manual_calibration(
cam=0, num_colors_to_track=4, frame_width=-1, frame_height=-1, load_file="", save_file="ranges.pickle"
):
"""Manually calibrate the hsv ranges and camera settings used for blob tracking."""
vs = cv2.VideoCapture(cam)
vs.set(cv2.CAP_PROP_EXPOSURE, -7)
vs_supported = list_supported_capture_properties(vs)
_set_default_camera_properties(vs, cam, vs_supported, frame_width, frame_height)
cam_window = f"camera {cam} input"
cv2.namedWindow(cam_window)
if "CAP_PROP_EXPOSURE" in vs_supported:
cv2.createTrackbar(
"exposure", cam_window, 0, 16, lambda x: vs.set(cv2.CAP_PROP_EXPOSURE, x - 8),
)
if "CAP_PROP_SATURATION" in vs_supported:
cv2.createTrackbar(
"saturation", cam_window, 0, 100, lambda x: vs.set(cv2.CAP_PROP_SATURATION, x),
)
else:
logging.warning(f"Camera {cam} does not support setting saturation.")
ranges = None
if load_file:
ranges = CalibrationData.load_from_file(load_file)
if ranges is None:
ranges = CalibrationData(width=frame_width, height=frame_height, num_colors=num_colors_to_track)
tracker_window_names = []
for color in range(num_colors_to_track):
tracker_window_names.append(f"color {color}")
cv2.namedWindow(tracker_window_names[color])
cv2.createTrackbar(
"hue center", tracker_window_names[color], ranges.color_ranges[color].hue_center, 180, lambda _: None,
)
cv2.createTrackbar(
"hue range", tracker_window_names[color], ranges.color_ranges[color].hue_range, 180, lambda _: None,
)
cv2.createTrackbar(
"sat center", tracker_window_names[color], ranges.color_ranges[color].sat_center, 255, lambda _: None,
)
cv2.createTrackbar(
"sat range", tracker_window_names[color], ranges.color_ranges[color].sat_range, 255, lambda _: None,
)
cv2.createTrackbar(
"val center", tracker_window_names[color], ranges.color_ranges[color].val_center, 255, lambda _: None,
)
cv2.createTrackbar(
"val range", tracker_window_names[color], ranges.color_ranges[color].val_range, 255, lambda _: None,
)
while 1:
ret, frame = vs.read()
if frame is None:
break
blurred = cv2.GaussianBlur(frame, (3, 3), 0)
hsv = cv2.cvtColor(blurred, cv2.COLOR_BGR2HSV)
exposure = cv2.getTrackbarPos("exposure", cam_window)
saturation = cv2.getTrackbarPos("saturation", cam_window)
ranges.exposure = exposure - 8
ranges.saturation = saturation
for color in range(num_colors_to_track):
hue_center = cv2.getTrackbarPos("hue center", tracker_window_names[color])
hue_range = cv2.getTrackbarPos("hue range", tracker_window_names[color])
sat_center = cv2.getTrackbarPos("sat center", tracker_window_names[color])
sat_range = cv2.getTrackbarPos("sat range", tracker_window_names[color])
val_center = cv2.getTrackbarPos("val center", tracker_window_names[color])
val_range = cv2.getTrackbarPos("val range", tracker_window_names[color])
ranges.color_ranges[color].hue_center = hue_center
ranges.color_ranges[color].hue_range = hue_range
ranges.color_ranges[color].sat_center = sat_center
ranges.color_ranges[color].sat_range = sat_range
ranges.color_ranges[color].val_center = val_center
ranges.color_ranges[color].val_range = val_range
mask = get_color_mask(hsv, ranges.color_ranges[color])
res = cv2.bitwise_and(hsv, hsv, mask=mask)
cv2.imshow(tracker_window_names[color], res)
cv2.imshow(cam_window, frame)
k = cv2.waitKey(1) & 0xFF
if k in [ord("q"), 27]:
break
for color in range(num_colors_to_track):
hue_center = cv2.getTrackbarPos("hue center", tracker_window_names[color])
hue_range = cv2.getTrackbarPos("hue range", tracker_window_names[color])
sat_center = cv2.getTrackbarPos("sat center", tracker_window_names[color])
sat_range = cv2.getTrackbarPos("sat range", tracker_window_names[color])
val_center = cv2.getTrackbarPos("val center", tracker_window_names[color])
val_range = cv2.getTrackbarPos("val range", tracker_window_names[color])
print(f"hue_center[{color}]: {hue_center}")
print(f"hue_range[{color}]: {hue_range}")
print(f"sat_center[{color}]: {sat_center}")
print(f"sat_range[{color}]: {sat_range}")
print(f"val_center[{color}]: {val_center}")
print(f"val_range[{color}]: {val_range}")
if save_file:
ranges.save_to_file(save_file)
print(f'ranges saved to "{save_file}".')
print("You can use this in the pyvr tracker using the --calibration-file argument.")
vs.release()
cv2.destroyAllWindows()
def main():
"""Calibrate entry point."""
# allow calling from both python -m and from pyvr:
argv = sys.argv[1:]
if len(argv) < 2 or sys.argv[1] != "calibrate":
argv = ["calibrate"] + argv
args = docopt(__doc__, version=f"pyvr version {__version__}", argv=argv)
width, height = args["--resolution"].split("x")
if args["--camera"].isdigit():
cam = int(args["--camera"])
else:
cam = args["--camera"]
manual_calibration(
cam=cam,
num_colors_to_track=int(args["--n_masks"]),
frame_width=int(width),
frame_height=int(height),
load_file=args["--load_from_file"],
save_file=args["--save"],
)
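# Hedged usage note (added): per the docopt string at the top of this module, a
# typical invocation might be `pyvr calibrate --camera 0 --n_masks 2`, with the
# results pickled to ranges.pickle by default.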
| 35.827795
| 114
| 0.646935
| 1,559
| 11,859
| 4.667094
| 0.146889
| 0.039307
| 0.054426
| 0.063222
| 0.434717
| 0.400082
| 0.343595
| 0.316795
| 0.296042
| 0.258384
| 0
| 0.017665
| 0.245805
| 11,859
| 330
| 115
| 35.936364
| 0.795841
| 0.093094
| 0
| 0.179916
| 0
| 0
| 0.121814
| 0.004029
| 0
| 0
| 0.000375
| 0
| 0
| 1
| 0.050209
| false
| 0
| 0.037657
| 0
| 0.117155
| 0.033473
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
c09416ca42570e30634d8a60a3175bf1c430d092
| 1,894
|
py
|
Python
|
database.py
|
tzoch/dropbox-bot
|
2bf36e2d4146bf8c00169362f9767ed059643787
|
[
"MIT"
] | 3
|
2016-03-08T04:43:22.000Z
|
2020-08-25T20:07:28.000Z
|
database.py
|
tzoch/dropbox-bot
|
2bf36e2d4146bf8c00169362f9767ed059643787
|
[
"MIT"
] | null | null | null |
database.py
|
tzoch/dropbox-bot
|
2bf36e2d4146bf8c00169362f9767ed059643787
|
[
"MIT"
] | null | null | null |
#! /usr/bin/python
'''
Class to handle database connections and queries for
Dropbox Mirror Bot
'''
import sqlite3
class Database(object):
def __init__(self, database=":memory:"):
self._database = database
c = self.cursor()
query = '''CREATE TABLE IF NOT EXISTS dropbox_submissions (
processed_id INTEGER PRIMARY KEY ASC,
submission_id VARCHAR(10) UNIQUE)'''
c.execute(query)
self.conn.commit()
query = '''CREATE TABLE IF NOT EXISTS dropbox_images (
id INTEGER PRIMARY KEY ASC,
imgur_id VARCHAR(10),
deletehash VARCHAR(40))'''
c.execute(query)
self.conn.commit()
@property
def conn(self):
if not hasattr(self, '_connection'):
self._connection = sqlite3.connect(self._database)
return self._connection
def cursor(self):
return self.conn.cursor()
def close(self):
self.conn.close()
def is_processed(self, submission_id):
'''Return true if the submission has already been processed'''
c = self.cursor()
query = '''SELECT submission_id FROM dropbox_submissions
WHERE submission_id = (?)'''
c.execute(query, (submission_id,))
if c.fetchone():
return True
return False
def mark_as_processed(self, submission_id):
c = self.cursor()
query = '''INSERT INTO dropbox_submissions (submission_id)
VALUES (?)'''
c.execute(query, (submission_id,))
self.conn.commit()
def log_image(self, img_id, img_deletehash):
c = self.cursor()
query = '''INSERT INTO dropbox_images (imgur_id, deletehash)
VALUES (?, ?)'''
c.execute(query, (img_id, img_deletehash))
self.conn.commit()
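# Hedged usage sketch (added): exercise the schema against the default
# in-memory database; only the standard library is required.
#   db = Database()
#   db.mark_as_processed("abc123")
#   print(db.is_processed("abc123"))  # True
#   db.log_image("imgur_id", "deletehash")
#   db.close()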
| 30.063492
| 70
| 0.577614
| 208
| 1,894
| 5.100962
| 0.346154
| 0.090481
| 0.061263
| 0.06032
| 0.265787
| 0.177191
| 0.126296
| 0
| 0
| 0
| 0
| 0.006168
| 0.315206
| 1,894
| 62
| 71
| 30.548387
| 0.811874
| 0.077614
| 0
| 0.266667
| 0
| 0
| 0.347174
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.155556
| false
| 0
| 0.022222
| 0.022222
| 0.288889
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
c095ea2cd17b98f861280b8dd90e12ab34027235
| 513
|
py
|
Python
|
solutions/unitReview/gcd.py
|
mrparkonline/python3_while
|
3b24be84d16230e2b923276dca4c943f4c5ad26d
|
[
"MIT"
] | null | null | null |
solutions/unitReview/gcd.py
|
mrparkonline/python3_while
|
3b24be84d16230e2b923276dca4c943f4c5ad26d
|
[
"MIT"
] | null | null | null |
solutions/unitReview/gcd.py
|
mrparkonline/python3_while
|
3b24be84d16230e2b923276dca4c943f4c5ad26d
|
[
"MIT"
] | null | null | null |
# GCD Program
from math import gcd
# input
num1 = int(input('Enter a number: '))
num2 = int(input('Enter another number: '))
# processing & output
divisor = 1
upper_limit = min(num1, num2)
gcd_answer = 0
#print(num1, 'and', num2, 'share these factors:')
print('GCD of', num1, 'and', num2, 'is:')
while divisor <= upper_limit:
if num1 % divisor == 0 and num2 % divisor == 0:
gcd_answer = divisor
divisor += 1
# end of while loop
print(gcd_answer)
print('Math Module GCD:', gcd(num1,num2))
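# Worked example (added): for num1=12 and num2=18 the loop tests divisors 1..12
# and last assigns gcd_answer = 6, matching math.gcd(12, 18).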
| 22.304348
| 51
| 0.651072
| 76
| 513
| 4.328947
| 0.447368
| 0.082067
| 0.079027
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.041565
| 0.202729
| 513
| 23
| 52
| 22.304348
| 0.762836
| 0.20078
| 0
| 0
| 0
| 0
| 0.162963
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.076923
| 0
| 0.076923
| 0.230769
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
c09740c69f29292cd8143f6167d141bb98d730a6
| 728
|
py
|
Python
|
notification/views.py
|
ChristopherOloo/KilimoQAPortal
|
c905a42282bbce70b5477862185ad332185307ce
|
[
"MIT"
] | 67
|
2022-01-05T18:59:23.000Z
|
2022-03-18T13:13:39.000Z
|
notification/views.py
|
ChristopherOloo/KilimoQAPortal
|
c905a42282bbce70b5477862185ad332185307ce
|
[
"MIT"
] | 3
|
2022-01-10T10:03:23.000Z
|
2022-03-11T16:58:38.000Z
|
notification/views.py
|
ChristopherOloo/KilimoQAPortal
|
c905a42282bbce70b5477862185ad332185307ce
|
[
"MIT"
] | 4
|
2022-01-08T17:39:19.000Z
|
2022-02-28T07:40:16.000Z
|
from django.shortcuts import render
from .models import PrivRepNotification,Notification
from django.http import JsonResponse, HttpResponseRedirect, HttpResponse
def read_All_Notifications(request):
notifics = Notification.objects.filter(noti_receiver=request.user).order_by('-date_created')
for objs in notifics:
objs.is_read = True
objs.save()
# return HttpResponse(status=204)
return JsonResponse({'action': 'readedAll'})
def read_All_Priv_Notifications(request):
notifications = PrivRepNotification.objects.filter(for_user=request.user)
for obj in notifications:
obj.is_read = True
obj.save()
return JsonResponse({'action':'readedAllPrivNotifications'})
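# Hedged note (added): iterating and saving per object issues one UPDATE per
# row; Django's queryset .update(is_read=True) would do it in a single query,
# at the cost of skipping per-instance save() signals.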
| 26.962963
| 96
| 0.747253
| 80
| 728
| 6.6625
| 0.5125
| 0.037523
| 0.037523
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.004918
| 0.162088
| 728
| 26
| 97
| 28
| 0.868852
| 0.042582
| 0
| 0
| 0
| 0
| 0.086455
| 0.037464
| 0
| 0
| 0
| 0
| 0
| 1
| 0.133333
| false
| 0
| 0.2
| 0
| 0.466667
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
c09b58cc8746669f100104bd829d92eb5454df67
| 1,548
|
py
|
Python
|
fb2_get_list.py
|
kawaiigamer/python-tools
|
68fd75299657811fef36339732c80539ccad386e
|
[
"Unlicense"
] | null | null | null |
fb2_get_list.py
|
kawaiigamer/python-tools
|
68fd75299657811fef36339732c80539ccad386e
|
[
"Unlicense"
] | null | null | null |
fb2_get_list.py
|
kawaiigamer/python-tools
|
68fd75299657811fef36339732c80539ccad386e
|
[
"Unlicense"
] | null | null | null |
import os
import glob
import codecs
from typing import List
def dirs(root_dir: str) -> List[str]:
return next(os.walk(root_dir))[1]
def select_directory_from_list(directories: List[str]) -> str:
for i in range(0, len(directories)):
print("(%d) %s" % (i, directories[i]))
while True:
try:
return directories[int(input('Directory to check(number)_->'))]
except Exception as e:
print("Wrong input: %s" % e)
continue
def text_between(_str: str, begin: str, end: str) -> str:
start = _str.find(begin)
stop = _str.find(end)
if start != -1 and stop != -1:
return _str[start+len(begin):stop]
else:
return ""
def f2b_print_data_list():
checking_directory = select_directory_from_list(dirs('.'))
f2b_files = glob.glob("%s/*.fb2" % checking_directory)
counter = 0
for f2b_file in f2b_files:
try:
text = codecs.open(f2b_file, 'r', encoding='utf8').read()
counter += 1
print("%d. %s - %s %s %s" %
(counter,
text_between(text, "<book-title>", "</book-title>"),
text_between(text, "<first-name>", "</first-name>"),
text_between(text, "<middle-name>", "</middle-name>"),
text_between(text, "<last-name>", "</last-name>")
))
except Exception as e:
print("Exception while parsing %s: %s" % (f2b_file, e))
if __name__ == "__main__":
f2b_print_data_list()
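# Hedged note (added): run this script from a folder whose subdirectories
# contain .fb2 e-books; each file prints as "<n>. <book-title> - <first>
# <middle> <last>" per the format string above.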
| 29.207547
| 75
| 0.554264
| 194
| 1,548
| 4.221649
| 0.371134
| 0.067155
| 0.07326
| 0.056166
| 0.056166
| 0
| 0
| 0
| 0
| 0
| 0
| 0.013699
| 0.292636
| 1,548
| 52
| 76
| 29.769231
| 0.734247
| 0
| 0
| 0.097561
| 0
| 0
| 0.142119
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.097561
| false
| 0
| 0.097561
| 0.02439
| 0.292683
| 0.146341
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
c09c7f0c8e41ed1996a2664259286c39cad5f12c
| 2,403
|
py
|
Python
|
simplecaptcha/fields.py
|
Kromey/django-simplecaptcha
|
ad462f8742be19b1e87103f097853d41e21d0e0a
|
[
"MIT"
] | 5
|
2015-11-12T06:31:08.000Z
|
2017-03-09T06:45:46.000Z
|
simplecaptcha/fields.py
|
Kromey/django-simplecaptcha
|
ad462f8742be19b1e87103f097853d41e21d0e0a
|
[
"MIT"
] | null | null | null |
simplecaptcha/fields.py
|
Kromey/django-simplecaptcha
|
ad462f8742be19b1e87103f097853d41e21d0e0a
|
[
"MIT"
] | null | null | null |
import time
from django import forms
from django.core.exceptions import ValidationError
from .widgets import CaptchaWidget
from .settings import DURATION
class CaptchaField(forms.MultiValueField):
"""A field that contains and validates a simple captcha question
WARNING: If you use this field directly in your own forms, you may be
caught by surprise by the fact that Django forms rely upon class object
rather than instance objects for its fields. This means that your captcha
will not be updated when you instantiate a new form, and you'll end up
asking your users the same question over and over -- largely defeating the
purpose of a captcha! To solve this, either use the @decorator instead, or
be sure to call upon the widget to update its captcha question.
"""
widget = CaptchaWidget
def __init__(self, *args, **kwargs):
"""Sets up the MultiValueField"""
fields = (
forms.CharField(),
forms.CharField(),
forms.CharField(),
)
super().__init__(fields, *args, **kwargs)
def compress(self, data_list):
"""Validates the captcha answer and returns the result
If no data is provided, this method will simply return None. Otherwise,
it will validate that the provided answer and timestamp hash to the
supplied hash value, and that the timestamp is within the configured
time that captchas are considered valid.
"""
if data_list:
# Calculate the hash of the supplied values
hashed = self.widget.hash_answer(answer=data_list[0], timestamp=data_list[1])
# Current time
timestamp = time.time()
if float(data_list[1]) < timestamp - DURATION:
raise ValidationError("Captcha expired, please try again", code='invalid')
elif hashed != data_list[2]:
raise ValidationError("Incorrect answer", code='invalid')
# Return the supplied answer
return data_list[0]
else:
return None
@property
def label(self):
"""The captcha field's label is the captcha question itself"""
return self.widget._question
@label.setter
def label(self, value):
"""The question is generated by the widget and cannot be externally set"""
pass
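# Hedged usage sketch (added): embed the field in an ordinary Django form; the
# widget supplies the question and the label property exposes it.
#   class ContactForm(forms.Form):
#       message = forms.CharField(widget=forms.Textarea)
#       captcha = CaptchaField()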
| 35.338235
| 90
| 0.650853
| 305
| 2,403
| 5.072131
| 0.465574
| 0.036199
| 0.024564
| 0.036199
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.002904
| 0.283396
| 2,403
| 67
| 91
| 35.865672
| 0.89547
| 0.458593
| 0
| 0.096774
| 0
| 0
| 0.052588
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.129032
| false
| 0.032258
| 0.16129
| 0
| 0.451613
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
c09e72d5be2ef0cef0c360e31efc8610a74ed555
| 4,940
|
py
|
Python
|
skills_taxonomy_v2/analysis/sentence_classifier/notebooks/Skills Classifier 1.0 - Doccano Baseline Classifier.py
|
india-kerle/skills-taxonomy-v2
|
a71366dfea3c35580dbafddba9470f83795805ae
|
[
"MIT"
] | 3
|
2021-11-21T17:21:12.000Z
|
2021-12-10T21:19:57.000Z
|
skills_taxonomy_v2/analysis/sentence_classifier/notebooks/Skills Classifier 1.0 - Doccano Baseline Classifier.py
|
india-kerle/skills-taxonomy-v2
|
a71366dfea3c35580dbafddba9470f83795805ae
|
[
"MIT"
] | 16
|
2021-10-06T11:20:35.000Z
|
2022-02-02T11:44:28.000Z
|
skills_taxonomy_v2/analysis/sentence_classifier/notebooks/Skills Classifier 1.0 - Doccano Baseline Classifier.py
|
india-kerle/skills-taxonomy-v2
|
a71366dfea3c35580dbafddba9470f83795805ae
|
[
"MIT"
] | 1
|
2021-10-04T12:27:20.000Z
|
2021-10-04T12:27:20.000Z
|
# ---
# jupyter:
# jupytext:
# cell_metadata_filter: -all
# comment_magics: true
# formats: ipynb,py:light
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.11.3
# kernelspec:
# display_name: Python 3 (ipykernel)
# language: python
# name: python3
# ---
# # Existing skill tags data
# 1. Look at data
# 2. Build a simple baseline classifier
#
# Karlis tagged 50 job adverts with the spans where skills were mentioned. Can we train something to identify sentences as about skills or not?
#
# Would be helpful for taking out the junk.
# +
from sklearn.linear_model import LogisticRegression
import json
import random
from sklearn.feature_extraction.text import CountVectorizer, TfidfVectorizer
from sklearn.naive_bayes import MultinomialNB
from sklearn.model_selection import train_test_split
from sklearn.metrics import (
accuracy_score,
classification_report,
f1_score,
precision_score,
recall_score,
)
# -
# ### Import data
with open(
"../../../../inputs/karlis_ojo_manually_labelled/OJO_test_labelling_April2021_jobs.jsonl",
"r",
) as file:
jobs_data = [json.loads(line) for line in file]
jobs_data[0].keys()
with open(
"../../../../inputs/karlis_ojo_manually_labelled/OJO_test_labelling_April2021_labels.json",
"r",
) as file:
labels_data = json.load(file)
label_type_dict = {label_type["id"]: label_type["text"] for label_type in labels_data}
label_type_dict
# ### Restructuring to have a look
# +
all_job_tags_text = {}
for job_id, job_info in enumerate(jobs_data):
text = job_info["text"]
annotations = job_info["annotations"]
job_tags_text = {}
for label_number, label_type in label_type_dict.items():
job_tags_text[label_type] = [
text[label["start_offset"] : label["end_offset"]]
for label in annotations
if label["label"] == label_number
]
all_job_tags_text[job_id] = job_tags_text
# -
job_id = 1
print(jobs_data[job_id]["text"])
print("\n")
print(all_job_tags_text[job_id]["SKILL"])
print(all_job_tags_text[job_id]["SKILL-RELATED"])
# ## Create a basic classifier
# Label sentences with containing skills (1) or not (0)
#
# The method assumes sentences are split by full stop and will run into problems if a skill span contains a full stop.
def label_sentences(job_id):
annotations = jobs_data[job_id]["annotations"]
skill_spans = [
(label["start_offset"], label["end_offset"])
for label in annotations
if label["label"] in [1, 5]
]
sentences = jobs_data[job_id]["text"].split(".")
# Indices of where sentences start and end
sentences_ix = []
for i, sentence in enumerate(sentences):
if i == 0:
start = 0
else:
start = sentences_ix[i - 1][1] + 1
sentences_ix.append((start, start + len(sentence)))
# Find which sentences contain skills
sentences_label = [0] * len(sentences)
for (skill_start, skill_end) in skill_spans:
for i, (sent_s, sent_e) in enumerate(sentences_ix):
if sent_s <= skill_start and sent_e >= skill_end:
sentences_label[i] = 1
return sentences, sentences_label
# Testing
job_id = 2
sentences, sentences_label = label_sentences(job_id)
print(all_job_tags_text[job_id]["SKILL"])
print(all_job_tags_text[job_id]["SKILL-RELATED"])
print([sentences[i] for i, label in enumerate(sentences_label) if label == 1])
print([sentences[i] for i, label in enumerate(sentences_label) if label == 0])
# Create training dataset
X = []
y = []
for job_id in range(len(jobs_data)):
sentences, sentences_label = label_sentences(job_id)
for sentence, sentence_label in zip(sentences, sentences_label):
X.append(sentence)
y.append(sentence_label)
# +
# Random shuffle data points
shuffle_index = list(range(len(X)))
random.Random(42).shuffle(shuffle_index)
X = [X[i] for i in shuffle_index]
y = [y[i] for i in shuffle_index]
# Split test/train set
train_split = 0.75
len_train = round(len(X) * train_split)
X_train = X[0:len_train]
y_train = y[0:len_train]
X_test = X[len_train:]
y_test = y[len_train:]
# -
print(len(X))
print(len(y_train))
print(len(y_test))
vectorizer = CountVectorizer(
analyzer="word",
token_pattern=r"(?u)\b\w+\b",
ngram_range=(1, 2),
stop_words="english",
)
X_train_vect = vectorizer.fit_transform(X_train)
model = MultinomialNB()
model = model.fit(X_train_vect, y_train)
X_test_vect = vectorizer.transform(X_test)
y_test_pred = model.predict(X_test_vect)
print(classification_report(y_test, y_test_pred))
# +
# LogisticRegression
model = LogisticRegression(max_iter=1000, class_weight="balanced")
model = model.fit(X_train_vect, y_train)
X_test_vect = vectorizer.transform(X_test)
y_test_pred = model.predict(X_test_vect)
print(classification_report(y_test, y_test_pred))
# -
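# Hedged note (added): class_weight="balanced" reweights the skill/non-skill
# sentence imbalance, so comparing this classification_report with the
# MultinomialNB report above is the point of this final cell.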
| 26.417112
| 130
| 0.696964
| 716
| 4,940
| 4.554469
| 0.27933
| 0.022999
| 0.030359
| 0.025759
| 0.291015
| 0.275682
| 0.258203
| 0.232444
| 0.232444
| 0.232444
| 0
| 0.012435
| 0.186032
| 4,940
| 186
| 131
| 26.55914
| 0.798558
| 0.204251
| 0
| 0.205607
| 0
| 0
| 0.087697
| 0.045138
| 0
| 0
| 0
| 0
| 0
| 1
| 0.009346
| false
| 0
| 0.065421
| 0
| 0.084112
| 0.121495
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
c0a3c20650d9f2b0b50513762c0375912b29d194 | 2,594 | py | Python | tests/test_action_guest_process_start.py | lingfish/stackstorm-vsphere @ 49199f5ebdc05b70b7504962e104642b0c30ba30 | ["Apache-2.0"] | stars: null | issues: 2 | forks: 1
# Licensed to the StackStorm, Inc ('StackStorm') under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import mock
from vsphere_base_action_test_case import VsphereBaseActionTestCase
from guest_process_start import StartProgramInGuest
__all__ = [
'StartProgramInGuestTestCase'
]
class StartProgramInGuestTestCase(VsphereBaseActionTestCase):
__test__ = True
action_cls = StartProgramInGuest
@mock.patch('pyVmomi.vim.vm.guest.ProcessManager')
def test_normal(self, mock_process_manager):
        # Vary the arguments input, including passing None; each value is fed
        # to action.run() and the resulting ProgramSpec call is asserted below.
for argdata in (None, 'onearg', 'two arguments'):
(action, mock_vm) = self.mock_one_vm('vm-12345')
mockProcMgr = mock.Mock()
mockProcMgr.StartProgramInGuest = mock.Mock()
mockProcMgr.StartProgramInGuest.return_value = 12345
action.si_content.guestOperationsManager = mock.Mock()
action.si_content.guestOperationsManager.processManager =\
mockProcMgr
mock_process_manager.ProgramSpec.return_value = 'cmdspec'
envvars = ["A=B", "C=D"] if argdata else None
result = action.run(vm_id='vm-12345', username='u',
password='p', command='c',
arguments=argdata, workdir='/tmp',
envvar=envvars)
mock_process_manager.ProgramSpec.assert_called_with(
arguments='' if not argdata else argdata,
envVariables=envvars,
programPath='c',
workingDirectory='/tmp'
)
mockProcMgr.StartProgramInGuest.assert_called_once_with(
mock_vm, action.guest_credentials, 'cmdspec',
)
self.assertEqual(result, 12345)
c0a3f676d422bbdd29b5d1ae6fd198e164330819 | 4,192 | py | Python | src/soda/mutator.py | UCLA-VAST/soda @ 1b3994ded643d82ebc2fce7b1eb1d13c70800897 | ["MIT"] | stars: 9 | issues: 1 | forks: 1
from typing import (
Iterable,
Mapping,
MutableMapping,
Optional,
Tuple,
TypeVar,
Union,
)
import collections.abc
import logging
import operator
import types
from haoda import ir
from soda import tensor
import soda.visitor
_logger = logging.getLogger().getChild(__name__)
def shift(obj, offset, excluded=(), op=operator.sub, verbose=False):
"""Shift soda.ir.Ref with the given offset.
All soda.ir.Ref, excluding the given names, will be shifted with the
given offset using the given operator. The operator will be applied pointwise
on the original index and the given offset.
Args:
obj: A haoda.ir.Node or a tensor.Tensor object.
offset: Second operand given to the operator.
excluded: Sequence of names to be excluded from the mutation. Default to ().
op: Shifting operator. Should be either add or sub. Default to sub.
verbose: Whether to log shiftings. Default to False.
Returns:
Mutated obj. If obj is an IR node, it will be a different object than the
input. If obj is a tensor, it will be the same object but with fields
mutated.
"""
if op not in (operator.add, operator.sub):
    _logger.warning('shifting with neither + nor -, which most likely is an error')
def visitor(obj, args):
if isinstance(obj, ir.Ref):
if obj.name not in excluded:
new_idx = tuple(op(a, b) for a, b in zip(obj.idx, offset))
if verbose:
_logger.debug('reference %s(%s) shifted to %s(%s)', obj.name,
', '.join(map(str, obj.idx)), obj.name,
', '.join(map(str, new_idx)))
obj.idx = new_idx
if isinstance(obj, ir.Node):
return obj.visit(visitor)
if isinstance(obj, tensor.Tensor):
obj.mutate(visitor)
else:
raise TypeError('argument is not an IR node or a tensor')
return obj
def normalize(obj: Union[ir.Node, Iterable[ir.Node]],
references: Optional[Mapping[str, Tuple[int, ...]]] = None):
"""Make the least access index 0.
Works on an ir.Node or an iterable of ir.Nodes. If it is shifted, a different
object is constructed and returned. Otherwise, obj will be returned as-is.
Args:
obj: A node or an iterable of nodes.
Returns:
Normalized node or iterable.
Raises:
TypeError: If argument is not an ir.Node or an iterable of ir.Nodes.
"""
if isinstance(obj, types.GeneratorType):
return normalize(tuple(obj))
norm_idx = soda.visitor.get_normalize_index(obj, references)
shifter = lambda x: shift(x, norm_idx) if any(norm_idx) else x
  if isinstance(obj, collections.abc.Iterable):
return type(obj)(map(shifter, obj)) # type: ignore
if isinstance(obj, ir.Node):
return shifter(obj)
raise TypeError('argument is not an ir.Node or an iterable of ir.Nodes')
NodeT = TypeVar('NodeT', bound=ir.Node)
def replace_expressions(
obj: NodeT,
cses: MutableMapping[NodeT, ir.Ref],
used: Optional[MutableMapping[NodeT, NodeT]] = None,
references: Optional[Mapping[str, Tuple[int, ...]]] = None,
) -> NodeT:
"""Get AST with common subexpression elimination.
Get AST with the given common subexpressions. If used is not None, the used
common subexpressions will be added to used.
Args:
obj: An ir.Node.
cses: Dict mapping normalized common subexpressions to the new ir.Ref.
used: Set of used common subexpressions, or None.
Returns:
The ir.Node as the AST.
"""
def visitor(
obj: NodeT,
args: Tuple[MutableMapping[NodeT, ir.
Ref], Optional[MutableMapping[NodeT, NodeT]]]
) -> NodeT:
cses, used = args
norm_idx = soda.visitor.get_normalize_index(obj, references)
normalized = shift(obj, norm_idx) if any(norm_idx) else obj
if normalized in cses:
if used is not None:
if normalized not in used:
used[normalized] = replace_expressions(
normalized, {k: v for k, v in cses.items() if k != normalized},
used)
new_obj = shift(cses[normalized], norm_idx, op=operator.add)
_logger.debug('replacing %s with %s', obj, new_obj)
return new_obj
return obj
return obj.visit(visitor, (cses, used))
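# For intuition, a standalone sketch of the same normalize-by-least-index idea
# on plain integer tuples, independent of the haoda IR types (hypothetical,
# not part of this module):
def normalize_indices(idx_list):
  """Shift integer index tuples so the pointwise minimum becomes 0."""
  idx_list = list(idx_list)
  offset = tuple(min(dims) for dims in zip(*idx_list))
  return [tuple(a - b for a, b in zip(idx, offset)) for idx in idx_list]
# normalize_indices([(2, 3), (1, 5), (4, 4)]) -> [(1, 0), (0, 2), (3, 1)]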
c0a71acf6116e8faa1f0455b3919ee53b2e3be9c | 2,923 | py | Python | htdocs/plotting/auto/scripts/p66.py | jamayfieldjr/iem @ 275b77a65f3b12e26e6cbdb230786b9c7d2b9c9a | ["MIT"] | stars: 1 | issues: null | forks: null
"""Consec days"""
import calendar
from pandas.io.sql import read_sql
from pyiem.plot.use_agg import plt
from pyiem.util import get_autoplot_context, get_dbconn
PDICT = {'above': 'Temperature At or Above (AOA) Threshold',
'below': 'Temperature Below Threshold'}
PDICT2 = {'high': 'High Temperature',
'low': 'Low Temperature'}
def get_description():
""" Return a dict describing how to call this plotter """
desc = dict()
desc['data'] = True
    desc['description'] = """This chart presents the daily frequency of the
    given date having the prescribed number of previous days above or below
    some provided threshold."""
desc['arguments'] = [
dict(type='station', name='station', default='IATDSM',
label='Select Station:', network='IACLIMATE'),
dict(type='select', name='var', default='high', options=PDICT2,
label='Select which daily variable'),
dict(type='select', name='dir', default='above', options=PDICT,
label='Select temperature direction'),
dict(type='int', name='threshold', default='60',
label='Temperature Threshold (F):'),
dict(type='int', name='days', default='7',
label='Number of Days:')
]
return desc
def plotter(fdict):
""" Go """
pgconn = get_dbconn('coop')
ctx = get_autoplot_context(fdict, get_description())
station = ctx['station']
days = ctx['days']
threshold = ctx['threshold']
varname = ctx['var']
mydir = ctx['dir']
table = "alldata_%s" % (station[:2],)
agg = "min" if mydir == 'above' else 'max'
op = ">=" if mydir == 'above' else '<'
df = read_sql("""
with data as (select day,
"""+agg+"""("""+varname+""")
OVER (ORDER by day ASC ROWS BETWEEN %s PRECEDING
and CURRENT ROW) as agg from """ + table + """
where station = %s)
select extract(doy from day) as doy,
sum(case when agg """+op+""" %s then 1 else 0 end)
/ count(*)::float * 100. as freq
from data GROUP by doy ORDER by doy asc
""", pgconn, params=(days - 1, station, threshold), index_col='doy')
fig, ax = plt.subplots(1, 1, sharex=True)
label = "AOA" if mydir == 'above' else 'below'
ax.set_title(("[%s] %s\nFrequency of %s Consec Days"
r" with %s %s %s$^\circ$F "
) % (station, ctx['_nt'].sts[station]['name'],
days, varname.capitalize(), label, threshold))
ax.set_ylabel("Frequency of Days [%]")
ax.set_ylim(0, 100)
ax.set_yticks([0, 5, 10, 25, 50, 75, 90, 95, 100])
ax.grid(True)
ax.bar(df.index.values, df['freq'], width=1)
ax.set_xticks((1, 32, 60, 91, 121, 152, 182, 213, 244, 274,
305, 335, 365))
ax.set_xticklabels(calendar.month_abbr[1:])
ax.set_xlim(0, 366)
return fig, df
if __name__ == '__main__':
plotter(dict())
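# For readers without access to the IEM database, the same consecutive-day
# statistic can be sketched with a pandas rolling window; the toy series
# below is hypothetical and not part of this script.
import pandas as pd
highs = pd.Series([65, 70, 61, 72, 75, 80, 79, 58, 62, 90])
days, threshold = 3, 60
# rolling min over the trailing `days` values mirrors the SQL
# min(...) OVER (... ROWS BETWEEN %s PRECEDING AND CURRENT ROW)
streak_aoa = highs.rolling(days).min() >= threshold
print(streak_aoa.mean())  # frequency of qualifying days in the sample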
c0a93dc0b3c06bf5e6cdc0aa43def476e965448d | 866 | py | Python | csv_test.py | mii012345/deep-learning @ 660785157446583eefeefa9d5dc25927aab6a9e4 | ["MIT"] | stars: 3 | issues: null | forks: null
import csv
import numpy as np
import pickle
with open('data (2).csv','r') as f:
    reader = csv.reader(f)  # renamed so the csv module is not shadowed
    csvlist = []
    for row in reader:
        csvlist.append(row)
# data rows start at line 6
mas = []
for i in range(364):
i+=6
a = 0
b = 0
c = 0
date = csvlist[i][0]
weather = csvlist[i][1]
    # skip 2016/11/1 through 2016/11/7; the trailing space in the first three
    # comparisons keeps e.g. "2016/11/1" from also matching "2016/11/1x"
    if date[0:10] == "2016/11/1 " or date[0:10] == "2016/11/2 " or date[0:10] == "2016/11/3 " or date[0:9] == "2016/11/4" or date[0:9] == "2016/11/5" or date[0:9] == "2016/11/6" or date[0:9] == "2016/11/7":
        continue
if weather == "1" or weather == "2":
a = 1
elif weather == "3" or weather == "4" or weather == "5" or weather == "6":
b = 1
else:
c = 1
w = [a,b,c]
print(date[0:10])
mas.append(w)
mas = np.array(mas)
with open('tenki_num.pkl','wb') as f:
pickle.dump(mas,f)
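# A sketch of the same date filtering with datetime parsing, which avoids the
# brittle string-prefix checks above (assumes the date column looks like
# "2016/11/3 12:00"; hypothetical helper, not part of the original script):
from datetime import datetime
def in_first_week_of_nov(date_str):
    day = datetime.strptime(date_str.split(' ')[0], '%Y/%m/%d')
    return day.year == 2016 and day.month == 11 and day.day <= 7
# in_first_week_of_nov('2016/11/3 12:00')  -> True
# in_first_week_of_nov('2016/11/10 12:00') -> False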
c0abaf869bbe93d0c4be20bb53db1ca7697f6d3d | 1,971 | py | Python | ntm/ntm.py | clemkoa/ntm @ 723d4ebea63f8f9439fd1c56f36e3cb680c8a277 | ["MIT"] | stars: 41 | issues: 3 | forks: 4
import torch
from torch import nn
from ntm.controller import Controller
from ntm.memory import Memory
from ntm.head import ReadHead, WriteHead
class NTM(nn.Module):
def __init__(self, vector_length, hidden_size, memory_size, lstm_controller=True):
super(NTM, self).__init__()
self.controller = Controller(lstm_controller, vector_length + 1 + memory_size[1], hidden_size)
self.memory = Memory(memory_size)
self.read_head = ReadHead(self.memory, hidden_size)
self.write_head = WriteHead(self.memory, hidden_size)
self.fc = nn.Linear(hidden_size + memory_size[1], vector_length)
nn.init.xavier_uniform_(self.fc.weight, gain=1)
nn.init.normal_(self.fc.bias, std=0.01)
def get_initial_state(self, batch_size=1):
self.memory.reset(batch_size)
controller_state = self.controller.get_initial_state(batch_size)
read = self.memory.get_initial_read(batch_size)
read_head_state = self.read_head.get_initial_state(batch_size)
write_head_state = self.write_head.get_initial_state(batch_size)
return (read, read_head_state, write_head_state, controller_state)
def forward(self, x, previous_state):
previous_read, previous_read_head_state, previous_write_head_state, previous_controller_state = previous_state
controller_input = torch.cat([x, previous_read], dim=1)
controller_output, controller_state = self.controller(controller_input, previous_controller_state)
# Read
read_head_output, read_head_state = self.read_head(controller_output, previous_read_head_state)
# Write
write_head_state = self.write_head(controller_output, previous_write_head_state)
fc_input = torch.cat((controller_output, read_head_output), dim=1)
state = (read_head_output, read_head_state, write_head_state, controller_state)
        return torch.sigmoid(self.fc(fc_input)), state  # F.sigmoid is deprecated in favour of torch.sigmoid
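# A minimal smoke test, assuming the companion ntm.controller/memory/head
# modules are importable; the +1 on the input width is the delimiter bit the
# constructor accounts for. Hypothetical values, not part of the original module.
if __name__ == '__main__':
    vector_length, hidden_size, memory_size = 8, 64, (32, 16)
    model = NTM(vector_length, hidden_size, memory_size)
    state = model.get_initial_state(batch_size=4)
    x = torch.zeros(4, vector_length + 1)  # input includes the delimiter bit
    out, state = model(x, state)
    print(out.shape)  # torch.Size([4, 8])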
c0af4a37c3b086f10b2224f1101fb1be4a7fdce1 | 3,468 | py | Python | facebook_business/adobjects/adkeywordstats.py | enricapq/facebook-python-business-sdk @ 49c569ac5cf812b1bcb533520c35896b0436fa4c | ["CNRI-Python"] | stars: null | issues: null | forks: 1
# Copyright 2014 Facebook, Inc.
# You are hereby granted a non-exclusive, worldwide, royalty-free license to
# use, copy, modify, and distribute this software in source code or binary
# form for use in connection with the web services and APIs provided by
# Facebook.
# As with any software that integrates with the Facebook platform, your use
# of this software is subject to the Facebook Developer Principles and
# Policies [http://developers.facebook.com/policy/]. This copyright notice
# shall be included in all copies or substantial portions of the software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
from facebook_business.adobjects.abstractobject import AbstractObject
from facebook_business.adobjects.abstractcrudobject import AbstractCrudObject
from facebook_business.adobjects.objectparser import ObjectParser
from facebook_business.api import FacebookRequest
from facebook_business.typechecker import TypeChecker
"""
This class is auto-generated.
For any issues or feature requests related to this class, please let us know
on github and we'll fix it in our codegen framework. We won't be able to
accept pull requests for this class.
"""
class AdKeywordStats(
AbstractCrudObject,
):
def __init__(self, fbid=None, parent_id=None, api=None):
self._isAdKeywordStats = True
super(AdKeywordStats, self).__init__(fbid, parent_id, api)
class Field(AbstractObject.Field):
actions = 'actions'
clicks = 'clicks'
cost_per_total_action = 'cost_per_total_action'
cost_per_unique_click = 'cost_per_unique_click'
cpc = 'cpc'
cpm = 'cpm'
cpp = 'cpp'
ctr = 'ctr'
frequency = 'frequency'
id = 'id'
impressions = 'impressions'
name = 'name'
reach = 'reach'
spend = 'spend'
total_actions = 'total_actions'
total_unique_actions = 'total_unique_actions'
unique_actions = 'unique_actions'
unique_clicks = 'unique_clicks'
unique_ctr = 'unique_ctr'
unique_impressions = 'unique_impressions'
# @deprecated get_endpoint function is deprecated
@classmethod
def get_endpoint(cls):
return 'keywordstats'
_field_types = {
'actions': 'list<AdsActionStats>',
'clicks': 'unsigned int',
'cost_per_total_action': 'float',
'cost_per_unique_click': 'float',
'cpc': 'float',
'cpm': 'float',
'cpp': 'float',
'ctr': 'float',
'frequency': 'float',
'id': 'string',
'impressions': 'unsigned int',
'name': 'string',
'reach': 'unsigned int',
'spend': 'float',
'total_actions': 'unsigned int',
'total_unique_actions': 'unsigned int',
'unique_actions': 'list<AdsActionStats>',
'unique_clicks': 'unsigned int',
'unique_ctr': 'float',
'unique_impressions': 'unsigned int',
}
@classmethod
def _get_field_enum_info(cls):
field_enum_info = {}
return field_enum_info
c0b3ae1a797739b59abdda1942df55aaa68ec172 | 1,198 | py | Python | DQM/TrackingMonitorSource/python/StandaloneTrackMonitor_cfi.py | ckamtsikis/cmssw @ ea19fe642bb7537cbf58451dcf73aa5fd1b66250 | ["Apache-2.0"] | stars: 852 | issues: 30,371 | forks: 3,240
import FWCore.ParameterSet.Config as cms
from DQMServices.Core.DQMEDAnalyzer import DQMEDAnalyzer
standaloneTrackMonitor = DQMEDAnalyzer('StandaloneTrackMonitor',
moduleName = cms.untracked.string("StandaloneTrackMonitor"),
folderName = cms.untracked.string("highPurityTracks"),
vertexTag = cms.untracked.InputTag("selectedPrimaryVertices"),
puTag = cms.untracked.InputTag("addPileupInfo"),
clusterTag = cms.untracked.InputTag("siStripClusters"),
trackInputTag = cms.untracked.InputTag('selectedTracks'),
offlineBeamSpot = cms.untracked.InputTag('offlineBeamSpot'),
trackQuality = cms.untracked.string('highPurity'),
doPUCorrection = cms.untracked.bool(False),
isMC = cms.untracked.bool(True),
puScaleFactorFile = cms.untracked.string("PileupScaleFactor_run203002.root"),
haveAllHistograms = cms.untracked.bool(False),
verbose = cms.untracked.bool(False),
trackEtaH = cms.PSet(Xbins = cms.int32(60), Xmin = cms.double(-3.0),Xmax = cms.double(3.0)),
trackPtH = cms.PSet(Xbins = cms.int32(100),Xmin = cms.double(0.0),Xmax = cms.double(100.0))
)
c0b773458653a85f2fb1e0a33ea41844604c6b4f | 3,006 | py | Python | xdl-algorithm-solution/DIN_WITH_MOGUJIE_DATA/script/train.py | xiaobaoding/x-deeplearning @ 1280043aba15ff57ac5e973bcce2489c698380d2 | ["Apache-2.0"] | stars: null | issues: null | forks: null
#coding=utf-8
# Copyright (C) 2016-2018 Alibaba Group Holding Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
import os
import sys
import time
import math
import random
import argparse
import tensorflow as tf
import numpy
from model import *
from utils import *
import xdl
from xdl.python.training.train_session import QpsMetricsHook, MetricsPrinterHook
#config here
parser = argparse.ArgumentParser()
parser.add_argument("-s", "--seed", help="random seed", default=3)
parser.add_argument("-jt", "--job_type", help="'train' or 'test'", default='train')
parser.add_argument("-m", "--model", help="'din' or 'dien'", default='din_mogujie')
parser.add_argument("-si", "--save_interval", help="checkpoint save interval steps", default=20000)
parser.add_argument("-dr", "--data_dir", help="data dir")
args, unknown = parser.parse_known_args()
seed = args.seed
job_type = args.job_type
model_type = args.model
save_interval = args.save_interval
def get_data_prefix():
return "../data/"
#return args.data_dir
train_file = os.path.join(get_data_prefix(), "train_data.tfrecords")
def train():
if model_type == 'din_mogujie':
model = Model_DIN_MOGUJIE(
EMBEDDING_DIM, HIDDEN_SIZE, ATTENTION_SIZE,False, train_file,batch_size)
else:
        raise Exception('only din_mogujie is supported')
#data set
with xdl.model_scope('train'):
train_ops = model.build_network()
lr = 0.001
# Adam Adagrad
train_ops.append(xdl.Adam(lr).optimize())
hooks = []
log_format = "[%(time)s] lstep[%(lstep)s] gstep[%(gstep)s] lqps[%(lqps)s] gqps[%(gqps)s] loss[%(loss)s]"
hooks = [QpsMetricsHook(), MetricsPrinterHook(log_format)]
if xdl.get_task_index() == 0:
hooks.append(xdl.CheckpointHook(save_interval))
train_sess = xdl.TrainSession(hooks=hooks)
"""
with xdl.model_scope('test'):
test_ops = model.build_network(
EMBEDDING_DIM, is_train=False)
test_sess = xdl.TrainSession()
"""
model.run(train_ops, train_sess)
def test():
pass
if __name__ == '__main__':
SEED = seed
if SEED is None:
SEED = 3
tf.set_random_seed(SEED)
numpy.random.seed(SEED)
random.seed(SEED)
if job_type == 'train':
train()
elif job_type == 'test':
test()
else:
print('job type must be train or test, do nothing...')
c0bb7b8a74c23f921be8c3f93658d3fa62727ccc | 5,214 | py | Python | input_fn.py | ilyakhov/pytorch-word2vec @ bb9b0ed408a12e3652d2d897330292b7b93c7997 | ["MIT"] | stars: 12 | issues: null | forks: 1
import numpy as np
import torch
from torch.utils.data import Dataset
class CBOWDataSet(Dataset):
def __init__(self, corpus,
pipeline='hier_softmax',
nodes_index=None,
turns_index=None,
vocab_size=None,
neg_samples=None,
max_path_len=17,
window_size=6,
device=None,
skip_target=False,
dtype=torch.float32):
"""
:param corpus: the flat list of tokens
:param pipeline: 'hier_softmax'/'neg_sampling'
params for 'hierarchical softmax' pipeline:
:param nodes_index: index of nodes from leaf parent to the root
:param turns_index: the list of 1/-1 indices:
1 — the leaf is the left child of corresponding node
-1 — the leaf is the right child
:param vocab_size: is used for padding
:param max_path_len: length of the longest path from word (leaf)
to the root
params for 'negative sampling' pipeline:
:param neg_samples: the number of negative samples
:param window_size: word context size
:param device: cuda:0/cuda:1/cpu
:param dtype: torch float type
"""
self.window_size = window_size
self.step = window_size // 2
self.left_step = self.step
self.right_step = window_size - self.step
self.corpus = corpus[-self.left_step:] + corpus + \
corpus[:self.right_step]
self.device = device
self.dtype = dtype
self.pipeline = pipeline
if self.pipeline == 'hier_softmax':
self.nodes_index = nodes_index
self.max_path_len = max_path_len
self.turns_index = turns_index
self.vocab_size = vocab_size
self.skip_target = skip_target
elif self.pipeline == 'neg_sampling':
self.np_corpus = np.array(self.corpus)
self.neg_samples = neg_samples
else:
            raise NotImplementedError(
                f'unsupported pipeline: {self.pipeline}')
def __len__(self):
return len(self.corpus) - self.window_size
def __getitem__(self, item):
if self.pipeline == 'hier_softmax':
return self.__h_getitem(item)
elif self.pipeline == 'neg_sampling':
return self.__n_getitem(item)
else:
raise NotImplementedError(
f'__getitem__ for pipeline: {self.pipeline}')
def __h_getitem(self, i):
"""
Hierarchical softmax pipepline
:param i: item index
:return: torch tensors:
context, target, nodes, mask, turns_coeffs
"""
i += self.left_step
target = self.corpus[i]
context = self.corpus[(i - self.left_step):i]
context += self.corpus[(i + 1):(i + self.right_step + 1)]
try:
assert len(context) == self.window_size
except AssertionError:
raise Exception(
'Context size is not valid: context - '
'{0} has size - {1}; window_size - {2}'
.format(context, len(context), self.window_size)
)
nodes = self.nodes_index[target]
nodes_len = len(nodes)
mask = np.zeros(self.max_path_len)
mask[:nodes_len] = 1
pad_len = self.max_path_len - nodes_len
nodes = np.concatenate([nodes, np.ones(pad_len) * self.vocab_size])
# nodes = np.concatenate([nodes, np.ones(pad_len) * -1])
nodes = torch.tensor(nodes, dtype=torch.long, device=self.device)
turns_coeffs = self.turns_index.get(target)
turns_coeffs = np.concatenate([turns_coeffs, np.zeros(pad_len)])
turns_coeffs = torch.tensor(turns_coeffs, dtype=self.dtype,
device=self.device)
mask = torch.tensor(mask, dtype=self.dtype, device=self.device)
context = torch.tensor(context, dtype=torch.long, device=self.device)
target = torch.tensor(target, dtype=torch.long, device=self.device)
if self.skip_target is False:
return context, target, nodes, mask, turns_coeffs
else:
return context, nodes, mask, turns_coeffs
def __n_getitem(self, i):
"""
Negative sampling pipeline
:param i: item index
:return: torch tensors:
            context, target
"""
i += self.left_step
target = self.corpus[i]
context = self.corpus[(i - self.left_step):i]
context += self.corpus[(i + 1):(i + self.right_step + 1)]
try:
assert len(context) == self.window_size
except AssertionError:
raise Exception(
'Context size is not valid: context - '
'{0} has size - {1}; window_size - {2}'
.format(context, len(context), self.window_size)
)
context = torch.tensor(context, dtype=torch.long, device=self.device)
target = torch.tensor(target, dtype=torch.long, device=self.device)
return context, target
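# A hypothetical usage sketch for the simpler negative-sampling mode; the
# corpus is a toy stream of token ids (hierarchical softmax additionally
# needs nodes_index/turns_index built from a Huffman tree). Not part of the
# original module.
if __name__ == '__main__':
    from torch.utils.data import DataLoader
    corpus = [0, 1, 2, 3, 4, 5, 1, 2, 3, 0] * 10
    dataset = CBOWDataSet(corpus, pipeline='neg_sampling',
                          neg_samples=5, window_size=6, device='cpu')
    loader = DataLoader(dataset, batch_size=4)
    context, target = next(iter(loader))
    print(context.shape, target.shape)  # (4, 6) and (4,)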
c0c0cf39ce27029feb9aa7a105da2d19af17d25d | 1,563 | py | Python | delivery/services/external_program_service.py | mariya/arteria-delivery @ ec2fd79cfc6047a44dd251183b971535e9afd0dc | ["MIT"] | stars: null | issues: 18 | forks: 6
from tornado.process import Subprocess
from tornado import gen
from subprocess import PIPE
from delivery.models.execution import ExecutionResult, Execution
class ExternalProgramService(object):
"""
A service for running external programs
"""
@staticmethod
def run(cmd):
"""
Run a process and do not wait for it to finish
:param cmd: the command to run as a list, i.e. ['ls','-l', '/']
        :return: an instance of Execution
"""
p = Subprocess(cmd,
stdout=PIPE,
stderr=PIPE,
stdin=PIPE)
return Execution(pid=p.pid, process_obj=p)
@staticmethod
@gen.coroutine
def wait_for_execution(execution):
"""
Wait for an execution to finish
:param execution: instance of Execution
:return: an ExecutionResult for the execution
"""
status_code = yield execution.process_obj.wait_for_exit(raise_error=False)
out = execution.process_obj.stdout.read().decode('UTF-8')
err = execution.process_obj.stderr.read().decode('UTF-8')
return ExecutionResult(out, err, status_code)
@staticmethod
def run_and_wait(cmd):
"""
Run an external command and wait for it to finish
:param cmd: the command to run as a list, i.e. ['ls','-l', '/']
:return: an ExecutionResult for the execution
"""
execution = ExternalProgramService.run(cmd)
return ExternalProgramService.wait_for_execution(execution)
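# A hypothetical driver showing how the coroutine is consumed on a Tornado
# IOLoop; the service never blocks the loop while the child process runs.
# Not part of the original module.
if __name__ == '__main__':
    from tornado.ioloop import IOLoop

    async def demo():
        # run_and_wait resolves to an ExecutionResult once the process exits
        result = await ExternalProgramService.run_and_wait(['ls', '-l', '/'])
        print(result)

    IOLoop.current().run_sync(demo)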
c0c491c66e363814a85776c34ddeffc5e419a0b3 | 9,667 | py | Python | xraydb/materials.py | chemmatcars/XModFit @ 7d1298448d1908d78797fd67ce0a00ecfaf17629 | ["MIT"] | stars: null | issues: null | forks: null
import os
import numpy as np
from collections import namedtuple
from .chemparser import chemparse
from .xray import mu_elam, atomic_mass
from .utils import get_homedir
_materials = None
Material = namedtuple('Material', ('formula', 'density', 'name', 'categories'))
def get_user_materialsfile():
"""return name for user-specific materials.dat file
With $HOME being the users home directory, this will be
$HOME/.config/xraydb/materials.dat
"""
return os.path.join(get_homedir(), '.config', 'xraydb', 'materials.dat')
def _read_materials_db():
"""return _materials dictionary, creating it if needed"""
global _materials
if _materials is None:
# initialize materials table
_materials = {}
def read_materialsfile(fname):
with open(fname, 'r') as fh:
lines = fh.readlines()
for line in lines:
line = line.strip()
if len(line) > 2 and not line.startswith('#'):
words = [i.strip() for i in line.split('|')]
name = words[0].lower()
formula = None
if len(words) == 3: # older style
# "name | formula | density" or "name | density | formula"
iformula = 1
try:
density = float(words[2])
except ValueError:
density = float(words[1])
iformula = 2
formula = words[iformula]
categories = []
elif len(words) == 4: # newer style, with categories
density = float(words[1])
categories = [w.strip() for w in words[2].split(',')]
formula = words[3]
if formula is not None:
formula = formula.replace(' ', '')
_materials[name] = Material(formula, density, name, categories)
# first, read from standard list
local_dir, _ = os.path.split(__file__)
fname = os.path.join(local_dir, 'materials.dat')
if os.path.exists(fname):
read_materialsfile(fname)
# next, read from users materials file
fname = get_user_materialsfile()
if os.path.exists(fname):
read_materialsfile(fname)
return _materials
def material_mu(name, energy, density=None, kind='total'):
"""X-ray attenuation length (in 1/cm) for a material by name or formula
Args:
        name (str): chemical formula or name of material from materials list.
energy (float or ndarray): energy or array of energies in eV
density (None or float): material density (gr/cm^3).
kind (str): 'photo' or 'total' for whether to return the
photo-absorption or total cross-section ['total']
Returns:
absorption length in 1/cm
Notes:
1. material names are not case sensitive,
chemical compounds are case sensitive.
2. mu_elam() is used for mu calculation.
3. if density is None and material is known, that density will be used.
Examples:
>>> material_mu('H2O', 10000.0)
5.32986401658495
"""
global _materials
if _materials is None:
_materials = _read_materials_db()
formula = None
_density = None
mater = _materials.get(name.lower(), None)
if mater is None:
for key, val in _materials.items():
if name.lower() == val[0].lower(): # match formula
mater = val
break
# default to using passed in name as a formula
if formula is None:
if mater is None:
formula = name
else:
formula = mater.formula
if density is None and mater is not None:
density = mater.density
if density is None:
raise Warning('material_mu(): must give density for unknown materials')
mass_tot, mu = 0.0, 0.0
for elem, frac in chemparse(formula).items():
mass = frac * atomic_mass(elem)
mu += mass * mu_elam(elem, energy, kind=kind)
mass_tot += mass
return density*mu/mass_tot
def material_mu_components(name, energy, density=None, kind='total'):
"""material_mu_components: absorption coefficient (in 1/cm) for a compound
Args:
        name (str): chemical formula or name of material from materials list.
energy (float or ndarray): energy or array of energies in eV
density (None or float): material density (gr/cm^3).
        kind (str): 'photo' or 'total' for whether to
            return photo-absorption or total cross-section ['total']
Returns:
dict for constructing mu per element,
with elements 'mass' (total mass), 'density', and
'elements' (list of atomic symbols for elements in material).
For each element, there will be an item (atomic symbol as key)
with tuple of (stoichiometric fraction, atomic mass, mu)
Examples:
>>> xraydb.material_mu('quartz', 10000)
50.36774553547068
>>> xraydb.material_mu_components('quartz', 10000)
{'mass': 60.0843, 'density': 2.65, 'elements': ['Si', 'O'],
'Si': (1, 28.0855, 33.87943243018506), 'O': (2.0, 15.9994, 5.952824815297084)}
"""
global _materials
if _materials is None:
_materials = _read_materials_db()
mater = _materials.get(name.lower(), None)
if mater is None:
formula = name
if density is None:
raise Warning('material_mu(): must give density for unknown materials')
else:
formula = mater.formula
density = mater.density
out = {'mass': 0.0, 'density': density, 'elements':[]}
for atom, frac in chemparse(formula).items():
mass = atomic_mass(atom)
mu = mu_elam(atom, energy, kind=kind)
out['mass'] += frac*mass
out[atom] = (frac, mass, mu)
out['elements'].append(atom)
return out
def get_material(name):
"""look up material name, return formula and density
Args:
name (str): name of material or chemical formula
Returns:
chemical formula, density of material
Examples:
>>> xraydb.get_material('kapton')
('C22H10N2O5', 1.43)
See Also:
find_material()
"""
material = find_material(name)
if material is None:
return None
return material.formula, material.density
def find_material(name):
"""look up material name, return material instance
Args:
name (str): name of material or chemical formula
Returns:
material instance
Examples:
>>> xraydb.find_material('kapton')
Material(formula='C22H10N2O5', density=1.42, name='kapton', categories=['polymer'])
See Also:
get_material()
"""
global _materials
if _materials is None:
_materials = _read_materials_db()
mat = _materials.get(name.lower(), None)
if mat is not None:
return mat
for mat in _materials.values():
if mat.formula == name:
return mat
return None
def get_materials(force_read=False, categories=None):
"""get dictionary of all available materials
Args:
force_read (bool): whether to force a re-reading of the
materials database [False]
categories (list of strings or None): restrict results
to those that match category names
Returns:
dict with keys of material name and values of Materials instances
Examples:
>>> for name, m in xraydb.get_materials().items():
... print(name, m)
...
water H2O 1.0
lead Pb 11.34
aluminum Al 2.7
kapton C22H10N2O5 1.42
polyimide C22H10N2O5 1.42
nitrogen N 0.00125
argon Ar 0.001784
...
"""
global _materials
if force_read or _materials is None:
_materials = _read_materials_db()
return _materials
def add_material(name, formula, density, categories=None):
"""add a material to the users local material database
Args:
name (str): name of material
formula (str): chemical formula
density (float): density
categories (list of strings or None): list of category names
Returns:
None
Notes:
the data will be saved to $HOME/.config/xraydb/materials.dat
in the users home directory, and will be useful in subsequent sessions.
Examples:
>>> xraydb.add_material('becopper', 'Cu0.98e0.02', 8.3, categories=['metal'])
"""
global _materials
if _materials is None:
_materials = _read_materials_db()
formula = formula.replace(' ', '')
if categories is None:
categories = []
_materials[name.lower()] = Material(formula, float(density), name, categories)
fname = get_user_materialsfile()
if os.path.exists(fname):
fh = open(fname, 'r')
text = fh.readlines()
fh.close()
else:
parent, _ = os.path.split(fname)
if not os.path.exists(parent):
try:
os.makedirs(parent)
except FileExistsError:
pass
        text = ['# user-specific database of materials\n',
                '# name | density | categories | formula\n']
catstring = ', '.join(categories)
text.append(" %s | %g | %s | %s\n" % (name, density, catstring, formula))
with open(fname, 'w') as fh:
fh.write(''.join(text))
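# A short usage sketch tying the pieces together; the mylar entry is an
# illustrative assumption (PET, roughly 1.38 g/cm^3), and add_material writes
# to the per-user materials.dat described above. Not part of the original module.
if __name__ == '__main__':
    print(material_mu('H2O', 10000.0))   # ~5.33 1/cm (see docstring example)
    print(get_material('kapton'))        # ('C22H10N2O5', 1.43)
    add_material('mylar', 'C10H8O4', 1.38, categories=['polymer'])
    print(find_material('mylar'))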
c0c62d4eee91d75a65403ff152657c9c03089c57 | 1,069 | py | Python | client.py | simondlevy/sockets @ f49dd677b6508859f01c9c54101b38e802d6370e | ["MIT"] | stars: null | issues: null | forks: 1
#!/usr/bin/env python3
'''
Client script for simple client/server example
Copyright (C) Simon D. Levy 2021
MIT License
'''
from threading import Thread
from time import sleep
import socket
from struct import unpack
from header import ADDR, PORT
def comms(data):
'''
Communications thread
'''
    # Connect to the server
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.connect((ADDR, PORT))
# Loop until main thread quits
while True:
# Receive and unpack three floating-point numbers
data[0], data[1], data[2] = unpack('=fff', sock.recv(12))
# Yield to the main thread
sleep(0.001)
def main():
    # Create a list to receive the data
data = [0, 0, 0]
# Start the client on its own thread
t = Thread(target=comms, args=(data,))
    t.daemon = True  # setDaemon() is deprecated
t.start()
# Loop until user hits CTRL-C
while True:
try:
print('%3.3f %3.3f %3.3f ' % tuple(data))
sleep(.01)
except KeyboardInterrupt:
break
main()
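# For completeness, a minimal matching server sketch that packs three floats
# per message in the '=fff' layout this client unpacks. It assumes the same
# ADDR/PORT constants from header.py, is not part of the original repository
# file, and would be run as a separate script before starting the client.
import socket as _socket
from struct import pack
from time import sleep as _sleep
from header import ADDR as _ADDR, PORT as _PORT

def serve():
    server = _socket.socket(_socket.AF_INET, _socket.SOCK_STREAM)
    server.bind((_ADDR, _PORT))
    server.listen(1)
    conn, _ = server.accept()
    x = 0.0
    while True:
        conn.sendall(pack('=fff', x, 2 * x, 3 * x))  # three floats, 12 bytes
        x += 1
        _sleep(0.001)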
c0c6c96cefa40fab2593e89ee811e26649ffff4f | 15,126 | py | Python | old/compute_T.py | azhan137/cylinder_t_matrix @ 73a496c07dbbb02896b2baf727d452765da9aac3 | ["MIT"] | stars: 1 | issues: null | forks: 1 (issues/forks tracked on AmosEgel/cylinder_t_matrix @ 78f6607993af5babdda384969c45cf3ac6461257)
import numpy as np
from numpy.polynomial import legendre
from smuthi import spherical_functions as sf
import bessel_functions as bf
##Codebase for computing the T-matrix and its derivative with respect to height and radius for a cylindrical scatterer
# with circular cross-section in spherical coordinates.
#
# inputs:
# lmax: maximum orbital angular momentum expansion order, an integer
# Ntheta: number of sections for discretization
# geometric_params: radius (0) and height (1) in an array
# n0: refractive index of medium
# ns: refractive index of scatterer
# wavelength: excitation wavelength
# particle_type: shape of particle (cylinder, ellipsoid, etc)
def compute_T(lmax, Ntheta, geometric_params, n0, ns, wavelength, particle_type):
[Q, dQ] = compute_Q(lmax, Ntheta, geometric_params, n0, ns, wavelength, 3, particle_type)
[rQ, drQ] = compute_Q(lmax, Ntheta, geometric_params, n0, ns, wavelength, 1, particle_type)
Qinv = np.linalg.inv(Q)
    T = np.matmul(rQ, Qinv)  # matrix product; elementwise * on ndarrays would be wrong here
dT = np.zeros((np.shape(drQ)))
num_geometric_params = np.size(geometric_params)
for geometric_idx in np.arange(0, num_geometric_params):
dT[:, :, geometric_idx] = np.matmul(drQ[:, :, geometric_idx] - np.matmul(T, dQ[:, :, geometric_idx]), Qinv)
return T, dT
def compute_Q(lmax, Ntheta, geometric_params, n0, ns, wavelength, nu, particle_type):
    if particle_type == 'cylinder':  # 'is' compares identity, not string equality
a = geometric_params[0]
h = geometric_params[1]
[J11, J12, J21, J22, dJ11, dJ12, dJ21, dJ22] = compute_J_cyl(lmax, Ntheta, a, h, n0, ns, wavelength, nu)
    elif particle_type == 'ellipsoid':
print('ellipsoid not supported')
else:
print('particle type ' + particle_type + ' not supported.')
return 0
ki = 2*np.pi*n0/wavelength
ks = 2*np.pi*ns/wavelength
P = -1j * ki * (ks * J21 + ki * J12)
R = -1j * ki * (ks * J11 + ki * J22)
S = -1j * ki * (ks * J22 + ki * J11)
U = -1j * ki * (ks * J12 + ki * J21)
dP = -1j * ki * (ks * dJ21 + ki * dJ12)
dR = -1j * ki * (ks * dJ11 + ki * dJ22)
dS = -1j * ki * (ks * dJ22 + ki * dJ11)
dU = -1j * ki * (ks * dJ12 + ki * dJ21)
Q = np.block([
[P, R],
[S, U]
])
nmax = np.size(Q[:, 1])
num_geometric_params = np.size(geometric_params)
dQ = np.zeros((nmax, nmax, num_geometric_params))
for geometric_idx in np.arange(0, num_geometric_params):
dQ[:, :, geometric_idx] = np.block([
[dP[:, :, geometric_idx], dR[:, :, geometric_idx]],
[dS[:, :, geometric_idx], dU[:, :, geometric_idx]]
])
return Q, dQ
#function that computes the J surface integrals and their derivatives with respect to cylinder radius (a) and cylinder
# height (h). Expands up to a specified lmax, and approximates the integrals using gaussian quadrature with Ntheta
# points for the two integrals required.
# n0 is refractive index of medium
# ns is refractive index of scatterer
# wavelength is illumination wavelength
# nu = 1 or 3
# 1: b_li are the spherical Bessel functions of the first kind (j_n(x))
# involved in rQ and drQ computation
# 3: b_li are the spherical Hankel functions of the first kind (h_n(x))
# involved in Q and dQ computation
#care should be taken to expand lmax to sufficient order,
#where lmax should be greater than (ns-n_0)*max(2*a,h)/wavelength
def compute_J_cyl(lmax, Ntheta, a, h, n0, ns, wavelength, nu):
#dimension of final T-matrix is 2*nmax x 2*nmax for each individual matrix
nmax = int(lmax*(lmax+2))
#preallocate space for both J and dJ matrices of size nmax x nmax for J matrices
#and dJ matrices are nmax x nmax x 2
#dJ[:,:,0] is dJ/da
#dJ[:,:,1] is dJ/dh
J11 = np.zeros((nmax, nmax), dtype=np.complex_)
J12 = np.zeros((nmax, nmax), dtype=np.complex_)
J21 = np.zeros((nmax, nmax), dtype=np.complex_)
J22 = np.zeros((nmax, nmax), dtype=np.complex_)
dJ11 = np.zeros((nmax, nmax, 2), dtype=np.complex_)
dJ12 = np.zeros((nmax, nmax, 2), dtype=np.complex_)
dJ21 = np.zeros((nmax, nmax, 2), dtype=np.complex_)
dJ22 = np.zeros((nmax, nmax, 2), dtype=np.complex_)
#find the angle theta at which the corner of the cylinder is at
theta_edge = np.arctan(2*a/h)
#prepare gauss-legendre quadrature for interval of [-1,1] to perform numerical integral
[x_norm, wt_norm] = legendre.leggauss(Ntheta)
#rescale integration points and weights to match actual bounds:
# circ covers the circular surface of the cylinder (end caps)
# body covers the rectangular surface of the cylinder (body area)
#circ integral goes from 0 to theta_edge, b = theta_edge, a = 0
theta_circ = theta_edge/2*x_norm+theta_edge/2
wt_circ = theta_edge/2*wt_norm
#body integral goes from theta_edge to pi/2, b = pi/2, a = theta_edge
theta_body = (np.pi/2-theta_edge)/2*x_norm+(np.pi/2+theta_edge)/2
wt_body = (np.pi/2-theta_edge)/2*wt_norm
#merge the circ and body lists into a single map
theta_map = np.concatenate((theta_circ, theta_body), axis=0)
weight_map = np.concatenate((wt_circ, wt_body), axis=0)
#identify indices corresponding to the circular end caps and rectangular body
circ_idx = np.arange(0, Ntheta)
body_idx = np.arange(Ntheta, 2*Ntheta)
#k vectors of the light in medium (ki) and in scatterer (ks)
ki = 2*np.pi*n0/wavelength
ks = 2*np.pi*ns/wavelength
#precompute trig functions
ct = np.cos(theta_map)
st = np.sin(theta_map)
#normal vector for circular surface (circ) requires tangent
tant = np.tan(theta_map[circ_idx])
#normal vector for rectangular surface (body) requires cotangent
cott = 1/np.tan(theta_map[body_idx])
#precompute spherical angular polynomials
[p_lm, pi_lm, tau_lm] = sf.legendre_normalized(ct, st, lmax)
#radial coordinate of the surface, and the derivatives with respect to a and h
#r_c: radial coordinate of circular end cap
#r_b: radial coordinate of rectangular body
r_c = h/2/ct[circ_idx]
dr_c = r_c/h
r_b = a/st[body_idx]
dr_b = r_b/a
#merge radial coordiantes into a single vector
r = np.concatenate((r_c, r_b), axis=0)
#derivatives of the integration limits for performing derivatives
da_edge = 2*h/(h**2+4*a**2)
dh_edge = -2*a/(h**2+4*a**2)
#loop through each individual element of the J11, J12, J21, J22 matrices
for li in np.arange(1, lmax+1):
        #precompute Bessel functions and derivatives
b_li = bf.sph_bessel(nu, li, ki*r)
db_li = bf.d1Z_Z_sph_bessel(nu, li, ki*r)
db2_li = bf.d2Z_Z_sph_bessel(nu, li, ki*r)
d1b_li = bf.d1Z_sph_bessel(nu, li, ki*r)
for lp in np.arange(1, lmax+1):
#precompute bessel functions and derivatives
j_lp = bf.sph_bessel(1, lp, ks*r)
dj_lp = bf.d1Z_Z_sph_bessel(1, lp, ks*r)
dj2_lp = bf.d2Z_Z_sph_bessel(1, lp, ks*r)
d1j_lp = bf.d1Z_sph_bessel(1, lp, ks*r)
#compute normalization factor
lfactor = 1/np.sqrt(li*(li+1)*lp*(lp+1))
for mi in np.arange(-li, li+1):
#compute row index where element is placed
n_i = compute_n(lmax, 1, li, mi)-1
#precompute spherical harmonic functions
p_limi = p_lm[li][abs(mi)]
pi_limi = pi_lm[li][abs(mi)]
tau_limi = tau_lm[li][abs(mi)]
for mp in np.arange(-lp, lp+1):
#compute col index where element is placed
n_p = compute_n(lmax, 1, lp, mp)-1
#precompute spherical harmonic functions
p_lpmp = p_lm[lp][abs(mp)]
pi_lpmp = pi_lm[lp][abs(mp)]
tau_lpmp = tau_lm[lp][abs(mp)]
#compute selection rules that includes symmetries
sr_1122 = selection_rules(li, mi, lp, mp, 1)
sr_1221 = selection_rules(li, mi, lp, mp, 2)
#perform integral about phi analytically. This is roughly a sinc function
if mi == mp:
phi_exp = np.pi
else:
phi_exp = -1j*(np.exp(1j*(mp-mi)*np.pi)-1)/(mp-mi)
#for J11 and J22 integrals
if sr_1122 != 0:
prefactor = sr_1122*lfactor*phi_exp
ang = mp*pi_lpmp*tau_limi+mi*pi_limi*tau_lpmp
J11_r = -1j*weight_map*prefactor*r**2*st*j_lp*b_li*ang
J11[n_i, n_p] = np.sum(J11_r)
dJ11dr = 2*r*j_lp*b_li+r**2*(ks*d1j_lp*b_li+ki*d1b_li*j_lp)
dJ11[n_i, n_p, 0] = np.sum(-1j*prefactor*weight_map[body_idx]*st[body_idx]*dJ11dr[body_idx]*ang[body_idx]*dr_b)
dJ11[n_i, n_p, 1] = np.sum(-1j*prefactor*weight_map[circ_idx]*st[circ_idx]*dJ11dr[circ_idx]*ang[circ_idx]*dr_c)
J22_r = -1j*prefactor*weight_map*st/ki/ks*dj_lp*db_li*ang
J22_db = lp*(lp+1)*mi*pi_limi*p_lpmp
J22_dj = li*(li+1)*mp*pi_lpmp*p_limi
J22_t = -1j*prefactor*weight_map*st/ki/ks*(J22_db*j_lp*db_li+J22_dj*b_li*dj_lp)
J22[n_i, n_p] = sum(J22_r)+sum(J22_t[circ_idx]*tant)+sum(J22_t[body_idx]*-cott)
dJ22edge = st[Ntheta]*(J22_db[Ntheta]*j_lp[Ntheta]*db_li[Ntheta]+J22_dj[Ntheta]*dj_lp[Ntheta]*b_li[Ntheta])*(st[Ntheta]/ct[Ntheta]+ct[Ntheta]/st[Ntheta])
dJ22da1 = -1j/ki/ks*(ks*dj2_lp[body_idx]*db_li[body_idx]+ki*db2_li[body_idx]*dj_lp[body_idx])*dr_b*st[body_idx]*ang[body_idx]
dJ22da2 = 1j/ki/ks*cott*st[body_idx]*dr_b*(J22_db[body_idx]*(ks*d1j_lp[body_idx]*db_li[body_idx]+ki*j_lp[body_idx]*db2_li[body_idx])+J22_dj[body_idx]*(ki*d1b_li[body_idx]*dj_lp[body_idx]+ks*dj2_lp[body_idx]*b_li[body_idx]))
dJ22dh1 = -1j/ki/ks*(ks*dj2_lp[circ_idx]*db_li[circ_idx]+ki*db2_li[circ_idx]*dj_lp[circ_idx])*dr_c*st[circ_idx]*ang[circ_idx]
dJ22dh2 = -1j/ki/ks*tant*st[circ_idx]*dr_c*(J22_db[circ_idx]*(ks*d1j_lp[circ_idx]*db_li[circ_idx]+ki*j_lp[circ_idx]*db2_li[circ_idx])+J22_dj[circ_idx]*(ki*d1b_li[circ_idx]*dj_lp[circ_idx]+ks*dj2_lp[circ_idx]*b_li[circ_idx]))
dJ22[n_i, n_p, 0] = np.sum(prefactor*weight_map[body_idx]*dJ22da1)+np.sum(prefactor*weight_map[body_idx]*dJ22da2)+prefactor*dJ22edge*da_edge
dJ22[n_i, n_p, 1] = np.sum(prefactor*weight_map[circ_idx]*dJ22dh1)+np.sum(prefactor*weight_map[circ_idx]*dJ22dh2)+prefactor*dJ22edge*dh_edge
#for J12 and J21 integrals
if sr_1221 != 0:
prefactor = sr_1221*lfactor*phi_exp
ang = mi*mp*pi_limi*pi_lpmp+tau_limi*tau_lpmp
J12_r = prefactor*weight_map/ki*r*st*j_lp*db_li*ang
J12_t = prefactor*weight_map/ki*r*st*li*(li+1)*j_lp*b_li*p_limi*tau_lpmp
J12[n_i, n_p] = np.sum(J12_r)+np.sum(J12_t[circ_idx]*tant)+np.sum(J12_t[body_idx]*-cott)
dJ12edge = li*(li+1)/ki/r[Ntheta]*st[Ntheta]*j_lp[Ntheta]*b_li[Ntheta]*tau_lpmp[Ntheta]*p_limi[Ntheta]*(st[Ntheta]/ct[Ntheta]+ct[Ntheta]/st[Ntheta])
dJ12da1 = dr_b/ki*(j_lp[body_idx]*db_li[body_idx]+r_b*(ks*d1j_lp[body_idx]*b_li[body_idx]+ki*j_lp[body_idx]*d1b_li[body_idx]))*st[body_idx]*ang[body_idx]
dJ12da2 = -li*(li+1)/ki*dr_b*(j_lp[body_idx]*b_li[body_idx]+r_b*(ks*d1j_lp[body_idx]*b_li[body_idx]+ki*j_lp[body_idx]*d1b_li[body_idx]))*cott*st[body_idx]*tau_lpmp[body_idx]*p_limi[body_idx]
dJ12dh1 = dr_c/ki*(j_lp[circ_idx]*db_li[circ_idx]+r_c*(ks*d1j_lp[circ_idx]*b_li[circ_idx]+ki*j_lp[circ_idx]*d1b_li[circ_idx]))*st[circ_idx]*ang[circ_idx]
dJ12dh2 = li*(li+1)/ki*dr_c*(j_lp[circ_idx]*b_li[circ_idx]+r_c*(ks*d1j_lp[circ_idx]*b_li[circ_idx]+ki*j_lp[circ_idx]*d1b_li[circ_idx]))*tant*st[circ_idx]*tau_lpmp[circ_idx]*p_limi[circ_idx]
dJ12[n_i, n_p, 0] = np.sum(prefactor*weight_map[body_idx]*dJ12da1)+np.sum(prefactor*weight_map[body_idx]*dJ12da2)+prefactor*dJ12edge*da_edge
                        dJ12[n_i, n_p, 1] = np.sum(prefactor*weight_map[circ_idx]*dJ12dh1)+np.sum(prefactor*weight_map[circ_idx]*dJ12dh2)+prefactor*dJ12edge*dh_edge
J21_r = -prefactor*weight_map/ks*r*st*dj_lp*b_li*ang
J21_t = -prefactor*weight_map/ks*r*st*lp*(lp+1)*j_lp*b_li*p_lpmp*tau_limi
J21[n_i, n_p] = np.sum(J21_r)+np.sum(J21_t[circ_idx]*tant)+np.sum(J21_t[body_idx]*-cott)
dJ21edge = -lp*(lp+1)/ks/r[Ntheta]*st[Ntheta]*j_lp[Ntheta]*b_li[Ntheta]*tau_lpmp[Ntheta]*p_limi[Ntheta]*(st[Ntheta]/ct[Ntheta]+ct[Ntheta]/st[Ntheta])
dJ21da1 = -dr_b/ks*(b_li[body_idx]*dj_lp[body_idx]+r_b*(ki*d1b_li[body_idx]*dj_lp[body_idx]+ks*dj2_lp[body_idx]*b_li[body_idx]))*st[body_idx]*ang[body_idx]
dJ21da2 = lp*(lp+1)/ks*dr_b*(j_lp[body_idx]*b_li[body_idx]+r_b*(ks*d1j_lp[body_idx]*b_li[body_idx]+ki*d1b_li[body_idx]*j_lp[body_idx]))*cott*st[body_idx]*tau_limi[body_idx]*p_lpmp[body_idx]
dJ21dh1 = -dr_c/ks*(b_li[circ_idx]*dj_lp[circ_idx]+r_c*(ki*d1b_li[circ_idx]*dj_lp[circ_idx]+ks*dj2_lp[circ_idx]*b_li[circ_idx]))*st[circ_idx]*ang[circ_idx]
dJ21dh2 = -lp*(lp+1)/ks*dr_c*(j_lp[circ_idx]*b_li[circ_idx]+r_c*(ks*d1j_lp[circ_idx]*b_li[circ_idx]+ki*d1b_li[circ_idx]*j_lp[circ_idx]))*tant*st[circ_idx]*tau_limi[circ_idx]*p_lpmp[circ_idx]
dJ21[n_i, n_p, 0] = np.sum(prefactor*weight_map[body_idx]*dJ21da1)+np.sum(prefactor*weight_map[body_idx]*dJ21da2)+prefactor*dJ21edge*da_edge
dJ21[n_i, n_p, 1] = np.sum(prefactor*weight_map[circ_idx]*dJ21dh1)+np.sum(prefactor*weight_map[circ_idx]*dJ21dh2)+prefactor*dJ21edge*dh_edge
return J11, J12, J21, J22, dJ11, dJ12, dJ21, dJ22
#compute n index (single index) for matrix element given its p (polarization), l (orbital angular momementum index),
# and m (azimuthal angular momentum index.
def compute_n(lmax, p, l, m):
return (p-1)*lmax*(lmax+2)+(l-1)*(l+1)+m+l+1
#selection rules taking into account different symmetries for an axisymmetric particle
def selection_rules(li, mi, lp, mp, diag_switch):
if diag_switch == 1:
return np.float_power(-1, mi)*(1+np.float_power(-1, mp-mi))*(1+(-1)**(lp+li+1))
elif diag_switch == 2:
return np.float_power(-1, mi)*(1+np.float_power(-1, mp-mi))*(1+(-1)**(lp+li))
else:
return 0
if __name__ == '__main__':
import matplotlib.pyplot as plt
cyl_params = np.array([500,860])
[J11, J12, J21, J22, dJ11, dJ12, dJ21, dJ22] = compute_J_cyl(3,30,200,460,1,1.52,1000,3)
[T, dT] = compute_T(6,30,cyl_params,1,4,1000,'cylinder')
img1 = plt.imshow(np.abs(T))
plt.colorbar()
plt.title('T')
plt.show()
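# A companion sanity plot for the shape derivatives; a sketch assuming dT
# follows the same (n, n, parameter) layout as the dJ arrays returned above.
img2 = plt.imshow(np.abs(dT[:, :, 0]))
plt.colorbar()
plt.title('dT, first shape parameter')
plt.show()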
[quality-signal metrics for the preceding record omitted]

=== record c0c7d98ec94365b9cf9f0e166a19f7b2371bc3ed ===
file: run_tests.py (Python, 982 bytes)
repo: aquarioos/dvik-print @ b897936168dab51c9e0f9fd84993065428896be4
license: MIT | stars: 1 (2018-09-19)
# -*- coding: utf8 -*-
from __future__ import division, absolute_import, print_function
import os
import sys
import datetime as dt
import dvik_print as dvp
if __name__ == '__main__':
print(sys.version)
O = {
'lista': ['el1', 'el2', 1, 2, 3, 4, None, False],
'zbiór': {1, 2, 1, 2, 'a', 'a', 'b', 'b'},
'krotka': ('oto', 'elementy', 'naszej', 'krotki'),
('krotka', 'klucz'): {
'klucz1': ['jakaś', 'lista', 123],
'klucz2': dt.datetime.now(),
'klucz3': dt
},
(123, 'asd'): {123, 234, 345},
(123, 'asd1'): (123, 234, 345)
}
# declare a dvp.PrettyPrint object
pp = dvp.PrettyPrint(tab=2, head=3, tail=2, max_str_len=50, show_line=True, filename=__file__)
# the object is callable; calling it like this
# prints object O to standard output
pp(O, var='zmienna')
# default values can also be used
pp_domyslny = dvp.PrettyPrint()
pp_domyslny(O)
[quality-signal metrics for the preceding record omitted]

=== record c0c85207554af0054a2d3560e6e8d9cb080608eb ===
file: nwb_conversion_tools/datainterfaces/ecephys/basesortingextractorinterface.py (Python, 6,200 bytes)
repo: miketrumpis/nwb-conversion-tools @ 4d5c270b70eb4f1c09f98a6c04b51ccdf20336c1
license: BSD-3-Clause
"""Authors: Cody Baker and Ben Dichter."""
from abc import ABC
from pathlib import Path
import spikeextractors as se
import numpy as np
from pynwb import NWBFile, NWBHDF5IO
from pynwb.ecephys import SpikeEventSeries
from jsonschema import validate
from ...basedatainterface import BaseDataInterface
from ...utils.json_schema import (
get_schema_from_hdmf_class,
get_base_schema,
get_schema_from_method_signature,
fill_defaults,
)
from ...utils.common_writer_tools import default_export_ops, default_export_ops_schema
from ...utils import export_ecephys_to_nwb
from .baserecordingextractorinterface import BaseRecordingExtractorInterface, map_si_object_to_writer, OptionalPathType
class BaseSortingExtractorInterface(BaseDataInterface, ABC):
"""Primary class for all SortingExtractor intefaces."""
SX = None
def __init__(self, **source_data):
super().__init__(**source_data)
self.sorting_extractor = self.SX(**source_data)
self.writer_class = map_si_object_to_writer(self.sorting_extractor)(self.sorting_extractor)
def get_metadata_schema(self):
"""Compile metadata schema for the RecordingExtractor."""
metadata_schema = super().get_metadata_schema()
# Initiate Ecephys metadata
metadata_schema["properties"]["Ecephys"] = get_base_schema(tag="Ecephys")
metadata_schema["properties"]["Ecephys"]["required"] = []
metadata_schema["properties"]["Ecephys"]["properties"] = dict(
UnitProperties=dict(
type="array",
minItems=0,
renderForm=False,
items={"$ref": "#/properties/Ecephys/properties/definitions/UnitProperties"},
),
)
# Schema definition for arrays
metadata_schema["properties"]["Ecephys"]["properties"]["definitions"] = dict(
UnitProperties=dict(
type="object",
additionalProperties=False,
required=["name"],
properties=dict(
name=dict(type="string", description="name of this units column"),
description=dict(type="string", description="description of this units column"),
),
),
)
return metadata_schema
def subset_sorting(self):
"""
Restrict the sorting extractor to a stub (a truncated subset) for faster test conversions.
"""
self.writer_class = map_si_object_to_writer(self.sorting_extractor)(
self.sorting_extractor,
stub=True,
)
def run_conversion(
self,
nwbfile: NWBFile,
metadata: dict,
stub_test: bool = False,
write_ecephys_metadata: bool = False,
save_path: OptionalPathType = None,
overwrite: bool = False,
**kwargs,
):
"""
Primary function for converting the data in a SortingExtractor to the NWB standard.
Parameters
----------
nwbfile: NWBFile
nwb file to which the sorting information is to be added
metadata: dict
metadata info for constructing the nwb file (optional).
Should be of the format
metadata['Ecephys']['UnitProperties'] = dict(name=my_name, description=my_description)
stub_test: bool, optional (default False)
If True, will truncate the data to run the conversion faster and take up less memory.
write_ecephys_metadata: bool (optional, defaults to False)
Write electrode information contained in the metadata.
save_path: PathType
Required if an nwbfile is not passed. Must be the path to the nwbfile
being appended, otherwise one is created and written.
overwrite: bool
If using save_path, whether or not to overwrite the NWBFile if it already exists.
skip_unit_features: list
list of unit feature names to skip writing to units table.
skip_unit_properties: list
list of unit properties to skip writing to units table.
unit_property_descriptions: dict
custom descriptions for unit properties:
>>> dict(prop_name='description')
The other way to add custom descriptions is to override the default metadata:
>>> metadata = self.get_metadata()
>>> metadata["Ecephys"] = dict()
>>> metadata["Ecephys"].update(UnitProperties=[dict(name='prop_name1', description='description1'),
>>> dict(name='prop_name2', description='description2')])
"""
if stub_test:
self.subset_sorting()
if write_ecephys_metadata and "Ecephys" in metadata:
class TempEcephysInterface(BaseRecordingExtractorInterface):
RX = se.NumpyRecordingExtractor
n_channels = max([len(x["data"]) for x in metadata["Ecephys"]["Electrodes"]])
temp_ephys = TempEcephysInterface(timeseries=np.array(range(n_channels)), sampling_frequency=1)
temp_ephys.run_conversion(nwbfile=nwbfile, metadata=metadata, write_electrical_series=False)
conversion_opts = default_export_ops()
conversion_opts.update(**kwargs)
# construct unit property descriptions:
property_descriptions = dict()
for metadata_column in metadata.get("Ecephys", dict()).get("UnitProperties", []):
property_descriptions.update({metadata_column["name"]: metadata_column["description"]})
conversion_opts["unit_property_descriptions"].update(property_descriptions)
conversion_opt_schema = default_export_ops_schema()
validate(instance=conversion_opts, schema=conversion_opt_schema)
self.writer_class.add_to_nwb(nwbfile, metadata, **conversion_opts)
if save_path is not None:
if overwrite:
if Path(save_path).exists():
Path(save_path).unlink()
with NWBHDF5IO(str(save_path), mode="w") as io:
io.write(self.writer_class.nwbfile)
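A hedged subclass sketch, assuming any spikeextractors sorting class can serve as SX; the class and file names here are illustrative, not part of this module:

class NpzSortingInterface(BaseSortingExtractorInterface):
    SX = se.NpzSortingExtractor  # any spikeextractors sorting extractor class

interface = NpzSortingInterface(file_path="sorting.npz")
schema = interface.get_metadata_schema()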
[quality-signal metrics for the preceding record omitted]

=== record c0c8cb69c19ab4dd40d043117a7822abefc679ef ===
file: buildscripts/resmokelib/testing/testcases/cpp_libfuzzer_test.py (Python, 1,711 bytes)
repo: benety/mongo @ 203430ac9559f82ca01e3cbb3b0e09149fec0835
license: Apache-2.0
"""The libfuzzertest.TestCase for C++ libfuzzer tests."""
import datetime
import os
from buildscripts.resmokelib import core
from buildscripts.resmokelib import utils
from buildscripts.resmokelib.testing.fixtures import interface as fixture_interface
from buildscripts.resmokelib.testing.testcases import interface
class CPPLibfuzzerTestCase(interface.ProcessTestCase):
"""A C++ libfuzzer test to execute."""
REGISTERED_NAME = "cpp_libfuzzer_test"
DEFAULT_TIMEOUT = datetime.timedelta(hours=1)
def __init__( # pylint: disable=too-many-arguments
self, logger, program_executable, program_options=None, runs=1000000,
corpus_directory_stem="corpora"):
"""Initialize the CPPLibfuzzerTestCase with the executable to run."""
interface.ProcessTestCase.__init__(self, logger, "C++ libfuzzer test", program_executable)
self.program_executable = program_executable
self.program_options = utils.default_if_none(program_options, {}).copy()
self.runs = runs
self.corpus_directory = f"{corpus_directory_stem}/corpus-{self.short_name()}"
self.merged_corpus_directory = f"{corpus_directory_stem}-merged/corpus-{self.short_name()}"
os.makedirs(self.corpus_directory, exist_ok=True)
def _make_process(self):
default_args = [
self.program_executable,
"-max_len=100000",
"-rss_limit_mb=5000",
"-max_total_time=3600", # 1 hour is the maximum amount of time to allow a fuzzer to run
f"-runs={self.runs}",
self.corpus_directory,
]
return core.programs.make_process(self.logger, default_args, **self.program_options)
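A construction sketch; the logger and executable path are illustrative stand-ins for what resmoke normally supplies:

import logging
case = CPPLibfuzzerTestCase(logging.getLogger("fuzzer"), "build/my_fuzzer_test", runs=1000)
process = case._make_process()  # builds the libfuzzer command wrapper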
[quality-signal metrics for the preceding record omitted]

=== record c0c9967167f2ebbfb12ea4280bc6aa6f0ee2cebd ===
file: data_curation/genome_annotations/preprocess_SEA.py (Python, 1,278 bytes)
repo: talkowski-lab/rCNV2 @ fcc1142d8c13b58d18a37fe129e9bb4d7bd6641d
license: MIT | stars: 7 (2021-01-28 to 2022-02-07) | issues: 1 (2021-03-02) | forks: 3 (2021-02-21 to 2021-12-22)
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (c) 2020 Ryan L. Collins <rlcollins@g.harvard.edu>
# and the Talkowski Laboratory
# Distributed under terms of the MIT license.
"""
Parse simple SEA super-enhancer BED by cell types
"""
import argparse
import csv
import subprocess
def main():
"""
Main block
"""
# Parse command line arguments and options
parser = argparse.ArgumentParser(
description=__doc__,
formatter_class=argparse.RawDescriptionHelpFormatter)
parser.add_argument('bed', help='Path to BED4 of super enhancers')
parser.add_argument('outdir', help='Output directory')
args = parser.parse_args()
outfiles = {}
with open(args.bed) as fin:
for chrom, start, end, source in csv.reader(fin, delimiter='\t'):
source = source.replace(' ', '_').replace('(', '').replace(')', '')
if source not in outfiles.keys():
outfiles[source] = open('{}/SEA.{}.bed'.format(args.outdir, source), 'w')
outfiles[source].write('\t'.join([chrom, start, end]) + '\n')
for outfile in outfiles.values():
outpath = outfile.name
outfile.close()
subprocess.run(['bgzip', '-f', outpath])
if __name__ == '__main__':
main()
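A minimal end-to-end sketch with synthetic input; file names are illustrative and bgzip must be on PATH:

import pathlib, subprocess
pathlib.Path("sea_demo.bed").write_text("chr1\t100\t200\tBrain (fetal)\n")
subprocess.run(["python", "preprocess_SEA.py", "sea_demo.bed", "."], check=True)
# expected output: ./SEA.Brain_fetal.bed.gz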
[quality-signal metrics for the preceding record omitted]

=== record c0cba6784c6a4d07543a90ca7bc4b5a773c81fe7 ===
file: src/transforms/imageCropDivide/dev/generate_nodenk.py (Python, 2,461 bytes)
repo: MrLixm/Foundry_Nuke @ 078115043b6a4c09bdcf1b5031e995ef296bd604
license: Apache-2.0
"""
python>3
"""
import os.path
import re
from pathlib import Path
VERSION = 7
BASE = r"""
set cut_paste_input [stack 0]
version 12.2 v5
push $cut_paste_input
Group {
name imageCropDivide
tile_color 0x5c3d84ff
note_font_size 25
note_font_color 0xffffffff
selected true
xpos 411
ypos -125
addUserKnob {20 User}
addUserKnob {3 width_max}
addUserKnob {3 height_max -STARTLINE}
addUserKnob {3 width_source}
addUserKnob {3 height_source -STARTLINE}
addUserKnob {26 "" +STARTLINE}
addUserKnob {22 icd_script l "Copy Setup to ClipBoard" T "$SCRIPT$" +STARTLINE}
addUserKnob {26 info l " " T "press ctrl+v in the nodegraph after clicking the above button"}
addUserKnob {20 Info}
addUserKnob {26 infotext l "" +STARTLINE T "2022 - Liam Collod<br> Visit <a style=\"color:#fefefe;\" href=\"https://github.com/MrLixm/Foundry_Nuke/tree/main/src/transforms/imageCropDivide\">the GitHub repo</a> "}
addUserKnob {26 "" +STARTLINE}
addUserKnob {26 versiontext l "" T "version $VERSION$"}
}
Input {
inputs 0
name Input1
xpos 0
}
Output {
name Output1
xpos 0
ypos 300
}
end_group
"""
MODULE_BUTTON_PATH = Path("..") / "button.py"
NODENK_PATH = Path("..") / "node.nk"
def increment_version():
this = Path(__file__)
this_code = this.read_text(encoding="utf-8")
version = re.search(r"VERSION\s*=\s*(\d+)", this_code)
assert version, f"Can't find <VERSION> in <{this}> !"
new_version = int(version.group(1)) + 1
new_code = f"VERSION = {new_version}"
new_code = this_code.replace(version.group(0), str(new_code))
this.write_text(new_code, encoding="utf-8")
print(f"[{__name__}][increment_version] Incremented {this} to {new_version}.")
return
def run():
increment_version()
btnscript = MODULE_BUTTON_PATH.read_text(encoding="utf-8")
# sanitize for nuke
btnscript = btnscript.replace("\\", r'\\')
btnscript = btnscript.split("\n")
btnscript = r"\n".join(btnscript)
btnscript = btnscript.replace("\"", r'\"')
btnscript = btnscript.replace("{", r'\{')
btnscript = btnscript.replace("}", r'\}')
node_content = BASE.replace("$SCRIPT$", btnscript)
node_content = node_content.replace("$VERSION$", str(VERSION+1))
NODENK_PATH.write_text(node_content, encoding="utf-8")
print(f"[{__name__}][run] node.nk file written to {NODENK_PATH}")
print(f"[{__name__}][run] Finished.")
return
if __name__ == '__main__':
# print(__file__)
run()
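The escaping in run() is the fragile part; the same transform as a standalone helper (name hypothetical), for eyeballing the substitutions:

def sanitize_for_nuke(src: str) -> str:
    src = src.replace("\\", r"\\")           # escape backslashes first
    src = r"\n".join(src.split("\n"))        # fold newlines into literal \n
    src = src.replace('"', r'\"')            # escape double quotes
    return src.replace("{", r"\{").replace("}", r"\}")  # escape TCL braces

print(sanitize_for_nuke('print("a {b}")'))  # -> print(\"a \{b\}\")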
[quality-signal metrics for the preceding record omitted]

=== record c0cd5d7d340b27b3217620ef4b12a1391841820b ===
file: workflow/tests/test_experiment_qc.py (Python, 2,294 bytes)
repo: JAMKuttan/chipseq_analysis @ f8e4853bfdb4de8540026ae0b23235d72a1114ad
license: MIT
#!/usr/bin/env python3
import pytest
import os
import pandas as pd
from io import StringIO
import experiment_qc
test_output_path = os.path.dirname(os.path.abspath(__file__)) + \
'/../output/experimentQC/'
DESIGN_STRING = """sample_id\texperiment_id\tbiosample\tfactor\ttreatment\treplicate\tcontrol_id\tbam_reads
A_1\tA\tLiver\tH3K27ac\tNone\t1\tB_1\tA_1.bam
A_2\tA\tLiver\tH3K27ac\tNone\t2\tB_2\tA_2.bam
B_1\tB\tLiver\tInput\tNone\t1\tB_1\tB_1.bam
B_2\tB\tLiver\tInput\tNone\t2\tB_2\tB_2.bam
"""
@pytest.fixture
def design_bam():
design_file = StringIO(DESIGN_STRING)
design_df = pd.read_csv(design_file, sep="\t")
return design_df
@pytest.mark.unit
def test_check_update_controls(design_bam):
new_design = experiment_qc.update_controls(design_bam)
assert new_design.loc[0, 'control_reads'] == "B_1.bam"
@pytest.mark.singleend
def test_coverage_singleend():
assert os.path.exists(os.path.join(test_output_path, 'sample_mbs.npz'))
assert os.path.exists(os.path.join(test_output_path, 'coverage.pdf'))
@pytest.mark.singleend
def test_spearman_singleend():
assert os.path.exists(os.path.join(test_output_path, 'heatmap_SpearmanCorr.pdf'))
@pytest.mark.singleend
def test_pearson_singleend():
assert os.path.exists(os.path.join(test_output_path, 'heatmap_PearsonCorr.pdf'))
@pytest.mark.singleend
def test_fingerprint_singleend():
assert os.path.exists(os.path.join(test_output_path, 'ENCLB144FDT_fingerprint.pdf'))
assert os.path.exists(os.path.join(test_output_path, 'ENCLB831RUI_fingerprint.pdf'))
@pytest.mark.pairdend
def test_coverage_pairedend():
assert os.path.exists(os.path.join(test_output_path, 'sample_mbs.npz'))
assert os.path.exists(os.path.join(test_output_path, 'coverage.pdf'))
@pytest.mark.pairdend
def test_spearman_pairedend():
assert os.path.exists(os.path.join(test_output_path, 'heatmap_SpearmanCorr.pdf'))
@pytest.mark.pairdend
def test_pearson_pairedend():
assert os.path.exists(os.path.join(test_output_path, 'heatmap_PearsonCorr.pdf'))
@pytest.mark.pairdend
def test_fingerprint_pairedend():
assert os.path.exists(os.path.join(test_output_path, 'ENCLB568IYX_fingerprint.pdf'))
assert os.path.exists(os.path.join(test_output_path, 'ENCLB637LZP_fingerprint.pdf'))
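These tests are selected by pytest markers; a hedged invocation sketch using the marker names from the decorators above:

import subprocess
subprocess.run(["pytest", "-m", "unit", "workflow/tests/test_experiment_qc.py"], check=False)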
[quality-signal metrics for the preceding record omitted]

=== record c0d1e420d8a5ef2c04e4e14f531037003c9ed4f0 ===
file: native_client_sdk/src/build_tools/tests/test_generate_make.py (Python, 3,626 bytes)
repos: junmin-zhu/chromium-rivertrail @ eb1a57aca71fe68d96e48af8998dcfbe45171ee1 (stars: 5, 2018-03-10 to 2021-07-26); quisquous/chromium @ b25660e05cddc9d0c3053b3514f07037acc69a10 (issues: 1); jianglong0156/chromium.src @ d496dfeebb0f282468827654c2b3769b3378c087 (forks: 6)
license: BSD-3-Clause
#!/usr/bin/env python
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import copy
import datetime
import os
import posixpath
import subprocess
import sys
import unittest
SCRIPT_DIR = os.path.dirname(os.path.abspath(__file__))
BUILD_TOOLS_DIR = os.path.dirname(SCRIPT_DIR)
sys.path.append(BUILD_TOOLS_DIR)
import generate_make
BASIC_DESC = {
'TOOLS': ['newlib', 'glibc'],
'TARGETS': [
{
'NAME' : 'hello_world',
'TYPE' : 'main',
'SOURCES' : ['hello_world.c'],
},
],
'DEST' : 'examples'
}
class TestFunctions(unittest.TestCase):
def testPatsubst(self):
val = generate_make.GenPatsubst(32, 'FOO', 'cc', 'CXX')
gold = '$(patsubst %.cc,%_32.o,$(FOO_CXX))'
self.assertEqual(val, gold)
def testSetVar(self):
val = generate_make.SetVar('FOO',[])
self.assertEqual(val, 'FOO:=\n')
val = generate_make.SetVar('FOO',['BAR'])
self.assertEqual(val, 'FOO:=BAR\n')
items = ['FOO_' + 'x' * (i % 13) for i in range(50)]
for i in range(10):
wrapped = generate_make.SetVar('BAR_' + 'x' * i, items)
lines = wrapped.split('\n')
for line in lines:
if len(line) > 79:
self.assertEqual(line, 'Less than 80 at ' + str(i))
class TestValidateFormat(unittest.TestCase):
def _append_result(self, msg):
self.result += msg
return self.result
def _validate(self, src, msg):
format = generate_make.DSC_FORMAT
self.result = ''
result = generate_make.ValidateFormat(src, format,
lambda msg: self._append_result(msg))
if msg:
self.assertEqual(self.result, msg)
else:
self.assertEqual(result, True)
def testGoodDesc(self):
testdesc = copy.deepcopy(BASIC_DESC)
self._validate(testdesc, None)
def testMissingKey(self):
testdesc = copy.deepcopy(BASIC_DESC)
del testdesc['TOOLS']
self._validate(testdesc, 'Missing required key TOOLS.')
testdesc = copy.deepcopy(BASIC_DESC)
del testdesc['TARGETS'][0]['NAME']
self._validate(testdesc, 'Missing required key NAME.')
def testNonEmpty(self):
testdesc = copy.deepcopy(BASIC_DESC)
testdesc['TOOLS'] = []
self._validate(testdesc, 'Expected non-empty value for TOOLS.')
testdesc = copy.deepcopy(BASIC_DESC)
testdesc['TARGETS'] = []
self._validate(testdesc, 'Expected non-empty value for TARGETS.')
testdesc = copy.deepcopy(BASIC_DESC)
testdesc['TARGETS'][0]['NAME'] = ''
self._validate(testdesc, 'Expected non-empty value for NAME.')
def testBadValue(self):
testdesc = copy.deepcopy(BASIC_DESC)
testdesc['TOOLS'] = ['newlib', 'glibc', 'badtool']
self._validate(testdesc, 'Value badtool not expected in TOOLS.')
def testExpectStr(self):
testdesc = copy.deepcopy(BASIC_DESC)
testdesc['TOOLS'] = ['newlib', True, 'glibc']
self._validate(testdesc, 'Value True not expected in TOOLS.')
def testExpectList(self):
testdesc = copy.deepcopy(BASIC_DESC)
testdesc['TOOLS'] = 'newlib'
self._validate(testdesc, 'Key TOOLS expects LIST not STR.')
# TODO(noelallen): Add test which generates a real make and runs it.
def main():
suite = unittest.defaultTestLoader.loadTestsFromModule(sys.modules[__name__])
result = unittest.TextTestRunner(verbosity=2).run(suite)
return int(not result.wasSuccessful())
if __name__ == '__main__':
sys.exit(main())
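ValidateFormat can also be exercised directly, mirroring the _validate helper above; a sketch where error messages accumulate via the callback:

errors = []
ok = generate_make.ValidateFormat(BASIC_DESC, generate_make.DSC_FORMAT,
                                  lambda msg: errors.append(msg))
print(ok, errors)  # expect: True, []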
[quality-signal metrics for the preceding record omitted]

=== record c0d317f2e8f8665da9e599f1dc02201ed251fea1 ===
file: Curso_em_Video_py3/ex069.py (Python, 568 bytes)
repo: Rodrigo98Matos/Projetos_py @ 6428e2c09d28fd8a717743f4434bc788e7d7d3cc
license: MIT | stars: 1 (2021-05-11)
a = b = c = 0
while True:
flag = ''
i = -1
s = ''
while i < 0:
i = int(input('age:\t'))
while s != 'M' and s != 'F':
s = str(input('Sex [M] [F]:\t')).strip().upper()[0]
if i > 18:
a += 1
if s == 'M':
b += 1
elif i < 20:
c += 1
while flag != 'S' and flag != 'N':
flag = str(input('Register more people? [S] [N]\t')).strip().upper()[0]
if flag == 'N':
break
print(f'{a} people are over 18!\n{b} are men!\n{c} are women under 20!')
[quality-signal metrics for the preceding record omitted]

=== record c0d3bab4c52e7bb6548865457b438ad24de1affe ===
file: hatfieldcmr/ingest/name.py (Python, 6,152 bytes)
repo: bcgov/nr_rfc_processing @ 7e414b97a29ed5bae8ba3c6decea39733be9a2db (issues: 6, 2021-02-08 to 2022-01-30; forks: 2 via bcgov/rfc_processing, 2021-02-22 to 2021-05-03)
license: Apache-2.0
"""
Contains data ingest related functions
"""
import re
import os.path
from dateutil.parser import parse as dateparser
import typing
from typing import Dict
import cmr
from hatfieldcmr.ingest.file_type import MODISBlobType
MODIS_NAME = "modis-terra"
TITLE_PATTERN_STRING = r"\w+:([\w]+\.[\w]+):\w+"
TITLE_PATTERN = re.compile(TITLE_PATTERN_STRING)
GRANULE_TITLE_KEY = 'title'
GRANULE_TIME_KEY = 'time_start'
GRANULE_NAME_KEY = 'producer_granule_id'
def format_object_name(meta: Dict, object_name: str) -> str:
"""
Parameters
----------
metas: Dict
Single Granule metadata JSON response from CMR
object_name: str
Name of object (ex. hdf file, xml file)
Returns
----------
str
Object name for granule.
If insufficient information is available, empty string is returned.
"""
default_value = ""
if meta is None:
return default_value
folder_prefix = ""
try:
folder_prefix = format_object_prefix(meta)
except ValueError:
return ''
os.makedirs(folder_prefix, exist_ok=True)
return f"{folder_prefix}/{object_name}"
def format_object_prefix(meta: Dict):
"""Helper function to generate 'folder prefix' of the bucket object
"""
if not ((GRANULE_TITLE_KEY in meta) and (GRANULE_TIME_KEY in meta) and
(GRANULE_NAME_KEY in meta)):
raise ValueError('granule does not have required keys', meta)
title = meta.get(GRANULE_TITLE_KEY, "")
m = TITLE_PATTERN.match(title)
if m is None:
raise ValueError('granule does not have well formated title', title)
product_name = m.groups()[0]
date_string = dateparser(meta.get("time_start")).strftime('%Y.%m.%d')
folder_prefix = format_object_prefix_helper(product_name, date_string)
# f"{MODIS_NAME}/{product_name}/{date_string}"
return folder_prefix
def format_object_prefix_helper(product_name: str, date_string: str):
return f"{MODIS_NAME}/{product_name}/{date_string}"
class BlobPathMetadata:
def __init__(self, product_name: str, date_string: str):
self.product_name = product_name
self.product_name_without_version = product_name[:7].lower()
self.date_string = date_string
self.date = dateparser(date_string)
@staticmethod
def parse(prefix_or_full_name: str):
parts = prefix_or_full_name.split(r'/')
if (len(parts) >= 3):
product_name = parts[1]
date_string = parts[2]
return BlobPathMetadata(product_name, date_string)
return None
class MODISFileNameParser:
THUMBNAIL_RE = re.compile(r"BROWSE\.([\w\.]+)\.\d+\.jpg")
@classmethod
def identify_file_type(cls, name: str):
basename = os.path.basename(name)
if ('BROWSE' in basename):
return MODISBlobType.THUMBNAIL
elif ('.hdf.xml' in basename):
return MODISBlobType.METADATA_XML
elif ('.hdf_meta.json' in basename):
return MODISBlobType.METADATA_JSON
elif ('.hdf' in basename):
return MODISBlobType.DATA_HDF
elif ('.tif.aux.xml' in basename):
return MODISBlobType.GEOTIFF_XML
elif ('.tif' in basename):
return MODISBlobType.GEOTIFF
else:
print(f'unknown file name {name}')
return ''
@classmethod
def extract_blob_id(cls, name: str, file_type: MODISBlobType = None):
if file_type is None:
file_type = cls.identify_file_type(name)
if file_type == MODISBlobType.THUMBNAIL:
return cls._extract_blob_id_thumbnail(name)
elif file_type == MODISBlobType.METADATA_XML:
return cls._extract_basename_from_file(name, '.hdf.xml')
elif file_type == MODISBlobType.METADATA_JSON:
return cls._extract_basename_from_file(name, '.hdf_meta.json')
elif file_type == MODISBlobType.DATA_HDF:
return cls._extract_basename_from_file(name, '.hdf')
elif file_type == MODISBlobType.GEOTIFF:
return cls._extract_basename_from_file(name, '.tif')
elif file_type == MODISBlobType.GEOTIFF_XML:
return cls._extract_basename_from_file(name, '.tif.aux.xml')
return ''
@classmethod
def _extract_blob_id_thumbnail(cls, name: str) -> str:
basename = os.path.basename(name)
m = cls.THUMBNAIL_RE.match(basename)
if m is None:
return ''
blob_id = m.groups()[0]
name_includes_dir = len(name.split(r'/')) >= 4
if (name_includes_dir):
product_name_doesnt_match_blob_prefix = cls._check_thumbnail_product_inconsistency(
name, blob_id)
if (product_name_doesnt_match_blob_prefix):
blob_id = cls._fix_thumbnail_product_name_inconsistency(
name, blob_id)
return blob_id
@classmethod
def _check_thumbnail_product_inconsistency(cls, name: str, blob_id: str):
full_name_product_name, blob_id_product_name = cls._extract_product_names(
name, blob_id)
return full_name_product_name != blob_id_product_name
@classmethod
def _fix_thumbnail_product_name_inconsistency(cls, name: str,
blob_id: str):
full_name_product_name, blob_id_product_name = cls._extract_product_names(
name, blob_id)
return blob_id.replace(blob_id_product_name, full_name_product_name)
@classmethod
def _extract_product_names(cls, name: str, blob_id: str):
product_name_with_version = name.split(r'/')[1]
full_name_product_name = product_name_with_version[:7]
blob_id_product_name = blob_id[:7]
return full_name_product_name, blob_id_product_name
@classmethod
def _extract_basename_from_file(cls, name: str, extension: str) -> str:
basename = os.path.basename(name).strip()
extension_len = len(extension)
if (len(basename) > extension_len
and basename[-extension_len:] == extension):
return basename[:-extension_len]
return ''
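A round-trip sketch; the granule dict is synthetic but carries the three keys format_object_prefix requires:

meta = {
    "title": "SC:MOD10A1.006:123456",
    "time_start": "2021-03-01T00:00:00Z",
    "producer_granule_id": "MOD10A1.A2021060.h11v03.006.hdf",
}
print(format_object_prefix(meta))  # modis-terra/MOD10A1.006/2021.03.01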
[quality-signal metrics for the preceding record omitted]

=== record c0d42b9fa731f071b00e48d88b5dd1b3baf8c28b ===
file: tests/test_lib.py (Python, 8,661 bytes)
repo: bluefloyd00/snowflet @ a1676158bffc5f44970845b054d1ad221e9540c7
license: MIT | stars: 1 (2020-06-23) | issues: 2 (2020-06-19)
import os
import unittest
from snowflet.lib import read_sql
from snowflet.lib import logging_config
from snowflet.lib import extract_args
from snowflet.lib import apply_kwargs
from snowflet.lib import strip_table
from snowflet.lib import extract_tables_from_query
from snowflet.lib import add_database_id_prefix
from snowflet.lib import is_table
from snowflet.lib import add_table_prefix_to_sql
class StringFunctions(unittest.TestCase):
""" Test """
def test_strip_table(self):
""" Test """
self.assertEqual(
strip_table(table_name='"db"."schema"."table"'),
'"db.schema.table"',
"strip_table: wrong table name"
)
def test_extract_tables_from_query(self):
""" Test """
self.assertEqual(
extract_tables_from_query(sql_query=""" select a,b,c from "db"."schema"."table" and db.schema.table not "schema"."table" """),
[ '"db"."schema"."table"', 'db.schema.table' ],
"does not extract the tables properly"
)
class TableFunctions(unittest.TestCase):
""" Test """
def test_is_table(self):
self.assertTrue(
is_table( word='"db"."test"."table1"' ,sql=""" select a.* from "db"."test"."table1" a left join db.test.table2 b on a.id=b.id left join db."test".table3 c on b.id = c.id """),
"select: ok"
)
self.assertTrue(
is_table( word='"db"."test"."table4"' ,sql=""" create table "db"."test"."table4" as select a.* from "db"."test"."table1" a left join db.test.table2 b on a.id=b.id left join db."test".table3 c on b.id = c.id """),
"create - select: ok"
)
def test_add_table_prefix_to_sql(self):
self.assertEqual(
add_table_prefix_to_sql(
sql=""" select a.* from "db1"."test"."table1" a left join db2.test.table2 b on a.id=b.id left join db3."test".table3 c on b.id = c.id """,
prefix="CLONE_1003"
),
""" select a.* from "CLONE_1003_DB1"."TEST"."TABLE1" a left join "CLONE_1003_DB2".TEST.TABLE2 b on a.id=b.id left join "CLONE_1003_DB3"."TEST".TABLE3 c on b.id = c.id """,
"add_table_prefix_to_sql: ok"
)
# def test_extract_tables(self):
# self.assertEqual(
# extract_tables(""" select a.* from "db"."test"."table1" and db.test.table2 and db."test".table3 """),
# ["db.test.table1", "db.test.table2", "db.test.table3"],
# "multiple tables, mix double quotes and not"
# )
# self.assertEqual(
# extract_tables(""" select a.* from "db"."test"."table1" and db.test.table2 and db."test".table1 """),
# ["db.test.table1", "db.test.table2"],
# "returned unique values"
# )
class ReadSql(unittest.TestCase):
""" Test """
def test_class_read_sql_file(self):
""" Test """
sql = read_sql(
file="tests/sql/read_sql.sql",
param1="type",
param2="300",
param3="shipped_date",
param4='trying'
)
# self.assertEqual(
# sql,
# 'select type, shipped_date from "DB_TEST"."SCHEMA_TEST"."TABLE1" where amount > 300',
# "read_sql unit test"
# )
sql = read_sql(
file="tests/sql/read_sql.sql"
)
self.assertTrue(
sql == 'select {param1}, {param3} from "DB_TEST"."SCHEMA_TEST"."TABLE1" where amount > {param2}',
"read_sql file unit test no opt parameters"
)
with self.assertRaises(KeyError):
read_sql(
file="tests/sql/read_sql.sql",
database_id='something'
)
def test_class_read_sql_query(self):
""" Test """
sql = read_sql(
query='select {param1}, {param3} from "db_test"."schema_test"."table1" where amount > {param2}',
param1="type",
param2="300",
param3="shipped_date",
param4='trying'
)
self.assertEqual(
sql,
'select type, shipped_date from "DB_TEST"."SCHEMA_TEST"."TABLE1" where amount > 300',
"read_sql unit test"
)
sql = read_sql(
file="tests/sql/read_sql.sql"
)
self.assertTrue(
sql == 'select {param1}, {param3} from "DB_TEST"."SCHEMA_TEST"."TABLE1" where amount > {param2}',
"read_sql query unit test no opt parameters"
)
with self.assertRaises(KeyError):
read_sql(
file="tests/sql/read_sql.sql",
database_id='something'
)
class FunctionsInLib(unittest.TestCase):
"""
Unittest class for lib functions
"""
def test_extract_args_1_param(self):
content = [
{
"table_desc": "table1",
"create_table": {
"table_id": "table1",
"dataset_id": "test",
"file": "tests/sql/table1.sql"
},
"pk": ["col1", "col2"],
"mock_data": "sql/table1_mocked.sql"
},
{
"table_desc": "table2",
"create_table": {
"table_id": "table2",
"dataset_id": "test",
"file": "tests/sql/table2.sql"
},
"pk": ["col1"],
"mock_data": "sql/table1_mocked.sql"
}
]
self.assertEqual(
extract_args(content, "pk"),
[["col1", "col2"], ["col1"]],
"extracted ok"
)
self.assertEqual(
extract_args(content, "create_table"),
[
{
"table_id": "table1",
"dataset_id": "test",
"file": "tests/sql/table1.sql"
},
{
"table_id": "table2",
"dataset_id": "test",
"file": "tests/sql/table2.sql"
}
],
"extracted ok"
)
def test_add_database_id_prefix(self):
self.yaml = {
"desc": "test",
"tables":
[
{
"table_desc": "table1",
"create_table": {
"table_id": "table1",
"database_id": "test",
},
},
{
"table_desc": "table2",
"create_table": {
"table_id": "table2",
"database_id": "test",
},
}
]
}
add_database_id_prefix(
self.yaml,
prefix='1234'
)
self.assertEqual(
self.yaml
,
{
"desc": "test",
"tables":
[
{
"table_desc": "table1",
"create_table": {
"table_id": "table1",
"database_id": "1234_test",
},
},
{
"table_desc": "table2",
"create_table": {
"table_id": "table2",
"database_id": "1234_test",
},
}
]
},
"prefix properly added to database"
)
if __name__ == "__main__":
logging_config()
unittest.main()
[quality-signal metrics for the preceding record omitted]

=== record c0d718d76a48ce7c669cb050436654bea3cdd296 ===
file: annotation/black_action/fu.py (Python, 2,891 bytes)
repo: windfall-shogi/feature-annotation @ 83ff7c3fa31e542221cf45186b2ea3ef2a10310f
license: MIT
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import sonnet as snt
import tensorflow as tf
from .drop_mask import make_drop_mask1
from .promotion_mask import make_promotion_mask
from ..boolean_board.black import select_black_fu_board, select_non_black_board
from ..boolean_board.empty import select_empty_board
from ..direction import Direction
from ..piece import Piece
__author__ = 'Yasuhiro'
__date__ = '2018/2/22'
class BlackFuFileLayer(snt.AbstractModule):
def __init__(self, data_format, name='black_fu_file'):
super().__init__(name=name)
self.data_format = data_format
def _build(self, board):
fu_board = select_black_fu_board(board=board)
axis = -1 if self.data_format == 'NCHW' else -2
flag = tf.reduce_any(fu_board, axis=axis, keep_dims=True)
flag = tf.logical_not(flag)
repeat_count = [1, 1, 1, 1]
repeat_count[axis] = 9
available_map = tf.tile(flag, repeat_count)
return available_map
class BlackFuDropLayer(snt.AbstractModule):
def __init__(self, data_format, name='black_fu_drop'):
super().__init__(name=name)
self.data_format = data_format
def _build(self, board, black_hand, available_square):
fu_available_file = BlackFuFileLayer(
data_format=self.data_format
)(board)
fu_available_area = make_drop_mask1(data_format=self.data_format)
empty_square = select_empty_board(board=board)
available = tf.logical_and(
# files where a FU can be dropped, ranks 2-9
tf.logical_and(fu_available_file, fu_available_area),
tf.logical_and(
# empty squares
empty_square,
# whether a FU is in hand
tf.reshape(
tf.greater_equal(black_hand[:, Piece.BLACK_FU], 1),
[-1, 1, 1, 1]
)
)
)
# whether the drop is still valid while in check
available = tf.logical_and(available, available_square)
return available
class BlackFuMoveLayer(snt.AbstractModule):
def __init__(self, data_format, name='black_fu_move'):
super().__init__(name=name)
self.data_format = data_format
def _build(self, board, fu_effect):
non_black_mask = select_non_black_board(board=board)
movable_effect = tf.logical_and(fu_effect[Direction.UP],
non_black_mask)
available_mask = make_drop_mask1(data_format=self.data_format)
non_promoting_effect = {
Direction.UP: tf.logical_and(movable_effect, available_mask)
}
promotion_mask = make_promotion_mask(
direction=Direction.UP, data_format=self.data_format, step_size=1
)
promoting_effect = {
Direction.UP: tf.logical_and(movable_effect, promotion_mask)
}
return non_promoting_effect, promoting_effect
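A wiring sketch only; the board tensor encoding consumed by the boolean_board helpers is assumed, not defined in this module:

import tensorflow as tf
# hypothetical NCHW boolean piece planes; shape and dtype are assumptions
board = tf.placeholder(tf.bool, [None, 1, 9, 9])
file_layer = BlackFuFileLayer(data_format='NCHW')
# available_map = file_layer(board)  # valid only with the real board encoding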
[quality-signal metrics for the preceding record omitted]

=== record c0dc3d97cf9bff141a470ed5055719904a5f9f4c ===
file: src/commands/refactor/convert_to_arrow_function.py (Python, 2,492 bytes)
repo: PranjalPansuriya/JavaScriptEnhancements @ 14af4162e86585153cbd4614ad96dff64a0d3192
license: MIT | stars: 690 (2017-04-11 to 2022-03-21) | issues: 74 | forks: 42
import sublime, sublime_plugin
import os, traceback
from ...libs import util
from ...libs import FlowCLI
class JavascriptEnhancementsRefactorConvertToArrowFunctionCommand(sublime_plugin.TextCommand):
def run(self, edit, **args):
view = self.view
selection = view.sel()[0]
flow_cli = FlowCLI(view)
result = flow_cli.ast()
if result[0]:
body = result[1]["body"]
items = util.nested_lookup("type", ["FunctionExpression"], body)
for item in items:
region = sublime.Region(int(item["range"][0]), int(item["range"][1]))
if region.contains(selection):
text = view.substr(region)
if not text.startswith("function"):
return
index_begin_parameter = 8
text = text[index_begin_parameter:].lstrip()
while text[0] != "(" and len(text) > 0:
text = text[1:].lstrip()
block_statement_region = sublime.Region(int(item["body"]["range"][0]), int(item["body"]["range"][1]))
block_statement = view.substr(block_statement_region)
index = text.index(block_statement)
while index - 1 >= 0 and text[index - 1] == " ":
text = text[0:index - 1] + text[index:]
index = index - 1
text = text[0:index] + " => " + text[index:]
view.replace(edit, region, text)
break
else:
sublime.error_message("Cannot convert the function. Some problems occured.")
def is_enabled(self, **args) :
view = self.view
if not util.selection_in_js_scope(view) :
return False
selection = view.sel()[0]
scope = view.scope_name(selection.begin()).strip()
if "meta.block.js" in scope:
region_scope = util.get_region_scope_last_match(view, scope, selection, "meta.block.js")
else:
region_scope = util.get_region_scope_last_match(view, scope, selection, "meta.group.braces.curly.js")
if not region_scope:
return False
return True
def is_visible(self, **args) :
view = self.view
if not util.selection_in_js_scope(view) :
return False
selection = view.sel()[0]
scope = view.scope_name(selection.begin()).strip()
if "meta.block.js" in scope:
region_scope = util.get_region_scope_last_match(view, scope, selection, "meta.block.js")
else:
region_scope = util.get_region_scope_last_match(view, scope, selection, "meta.group.braces.curly.js")
if not region_scope:
return False
return True
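The core rewrite is plain string surgery around the function's BlockStatement; the same edit on a standalone string, outside Sublime:

text = "function (a, b) { return a + b; }"
block = text[text.index("{"):]
params = text[len("function"):text.index("{")].strip()
print(params + " => " + block)  # (a, b) => { return a + b; }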
[quality-signal metrics for the preceding record omitted]

=== record c0dde640a5c1e4f5414f4bf02dfa8a2f03ee959e ===
file: apps/xdmac/xdmac_memory_transfer/firmware/at91bootstrap_sam_a5d2_xult.X/scripts/mpconfig/mpconfig.py (Python, 33,374 bytes)
repo: Techdisc/Microchip @ ea8391c689c4badbe2f9ac5181e21bbd5d9d1e54
license: 0BSD | stars: 82 (2015-02-05 to 2022-03-09) | issues: 128 | forks: 219
import os
import sys
import base64
import fnmatch
from kconfiglib import Kconfig, expr_value, Symbol, Choice, MENU, COMMENT, BOOL, STRING, INT, HEX
from java.awt import BorderLayout, Dimension, FlowLayout
from java.awt.event import ActionListener, MouseEvent
from javax.swing import BorderFactory, BoxLayout, ImageIcon, JButton, JCheckBox, JFileChooser, JFrame, JLabel, JPanel, JRadioButton, JScrollPane, JSplitPane, JTextArea, JTextField, JTree
from javax.swing.event import ChangeEvent, DocumentListener, TreeExpansionListener, TreeSelectionListener, CellEditorListener
from javax.swing.tree import DefaultTreeModel, DefaultMutableTreeNode, DefaultTreeCellRenderer, TreeCellEditor, TreePath
from events import addActionListener
# For icons in code
from org.python.core.util import StringUtil
if 'knodeinfo' in sys.modules:
del sys.modules["knodeinfo"]
from knodeinfo import getNodeInfoString, getNodeName, setKConfig
class PrintLogger():
def info(self, log_string):
print(log_string)
log = PrintLogger()
# If True, use GIF image data embedded in this file instead of separate GIF
# files. See _load_images().
_USE_EMBEDDED_IMAGES = True
def _load_images():
# Loads GIF images, creating the global _*_img ImageIcon variables.
# Base64-encoded images embedded in this script are used if
# _USE_EMBEDDED_IMAGES is True, and separate image files in the same
# directory as the script otherwise.
#
# Using a global variable indirectly prevents the image from being
# garbage-collected. Passing an image to a Tkinter function isn't enough to
# keep it alive.
def load_image(name, data):
var_name = "_{}_img".format(name)
if _USE_EMBEDDED_IMAGES:
globals()[var_name] = ImageIcon(StringUtil.toBytes(base64.b64decode(data)))
else:
globals()[var_name] = ImageIcon(
file=os.path.join(os.path.dirname(__file__), name + ".gif"))
# Note: Base64 data can be put on the clipboard with
# $ base64 -w0 foo.gif | xclip
load_image("icon", "R0lGODlhMAAwAPEDAAAAAADQAO7u7v///yH5BAUKAAMALAAAAAAwADAAAAL/nI+gy+2Pokyv2jazuZxryQjiSJZmyXxHeLbumH6sEATvW8OLNtf5bfLZRLFITzgEipDJ4mYxYv6A0ubuqYhWk66tVTE4enHer7jcKvt0LLUw6P45lvEprT6c0+v7OBuqhYdHohcoqIbSAHc4ljhDwrh1UlgSydRCWWlp5wiYZvmSuSh4IzrqV6p4cwhkCsmY+nhK6uJ6t1mrOhuJqfu6+WYiCiwl7HtLjNSZZZis/MeM7NY3TaRKS40ooDeoiVqIultsrav92bi9c3a5KkkOsOJZpSS99m4k/0zPng4Gks9JSbB+8DIcoQfnjwpZCHv5W+ip4aQrKrB0uOikYhiMCBw1/uPoQUMBADs=")
load_image("n_bool", "R0lGODdhEAAQAPAAAAgICP///ywAAAAAEAAQAAACIISPacHtvp5kcb5qG85hZ2+BkyiRF8BBaEqtrKkqslEAADs=")
load_image("y_bool", "R0lGODdhEAAQAPEAAAgICADQAP///wAAACwAAAAAEAAQAAACMoSPacLtvlh4YrIYsst2cV19AvaVF9CUXBNJJoum7ymrsKuCnhiupIWjSSjAFuWhSCIKADs=")
load_image("n_tri", "R0lGODlhEAAQAPD/AAEBAf///yH5BAUKAAIALAAAAAAQABAAAAInlI+pBrAKQnCPSUlXvFhznlkfeGwjKZhnJ65h6nrfi6h0st2QXikFADs=")
load_image("m_tri", "R0lGODlhEAAQAPEDAAEBAeQMuv///wAAACH5BAUKAAMALAAAAAAQABAAAAI5nI+pBrAWAhPCjYhiAJQCnWmdoElHGVBoiK5M21ofXFpXRIrgiecqxkuNciZIhNOZFRNI24PhfEoLADs=")
load_image("y_tri", "R0lGODlhEAAQAPEDAAICAgDQAP///wAAACH5BAUKAAMALAAAAAAQABAAAAI0nI+pBrAYBhDCRRUypfmergmgZ4xjMpmaw2zmxk7cCB+pWiVqp4MzDwn9FhGZ5WFjIZeGAgA7")
load_image("m_my", "R0lGODlhEAAQAPEDAAAAAOQMuv///wAAACH5BAUKAAMALAAAAAAQABAAAAI5nIGpxiAPI2ghxFinq/ZygQhc94zgZopmOLYf67anGr+oZdp02emfV5n9MEHN5QhqICETxkABbQ4KADs=")
load_image("y_my", "R0lGODlhEAAQAPH/AAAAAADQAAPRA////yH5BAUKAAQALAAAAAAQABAAAAM+SArcrhCMSSuIM9Q8rxxBWIXawIBkmWonupLd565Um9G1PIs59fKmzw8WnAlusBYR2SEIN6DmAmqBLBxYSAIAOw==")
load_image("n_locked", "R0lGODlhEAAQAPABAAAAAP///yH5BAUKAAEALAAAAAAQABAAAAIgjB8AyKwN04pu0vMutpqqz4Hih4ydlnUpyl2r23pxUAAAOw==")
load_image("m_locked", "R0lGODlhEAAQAPD/AAAAAOQMuiH5BAUKAAIALAAAAAAQABAAAAIylC8AyKwN04ohnGcqqlZmfXDWI26iInZoyiore05walolV39ftxsYHgL9QBBMBGFEFAAAOw==")
load_image("y_locked", "R0lGODlhEAAQAPD/AAAAAADQACH5BAUKAAIALAAAAAAQABAAAAIylC8AyKzNgnlCtoDTwvZwrHydIYpQmR3KWq4uK74IOnp0HQPmnD3cOVlUIAgKsShkFAAAOw==")
load_image("not_selected", "R0lGODlhEAAQAPD/AAAAAP///yH5BAUKAAIALAAAAAAQABAAAAIrlA2px6IBw2IpWglOvTYhzmUbGD3kNZ5QqrKn2YrqigCxZoMelU6No9gdCgA7")
load_image("selected", "R0lGODlhEAAQAPD/AAAAAP///yH5BAUKAAIALAAAAAAQABAAAAIzlA2px6IBw2IpWglOvTah/kTZhimASJomiqonlLov1qptHTsgKSEzh9H8QI0QzNPwmRoFADs=")
load_image("edit", "R0lGODlhEAAQAPIFAAAAAKOLAMuuEPvXCvrxvgAAAAAAAAAAACH5BAUKAAUALAAAAAAQABAAAANCWLqw/gqMBp8cszJxcwVC2FEOEIAi5kVBi3IqWZhuCGMyfdpj2e4pnK+WAshmvxeAcETWlsxPkkBtsqBMa8TIBSQAADs=")
class NodeType():
"""Used to determine what GUI control to use in the visual tree."""
_unknown = 0
_radio = 1
_bool = 2
_tri = 3
_text = 4
_menu = 5
_comment = 6
nodeType = _unknown
def __init__(self, t):
self.nodeType = t
def isType(self, t_list):
return self.nodeType in t_list
def getType(self):
return self.nodeType
class TreeNodeData(object):
"""These are the data objects that goes into the tree data model."""
def __init__ (self, node, tree):
"""Create a TreeNodeData object
Parameters
----------
node : Kconfig.MenuNode
The Kconfiglib node object that this tree node visualizes.
tree : KConfigTree
The tree this node object belongs to. Needed for sending events to the tree.
"""
self.knode = node
self.tree = tree
self.expanded = False
def getNodeType(self):
"""Returns the node type"""
item = self.knode.item
if item == MENU:
return NodeType(NodeType._menu)
if item == COMMENT:
return NodeType(NodeType._comment)
if not item.orig_type:
return NodeType(NodeType._unknown)
if item.orig_type in (STRING, INT, HEX):
return NodeType(NodeType._text)
# BOOL or TRISTATE
if isinstance(item, Symbol) and item.choice:
# Choice symbol in y-mode choice
return NodeType(NodeType._radio)
if len(item.assignable) <= 1:
# Pinned to a single value
if isinstance(item, Choice):
return NodeType(NodeType._menu)
if item.type == BOOL:
return NodeType(NodeType._bool)
if item.assignable == (1, 2):
return NodeType(NodeType._tri)
return NodeType(NodeType._tri)
def getText(self):
"""Return the text to display on the tree node"""
if self.knode and self.knode.prompt:
return self.knode.prompt[0]
return getNodeName(self.knode).strip()
def getValue(self):
"""Returns a string-type value, used for STRING, INT, HEX node types."""
if self.knode.item == MENU or self.knode.item == COMMENT:
return None
return self.knode.item.str_value
def getTriValue(self):
"""Returns a boolean or tristate value. A bool checkbox has the values 0 and 2,
while a tristate has 0, 1 and 2. 0 = False/N, 1 = Module/M, 2 = True/Y"""
if self.knode.item == MENU or self.knode.item == COMMENT:
return None
# log.info(self.getText(), str(self.knode.item.tri_value)))
return self.knode.item.tri_value
def setValue(self, val):
"""Set a string value. Can be a text string, or an integer (or hex) encoded as a string."""
# log.info("TreeNodeData.setValue " + self.getText() + " " + str(val) + " was " + self.getValue())
self.knode.item.set_value(val)
self.tree.updateTree()
def setTriValue(self, n):
"""Set a tristate or bool value. 0 = False/N, 1 = Module/M, 2 = True/Y"""
# log.info("TreeNodeData.setTriValue", self.getText(), n)
self.knode.item.set_value(n)
self.tree.updateTree()
def getVisible(self):
"""Return the visibility state of the node."""
return TreeNodeData.isVisible(self.knode)
@staticmethod
def isVisible(node):
"""Return the visibility state of the node passed as an argument."""
return node.prompt and expr_value(node.prompt[1]) and not \
(node.item == MENU and not expr_value(node.visibility))
def isExpanded(self):
return self.expanded
def setExpanded(self, expanded):
self.expanded = expanded
def search(self, searchString, invisibleMatch):
"""Search all text related to this node for searchString.
If it matches, it will tag the node as a search match.
If invisibleMatch = False and the node is not visible, the search match will be False.
The search match result (bool) is returned."""
if self.getVisible() > 0 or invisibleMatch:
infoText = self.getText()
searchString = "*" + searchString + "*"
self.searchMatch = fnmatch.fnmatch(infoText.lower(), searchString.lower())
else:
self.searchMatch = False
return self.searchMatch
def setSearchMatch(self, match):
"""Tags the node with a search match"""
self.searchMatch = match
def isSearchMatch(self):
return self.searchMatch
def toString(self):
return self.getText() + " = " + str(self.getValue())
class TristateCheckBox(JCheckBox):
"""Custom tristate checkbox implementation."""
serialVersionUID = 1
triState = 0
_load_images()
selected = _y_tri_img
unselected = _n_tri_img
halfselected = _m_tri_img
def __init__(self, eventHandler = None):
"""Creates a TristateCheckBox object
Arguments
---------
eventHandler : ActionListener
If supplied, the event handler will be called when
the tristate checkbox state changes.
"""
JCheckBox.__init__(self)
if eventHandler:
addActionListener(self, eventHandler)
addActionListener(self, self.actionPerformed)
def paint(self, g):
"""Called when the tree needs to paint the checkbox icon."""
if self.triState == 2:
self.setIcon(self.selected)
elif self.triState == 1:
self.setIcon(self.halfselected)
else:
self.setIcon(self.unselected)
JCheckBox.paint(self, g)
def getTriState(self):
"""Return the tristate value (0, 1 or 2)."""
return self.triState
def setTriState(self, tri):
"""Set tristate value (0, 1 or 2)."""
self.triState = tri
def actionPerformed(self, e):
"""Increments the checkbox value when clicked"""
# log.info("actionPerformed()")
tcb = e.getSource()
newVal = (tcb.getTriState() + 1) % 3
tcb.setTriState(newVal)
class CustomCellRenderer(DefaultTreeCellRenderer):
"""Renders the various tree controls (checkbox, tristate checkbox, string values etc.)"""
def __init__(self):
DefaultTreeCellRenderer.__init__(self)
flowLayout = FlowLayout(FlowLayout.LEFT, 0, 0)
self.cbPanel = JPanel(flowLayout)
self.cb = JCheckBox()
self.cb.setBackground(None)
self.cbPanel.add(self.cb)
self.cbLabel = JLabel()
self.cbPanel.add(self.cbLabel)
self.tcbPanel = JPanel(flowLayout)
self.tcb = TristateCheckBox()
self.tcb.setBackground(None)
self.tcbPanel.add(self.tcb)
self.tcbLabel = JLabel()
self.tcbPanel.add(self.tcbLabel)
self.rbPanel = JPanel(flowLayout)
self.rb = JRadioButton()
self.rb.setBackground(None)
self.rbPanel.add(self.rb)
self.rbLabel = JLabel()
self.rbPanel.add(self.rbLabel)
def getTreeCellRendererComponent(self, tree, value, selected, expanded, leaf, row, hasFocus):
"""Return a swing control appropriate for the node type of the supplied value"""
if isinstance(value, DefaultMutableTreeNode):
nodeData = value.getUserObject()
if isinstance(nodeData, TreeNodeData):
t = nodeData.getNodeType()
isEnabled = nodeData.getVisible() > 0
# Boolean checkbox
if t.isType([NodeType._bool]):
self.cbLabel.setText(nodeData.getText())
self.cb.setEnabled(isEnabled)
self.cbLabel.setEnabled(isEnabled)
if nodeData.getTriValue() == 0:
self.cb.setSelected(False)
else:
self.cb.setSelected(True)
control = self.cbPanel
# Tristate checkbox
elif t.isType([NodeType._tri]):
control = self.tcbPanel
self.tcbLabel.setText(nodeData.getText())
self.tcb.setEnabled(isEnabled)
self.tcbLabel.setEnabled(isEnabled)
self.tcb.setTriState(nodeData.getTriValue())
# Radio button
elif t.isType([NodeType._radio]):
self.rbLabel.setText(nodeData.getText())
self.rb.setEnabled(isEnabled)
self.rbLabel.setEnabled(isEnabled)
if nodeData.getTriValue() == 0:
self.rb.setSelected(False)
else:
self.rb.setSelected(True)
control = self.rbPanel
# Text field
elif t.isType([NodeType._text]):
control = DefaultTreeCellRenderer.getTreeCellRendererComponent(self, tree, value, selected, expanded, leaf, row, hasFocus)
control.setText(nodeData.getText() + ": " + str(nodeData.getValue()))
# Default tree cell (a node with an icon and a label)
else:
control = DefaultTreeCellRenderer.getTreeCellRendererComponent(self, tree, value, selected, expanded, leaf, row, hasFocus)
control.setText(nodeData.getText())
self.setColors(control, nodeData, selected) # Background color for the tree item
# log.info("getTreeCellRendererComponent", t.getType(), isEnabled, "'" + nodeData.getText() + "'")
control.setEnabled(isEnabled)
return control
# log.info("Warning: getTreeCellRendererComponent() fallthrough", nodeData)
return DefaultTreeCellRenderer.getTreeCellRendererComponent(self, tree, value, selected, expanded, leaf, row, hasFocus)
def setColors(self, control, data, selected):
"""Set background color fot the tree item."""
if selected:
control.setForeground(self.getTextSelectionColor())
control.setBackground(self.getBackgroundSelectionColor())
else:
control.setForeground(self.getTextNonSelectionColor())
control.setBackground(self.getBackgroundNonSelectionColor())
class CustomCellEditor(TreeCellEditor, ActionListener):
"""Renders the various tree edit controls (checkbox, tristate checkbox, text box etc.)"""
def __init__(self, tree):
TreeCellEditor.__init__(self)
self.editor = None
self.tree = tree
flowLayout = FlowLayout(FlowLayout.LEFT, 0, 0)
self.cbPanel = JPanel(flowLayout)
self.cb = JCheckBox(actionPerformed = self.checked)
self.cbPanel.add(self.cb)
self.cbLabel = JLabel()
self.cbPanel.add(self.cbLabel)
self.tcbPanel = JPanel(flowLayout)
self.tcb = TristateCheckBox(self.checked)
self.tcbPanel.add(self.tcb)
self.tcbLabel = JLabel()
self.tcbPanel.add(self.tcbLabel)
self.rbPanel = JPanel(flowLayout)
self.rb = JRadioButton(actionPerformed = self.checked)
self.rbPanel.add(self.rb)
self.rbLabel = JLabel()
self.rbPanel.add(self.rbLabel)
self.tfPanel = JPanel(flowLayout)
self.tfLabel = JLabel()
self.tfPanel.add(self.tfLabel)
self.tf = JTextField()
self.tf.setColumns(12)
self.tf.addActionListener(self)
self.tfPanel.add(self.tf)
def addCellEditorListener(self, l):
"""Register for edit events"""
self.listener = l
def isCellEditable(self, event):
if event != None and isinstance(event.getSource(), JTree) and isinstance(event, MouseEvent):
tree = event.getSource()
path = tree.getPathForLocation(event.getX(), event.getY())
userData = path.getLastPathComponent().getUserObject()
if isinstance(userData, TreeNodeData) and (not userData.getNodeType().isType([NodeType._comment, NodeType._menu])) and (userData.getVisible() > 0):
return True
return False
def shouldSelectCell(self, event):
# log.info("shouldSelectCell")
return True
def cancelCellEditing(self):
# log.info("Cancel editing, please!")
# super(CustomCellEditor, self).cancelCellEditing()
pass
def stopCellEditing(self):
# log.info("stopCellEditing")
if self.nodeData.getNodeType().isType([NodeType._text]):
# log.info("stopCellEditing for sure!")
self.nodeData.setValue(str(self.tf.getText()))
return True
def getTreeCellEditorComponent(self, tree, value, selected, expanded, leaf, row):
"""Return a swing edit control appropriate for the node type of the supplied value"""
self.nodeData = self.getNodeUserData(value)
if self.nodeData:
text = self.nodeData.getText()
t = self.nodeData.getNodeType()
# Boolean checkbox
if t.isType([NodeType._bool]):
self.editor = self.cbPanel
self.cbLabel.setText(text)
if self.nodeData.getTriValue() > 0:
self.cb.setSelected(True)
else:
self.cb.setSelected(False)
# Tristate checkbox
elif t.isType([NodeType._tri]):
# log.info("getTreeCellEditorComponent tristate")
self.editor = self.tcbPanel
self.tcbLabel.setText(text)
self.tcb.setTriState(self.nodeData.getTriValue())
# Radio button
elif t.isType([NodeType._radio]):
self.editor = self.rbPanel
self.rbLabel.setText(text)
if self.nodeData.getTriValue() > 0:
self.rb.setSelected(True)
else:
self.rb.setSelected(False)
# Text field
elif t.isType([NodeType._text]):
self.editor = self.tfPanel
self.tfLabel.setText(str(self.nodeData.getText()) + ":")
self.tf.setText(str(self.nodeData.getValue()))
else:
self.editor = self.tcb
self.editor.setText(text)
return self.editor
def getNodeUserData(self, value):
"""Gets the TreeNodeData from the tree node"""
if isinstance(value, DefaultMutableTreeNode):
nodeData = value.getUserObject()
if isinstance(nodeData, TreeNodeData):
return nodeData
return None
def getCellEditorValue(self):
newNode = TreeNodeData(self.nodeData.knode, self.tree)
if isinstance(self.editor, JTextField):
newNode.setValue(str(self.editor.getText()))
return newNode
def checked(self, e):
"""Updates the node data when a checkbox has been clicked"""
control = e.getSource()
if isinstance(control, TristateCheckBox):
# log.info("tristate checked")
self.nodeData.setTriValue(control.getTriState())
else:
# log.info("checkbox checked")
if control.isSelected():
self.nodeData.setValue(2)
else:
self.nodeData.setValue(0)
def actionPerformed(self, event):
""" ENTER pressed in text field, stop editing."""
tf = event.getSource()
self.listener.editingStopped(ChangeEvent(tf))
class KConfigTree(JTree, CellEditorListener):
"""Custom Swing JTree based tree that visualizes a KConfig configuration.
The full KConfig menu structure is put into a shadow tree model. From the shadow model,
a real model is built (updateModel), where hidden nodes are not included. This update model
is what the tree uses to visualize the configuration menu.
Both the shadow and the updated model has the same TreeNodeData with KConfig data.
The expanded state and search result state is kept in the TreeNodeData.
"""
shadowModel = None
isUpdating = False
showAll = False
isSearching = False
def __init__(self, kconf):
self.setCellRenderer(CustomCellRenderer())
self.setCellEditor(CustomCellEditor(self))
self.createKconfShadowModel(kconf)
self.setModel(self.createUpdatedModel())
self.expandRow(0)
self.setEditable(True)
self.setRootVisible(False)
self.setShowsRootHandles(True)
self.setRowHeight(0)
self.addTreeExpansionListener(KConfigTreeExpansionListener())
self.getCellEditor().addCellEditorListener(self)
def editingCanceled(self, event):
"""From CellEditorListener """
# log.info("editingCanceled", self.cellEditor.getCellEditorValue())
pass
def editingStopped(self, event):
"""From CellEditorListener."""
# log.info("editingStopped", self.cellEditor.getCellEditorValue())
self.stopEditing()
def createKconfShadowModel(self, kconf):
"""Create the one and only shadow data model"""
rootNode = DefaultMutableTreeNode(kconf.mainmenu_text)
self.addNodes(rootNode, kconf.top_node.list)
self.shadowModel = DefaultTreeModel(rootNode)
def addNodes(self, parent, node):
"""Recursively traverse the KConfig structure and add to the shadow model"""
while node:
newUiNode = DefaultMutableTreeNode(TreeNodeData(node, self))
parent.add(newUiNode)
if node.list:
self.addNodes(newUiNode, node.list)
node = node.next
def createUpdatedModel(self):
"""When the user does any changes in the tree, the underlaying kconfig structure
will change. Nodes may change visibility and value. The tree control cannot hide nodes,
so a new datamodel must be generated that does not include invisible nodes."""
shadowTreeRoot = self.shadowModel.getRoot()
rootNode = DefaultMutableTreeNode("Root")
self.addVisibleNodes(rootNode, shadowTreeRoot)
return DefaultTreeModel(rootNode)
def addVisibleNodes(self, visibleParent, shadowParent):
"""Adds visible nodes from the shadow tree model to the update tree model.
If there is an active search operation, only search matches will be added.
If showAll is set, all nodes are added regardless of visibility."""
childrenEnum = shadowParent.children()
while childrenEnum.hasMoreElements():
shadowChild = childrenEnum.nextElement()
if shadowChild.getUserObject().getVisible() > 0 or self.showAll:
if not self.isSearching or shadowChild.getUserObject().isSearchMatch():
visibleChild = DefaultMutableTreeNode(shadowChild.getUserObject())
visibleParent.add(visibleChild)
if shadowChild.getChildCount() > 0:
self.addVisibleNodes(visibleChild, shadowChild)
def isPathEditable(self, path):
comp = path.getLastPathComponent()
if isinstance(comp, DefaultMutableTreeNode):
nodeData = comp.getUserObject()
if isinstance(nodeData, TreeNodeData):
return True
return False
def updateTree(self):
"""Call to create a new updated tree model"""
if not self.isUpdating:
# log.info("updateTree()")
self.isUpdating = True
self.setModel(self.createUpdatedModel())
self.updateExpandedState(self.getModel().getRoot())
self.isUpdating = False
def updateExpandedState(self, parent):
"""Scan through the whole tree and expand the tree node
if the node data has the expanded field set to True."""
childrenEnum = parent.children()
while childrenEnum.hasMoreElements():
child = childrenEnum.nextElement()
if child.getUserObject().isExpanded():
self.expandPath(TreePath(child.getPath()))
if child.getChildCount() > 0:
self.updateExpandedState(child)
def setShowAll(self, show):
self.showAll = show
self.updateTree()
def doSearch(self, searchText):
"""Perform a search in the data model with the supplied text."""
if len(searchText) > 0:
self.isSearching = True
self.doSearchBranch(self.shadowModel.getRoot(), searchText)
else:
self.isSearching = False
self.updateTree()
def doSearchBranch(self, shadowParent, searchText):
"""Traverse the tree model searching for the search text"""
match = False
childrenEnum = shadowParent.children()
while childrenEnum.hasMoreElements():
shadowChild = childrenEnum.nextElement()
if shadowChild.getUserObject().search(searchText, self.showAll):
match = True
if shadowChild.getChildCount() > 0:
if self.doSearchBranch(shadowChild, searchText):
shadowChild.getUserObject().setSearchMatch(True)
match = True
return match
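# Usage sketch (assumption: `kconf` is a loaded kconfiglib.Kconfig instance, as
# created in MPConfig.__init__ below). It illustrates how the shadow model
# drives the visible model:
#   tree = KConfigTree(kconf)
#   tree.setShowAll(True)    # rebuilds the visible model, including hidden nodes
#   tree.doSearch("USB")     # tags matches in the shadow model, then rebuilds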
class KConfigTreeExpansionListener(TreeExpansionListener):
"""Listener for tree expand/collapse events. Used for storing the expand state
in the node data, so that a new updated tree's branches can be expanded the same way
as in the old tree."""
def treeExpanded(self, e):
if not e.getPath().getLastPathComponent() == e.getSource().getModel().getRoot():
e.getPath().getLastPathComponent().getUserObject().setExpanded(True)
def treeCollapsed(self, e):
if not e.getPath().getLastPathComponent() == e.getSource().getModel().getRoot():
e.getPath().getLastPathComponent().getUserObject().setExpanded(False)
class MPConfig(TreeSelectionListener):
"""The MPConfig component initializes the KConfig library with the requested configuration,
and buildst the GUI, consisting of a "Load" and a "Save as" buttons, a search field, "show all"
checkbox, tree view and information text view."""
def __init__(self, kconfig_file = "Kconfig", config_file=".config", systemLogger = None):
"""[summary]
Parameters
----------
kconfig_file : string (default: "Kconfig")
The Kconfig configuration file
config_file : string (default: ".config")
The save file which will be used for loading and saving the settings
systemLogger (default: None)
A system logger object. If None then print statements are used for logging.
"""
global log
if systemLogger:
log = systemLogger
# Load Kconfig configuration files
self.kconfig = Kconfig(kconfig_file)
setKConfig(self.kconfig)
if os.path.isfile(config_file):
log.info(self.kconfig.load_config(config_file))
elif os.path.isfile(".config"):
log.info(self.kconfig.load_config(".config"))
self.tree = KConfigTree(self.kconfig)
self.tree.addTreeSelectionListener(self.treeSelectionChanged)
jTreeSP = JScrollPane(self.tree)
self.jta = JTextArea()
self.jta.setEditable(False)
jTextSP = JScrollPane(self.jta)
toolPanel = JPanel()
toolPanel.setLayout(BoxLayout(toolPanel, BoxLayout.X_AXIS))
toolPanel.setBorder(BorderFactory.createEmptyBorder(2, 0, 2, 0))
toolPanel.add(JLabel("Search: "))
jSearchPanel = JPanel()
jSearchPanel.setLayout(BoxLayout(jSearchPanel, BoxLayout.X_AXIS))
self.jSearchField = JTextField()
jSearchPanel.setBackground(self.jSearchField.getBackground())
jSearchPanel.setBorder(self.jSearchField.getBorder())
self.jSearchField.setBorder(None)
self.jSearchField.getDocument().addDocumentListener(SearchListener(self.tree))
jSearchPanel.add(self.jSearchField)
clearSearchButton = JButton(u'\u00d7', actionPerformed = self.clearSearch)
d = clearSearchButton.getPreferredSize()
clearSearchButton.setPreferredSize(Dimension(d.height, d.height))
clearSearchButton.setBackground(self.jSearchField.getBackground())
clearSearchButton.setBorder(None)
clearSearchButton.setOpaque(False)
clearSearchButton.setContentAreaFilled(False)
clearSearchButton.setFocusPainted(False)
jSearchPanel.add(clearSearchButton)
toolPanel.add(jSearchPanel)
self.showAllCheckBox = JCheckBox("Show all", actionPerformed = self.OnShowAllCheck)
toolPanel.add(self.showAllCheckBox)
splitPane = JSplitPane(JSplitPane.VERTICAL_SPLIT, jTreeSP, jTextSP)
splitPane.setOneTouchExpandable(True)
splitPane.setDividerLocation(300)
treePanel = JPanel(BorderLayout())
treePanel.add(toolPanel, BorderLayout.NORTH)
treePanel.add(splitPane, BorderLayout.CENTER)
loadSavePanel = JPanel()
loadSavePanel.setLayout(BoxLayout(loadSavePanel, BoxLayout.X_AXIS))
loadSavePanel.add(JButton("Load", actionPerformed=self.loadConfigDialog))
loadSavePanel.add(JButton("Save as", actionPerformed=self.writeConfigDialog))
self.rootPanel = JPanel()
self.rootPanel.setLayout(BorderLayout())
self.rootPanel.add(loadSavePanel, BorderLayout.PAGE_START)
self.rootPanel.add(treePanel, BorderLayout.CENTER)
def clearSearch(self, event):
self.jSearchField.setText("")
def OnShowAllCheck(self, event):
self.tree.setShowAll(self.showAllCheckBox.isSelected())
self.tree.doSearch(self.jSearchField.getText()) # Must repeat the search if one is active
def treeSelectionChanged(self, event):
"""When the user selects a new node in the tree, show info about the selected node
in the info text area below the tree."""
path = event.getNewLeadSelectionPath()
if path:
comp = path.getLastPathComponent()
if isinstance(comp, DefaultMutableTreeNode):
nodeData = comp.getUserObject()
if isinstance(nodeData, TreeNodeData):
self.jta.setText(getNodeInfoString(nodeData.knode))
self.jta.setCaretPosition(0)
def getPane(self):
"""Return the panel containing all the other components that is set up in __init__()."""
return self.rootPanel
def writeConfig(self, fileName):
"""Write the current configuration to the file specified."""
self.kconfig.write_config(fileName) # Save full configuration
#self.kconfig.write_min_config(fileName) # Save minimal configuration
def loadConfig(self, fileName):
"""Load configuration settings from the file specified."""
if os.path.isfile(fileName):
log.info(self.kconfig.load_config(fileName))
self.tree.createKconfShadowModel(self.kconfig)
self.tree.updateTree()
def writeConfigDialog(self, e):
"""Open a file dialog to save configuration"""
fileChooser = JFileChooser(os.getcwd())
retval = fileChooser.showSaveDialog(None)
if retval == JFileChooser.APPROVE_OPTION:
f = fileChooser.getSelectedFile()
self.writeConfig(f.getPath())
def loadConfigDialog(self, e):
"""Open a file dialog to select configuration to load"""
fileChooser = JFileChooser(os.getcwd())
retval = fileChooser.showOpenDialog(None)
if retval == JFileChooser.APPROVE_OPTION:
f = fileChooser.getSelectedFile()
log.info("Selected file: " + f.getPath())
self.loadConfig(f.getPath())
class SearchListener(DocumentListener):
"""Triggered when the user adds or removes characters in the search text field."""
def __init__(self, tree):
self.tree = tree
def changedUpdate(self, e):
doc = e.getDocument()
searchText = doc.getText(0, doc.getLength())
self.tree.doSearch(searchText)
def insertUpdate(self, e):
self.changedUpdate(e)
def removeUpdate(self, e):
self.changedUpdate(e)
if __name__ == "__main__":
# Set default .config file or load it from argv
if len(sys.argv) == 2:
# Specify "Kconfig"
mpconfig = MPConfig(sys.argv[1])
else:
# Specify "Kconfig" and ".config"
mpconfig = MPConfig(sys.argv[1], sys.argv[2])
jframe = JFrame("MPLAB X Kconfig Editor")
jframe.getContentPane().add(mpconfig.getPane())
jframe.setDefaultCloseOperation(JFrame.EXIT_ON_CLOSE)
jframe.setSize(500, 800)
jframe.setVisible(True)
c0df3129d8955fe3aa993705e4ac485becb1c9ed | 3,495 | py | Python | functional_test/test_sqlite.py | penguinolog/sqlalchemy_jsonfield | ["Apache-2.0"] | 17 stars
# coding=utf-8
# pylint: disable=missing-docstring, unused-argument
import os.path
import sqlite3
import tempfile
import unittest
import sqlalchemy.ext.declarative
import sqlalchemy.orm
try:
# noinspection PyPackageRequirements
import ujson as json
except ImportError:
import json
import sqlalchemy_jsonfield
# Path to test database
db_path = os.path.join(tempfile.gettempdir(), "test.sqlite3")
# Table name
table_name = "create_test"
# DB Base class
Base = sqlalchemy.ext.declarative.declarative_base()
# Model
class ExampleTable(Base):
__tablename__ = table_name
id = sqlalchemy.Column(sqlalchemy.Integer, primary_key=True)
row_name = sqlalchemy.Column(sqlalchemy.Unicode(64), unique=True)
json_record = sqlalchemy.Column(sqlalchemy_jsonfield.JSONField(), nullable=False)
class SQLIteTests(unittest.TestCase):
def setUp(self): # type: () -> None
if os.path.exists(db_path):
os.remove(db_path)
engine = sqlalchemy.create_engine("sqlite:///{}".format(db_path), echo=False)
Base.metadata.create_all(engine)
# noinspection PyPep8Naming
Session = sqlalchemy.orm.sessionmaker(engine)
self.session = Session()
def test_create(self): # type: () -> None
"""Check column type"""
# noinspection PyArgumentList
with sqlite3.connect(database="file:{}?mode=ro".format(db_path), uri=True) as conn:
conn.row_factory = sqlite3.Row
c = conn.cursor()
c.execute("PRAGMA TABLE_INFO({})".format(table_name))
collected = c.fetchall()
result = [dict(col) for col in collected]
columns = {info["name"]: info for info in result}
json_record = columns["json_record"]
self.assertIn(
json_record["type"],
("TEXT", "JSON"),
"Unexpected column type: received: {!s}, expected: TEXT|JSON".format(json_record["type"]),
)
def test_operate(self): # type: () -> None
"""Check column data operation"""
test_dict = {"key": "value"}
test_list = ["item0", "item1"]
# fill table
with self.session.transaction:
self.session.add_all(
[
ExampleTable(row_name="dict_record", json_record=test_dict),
ExampleTable(row_name="list_record", json_record=test_list),
]
)
# Validate backward check
dict_record = self.session.query(ExampleTable).filter(ExampleTable.row_name == "dict_record").first()
list_record = self.session.query(ExampleTable).filter(ExampleTable.row_name == "list_record").first()
self.assertEqual(
dict_record.json_record,
test_dict,
"Dict was changed: {!r} -> {!r}".format(test_dict, dict_record.json_record),
)
self.assertEqual(
list_record.json_record, test_list, "List changed {!r} -> {!r}".format(test_list, list_record.json_record)
)
# Low level
# noinspection PyArgumentList
with sqlite3.connect(database="file:{}?mode=ro".format(db_path), uri=True) as conn:
c = conn.cursor()
c.execute("SELECT row_name, json_record FROM {tbl}".format(tbl=table_name))
result = dict(c.fetchall())
self.assertEqual(result["dict_record"], json.dumps(test_dict))
self.assertEqual(result["list_record"], json.dumps(test_list))
c0e326802c17cadedbdcc95d716b27c009b7245b | 495 | py | Python | generic_op/pool_op.py | cap-lab/MidapSim | ["MIT"] | 2 stars
from .convpool_op_base import ConvPoolOpBase
class PoolOp(ConvPoolOpBase):
def __init__(
self,
op_type='Pool',
pool_type=None,
global_pooling=False,
**kwargs
):
super(PoolOp, self).__init__(op_type=op_type, **kwargs)
self.global_pooling = global_pooling
if pool_type is not None:
self.type = pool_type
def flip_operation(self):
self.pad_r, self.pad_l = self.pad_l, self.pad_r
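# Usage sketch. The keyword arguments below are assumptions: ConvPoolOpBase is
# defined elsewhere in this repository and its accepted kwargs are not shown here.
#   avg_pool = PoolOp(pool_type='AVE', global_pooling=True)  # hypothetical kwargs
#   avg_pool.flip_operation()  # swaps the left/right padding fields in place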
c0ebac4ab996b305d4af158e61ede7d45f0985a2 | 5,002 | py | Python | models/special_tensors.py | LaudateCorpus1/learning-compressible-subspaces | ["AML"] | 6 stars
#
# For licensing see accompanying LICENSE file.
# Copyright (C) 2021 Apple Inc. All Rights Reserved.
#
"""Utility functions to tag tensors with metadata.
The metadata remains with the tensor under torch operations that don't change
the values, e.g. .clone(), .contiguous(), .permute(), etc.
"""
import collections
import copy
from typing import Any
from typing import Optional
import numpy as np
import torch
QuantizeAffineParams2 = collections.namedtuple(
"QuantizeAffineParams", ["scale", "zero_point", "num_bits"]
)
class _SpecialTensor(torch.Tensor):
"""This class denotes special tensors.
It isn't intended to be used directly, but serves as a helper for tagging
tensors with metadata.
It subclasses torch.Tensor so that isinstance(t, torch.Tensor) returns True
for special tensors. It forbids some of the methods of torch.Tensor, and
overrides a few methods used to create other tensors, to ensure the result
is still special.
"""
_metadata = None
def __getattribute__(self, attr: str) -> Any:
# Disallow new_zeros, new_ones, new_full, etc.
if "new_" in attr:
raise AttributeError(
"Invalid attr {!r} for special tensors".format(attr)
)
return super().__getattribute__(attr)
def detach(self) -> "_SpecialTensor":
ret = super().detach()
ret.__class__ = _SpecialTensor
ret._metadata = self._metadata
return ret
@property
def data(self) -> "_SpecialTensor":
ret = super().data
ret.__class__ = _SpecialTensor
ret._metadata = self._metadata
return ret
def clone(self) -> "_SpecialTensor":
ret = super().clone()
ret.__class__ = _SpecialTensor
ret._metadata = self._metadata
return ret
def cuda(
self, device: Optional[torch.device] = None, non_blocking: bool = False
) -> "_SpecialTensor":
ret = super().cuda()
ret.__class__ = _SpecialTensor
ret._metadata = self._metadata
return ret
def contiguous(self) -> "_SpecialTensor":
ret = super().contiguous()
ret.__class__ = _SpecialTensor
ret._metadata = self._metadata
return ret
def view(self, *args, **kwargs) -> "_SpecialTensor":
ret = super().view(*args, **kwargs)
ret.__class__ = _SpecialTensor
ret._metadata = self._metadata
return ret
def permute(self, *args, **kwargs) -> "_SpecialTensor":
ret = super().permute(*args, **kwargs)
ret.__class__ = _SpecialTensor
ret._metadata = self._metadata
return ret
def __getitem__(self, *args, **kwargs) -> "_SpecialTensor":
ret = super().__getitem__(*args, **kwargs)
ret.__class__ = _SpecialTensor
ret._metadata = self._metadata
return ret
def __copy__(self) -> "_SpecialTensor":
ret = copy.copy(super())
ret.__class__ = _SpecialTensor
ret._metadata = self._metadata
return ret
def _check_type(tensor: torch.Tensor) -> None:
given_type = type(tensor)
if not issubclass(given_type, torch.Tensor):
raise TypeError("invalid type {!r}".format(given_type))
def tag_with_metadata(tensor: torch.Tensor, metadata: Any) -> None:
"""Tag a metadata to a tensor."""
_check_type(tensor)
tensor.__class__ = _SpecialTensor
tensor._metadata = metadata
RepresentibleByQuantizeAffine = collections.namedtuple(
"RepresentibleByQuantizeAffine", ["quant_params"]
)
def mark_quantize_affine(
tensor: torch.Tensor,
scale: float,
zero_point: int,
dtype: np.dtype = np.uint8,
) -> None:
"""Mark a tensor as quantized with affine.
See //xnorai/training/pytorch/extensions/functions:quantize_affine for more
info on this method of quantization.
The tensor itself can be a floating point Tensor. However, its values must
be representible with @scale and @zero_point. This function, for performance
reasons, does not validiate if the tensor is really quantizable as it
claims to be.
Arguments:
tensor (torch.Tensor): The tensor to be marked as affine-quantizable
Tensor.
scale (float): the scale (from quantization parameters).
zero_point (int): The zero_point (from quantization parameters).
dtype (numpy.dtype): Type of tensor when quantized (this is usually
numpy.uint8, which is used for Q8). A ValueError will be thrown if
the input dtype is not one of the following:
{numpy.uint8, numpy.int32}.
"""
allowed_dtypes = [np.uint8, np.int32]
if dtype not in allowed_dtypes:
raise ValueError(
"Provided dtype ({}) is not supported. Please use: {}".format(
dtype, allowed_dtypes
)
)
quant_params = QuantizeAffineParams2(scale, zero_point, dtype)
tag_with_metadata(tensor, RepresentibleByQuantizeAffine(quant_params))
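# Usage sketch, using only the functions defined above: the metadata tag
# survives value-preserving operations such as .clone(), because the subclass
# overrides them to re-attach _metadata.
#   t = torch.zeros(4)
#   mark_quantize_affine(t, scale=0.5, zero_point=0)
#   u = t.clone()
#   assert u._metadata == t._metadata  # RepresentibleByQuantizeAffine(...)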
c0f028af70b526fd95e136fc02b10d25bfdd263a | 2,705 | py | Python | porodynhe_example2d.py | sfepy/example_largedef_porodyn | ["MIT"]
# Rohan E., Lukeš V.
# Modeling large-deforming fluid-saturated porous media using
# an Eulerian incremental formulation.
# Advances in Engineering Software, 113:84-95, 2017,
# https://doi.org/10.1016/j.advengsoft.2016.11.003
#
# Run simulation:
#
# ./simple.py example_largedef_porodyn-1/porodynhe_example2d.py
#
# The results are stored in `example_largedef_porodyn-1/results`.
#
import numpy as nm
from porodyn_engine import incremental_algorithm,\
fc_fce, mat_fce, def_problem
import os.path as osp
wdir = osp.dirname(__file__)
def define():
params = {
'mesh_file': 'rect_16x16.vtk',
'mat_store_elem': 75, # element for which material data are stored
'u_store_node': 272, # node for which displacement is stored
'p_store_node': 144, # node for which pressure is stored
'dim': 2, # problem dimension
'dt': 0.01, # time step
't_end': 2.0, # end time
'force': 4e6, # applied force
'save_step': True, # save results in each time step?
'init_mode': False, # calculate initial state?
}
material_params = {
'param': {
'B': nm.eye(params['dim']),
'g': 9.81, # gravitational acceleration
},
'solid': {
'Phi': 0.58, # volume fraction
'lam': 8.4e6, # Lame coefficient
'mu': 5.6e6, # Lame coefficient
'rho': 2700, # density
},
'fluid': {
'kappa': 1e-1, # permeability parameter
'beta': 0.8, # permeability parameter
'rho': 1000, # density
'Kf': 2.2e10, # bulk modulus
},
}
regions = {
'Omega': 'all',
'Left': ('vertices in (x < 0.001)', 'facet'),
'Right': ('vertices in (x > 9.999)', 'facet'),
'Bottom': ('vertices in (y < 0.001)', 'facet'),
'Top_r': ('vertices in (y > 9.999) & (x > 4.999)', 'facet'),
'Top_l': ('vertices in (y > 9.999) & (x < 5.001)', 'facet'),
'ForceRegion': ('copy r.Top_r', 'facet'),
}
ebcs = {
'Fixed_Left_u': ('Left', {'u.0': 0.0}),
'Fixed_Right_u': ('Right', {'u.0': 0.0}),
'Fixed_Bottom_u': ('Bottom', {'u.1': 0.0}),
'Fixed_Top_p': ('Top_l', {'p.0': 0.0}),
}
###############################################
options = {
'output_dir': osp.join(wdir, 'results'),
'parametric_hook': 'incremental_algorithm',
}
filename_mesh = params['mesh_file']
materials, functions, fields, variables, equations, solvers = \
def_problem(params['dt'], params['force'])
return locals()
c0f0b3100db352f07b237c204da41ba3ca9a0b70 | 260 | py | Python | mailcheck/__init__.py | shacker/django-mailcheck | ["BSD-3-Clause"] | 1 star
"""
Pluggable Django email backend for capturing outbound mail for QA/review purposes.
"""
__version__ = "1.0"
__author__ = "Scot Hacker"
__email__ = "shacker@birdhouse.org"
__url__ = "https://github.com/shacker/django-mailcheck"
__license__ = "BSD License"
c0f1710109fd0bcc8c80d8dbd1890e68264eb994 | 4,923 | py | Python | nistapttools/histogram_functions.py | bcaplins/NIST_APT_TOOLS | ["Unlicense"]
# -*- coding: utf-8 -*-
"""
Created on Wed Aug 28 13:41:03 2019
@author: bwc
"""
import numpy as np
def bin_dat(dat,bin_width=0.001,user_roi=[],isBinAligned=False,isDensity=False):
user_roi = np.asarray(user_roi)
roi_supp = (user_roi.size == 2)
# Get roi
if isBinAligned and roi_supp:
lower = np.floor(np.min(user_roi)/bin_width)*bin_width
upper = np.ceil(np.max(user_roi)/bin_width)*bin_width
roi = np.array([lower, upper])
elif isBinAligned and (not roi_supp):
lower = np.floor(np.min(dat)/bin_width)*bin_width
upper = np.ceil(np.max(dat)/bin_width)*bin_width
roi = np.array([lower, upper])
elif (not isBinAligned) and roi_supp:
roi = user_roi
else: # (not isBinAligned) and (not roi_supp):
roi = np.array([np.min(dat), np.max(dat)])
num_bins = int(np.rint((roi[1]/bin_width-roi[0]/bin_width)))
histo = np.histogram(dat,range=(roi[0], roi[1]),bins=num_bins,density=isDensity)
xs = (histo[1][1:]+histo[1][0:-1])/2
ys = histo[0]
return (xs,ys)
def edges_to_centers(*edges):
"""
Convert bin edges to bin centers
Parameters
----------
*edges : bin edges
Returns
-------
centers : list of bin centers
"""
centers = []
for es in edges:
centers.append((es[0:-1]+es[1:])/2)
return centers
def corrhist(epos):
dat = epos['tof']
roi = [0, 5000]
delta = 1
# dat = epos['m2q']
# roi = [0, 100]
# delta = .1
#
# MF = np.mean(epos['tof']/np.sqrt(epos['m2q']))
# dat = np.sqrt(epos['m2q'])*MF
# roi = [0, np.sqrt(250)*MF]
# delta = .001*MF
##
N = int(np.ceil((roi[1]-roi[0])/delta))
corrhist = np.zeros([N,N], dtype=int)
multi_idxs = np.where(epos['ipp']>1)[0]
for multi_idx in multi_idxs:
n_hits = epos['ipp'][multi_idx]
cluster = dat[multi_idx:multi_idx+n_hits]
idx1 = -1
idx2 = -1
for i in range(n_hits):
for j in range(i+1,n_hits):
idx1 = int(np.floor(cluster[i]/delta))
idx2 = int(np.floor(cluster[j]/delta))
if idx1 < N and idx2 < N:
corrhist[idx1,idx2] += 1
return corrhist+corrhist.T-np.diag(np.diag(corrhist))
def dummy(epos):
# Voltage and bowl correct ToF data (epos must be supplied by the caller)
import matplotlib.pyplot as plt
from voltage_and_bowl import do_voltage_and_bowl
p_volt = np.array([])
p_bowl = np.array([])
tof_corr, p_volt, p_bowl = do_voltage_and_bowl(epos,p_volt,p_bowl)
epos_vb = epos.copy()
epos_vb['tof'] = tof_corr.copy()
import voltage_and_bowl
tof_vcorr = voltage_and_bowl.mod_full_voltage_correction(p_volt,epos['tof'],epos['v_dc'])
epos_v = epos.copy()
epos_v['tof'] = tof_vcorr.copy()
tof_bcorr = voltage_and_bowl.mod_geometric_bowl_correction(p_bowl,epos['tof'],epos['x_det'],epos['y_det'])
epos_b = epos.copy()
epos_b['tof'] = tof_bcorr.copy()
ROI = [0, None]
ch = corrhist(epos)
fig1 = plt.figure(num=1)
plt.clf()
plt.imshow(np.log2(1+ch))
plt.title('raw')
fig1.gca().set_xlim(ROI[0],ROI[1])
fig1.gca().set_ylim(ROI[0],ROI[1])
ch = corrhist(epos_v)
fig2 = plt.figure(num=2)
plt.clf()
plt.imshow(np.log2(1+ch))
plt.title('volt')
fig2.gca().set_xlim(ROI[0],ROI[1])
fig2.gca().set_ylim(ROI[0],ROI[1])
ch = corrhist(epos_b)
fig3 = plt.figure(num=3)
plt.clf()
plt.imshow(np.log2(1+ch))
plt.title('bowl')
fig3.gca().set_xlim(ROI[0],ROI[1])
fig3.gca().set_ylim(ROI[0],ROI[1])
ch = corrhist(epos_vb)
fig4 = plt.figure(num=4)
plt.clf()
plt.imshow(np.log10(1+ch))
plt.title('v+b')
# fig4.gca().set_xlim(ROI[0],ROI[1])
# fig4.gca().set_ylim(ROI[0],ROI[1])
idxs = np.where(epos['ipp'] == 2)[0]
fig5 = plt.figure(num=5)
plt.clf()
dts = np.abs(tof_corr[idxs]-tof_corr[idxs+1])
plt.hist(dts,bins=np.arange(0,2000,.5),label='deltaT')
plt.hist(tof_corr[np.r_[idxs,idxs+1]],bins=np.arange(0,2000,.5),label='since t0')
fig66 = plt.figure(num=66)
plt.clf()
dts = np.abs(tof_corr[idxs]-tof_corr[idxs+1])
# sus = np.sqrt(tof_corr[idxs]**2+tof_corr[idxs+1]**2)
# sus = np.fmax(tof_corr[idxs],tof_corr[idxs+1])
sus = (tof_corr[idxs]+tof_corr[idxs+1])/np.sqrt(2)
plt.plot(sus,dts,'.',ms=1,alpha=1)
# fig66.gca().axis('equal')
fig66.gca().set_xlim(0,7000)
fig66.gca().set_ylim(-100, 800)
return
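# Usage sketch for bin_dat() (synthetic data, not from the original file):
#   import numpy as np
#   xs, ys = bin_dat(np.random.randn(10000), bin_width=0.1,
#                    user_roi=[-4, 4], isBinAligned=True)
#   # xs are bin centers, ys are the per-bin counts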
c0f3c763fc8fb9b275792346291c6e8ea034e967 | 1,138 | py | Python | bage_utils/inspect_util.py | bage79/nlp4kor | ["MIT"] | 60 stars
import inspect # http://docs.python.org/2/library/inspect.html
from pprint import pprint
from bage_utils.dict_util import DictUtil # @UnusedImport
class InspectUtil(object):
@staticmethod
def summary():
frame = inspect.stack()[1]
d = {'file': frame[1], 'line': frame[2], 'function': frame[3], 'code': frame[4]}
return d
@staticmethod
def all():
frame = inspect.stack()[1]
d = {}
for key in dir(frame[0]):
d[key] = getattr(frame[0], key)
return DictUtil.sort_by_key(d)
@staticmethod
def locals():
frame = inspect.stack()[1]
d = {}
for key in frame[0].f_locals:
d[key] = frame[0].f_locals[key]
return DictUtil.sort_by_key(d)
@staticmethod
def globals():
frame = inspect.stack()[1]
d = {}
for key in frame[0].f_globals:
d[key] = frame[0].f_globals[key]
return DictUtil.sort_by_key(d)
def __test():
pprint(InspectUtil.summary())
pprint(InspectUtil.locals())
if __name__ == '__main__':
pprint(InspectUtil.summary())
# __test()
c0f5fb0852a3f468b938572e90b83ad69c9f9511 | 3,914 | py | Python | Common.py | DongDong-123/zgg_active | ["Apache-2.0"]
import os
import random
import time
import xlwt
from selenium.webdriver.common.action_chains import ActionChains
from selenium.webdriver.support.ui import WebDriverWait
from front_login import *
from readConfig import ReadConfig
from db import DbOperate
from selenium.webdriver.chrome.options import Options
from mysqldb import connect
chrome_options = Options()
chrome_options.add_argument('--headless')
driver = webdriver.Chrome(chrome_options=chrome_options)
# driver = webdriver.Chrome()
driver.maximize_window()
driver.get(ReadConfig().get_root_url())
driver.get(ReadConfig().get_root_url())
class Common(object):
def __init__(self):
self.driver = driver
# Excel writing
self.row = 0
self.workbook = xlwt.Workbook(encoding='utf-8')
self.booksheet = self.workbook.add_sheet('Sheet1')
self.timetemp = time.strftime("%Y-%m-%d-%H-%M-%S", time.localtime()) # timestamp used in saved Excel file names
# number of cases per order
self.number = 1
self.report_path = ReadConfig().save_report()
self.windows = None
self.screen_path = ReadConfig().save_screen()
# increase the case count
def number_add(self):
if self.number > 1:
for i in range(self.number):
self.driver.find_element_by_xpath("//a[@class='add']").click()
else:
self.driver.find_element_by_xpath("//a[@class='add']").click()
# decrease the case count to 1
def number_minus(self):
while self.number > 1:
self.driver.find_element_by_xpath("//a[@class='jian']").click()
# save to the database
def save_to_mysql(self, parm):
code = 0
if isinstance(parm, list):
parm.append(code)
else:
parm = list(parm)
parm.append(code)
res_code = connect(parm)
print("存储状态", res_code)
# 执行下单
def execute_function(self, callback):
try:
eval("self.{}()".format(callback))
except Exception as e:
print("错误信息:", e)
self.write_error_log(callback)
time.sleep(0.5)
self.write_error_log(str(e))
def write_error_log(self, info):
error_log_path = os.path.join(self.report_path,
"error_log_{}.log".format(time.strftime("%Y-%m-%d", time.localtime())))
with open(error_log_path, "a", encoding="utf-8") as f:
f.write("{}: ".format(time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())) + info + "\n")
# normalize the price string
def process_price(self, price):
if "¥" in price:
price = price.replace("¥", '')
return price
# close extra windows
def closed_windows(self, num):
self.windows = self.driver.window_handles
for n in range(num + 1, len(self.windows)):
self.driver.switch_to.window(self.windows[n])
self.driver.close()
self.windows = self.driver.window_handles
self.driver.switch_to.window(self.windows[num])
# save info to Excel
def excel_number(self, infos):
# get the case name and case number
if infos:
n = 0
for info in infos:
self.booksheet.write(self.row, n, info)
self.booksheet.col(n).width = 300 * 28
n += 1
path = os.path.join(self.report_path, "report_{}.xls".format(self.timetemp))
self.workbook.save(path)
# take a screenshot of the given window
def qr_shotscreen(self, windows_handle, name):
current_window = self.driver.current_window_handle
if current_window != windows_handle:
self.driver.switch_to.window(windows_handle)
path = self.screen_path
self.driver.save_screenshot(path + self.timetemp + name + ".png")
print("截图成功")
self.driver.switch_to.window(current_window)
else:
path = self.screen_path
self.driver.save_screenshot(path + self.timetemp + name + ".png")
print("screenshot saved")
c0f64387b72b3f7cb9554217c9f76926a2cb5bad | 5,367 | py | Python | dashboard_generator.py | vgm236/exec-dash | ["MIT"] | 1 star
# dashboard_generator.py
import os.path # helps to save in a different folder
import pandas as pd
import itertools
import locale # from https://stackoverflow.com/Questions/320929/currency-formatting-in-python
from os import listdir
from os.path import isfile, join
#for chart generation
import matplotlib
import matplotlib.pyplot as plt
import matplotlib.ticker as ticker
# FILES PATH
save_path = 'C:/Users/Owner/Desktop/NYU-MBA/Programming/Files/monthly-sales/data'
# INTRODUCTION
print("Select one month to report")
print("---------------------------------------------------------------------")
# LISTING FILES (sorted and in a proper list)
onlyfiles = [f for f in listdir(save_path) if isfile(join(save_path, f))] #https://stackoverflow.com/questions/3207219/how-do-i-list-all-files-of-a-directory
onlyfiles.sort()
print(*onlyfiles, sep = "\n") #https://www.geeksforgeeks.org/print-lists-in-python-4-different-ways/
print("---------------------------------------------------------------------")
# REPORT SELECTION
selected_year = input("Please input a year (Example 2018 -- for Year): ")
selected_month = input("Please input a month (Example 01 -- for January): ")
# FILE SELECTED
file_name = "sales-" + selected_year + selected_month + ".csv"
# OPENING SPECIFIC FILE
find_file = os.path.join(save_path, file_name) #find the file
if not os.path.exists(find_file): # bail out if the file does not exist
print("---------------------------------------------------------------------")
print("\n")
print("The file selected do not exist. Please try again")
print("\n")
print("---------------------------------------------------------------------")
exit()
stats = pd.read_csv(find_file)
# PERFORMING THE SUM
total_sales = stats["sales price"].sum()
# FORMATTING TOTAL SALES
locale.setlocale( locale.LC_ALL, '' )
total_sales_format = locale.currency(total_sales, grouping= True)
print("---------------------------------------------------------------------")
# SALES REPORT DATE
if selected_month == "01":
month_name = "JANUARY"
if selected_month == "02":
month_name = "FEBRUARY"
if selected_month == "03":
month_name = "MARCH"
if selected_month == "04":
month_name = "APRIL"
if selected_month == "05":
month_name = "MAY"
if selected_month == "06":
month_name = "JUNE"
if selected_month == "07":
month_name = "JULY"
if selected_month == "08":
month_name = "AUGUST"
if selected_month == "09":
month_name = "SEPTEMBER"
if selected_month == "10":
month_name = "OCTOBER"
if selected_month == "11":
month_name = "NOVEMBER"
if selected_month == "12":
month_name = "DECEMBER"
print("SALES REPORT " + "(" + month_name + " " + selected_year + ")")
# PRINTING TOTAL SALES
print("TOTAL SALES: " + (total_sales_format))
print("\n")
# TOP SELLING PRODUCTS
product_totals = stats.groupby(["product"]).sum()
product_totals = product_totals.sort_values("sales price", ascending=False)
top_sellers = []
rank = 1
for i, row in product_totals.iterrows():
d = {"rank": rank, "name": row.name, "monthly_sales": row["sales price"]}
top_sellers.append(d)
rank = rank + 1
def to_usd(my_price):
return "${0:,.2f}".format(my_price)
print("TOP SELLING PRODUCTS:")
for d in top_sellers:
locale.setlocale( locale.LC_ALL, '' )
print(" " + str(d["rank"]) + ") " + d["name"] +
": " + to_usd(d["monthly_sales"]))
print("\n")
print("---------------------------------------------------------------------")
print("\n")
print("GENERATING BAR CHART...")
print("\n")
print("---------------------------------------------------------------------")
### PRINT BAR CHART
# first two lines are the list comprehensions to make a list of dictionaries into a list)
x = [p["name"] for p in top_sellers] ## VERY IMPORTANT
y = [p["monthly_sales"] for p in top_sellers] ## VERY IMPORTANT
#sorting in the correct order
x.reverse()
y.reverse()
# break charts into two
fig, ax = plt.subplots() # enables us to further customize the figure and/or the axes
#formatting chart
usd_formatter = ticker.FormatStrFormatter('$%1.0f')
ax.xaxis.set_major_formatter(usd_formatter)
# CHART GENERATION
plt.barh(x, y)
plt.title("TOP-SELLING PRODUCTS " + "(" + month_name + " " + selected_year + ")") # AXIS TITLES
plt.xlabel('Monthly Sales (USD)') # AXIS TITLES
plt.ylabel("Product") # AXIS TITLES
# formatting numbers
for i, v in enumerate(y):
ax.text(v, i, usd_formatter(v), color='black', fontweight='bold')
#https://matplotlib.org/users/colors.html
#https://matplotlib.org/3.1.0/gallery/pyplots/text_commands.html#sphx-glr-gallery-pyplots-text-commands-py
plt.tight_layout() # ensures all areas of the chart are visible by default (fixes labels getting cut off)
plt.show()
exit()
## FULL SOLUTION PROVIDED BY THE PROFESSOR
# # this section needs to come before the chart construction
# fig, ax = plt.subplots() # enables us to further customize the figure and/or the axes
# usd_formatter = ticker.FormatStrFormatter('$%1.0f')
# ax.xaxis.set_major_formatter(usd_formatter)
#
# # chart construction
# plt.barh(sorted_products, sorted_sales)
# plt.title(chart_title)
# plt.ylabel("Product")
# plt.xlabel("Monthly Sales (USD)")
#
# plt.tight_layout() # ensures all areas of the chart are visible by default (fixes labels getting cut off)
# plt.show()
c0f84e0c95d431aa5ccd03662827d19008ac7c6c | 2,235 | py | Python | product_spider/spiders/medicalisotopes_spider.py | Pandaaaa906/product_spider | ["MIT"]
from urllib.parse import urljoin
from scrapy import Request
from product_spider.items import RawData
from product_spider.utils.functions import strip
from product_spider.utils.spider_mixin import BaseSpider
class MedicalIsotopesSpider(BaseSpider):
name = "medicalisotopes"
base_url = "https://www.medicalisotopes.com/"
start_urls = ['https://www.medicalisotopes.com/productsbycategories.php', ]
def parse(self, response):
a_nodes = response.xpath('//div[contains(@class, "main-content")]//a')
for a in a_nodes:
parent = a.xpath('./text()').get()
url = a.xpath('./@href').get()
yield Request(urljoin(self.base_url, url), callback=self.parse_list, meta={'parent': parent})
def parse_list(self, response):
rel_urls = response.xpath('//td[2]/a/@href').getall()
parent = response.meta.get('parent')
for rel_url in rel_urls:
yield Request(urljoin(self.base_url, rel_url), callback=self.parse_detail, meta={'parent': parent})
next_page = response.xpath('//a[@class="c-page"]/following-sibling::a[text()!="NEXT"]/@href').get()
if next_page:
yield Request(urljoin(self.base_url, next_page), callback=self.parse_list, meta={'parent': parent})
def parse_detail(self, response):
tmp = '//td[contains(text(), {!r})]/following-sibling::td//text()'
package = strip(response.xpath('normalize-space(//td/table//td[1]/text())').get())
d = {
'brand': 'medicalisotopes',
'parent': response.meta.get('parent'),
'cat_no': strip(response.xpath(tmp.format("Catalog Number:")).get()),
'en_name': strip(response.xpath('//th[contains(text(), "Product:")]/following-sibling::th/text()').get()),
'cas': strip(response.xpath(tmp.format("CAS Number:")).get()),
'mf': strip(''.join(response.xpath(tmp.format("Formula:")).getall())),
'mw': strip(response.xpath(tmp.format("Molecular Weight:")).get()),
'info3': package and package.rstrip('\xa0='),
'info4': strip(response.xpath('//td/table//td[2]/text()').get()),
'prd_url': response.url,
}
yield RawData(**d)
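# Usage sketch: with this spider registered in a Scrapy project, it is run with
# the standard Scrapy CLI, using the `name` defined above:
#   scrapy crawl medicalisotopes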
c0fa6b58d78457006cba2d731fe207bcc18728f5 | 9,819 | py | Python | smiles_parsers/Smarts.py | UnixJunkie/frowns | ["BSD-3-Clause"]
#!/usr/bin/env python
import string, re
import Handler
#######################
# Define some regular expressions inside a quoted string
# then turn the string into the actual data structure.
# (I found it was easiest to understand when done this way.)
definitions = r"""
# These are the atomic symbols Daylight allows outside of []s
# See "atom_class" for names like "a" and "A"
raw_atom Cl|Br|[cnospBCNOFPSI*]
# For atoms inside of []s
open_bracket \[
close_bracket \]
# See "element_modifiers" for the patterns for element names
# charges, chiralities, H count, etc.
# [235U]
weight \d+
# [#6]
atomic_number #\d+
# [!C]
atom_not !
# & is highest (an "and")
# , is next (an "or")
# ; is lowest (an "and")
# [n&H] [n,H] [c,h;H1]
atom_binary [&,;]
# C.C
dot \.
# - single bond (aliphatic)
# / directional single bond "up"
# \ directional single bond "down"
# /? directional bond "up or unspecified"
# \? directional bond "down or unspecified"
# = double bond
# # triple bond
# : aromatic bond
# ~ any bond (wildcard)
# @ any ring bond
bond [/\\]\??|[=#:~@-]
# *!:* -- not aromatic
bond_not !
# *@;!:* -- same as !:
bond_binary [&;,]
# (C).(C)
open_zero \(
# C(C)
open_branch \(
# [$(*C);$(*CC)]
open_recursive_smarts \$\(
# special cased because it closes open_zero, open_branch, and
# recursive_smarts
close_parens \)
# Ring closures, 1, %5 %99 (and even %00 for what it's worth)
closure \d|%\d\d?
"""
#######################
# Turn the above string into key/value pairs where the
# values are the compiled regular expressions.
info = {}
for line in string.split(definitions, "\n"):
line = string.strip(line)
if not line or line[:1] == "#":
continue
name, pattern = string.split(line)
info[name] = re.compile(pattern)
del line, name, pattern
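# Worked example (sketch) of the compiled table built above:
#   info["raw_atom"].match("Cl")   # matches the two-letter aliphatic chlorine
#   info["closure"].match("%12")   # matches a two-digit ring closure
#   info["bond"].match("/?")       # matches "up or unspecified" directional bond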
info["atom_class"] = re.compile(r"""
(?P<raw_aromatic>a)| # Not really sure what these mean
(?P<raw_b_unknown>b)|
(?P<raw_f_unknown>f)|
(?P<raw_h_unknown>h)|
(?P<raw_i_unknown>i)|
(?P<raw_r_unknown>r)|
(?P<raw_aliphatic>A)|
(?P<raw_R_unknown>R)
""", re.X)
# 'H' is used for the hydrogen count, so those searches require a
# special recursive SMARTS definition. Eg, for deuterium or tritium
# [$([2H]),$([3H])]
# This is implemented as a special-case hack. Note: if there's
# an error in the parse string in this section then the error
# location will point to the start of this term, not at the
# character that really caused the error. Can be fixed with an
# 'error_' like I did for the SMILES -- not needed for now. XXX
hydrogen_term_fields = [
"open_recursive_smarts",
"open_bracket",
"weight",
"element",
"positive_count",
"positive_symbols",
"negative_count",
"negative_symbols",
"close_bracket",
"close_recursive_smarts",
]
info["hydrogen_term"] = re.compile(r"""
(?P<open_recursive_smarts>\$\()
(?P<open_bracket>\[)
(?P<weight>\d+)? # optional molecular weight [2H]
(?P<element>H) # Must be a hydrogen
( # optional charge
(?P<positive_count>\+\d+)| # +3
(?P<positive_symbols>\++)| # ++
(?P<negative_count>\-\d+)| # -2
(?P<negative_symbols>\-+)| # ---
)?
(?P<close_bracket>\])
(?P<close_recursive_smarts>\))
""", re.X)
element_symbols_pattern = \
r"C[laroudsemf]?|Os?|N[eaibdpos]?|S[icernbmg]?|P[drmtboau]?|" \
r"H[eofgas]|c|n|o|s|p|A[lrsgutcm]|B[eraik]?|Dy|E[urs]|F[erm]?|" \
r"G[aed]|I[nr]?|Kr?|L[iaur]|M[gnodt]|R[buhenaf]|T[icebmalh]|" \
r"U|V|W|Xe|Yb?|Z[nr]|\*"
info["element_modifier"] = re.compile(r"""
(?P<element>
# This does *not* contain H. Hydrogen searches must be done
# with a special recursive SMARTS. On the other hand, it does
# include the lower case aromatic names.
""" + element_symbols_pattern + r"""
)|
(?P<aromatic>a)| # aromatic
(?P<aliphatic>A)| # Aliphatic
(?P<degree>D\d+)| # Degree<n>
(?P<total_hcount>H\d*)| # total Hydrogen count<n> (defaults to 1)
(?P<imp_hcount>h\d*)| # implicit hydrogen count<n> (defaults to 1)
(?P<ring_membership>R\d*)| # in <n> Rings (no n means any rings)
(?P<ring_size>r\d*)| # in a ring of size <n> (no n means any rings)
(?P<valence>v\d+)| # total bond order of <n>
(?P<connectivity>X\d+)| # <n> total connections
(?P<positive_count>\+\d+)| # +2 +3
(?P<positive_symbols>\++)| # + ++ +++
(?P<negative_count>\-\d+)| # -1 -4
(?P<negative_symbols>\-+)| # -- - -------
# XXX What about chiral_count?
(?P<chiral_named> # The optional '?' means "or unspecified"
@TH[12]\??| # @TH1 @TH2?
@AL[12]\??| # @AL2?
@SP[123]\??| # @SP3 @SP1?
    @TB(1[0-9]?|20?|[3-9])\??| # @TB{1 through 20}
@OH(1[0-9]?|2[0-9]?|30?|[4-9])\?? # @OH{1 through 30}
)|
(?P<chiral_symbols>@@?\??) # @ (anticlockwise) or @@ (clockwise)
""", re.X)
# The ')' closes three different open parens. This maps from the
# previous open state to the appropriate close state.
close_parens_states = {
"open_branch": "close_branch",
"open_recursive_smarts": "close_recursive_smarts",
"open_zero": "close_zero",
}
#### Some helpful definitions to reduce clutter and complication
# Possible transitions from the start node. Also visited after
# a '.' disconnect or in a recursive SMARTS.
expecting_start = ("raw_atom", "atom_class", "open_bracket", "open_zero")
# Looking for node definition, like "C" or "a" or "["
expecting_atom = ("raw_atom", "atom_class", "open_bracket")
# Inside of []s: 235U, #6, R, $([2H]), $(*=C), !
expecting_element_start = ("weight", "atomic_number",
"element_modifier", "hydrogen_term",
"open_recursive_smarts", "atom_not")
# the ';' in [n;H1] or the ']' at the end
expecting_element_end = ("atom_binary", "close_bracket")
# All bonds start with a '!' or one of the bond symbols
expecting_bond_start = ("bond", "bond_not")
expecting_raw_term = expecting_atom + expecting_bond_start + \
("close_parens", "open_branch", "dot", "closure")
expecting_modifier = ("element_modifier", "open_recursive_smarts")
table = {
"start": expecting_start,
# (C).(R).[U].([$(*)])
"open_zero": ("raw_atom", "atom_class", "open_bracket"),
# as well as (CC(C))
"close_zero": ("dot", "close_parens"),
    # Raw terms are things like 'C', '[U]', '%10', '.', '(', '!#'
"raw_atom": expecting_raw_term,
# An atom_class is a non-specific atom term, like 'A' or 'r'
"atom_class": expecting_raw_term,
# the []s
"open_bracket": expecting_element_start,
"close_bracket": expecting_raw_term,
    # Yes, '[!!!!C]' is legal according to the docs, but it isn't
    # supported by this parser unless you optimize it first.
"atom_not": expecting_element_start,
"atom_binary": expecting_element_start,
# "14N", "14a", ...
# Note that weight can only be set once so it isn't a modifier
# Also, "14#6" isn't legal (tested against the toolkit)
"weight": expecting_modifier,
# "#6R2" or "#8," or "#7]"
# The atomic_number can only be set once so it isn't a modifier
"atomic_number": expecting_modifier + expecting_element_end,
# All of these are type of modifiers
"element_modifier": expecting_modifier + expecting_element_end,
"hydrogen_term": expecting_modifier + expecting_element_end,
"close_recursive_smarts": expecting_modifier + expecting_element_end,
    # This is the recursive part -- goes back to the beginning
"open_recursive_smarts": expecting_start,
# C=C, C1CCC=1, C~-C, C=(C)C, C=,-C
"bond": expecting_atom + ("closure", "bond", "open_branch",
"bond_binary"),
# C!!=C
"bond_not": expecting_bond_start,
# C=,-C
"bond_binary": expecting_bond_start,
"closure": expecting_raw_term,
"close_branch": expecting_raw_term,
"open_branch": expecting_atom + expecting_bond_start + ("dot",),
# After a "." we can start all over again
"dot": expecting_start,
}
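# Hedged walk-through (illustrative, not part of the original module):
# tokenizing "C=[NH]" threads through the table above as
#   start -> raw_atom 'C' -> bond '=' -> open_bracket '['
#         -> element_modifier 'N' -> element_modifier 'H'
#         -> close_bracket ']'
# with each state's entry in `table` supplying the candidate patterns
# tried for the next token.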
def tokenize(s, handler=None):
    # Build a fresh default handler per call; the original used a mutable
    # default argument, which shares one TokenHandler across all calls.
    if handler is None:
        handler = Handler.TokenHandler()
    expected = table["start"]
    parens_stack = []
    n = len(s)
    i = 0
    handler.begin()
    while i < n:
        for state in expected:
            m = info[state].match(s, i)
            if m:
                break
        else:
            handler.error("Unknown character", i, s[i:])
            return
        if state in close_parens_states:
            parens_stack.append(state)
        elif state == "close_parens":
            try:
                state = close_parens_states[parens_stack.pop()]
            except IndexError:
                # Too many close parens
                handler.error("Too many ')'", i, s[i:])
                return
        d = m.groupdict()
        if d and state == "hydrogen_term":
            # Special case the hydrogen term
            for field in hydrogen_term_fields:
                if d[field] is not None:
                    handler.add_token(field, i, d[field])
            # print(" --> New state:", state)
        else:
            name = state
            if d:
                # There should only be one match
                for name, v in d.items():
                    if v is not None:
                        break
            handler.add_token(name, i, m.group(0))
        expected = table[state]
        i = m.end(0)
    handler.end()
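# A minimal usage sketch (added for illustration).  DebugHandler is a
# hypothetical stand-in for Handler.TokenHandler; it implements only the
# four methods tokenize() actually calls.
class DebugHandler:
    def begin(self):
        self.tokens = []
    def add_token(self, name, pos, text):
        self.tokens.append((name, pos, text))
    def error(self, msg, pos, text):
        raise ValueError("%s at position %d: %r" % (msg, pos, text))
    def end(self):
        pass

# h = DebugHandler()
# tokenize("C=[NH]", h)
# h.tokens == [("raw_atom", 0, "C"), ("bond", 1, "="),
#              ("open_bracket", 2, "["), ("element", 3, "N"),
#              ("total_hcount", 4, "H"), ("close_bracket", 5, "]")]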
[record stats: avg_line_length 31.78 | max_line_length 76 | alphanum_fraction 0.576 | 9,819 bytes | 308 lines | remaining qsc_* quality-signal columns ~0]
[record: pyvizio/const.py | hexsha c0fe21b15a59a46814a6a24b71ed4f6e93699049 | 8,402 bytes | py/Python | license MIT]
[max_stars: jezzab/pyvizio @ 8086f9e5aac49d1d99ade02684ca35c05e03a7eb, 72 stars, 2017-08-08T19:32:12.000Z - 2022-03-18T03:18:41.000Z]
[max_issues: raman325/pyvizio @ 9cf45fcc9b409caf223a38d8f79c775742ab4127, 48 issues, 2017-09-16T16:37:54.000Z - 2022-01-23T20:43:42.000Z]
[max_forks: ConnectionMaster/pyvizio @ 0fe4558557917509d3da3bb24f9221f15ba901ce, 42 forks, 2017-09-04T22:59:21.000Z - 2022-03-18T03:18:30.000Z]
"""pyvizio constants."""
DEVICE_CLASS_SPEAKER = "speaker"
DEVICE_CLASS_TV = "tv"
DEVICE_CLASS_CRAVE360 = "crave360"
DEFAULT_DEVICE_ID = "pyvizio"
DEFAULT_DEVICE_CLASS = DEVICE_CLASS_TV
DEFAULT_DEVICE_NAME = "Python Vizio"
DEFAULT_PORTS = [7345, 9000]
DEFAULT_TIMEOUT = 5
MAX_VOLUME = {DEVICE_CLASS_TV: 100, DEVICE_CLASS_SPEAKER: 31, DEVICE_CLASS_CRAVE360: 100}
# Values reported as the current input while an app is active
INPUT_APPS = ["SMARTCAST", "CAST"]
# App name returned when the running app is not in the app dictionary
UNKNOWN_APP = "_UNKNOWN_APP"
NO_APP_RUNNING = "_NO_APP_RUNNING"
SMARTCAST_HOME = "SmartCast Home"
APP_CAST = "Cast"
# NAME_SPACE values that appear to be equivalent
EQUIVALENT_NAME_SPACES = (2, 4)
APP_HOME = {
"name": SMARTCAST_HOME,
"country": ["*"],
"config": [
{
"NAME_SPACE": 4,
"APP_ID": "1",
"MESSAGE": "http://127.0.0.1:12345/scfs/sctv/main.html",
}
],
}
# No longer needed but kept around in case the external source for APPS is unavailable
APPS = [
{
"name": "Prime Video",
"country": ["*"],
"id": ["33"],
"config": [
{
"APP_ID": "4",
"NAME_SPACE": 4,
"MESSAGE": "https://atv-ext.amazon.com/blast-app-hosting/html5/index.html?deviceTypeID=A3OI4IHTNZQWDD",
},
{"NAME_SPACE": 2, "APP_ID": "4", "MESSAGE": "None"},
],
},
{
"name": "CBS All Access",
"country": ["usa"],
"id": ["9"],
"config": [{"NAME_SPACE": 2, "APP_ID": "37", "MESSAGE": "None"}],
},
{
"name": "CBS News",
"country": ["usa", "can"],
"id": ["56"],
"config": [{"NAME_SPACE": 2, "APP_ID": "42", "MESSAGE": "None"}],
},
{
"name": "Crackle",
"country": ["usa"],
"id": ["8"],
"config": [{"NAME_SPACE": 2, "APP_ID": "5", "MESSAGE": "None"}],
},
{
"name": "Curiosity Stream",
"country": ["usa", "can"],
"id": ["37"],
"config": [{"NAME_SPACE": 2, "APP_ID": "12", "MESSAGE": "None"}],
},
{
"name": "Fandango Now",
"country": ["usa"],
"id": ["24"],
"config": [{"NAME_SPACE": 2, "APP_ID": "7", "MESSAGE": "None"}],
},
{
"name": "FilmRise",
"country": ["usa"],
"id": ["47"],
"config": [{"NAME_SPACE": 2, "APP_ID": "24", "MESSAGE": "None"}],
},
{
"name": "Flixfling",
"country": ["*"],
"id": ["49"],
"config": [{"NAME_SPACE": 2, "APP_ID": "36", "MESSAGE": "None"}],
},
{
"name": "Haystack TV",
"country": ["usa", "can"],
"id": ["35"],
"config": [
{
"NAME_SPACE": 0,
"APP_ID": "898AF734",
"MESSAGE": '{"CAST_NAMESPACE":"urn:x-cast:com.google.cast.media","CAST_MESSAGE":{"type":"LOAD","media":{},"autoplay":true,"currentTime":0,"customData":{"platform":"sctv"}}}',
}
],
},
{
"name": "Hulu",
"country": ["usa"],
"id": ["19"],
"config": [
{
"APP_ID": "3",
"NAME_SPACE": 4,
"MESSAGE": "https://viziosmartcast.app.hulu.com/livingroom/viziosmartcast/1/index.html#initialize",
},
{"NAME_SPACE": 2, "APP_ID": "3", "MESSAGE": "None"},
],
},
{
"name": "iHeartRadio",
"country": ["usa"],
"id": ["11"],
"config": [{"NAME_SPACE": 2, "APP_ID": "6", "MESSAGE": "None"}],
},
{
"name": "NBC",
"country": ["usa"],
"id": ["43"],
"config": [{"NAME_SPACE": 2, "APP_ID": "10", "MESSAGE": "None"}],
},
{
"name": "Netflix",
"country": ["*"],
"id": ["34"],
"config": [{"NAME_SPACE": 3, "APP_ID": "1", "MESSAGE": "None"}],
},
{
"name": "Plex",
"country": ["usa", "can"],
"id": ["40"],
"config": [
{
"APP_ID": "9",
"NAME_SPACE": 4,
"MESSAGE": "https://plex.tv/web/tv/vizio-smartcast",
},
{"NAME_SPACE": 2, "APP_ID": "9", "MESSAGE": "None"},
],
},
{
"name": "Pluto TV",
"country": ["usa"],
"id": ["12"],
"config": [
{"APP_ID": "65", "NAME_SPACE": 4, "MESSAGE": "https://smartcast.pluto.tv"},
{
"NAME_SPACE": 0,
"APP_ID": "E6F74C01",
"MESSAGE": '{"CAST_NAMESPACE":"urn:x-cast:tv.pluto","CAST_MESSAGE":{"command":"initializePlayback","channel":"","episode":"","time":0}}',
},
],
},
{
"name": "RedBox",
"country": ["usa"],
"id": ["55"],
"config": [{"NAME_SPACE": 2, "APP_ID": "41", "MESSAGE": "None"}],
},
{
"name": "TasteIt",
"country": ["*"],
"id": ["52"],
"config": [{"NAME_SPACE": 2, "APP_ID": "26", "MESSAGE": "None"}],
},
{
"name": "Toon Goggles",
"country": ["usa", "can"],
"id": ["46"],
"config": [{"NAME_SPACE": 2, "APP_ID": "21", "MESSAGE": "None"}],
},
{
"name": "Vudu",
"country": ["usa"],
"id": ["6"],
"config": [
{
"APP_ID": "31",
"NAME_SPACE": 4,
"MESSAGE": "https://my.vudu.com/castReceiver/index.html?launch-source=app-icon",
}
],
},
{
"name": "XUMO",
"country": ["usa"],
"id": ["27"],
"config": [
{
"NAME_SPACE": 0,
"APP_ID": "36E1EA1F",
"MESSAGE": '{"CAST_NAMESPACE":"urn:x-cast:com.google.cast.media","CAST_MESSAGE":{"type":"LOAD","media":{},"autoplay":true,"currentTime":0,"customData":{}}}',
}
],
},
{
"name": "YouTubeTV",
"country": ["usa", "mexico"],
"id": ["45"],
"config": [{"NAME_SPACE": 5, "APP_ID": "3", "MESSAGE": "None"}],
},
{
"name": "YouTube",
"country": ["*"],
"id": ["44"],
"config": [{"NAME_SPACE": 5, "APP_ID": "1", "MESSAGE": "None"}],
},
{
"name": "Baeble",
"country": ["usa"],
"id": ["39"],
"config": [{"NAME_SPACE": 2, "APP_ID": "11", "MESSAGE": "None"}],
},
{
"name": "DAZN",
"country": ["usa", "can"],
"id": ["57"],
"config": [{"NAME_SPACE": 2, "APP_ID": "34", "MESSAGE": "None"}],
},
{
"name": "FitFusion by Jillian Michaels",
"country": ["usa", "can"],
"id": ["54"],
"config": [{"NAME_SPACE": 2, "APP_ID": "39", "MESSAGE": "None"}],
},
{
"name": "Newsy",
"country": ["usa", "can"],
"id": ["38"],
"config": [{"NAME_SPACE": 2, "APP_ID": "15", "MESSAGE": "None"}],
},
{
"name": "Cocoro TV",
"country": ["usa", "can"],
"id": ["63"],
"config": [{"NAME_SPACE": 2, "APP_ID": "55", "MESSAGE": "None"}],
},
{
"name": "ConTV",
"country": ["usa", "can"],
"id": ["41"],
"config": [{"NAME_SPACE": 2, "APP_ID": "18", "MESSAGE": "None"}],
},
{
"name": "Dove Channel",
"country": ["usa", "can"],
"id": ["42"],
"config": [{"NAME_SPACE": 2, "APP_ID": "16", "MESSAGE": "None"}],
},
{
"name": "Love Destination",
"country": ["*"],
"id": ["64"],
"config": [{"NAME_SPACE": 2, "APP_ID": "57", "MESSAGE": "None"}],
},
{
"name": "WatchFree",
"country": ["usa"],
"id": ["48"],
"config": [{"NAME_SPACE": 2, "APP_ID": "22", "MESSAGE": "None"}],
},
{
"name": "AsianCrush",
"country": ["usa", "can"],
"id": ["50"],
"config": [
{
"NAME_SPACE": 2,
"APP_ID": "27",
"MESSAGE": "https://html5.asiancrush.com/?ua=viziosmartcast",
}
],
},
{
"name": "Disney+",
"country": ["usa"],
"id": ["51"],
"config": [
{
"NAME_SPACE": 4,
"APP_ID": "75",
"MESSAGE": "https://cd-dmgz.bamgrid.com/bbd/vizio_tv/index.html",
}
],
},
]
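# A hedged helper sketch (added for illustration; find_app_config is a
# hypothetical name, not part of the original module).  It relies only on
# the APPS structure defined above.
def find_app_config(name):
    """Return the first APPS entry whose display name matches, else None."""
    for app in APPS:
        if app["name"].lower() == name.lower():
            return app
    return None

# Example: find_app_config("Netflix")["config"][0]["APP_ID"] == "1"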
[record stats: avg_line_length 28.10 | max_line_length 190 | alphanum_fraction 0.416 | 8,402 bytes | 298 lines | remaining qsc_* quality-signal columns ~0]
[record: etl/steps/data/garden/owid/latest/covid.py | hexsha 8d00ab8273f452e2946deb1ce6f8cb6b06b174a7 | 1,858 bytes | py/Python | license MIT]
[max_stars: c1x1x00xxPentium/etl @ 4c9c4e466287deefba1aaae12c473c38d9ecb3cd, 5 stars, 2021-11-01T18:54:52.000Z - 2022-03-10T17:19:14.000Z]
[max_issues: c1x1x00xxPentium/etl @ 4c9c4e466287deefba1aaae12c473c38d9ecb3cd, 98 issues, 2021-09-24T19:29:34.000Z - 2022-03-31T15:57:18.000Z]
[max_forks: c1x1x00xxPentium/etl @ 4c9c4e466287deefba1aaae12c473c38d9ecb3cd, 2 forks, 2021-12-15T07:53:38.000Z - 2022-02-05T14:50:43.000Z]
#
# covid19.py
# owid/latest/covid
#
from owid.catalog.meta import License, Source
import datetime as dt
import pandas as pd
from owid.catalog import Dataset, Table
from etl.helpers import downloaded
MEGAFILE_URL = "https://raw.githubusercontent.com/owid/covid-19-data/master/public/data/owid-covid-data.csv"
def run(dest_dir: str) -> None:
    d = create_dataset(dest_dir)

    with downloaded(MEGAFILE_URL) as filename:
        df = pd.read_csv(filename)

    df["date"] = pd.to_datetime(df.date)
    for col in ["iso_code", "continent", "location"]:
        df[col] = df[col].astype("category")
    df.set_index(["iso_code", "date"], inplace=True)

    t = Table(df)
    t.metadata.short_name = "covid"
    d.add(t)
def create_dataset(dest_dir: str) -> Dataset:
    d = Dataset.create_empty(dest_dir)
    d.metadata.short_name = "covid19"
    d.metadata.namespace = "owid"
    d.metadata.sources = [
        Source(
            name="Multiple sources via Our World In Data",
            description="Our complete COVID-19 dataset maintained by Our World in Data. We will update it daily throughout the duration of the COVID-19 pandemic.",
            url="https://github.com/owid/covid-19-data/tree/master/public/data",
            source_data_url="https://raw.githubusercontent.com/owid/covid-19-data/master/public/data/covid-19-data.csv",
            owid_data_url="https://raw.githubusercontent.com/owid/covid-19-data/master/public/data/covid-19-data.csv",
            date_accessed=str(dt.date.today()),
            publication_date=str(dt.date.today()),
            publication_year=dt.date.today().year,
        )
    ]
    d.metadata.licenses = [
        License(
            name="Other (Attribution)",
            url="https://github.com/owid/covid-19-data/tree/master/public/data#license",
        )
    ]
    d.save()
    return d
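# Minimal usage sketch (illustrative only; the destination path below is
# hypothetical).  Invoking the step downloads the megafile and writes the
# dataset into dest_dir:
#
#   run("data/garden/owid/latest/covid")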
[record stats: avg_line_length 30.97 | max_line_length 163 | alphanum_fraction 0.656 | 1,858 bytes | 59 lines | remaining qsc_* quality-signal columns ~0; record truncated]